repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
ChinaMassClouds/copenstack-server | openstack/src/nova-2014.2/nova/availability_zones.py | 23 | 7091 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Availability zone helper functions."""
import collections
from oslo.config import cfg
from nova import objects
from nova.openstack.common import memorycache
# NOTE(vish): azs don't change that often, so cache them for an hour to
# avoid hitting the db multiple times on every request.
AZ_CACHE_SECONDS = 60 * 60
# Lazily-initialized memorycache client shared by this module; created on
# first use by _get_cache() and dropped by reset_cache().
MC = None

availability_zone_opts = [
    cfg.StrOpt('internal_service_availability_zone',
               default='internal',
               help='The availability_zone to show internal services under'),
    cfg.StrOpt('default_availability_zone',
               default='nova',
               help='Default compute node availability_zone'),
]

CONF = cfg.CONF
CONF.register_opts(availability_zone_opts)
def _get_cache():
    """Return the shared memorycache client, creating it on first use."""
    global MC

    if MC is not None:
        return MC
    MC = memorycache.get_client()
    return MC
def reset_cache():
    """Drop the cached memorycache client.

    Used mainly by the tests, and whenever a host aggregate's
    availability_zone changes and stale entries must be discarded.
    """
    global MC

    MC = None
def _make_cache_key(host):
return "azcache-%s" % host.encode('utf-8')
def _build_metadata_by_host(aggregates, hosts=None):
if hosts and not isinstance(hosts, set):
hosts = set(hosts)
metadata = collections.defaultdict(set)
for aggregate in aggregates:
for host in aggregate.hosts:
if hosts and host not in hosts:
continue
metadata[host].add(aggregate.metadata.values()[0])
return metadata
def _build_metadata_by_key(aggregates):
metadata = collections.defaultdict(set)
for aggregate in aggregates:
for key, value in aggregate.metadata.iteritems():
metadata[key].add(value)
return metadata
def set_availability_zones(context, services):
    """Annotate each service dict with its availability zone.

    Compute services get the comma-joined AZs of their aggregates (or the
    configured default); everything else gets the internal-service AZ.
    Also refreshes the per-host AZ cache for every compute service seen.
    """
    # Make sure services aren't sqlalchemy objects.
    services = [dict(service.iteritems()) for service in services]
    hosts = set(service['host'] for service in services)
    aggregates = objects.AggregateList.get_by_metadata_key(
        context, 'availability_zone', hosts=hosts)
    metadata = _build_metadata_by_host(aggregates, hosts=hosts)
    # Gather all of the availability zones associated with each service host.
    for service in services:
        az = CONF.internal_service_availability_zone
        if service['topic'] == "compute":
            host_azs = metadata.get(service['host'])
            if host_azs:
                az = u','.join(list(host_azs))
            else:
                az = CONF.default_availability_zone
            # Keep the cache in sync with what we are about to report.
            update_host_availability_zone_cache(context, service['host'], az)
        service['availability_zone'] = az
    return services
def get_host_availability_zone(context, host, conductor_api=None):
    """Resolve the availability zone for *host*.

    Uses the conductor API when one is supplied, otherwise queries the
    aggregate objects directly; falls back to
    CONF.default_availability_zone when no AZ metadata is found.
    """
    if conductor_api:
        metadata = conductor_api.aggregate_metadata_get_by_host(
            context, host, key='availability_zone')
    else:
        aggregates = objects.AggregateList.get_by_host(context, host,
                                                       key='availability_zone')
        metadata = _build_metadata_by_key(aggregates)
    if 'availability_zone' not in metadata:
        return CONF.default_availability_zone
    return list(metadata['availability_zone'])[0]
def update_host_availability_zone_cache(context, host, availability_zone=None):
    """Refresh the cached AZ for *host*, resolving it first if not supplied."""
    if not availability_zone:
        availability_zone = get_host_availability_zone(context, host)
    cache = _get_cache()
    key = _make_cache_key(host)
    # Delete-then-set so a stale entry never survives this refresh.
    cache.delete(key)
    cache.set(key, availability_zone, AZ_CACHE_SECONDS)
def get_availability_zones(context, get_only_available=False,
                           with_hosts=False):
    """Return available and unavailable zones on demand.

    :param get_only_available: flag to determine whether to return
        available zones only, default False indicates return both
        available zones and not available zones, True indicates return
        available zones only
    :param with_hosts: whether to return hosts part of the AZs
    :type with_hosts: bool

    NOTE: the shape of the returned lists depends on ``with_hosts``:
    plain zone-name lists when False, lists of ``(zone, set_of_hosts)``
    pairs when True.  When ``get_only_available`` is False a 2-tuple of
    (available, not_available) is returned instead of a single list.
    """
    enabled_services = objects.ServiceList.get_all(context, disabled=False)
    enabled_services = set_availability_zones(context, enabled_services)

    available_zones = []
    for (zone, host) in [(service['availability_zone'], service['host'])
                         for service in enabled_services]:
        if not with_hosts and zone not in available_zones:
            available_zones.append(zone)
        elif with_hosts:
            # Round-trip through a dict each iteration so available_zones
            # stays a list of (zone, hosts) items between iterations.
            _available_zones = dict(available_zones)
            zone_hosts = _available_zones.setdefault(zone, set())
            zone_hosts.add(host)
            # .items() returns a view in Py3, casting it to list for Py2 compat
            available_zones = list(_available_zones.items())

    if not get_only_available:
        disabled_services = objects.ServiceList.get_all(context, disabled=True)
        disabled_services = set_availability_zones(context, disabled_services)
        not_available_zones = []
        # Zones already seen as available must not be reported as down,
        # even if some of their services are disabled.
        azs = available_zones if not with_hosts else dict(available_zones)
        zones = [(service['availability_zone'], service['host'])
                 for service in disabled_services
                 if service['availability_zone'] not in azs]
        for (zone, host) in zones:
            if not with_hosts and zone not in not_available_zones:
                not_available_zones.append(zone)
            elif with_hosts:
                _not_available_zones = dict(not_available_zones)
                zone_hosts = _not_available_zones.setdefault(zone, set())
                zone_hosts.add(host)
                # .items() returns a view in Py3, casting it to list for Py2
                # compat
                not_available_zones = list(_not_available_zones.items())
        return (available_zones, not_available_zones)
    else:
        return available_zones
def get_instance_availability_zone(context, instance):
    """Return availability zone of specified instance.

    :param instance: mapping-like instance; only ``host`` is read here.
    :returns: the AZ name, or None when the instance has no host yet.
    """
    # BUG FIX: the original did str(instance.get('host')) first, which
    # turns a missing host (None) into the truthy string 'None', so the
    # guard below could never fire and 'None' got cached as a host name.
    host = instance.get('host')
    if not host:
        # No host assigned yet (e.g. instance still being scheduled).
        return None
    host = str(host)

    cache_key = _make_cache_key(host)
    cache = _get_cache()
    az = cache.get(cache_key)
    if not az:
        elevated = context.elevated()
        az = get_host_availability_zone(elevated, host)
        cache.set(cache_key, az, AZ_CACHE_SECONDS)
    return az
| gpl-2.0 |
quentinhardy/odat | ExternalTable.py | 1 | 7140 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from DirectoryManagement import DirectoryManagement
import logging, random, string
from Utils import checkOptionsGivenByTheUser
from Constants import *
class ExternalTable (DirectoryManagement):
    '''
    Allow the user to read file thanks to external tables
    '''

    def __init__(self, args):
        '''
        Constructor
        '''
        logging.debug("ExternalTable object created")
        DirectoryManagement.__init__(self, args)
        # Random name of the temporary external table created on the target DB.
        self.tableName = self.__generateRandomString__()
        self.__setDirectoryName__()
        # Oracle error prefixes used by testAll() to recognize "expected"
        # failures (missing privilege / unreadable file) vs hard errors.
        self.ERROR_EXTERNAL_TABLE_WITH_WRITE = "ORA-30653: "
        self.ERROR_EXTERNAL_TABLE_READ = "ORA-29400: "
        self.ERROR_ODCIEXTTABLEOPEN = "ORA-29913: "

    def __createTableForReadFile__(self, remoteNameFile):
        '''
        Create an external table mapped onto remoteNameFile so that each
        line of the remote file can be selected as a row.

        Returns True on success, or the exception raised by the query.
        '''
        logging.info('Create the table: {0}'.format(self.tableName))
        query = "CREATE TABLE {0} (line varchar2(256)) ORGANIZATION EXTERNAL (TYPE oracle_loader DEFAULT DIRECTORY {1} ACCESS PARAMETERS ( RECORDS DELIMITED BY NEWLINE BADFILE 'bad_data.bad' NOLOGFILE FIELDS TERMINATED BY ',' MISSING FIELD VALUES ARE NULL REJECT ROWS WITH ALL NULL FIELDS (line)) LOCATION ('{2}')) PARALLEL REJECT LIMIT 0 NOMONITORING".format(self.tableName, self.directoryName, remoteNameFile)
        response = self.__execThisQuery__(query=query, isquery=False)
        if isinstance(response, Exception):
            logging.info('Error with the SQL request {0}: {1}'.format(query, str(response)))
            return response
        else:
            return True

    def __createTableForExec__(self, remoteNameFile):
        '''
        Create an external table whose PREPROCESSOR clause runs
        remoteNameFile, so selecting from the table executes the command.

        Returns True on success, or the exception raised by the query.
        '''
        logging.info('Create the table: {0}'.format(self.tableName))
        query = """CREATE TABLE {0} ( line NUMBER , text VARCHAR2(4000)) ORGANIZATION EXTERNAL ( TYPE ORACLE_LOADER DEFAULT DIRECTORY {1} ACCESS PARAMETERS ( RECORDS DELIMITED BY NEWLINE NOLOGFILE PREPROCESSOR {1}: '{2}' FIELDS TERMINATED BY WHITESPACE ( line RECNUM , text POSITION(1:4000)) ) LOCATION ('{2}') ) REJECT LIMIT UNLIMITED""".format(self.tableName, self.directoryName, remoteNameFile)
        response = self.__execThisQuery__(query=query, isquery=False)
        if isinstance(response, Exception):
            logging.info('Error with the SQL request {0}: {1}'.format(query, str(response)))
            return response
        else:
            return True

    def __dropTable__(self):
        '''
        Drop the working table, for example:
        DROP TABLE my_table PURGE;
        '''
        logging.info('Drop the table: {0}'.format(self.tableName))
        query = "DROP TABLE {0} PURGE".format(self.tableName)
        response = self.__execThisQuery__(query=query, isquery=False)
        if isinstance(response, Exception):
            logging.info('Error with the SQL request {0}: {1}'.format(query, str(response)))
            return response
        else:
            return True

    def getFile(self, remotePath, remoteNameFile, localFile):
        '''
        Read the remoteNameFile file (stored in remotePath) through a
        temporary external table and return its content as a string
        (or the exception on failure).

        NOTE(review): despite the name, *localFile* is never written here;
        the data is returned to the caller instead -- confirm against callers.
        '''
        data = ""
        logging.info("Copy the {0} remote file (stored in {1}) to {2}".format(remoteNameFile, remotePath, localFile))
        status = self.__createOrRemplaceDirectory__(remotePath)
        if isinstance(status, Exception):
            return status
        status = self.__createTableForReadFile__(remoteNameFile)
        if isinstance(status, Exception):
            return status
        request = "select line from {0}".format(self.tableName)
        response = self.__execThisQuery__(query=request, ld=['line'])
        if isinstance(response, Exception):
            logging.info('Error with the SQL request {0}: {1}'.format(request, response))
            # Best-effort cleanup of the directory and table before returning.
            status = self.__dropDirectory__()
            status = self.__dropTable__()
            return response
        else:
            for l in response:
                data += l['line'] + '\n'
            status = self.__dropDirectory__()
            status = self.__dropTable__()
            return data

    def execute(self, remotePath, remoteNameFile):
        '''
        Execute the remoteNameFile command (stored in remotePath) through
        an external table PREPROCESSOR clause.
        '''
        logging.info("Execute the {0} command stored stored in {1}".format(remoteNameFile, remotePath))
        status = self.__createOrRemplaceDirectory__(remotePath)
        if isinstance(status, Exception):
            return status
        status = self.__createTableForExec__(remoteNameFile)
        if isinstance(status, Exception):
            return status
        request = "select line from {0}".format(self.tableName)
        response = self.__execThisQuery__(query=request, ld=['line'])
        if isinstance(response, Exception):
            logging.info('Error with the SQL request {0}: {1}'.format(request, response))
            status = self.__dropDirectory__()
            status = self.__dropTable__()
            return response
        else:
            logging.info("{0} command executed without errors".format(remoteNameFile))
            status = self.__dropDirectory__()
            status = self.__dropTable__()
            return response

    def testAll(self):
        '''
        Test all functions
        '''
        folder = self.__generateRandomString__()
        self.args['print'].subtitle("External table to read files ?")
        logging.info("Simulate the file reading in the {0} folder thanks to an external table".format(folder))
        status = self.getFile(remotePath=folder, remoteNameFile='data.txt', localFile="test.txt")
        # A privilege/read error still proves the technique is usable.
        if (status == True or self.ERROR_EXTERNAL_TABLE_WITH_WRITE in str(status) or self.ERROR_EXTERNAL_TABLE_READ in str(status)):
            self.args['print'].goodNews("OK")
        else:
            self.args['print'].badNews("KO")
        self.args['print'].subtitle("External table to execute system commands ?")
        logging.info("Simulate the file execution thanks to an external table")
        status = self.execute(remotePath=folder, remoteNameFile='test')
        if (status == True or self.ERROR_EXTERNAL_TABLE_WITH_WRITE in str(status) or self.ERROR_EXTERNAL_TABLE_READ in str(status)):
            self.args['print'].goodNews("OK")
        else:
            self.args['print'].badNews("KO")
def runExternalTableModule(args):
    '''
    Run the External Table module.

    Dispatches on the options parsed into *args*: --test-module probes
    whether external tables are usable at all, --getFile reads a remote
    file, --exec runs a remote command.
    '''
    status = True
    # At least one of these options is required.
    if checkOptionsGivenByTheUser(args, ["test-module", "getFile", "exec"]) == False: return EXIT_MISS_ARGUMENT
    externalTable = ExternalTable(args)
    status = externalTable.connection(stopIfError=True)
    if args['test-module'] == True:
        args['print'].title("Test if the External Table module can be used")
        status = externalTable.testAll()
    # Option 1: getFile
    if args['getFile'] != None:
        args['print'].title("Read the {0} file stored in the {1} path".format(args['getFile'][1], args['getFile'][0]))
        data = externalTable.getFile(remotePath=args['getFile'][0], remoteNameFile=args['getFile'][1], localFile=args['getFile'][2])
        if isinstance(data, Exception):
            args['print'].badNews("There is an error: {0}".format(data))
        else:
            args['print'].goodNews("Data stored in the remote file {0} stored in {1}".format(args['getFile'][1], args['getFile'][0]))
            print(data)
    # Option 2: exec a script or command
    if args['exec'] != None:
        args['print'].title("Execute the {0} command stored in the {1} path".format(args['exec'][1], args['exec'][0]))
        data = externalTable.execute(remotePath=args['exec'][0], remoteNameFile=args['exec'][1])
        if isinstance(data, Exception):
            args['print'].badNews("There is an error: {0}".format(data))
        else:
            args['print'].goodNews("The {0} command stored in {1} has been executed (normally)".format(args['exec'][1], args['exec'][0]))
| lgpl-3.0 |
Azure/azure-sdk-for-python | sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2020_06_01/aio/operations/_deleted_web_apps_operations.py | 1 | 11568 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DeletedWebAppsOperations:
    """DeletedWebAppsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.web.v2020_06_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # NOTE: AutoRest-generated operations class; manual edits will be lost
    # when the client is regenerated.
    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.DeletedWebAppCollection"]:
        """Get all deleted apps for a subscription.

        Description for Get all deleted apps for a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DeletedWebAppCollection or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.DeletedWebAppCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DeletedWebAppCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Continuation links already embed the query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize('DeletedWebAppCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Web/deletedSites'}  # type: ignore

    def list_by_location(
        self,
        location: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.DeletedWebAppCollection"]:
        """Get all deleted apps for a subscription at location.

        Description for Get all deleted apps for a subscription at location.

        :param location:
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DeletedWebAppCollection or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.DeletedWebAppCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DeletedWebAppCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_location.metadata['url']  # type: ignore
                path_format_arguments = {
                    'location': self._serialize.url("location", location, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize('DeletedWebAppCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_location.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Web/locations/{location}/deletedSites'}  # type: ignore

    async def get_deleted_web_app_by_location(
        self,
        location: str,
        deleted_site_id: str,
        **kwargs: Any
    ) -> "_models.DeletedSite":
        """Get deleted app for a subscription at location.

        Description for Get deleted app for a subscription at location.

        :param location:
        :type location: str
        :param deleted_site_id: The numeric ID of the deleted app, e.g. 12345.
        :type deleted_site_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DeletedSite, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2020_06_01.models.DeletedSite
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DeletedSite"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        accept = "application/json"

        # Construct URL
        url = self.get_deleted_web_app_by_location.metadata['url']  # type: ignore
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'deletedSiteId': self._serialize.url("deleted_site_id", deleted_site_id, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('DeletedSite', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_deleted_web_app_by_location.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Web/locations/{location}/deletedSites/{deletedSiteId}'}  # type: ignore
| mit |
werehuman/cocaine-tools | cocaine/tools/actions/crashlog.py | 1 | 7060 | #
# Copyright (c) 2013+ Anton Tyurin <noxiouz@yandex.ru>
# Copyright (c) 2013+ Evgeny Safronov <division494@gmail.com>
# Copyright (c) 2011-2014 Other contributors as noted in the AUTHORS file.
#
# This file is part of Cocaine-tools.
#
# Cocaine is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Cocaine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import datetime
import itertools
import time
from tornado import gen
from cocaine.tools import actions, log
from cocaine.decorators import coroutine
from cocaine.tools.actions import app
__author__ = 'Evgeny Safronov <division494@gmail.com>'
def parse_crashlog_day_format(day_string):
    """Translate a user-supplied day spec into a crashlog index name.

    Accepts '' (returned unchanged), any prefix of 'today' or 'yesterday',
    and the numeric forms 'DD', 'DD-MM' and 'DD-MM-YYYY' (missing parts are
    taken from the current date).  Raises ValueError for anything else.
    """
    index_format = 'cocaine-%Y-%m-%d'
    if not day_string:
        return day_string
    if 'today'.startswith(day_string):
        return datetime.date.today().strftime(index_format)
    if 'yesterday'.startswith(day_string):
        one_day_ago = datetime.date.today() - datetime.timedelta(days=1)
        return one_day_ago.strftime(index_format)

    dash_count = day_string.count("-")
    if dash_count == 0:
        # Only the day given: fill in the current month and year.
        now = datetime.date.today()
        parsed = datetime.datetime.strptime(day_string, "%d")
        return parsed.replace(year=now.year, month=now.month).strftime(index_format)
    if dash_count == 1:
        # Day and month given: fill in the current year.
        parsed = datetime.datetime.strptime(day_string, "%d-%m")
        return parsed.replace(year=datetime.date.today().year).strftime(index_format)
    if dash_count == 2:
        # Full date given.
        return datetime.datetime.strptime(day_string, "%d-%m-%Y").strftime(index_format)
    raise ValueError("Invalid day format %s. Must be day-month-year|today|yesterday" % day_string)
class List(actions.Storage):
    """Action: list crashlog keys of an application, optionally restricted
    to one day index."""

    def __init__(self, storage, name, day_string=''):
        super(List, self).__init__(storage)
        self.name = name
        if not self.name:
            raise ValueError('Please specify a crashlog name')
        # Normalized 'cocaine-YYYY-MM-DD' index name, or '' for all days.
        self.day = parse_crashlog_day_format(day_string)

    @coroutine
    def execute(self):
        indexes = [self.name]
        if self.day:
            indexes.append(self.day)
        channel = yield self.storage.find('crashlogs', indexes)
        listing = yield channel.rx.get()
        raise gen.Return(listing)
def _parseCrashlogs(crashlogs, timestamp=None):
def is_filter(arg):
return arg == timestamp if timestamp else True
_list = (log.split(':', 1) for log in crashlogs)
return [(ts, time.ctime(float(ts) / 1000000), name) for ts, name in _list if is_filter(ts)]
class Specific(actions.Storage):
    """Base class for actions targeting one application's crashlogs,
    optionally narrowed to a single timestamp."""

    def __init__(self, storage, name, timestamp=None):
        super(Specific, self).__init__(storage)
        self.name = name
        # When set, subclasses operate only on entries with this timestamp.
        self.timestamp = timestamp
        if not self.name:
            raise ValueError('Please specify application name')
class View(Specific):
    """Action: fetch and concatenate the content of matching crashlogs."""

    @coroutine
    def execute(self):
        channel = yield self.storage.find('crashlogs', [self.name])
        crashlogs = yield channel.rx.get()
        parsed_crashlogs = _parseCrashlogs(crashlogs, timestamp=self.timestamp)
        contents = []
        for crashlog in parsed_crashlogs:
            # Storage keys have the form '<timestamp>:<uuid>'.
            key = '%s:%s' % (crashlog[0], crashlog[2])
            channel = yield self.storage.read('crashlogs', key)
            content = yield channel.rx.get()
            contents.append(content)
        raise gen.Return(''.join(contents))
class Remove(Specific):
    """Action: delete matching crashlogs of one application."""

    @coroutine
    def execute(self):
        channel = yield self.storage.find('crashlogs', [self.name])
        crashlogs = yield channel.rx.get()
        parsed_crashlogs = _parseCrashlogs(crashlogs, timestamp=self.timestamp)
        for crashlog in parsed_crashlogs:
            try:
                key = '%s:%s' % (crashlog[0], crashlog[2])
                channel = yield self.storage.remove('crashlogs', key)
                yield channel.rx.get()
            except Exception as err:
                # Best effort: log the failure and keep removing the rest.
                log.error("unable to delete crashlog %s: %s", str(crashlog), err)
        raise gen.Return('Done')
class RemoveAll(Remove):
    """Action: remove every crashlog of an application (no timestamp filter)."""

    def __init__(self, storage, name):
        super(RemoveAll, self).__init__(storage, name, timestamp=None)
class Status(actions.Storage):
    """Action: report, for each crashed application, its most recent
    crashlog and the total number of crashlogs."""

    @coroutine
    def execute(self):
        applications = yield app.List(self.storage).execute()
        crashed = []
        for application in applications:
            crashlogs = yield List(self.storage, application).execute()
            if crashlogs:
                # NOTE: tuple-unpacking lambda parameters are Python 2-only
                # syntax; this module targets Python 2.
                last = max(_parseCrashlogs(crashlogs), key=lambda (timestamp, time, uuid): timestamp)
                crashed.append((application, last, len(crashlogs)))
        raise gen.Return(crashed)
def splitted(collection, sep=None, maxsplit=None):
    """Lazily yield ``item.split(sep, maxsplit)`` for every item."""
    return (item.split(sep, maxsplit) for item in collection)
def filtered(crashlogs):
    """Yield ``(int(timestamp), uuid)`` pairs from 'ts:uuid' keys."""
    for entry in crashlogs:
        # Split on the first ':' only -- the uuid part may contain colons.
        ts, uuid = entry.split(':', 1)
        yield int(ts), uuid
class Clean(Specific):
    """Action: prune crashlogs -- either all entries older than a timestamp,
    or everything beyond the newest *size* entries."""

    def __init__(self, storage, name, size, timestamp=None):
        super(Clean, self).__init__(storage, name, timestamp)
        self.size = int(size)

    @coroutine
    def execute(self):
        # Empty name means "clean every application".
        if not self.name:
            apps = yield app.List(self.storage).execute()
        else:
            apps = [self.name]
        result = []
        if self.timestamp:
            try:
                # Accept an ISO-8601 date-time ...
                dt = datetime.datetime.strptime(self.timestamp, '%Y-%m-%dT%H:%M:%S')
                timestamp = int(time.mktime(dt.timetuple())) * 1000000 + dt.microsecond
            except ValueError:
                # ... or a raw integer timestamp in microseconds.
                timestamp = int(self.timestamp)
            for app_name in apps:
                channel = yield self.storage.find('crashlogs', [app_name])
                crashlogs = yield channel.rx.get()
                # NOTE(review): 'result' is overwritten on each iteration, so
                # only the last application's entries are removed below --
                # looks like a latent bug; confirm intended behaviour.
                result = filter(lambda (ts, uuid): ts < timestamp, filtered(crashlogs))
        elif self.size > 0:
            for app_name in apps:
                channel = yield self.storage.find('crashlogs', [app_name])
                crashlogs = yield channel.rx.get()
                # Keep the newest self.size entries, remove the tail.
                # NOTE(review): this branch uses crashlogs[0] while the
                # timestamp branch uses crashlogs directly -- verify which
                # shape the storage response actually has.
                result = itertools.islice(
                    sorted(filtered(crashlogs[0]), key=lambda (ts, uuid): ts, reverse=True), self.size, None)
        for crashlog in result:
            print('removing', '%d:%s' % crashlog)
            channel = yield self.storage.remove('crashlogs', '%d:%s' % crashlog)
            yield channel.rx.get()
        raise gen.Return('Done')
| lgpl-3.0 |
veridiam/Madcow-Waaltz | madcow/tasks/tweets.py | 7 | 1771 | """Prints tweets to the channel."""
import time
import twitter
from madcow.conf import settings
from madcow.util import strip_html, Task
class Main(Task):
    """Periodic task: poll the authenticated account's friends timeline and
    announce tweets that are newer than the last one seen."""

    def init(self):
        self.frequency = settings.TWITTER_UPDATE_FREQ
        self.output = settings.TWITTER_CHANNELS
        self.api = twitter.Api(username=settings.TWITTER_CONSUMER_KEY,
                               password=settings.TWITTER_CONSUMER_SECRET,
                               access_token_key=settings.TWITTER_TOKEN_KEY,
                               access_token_secret=settings.TWITTER_TOKEN_SECRET)
        self.api.SetCache(None)  # this fills up /tmp :(
        # Highest tweet id seen so far; None until the first poll.
        self.last_id = None

    @staticmethod
    def get_max_id(tweets):
        # id of the newest tweet in the batch
        return max(tweets, key=lambda tweet: tweet.id).id

    def response(self, *args):
        self.log.debug('checking twitter for new tweets')
        status = self.api.GetRateLimitStatus()
        self.log.debug('rate limit status: %r', status)
        # Back off well before the API rate limit is exhausted.
        if status['remaining_hits'] < 10:
            raise ValueError('Hitting the Twitter limit, backing off!')
        # first run, just throw away all
        if self.last_id is None:
            self.last_id = self.get_max_id(self.api.GetFriendsTimeline())
            self.log.info('set last twitter id to %d', self.last_id)
        else:
            tweets = self.api.GetFriendsTimeline(since_id=self.last_id)
            if tweets:
                lines = []
                # Oldest first, so the channel reads chronologically.
                for tweet in reversed(tweets):
                    if tweet.id > self.last_id:
                        lines.append(u">> tweet from %s: %s <<" % (tweet.user.screen_name, strip_html(tweet.text)))
                self.last_id = self.get_max_id(tweets)
                if lines:
                    return u'\n'.join(lines)
| gpl-3.0 |
kemalakyol48/python-for-android | python3-alpha/python3-src/setup.py | 43 | 84771 | # Autodetecting setup.py script for building the Python extensions
#
__version__ = "$Revision$"
import sys, os, imp, re, optparse
from glob import glob
import sysconfig
from distutils import log
from distutils import text_file
from distutils.errors import *
from distutils.core import Extension, setup
from distutils.command.build_ext import build_ext
from distutils.command.install import install
from distutils.command.install_lib import install_lib
from distutils.command.build_scripts import build_scripts
from distutils.spawn import find_executable
# True when building Python for a target platform other than the build host.
cross_compile = (os.environ.get('CROSS_COMPILE_TARGET') == 'yes')
# Root of bundled third-party sources; defaults to the current directory.
third_party_dir = os.getenv('THIRD_PARTY_DIR')
if third_party_dir is None:
    third_party_dir = '.'

# Were we compiled --with-pydebug or with #define Py_DEBUG?
COMPILED_WITH_PYDEBUG = hasattr(sys, 'gettotalrefcount')

# This global variable is used to hold the list of modules to be disabled.
disabled_module_list = ['ossaudiodev', 'nis']

# File which contains the directory for shared mods (for sys.path fixup
# when running from the build dir, see Modules/getpath.c)
_BUILDDIR_COOKIE = "pybuilddir.txt"
def add_dir_to_list(dirlist, dir):
    """Add the directory 'dir' to the list 'dirlist' (after any relative
    directories) if:

    1) 'dir' is not already in 'dirlist'
    2) 'dir' actually exists, and is a directory.
    """
    if dir is None or dir in dirlist or not os.path.isdir(dir):
        return
    # Insert right after the first relative entry so relative paths keep
    # their priority; with no relative entries, prepend.
    for pos, entry in enumerate(dirlist):
        if not os.path.isabs(entry):
            dirlist.insert(pos + 1, dir)
            return
    dirlist.insert(0, dir)
def macosx_sdk_root():
    """Return the directory of the current OSX SDK, or '/' when the
    configured build flags do not name one.

    The SDK location is taken from the -isysroot option embedded in the
    CFLAGS recorded by the configure step.
    """
    match = re.search(r'-isysroot\s+(\S+)',
                      sysconfig.get_config_var('CFLAGS'))
    return match.group(1) if match else '/'
def is_macosx_sdk_path(path):
    """Return True when 'path' can be located inside an OSX SDK.

    SDK-relative paths are those under /System/ or under /usr/ but not
    under /usr/local.
    """
    if path.startswith('/System/'):
        return True
    return path.startswith('/usr/') and not path.startswith('/usr/local')
def find_file(filename, std_dirs, paths):
    """Search for 'filename' and report whether extra directives are needed.

    'std_dirs' is the list of standard system directories; 'paths' is a
    list of additional locations to check.

    Returns [] when the file lives in one of 'std_dirs' (no additional
    -I/-L directives needed), a one-element list [dir] when it is found
    under one of the additional 'paths', and None when the file cannot
    be found anywhere.
    """
    sysroot = None
    if sys.platform == 'darwin':
        # Honor a configured MacOSX SDK: it mirrors the real system
        # layout but contains only headers and libraries.
        sysroot = macosx_sdk_root()

    def _present_in(directory):
        candidate = os.path.join(directory, filename)
        if sysroot is not None and is_macosx_sdk_path(directory):
            candidate = os.path.join(sysroot, directory[1:], filename)
        return os.path.exists(candidate)

    # A hit in a standard directory needs no extra directives.
    for directory in std_dirs:
        if _present_in(directory):
            return []
    # Otherwise report which additional directory held the file.
    for directory in paths:
        if _present_in(directory):
            return [directory]
    return None
def find_library_file(compiler, libname, std_dirs, paths):
    """Locate lib<libname> and report which extra -L directory it needs.

    Returns None when the compiler cannot find the library, [] when it
    sits in one of 'std_dirs' (no extra directive needed), and a
    one-element list [dir] when it was found under one of 'paths'.
    """
    result = compiler.find_library_file(std_dirs + paths, libname)
    if result is None:
        return None

    sysroot = macosx_sdk_root() if sys.platform == 'darwin' else None
    dirname = os.path.dirname(result)

    def _matches(candidate):
        # On OSX, also compare against the SDK-prefixed variant.
        if sysroot is not None and is_macosx_sdk_path(candidate):
            if os.path.join(sysroot, candidate[1:]) == dirname:
                return True
        return candidate == dirname

    # Found in a standard directory: no additional directive needed.
    for candidate in std_dirs:
        if _matches(candidate.rstrip(os.sep)):
            return []
    # Otherwise figure out which additional directory supplied it.
    for candidate in paths:
        candidate = candidate.rstrip(os.sep)
        if _matches(candidate):
            return [candidate]
    assert False, "Internal error: Path not found in std_dirs or paths"
def module_enabled(extlist, modname):
    """Return True if the module 'modname' is present in the list
    of extensions 'extlist'.

    Args:
        extlist: iterable of distutils Extension objects.
        modname: extension name to look for.
    """
    # any() short-circuits on the first match instead of materializing a
    # filtered list just to take its length, and returns a proper bool.
    return any(ext.name == modname for ext in extlist)
def find_module_file(module, dirlist):
    """Find a module source file in a set of possible folders.

    Returns the path to 'module' inside the first directory of 'dirlist'
    that contains it; if it is not found, return the unadorned filename.
    """
    # Renamed local from 'list' to avoid shadowing the builtin.
    found_dirs = find_file(module, [], dirlist)
    if not found_dirs:
        return module
    if len(found_dirs) > 1:
        log.info("WARNING: multiple copies of %s found"%module)
    return os.path.join(found_dirs[0], module)
class PyBuildExt(build_ext):
def __init__(self, dist):
    build_ext.__init__(self, dist)
    # Names of extensions whose build or import check failed; the list
    # is reported in the summary printed by build_extensions().
    self.failed = []
def build_extensions(self):
    """Autodetect buildable extension modules, build them, and print a
    three-column summary of modules that were missing or failed.
    """
    # Detect which modules should be compiled
    old_so = self.compiler.shared_lib_extension
    # Workaround PEP 3149 stuff
    self.compiler.shared_lib_extension = os.environ.get("SO", ".so")
    try:
        missing = self.detect_modules()
    finally:
        # Always restore the real suffix, even if detection raised.
        self.compiler.shared_lib_extension = old_so

    # Remove modules that are present on the disabled list
    extensions = [ext for ext in self.extensions
                  if ext.name not in disabled_module_list]
    # move ctypes to the end, it depends on other modules
    ext_map = dict((ext.name, i) for i, ext in enumerate(extensions))
    if "_ctypes" in ext_map:
        ctypes = extensions.pop(ext_map["_ctypes"])
        extensions.append(ctypes)
    self.extensions = extensions

    # Fix up the autodetected modules, prefixing all the source files
    # with Modules/.
    srcdir = sysconfig.get_config_var('srcdir')
    if not srcdir:
        # Maybe running on Windows but not using CYGWIN?
        raise ValueError("No source directory; cannot proceed.")
    srcdir = os.path.abspath(srcdir)
    moddirlist = [os.path.join(srcdir, 'Modules')]

    # Platform-dependent module source and include directories
    platform = self.get_platform()

    # Fix up the paths for scripts, too
    self.distribution.scripts = [os.path.join(srcdir, filename)
                                 for filename in self.distribution.scripts]

    # Python header files
    headers = [sysconfig.get_config_h_filename()]
    headers += glob(os.path.join(sysconfig.get_path('platinclude'), "*.h"))

    for ext in self.extensions[:]:
        ext.sources = [ find_module_file(filename, moddirlist)
                        for filename in ext.sources ]
        if ext.depends is not None:
            ext.depends = [find_module_file(filename, moddirlist)
                           for filename in ext.depends]
        else:
            ext.depends = []
        # re-compile extensions if a header file has been changed
        ext.depends.extend(headers)

        # If a module has already been built statically,
        # don't build it here
        if ext.name in sys.builtin_module_names:
            self.extensions.remove(ext)

    # Parse Modules/Setup and Modules/Setup.local to figure out which
    # modules are turned on in the file.
    remove_modules = []
    for filename in ('Modules/Setup', 'Modules/Setup.local'):
        input = text_file.TextFile(filename, join_lines=1)
        while 1:
            line = input.readline()
            if not line: break
            # The first word on a Setup line is the module name.
            line = line.split()
            remove_modules.append(line[0])
        input.close()

    for ext in self.extensions[:]:
        if ext.name in remove_modules:
            self.extensions.remove(ext)

    # When you run "make CC=altcc" or something similar, you really want
    # those environment variables passed into the setup.py phase. Here's
    # a small set of useful ones.
    compiler = os.environ.get('CC')
    args = {}
    # unfortunately, distutils doesn't let us provide separate C and C++
    # compilers
    if compiler is not None:
        (ccshared,cflags) = sysconfig.get_config_vars('CCSHARED','CFLAGS')
        args['compiler_so'] = compiler + ' ' + ccshared + ' ' + cflags
    self.compiler.set_executables(**args)

    # Not only do we write the builddir cookie, but we manually install
    # the shared modules directory if it isn't already in sys.path.
    # Otherwise trying to import the extensions after building them
    # will fail.
    with open(_BUILDDIR_COOKIE, "wb") as f:
        f.write(self.build_lib.encode('utf-8', 'surrogateescape'))
    abs_build_lib = os.path.join(os.getcwd(), self.build_lib)
    if abs_build_lib not in sys.path:
        sys.path.append(abs_build_lib)

    build_ext.build_extensions(self)

    # Column width for the report: widest extension (or failed) name.
    longest = max([len(e.name) for e in self.extensions])
    if self.failed:
        longest = max(longest, max([len(name) for name in self.failed]))

    def print_three_column(lst):
        # Print sorted names in three aligned columns.
        lst.sort(key=str.lower)
        # guarantee zip() doesn't drop anything
        while len(lst) % 3:
            lst.append("")
        for e, f, g in zip(lst[::3], lst[1::3], lst[2::3]):
            print("%-*s %-*s %-*s" % (longest, e, longest, f,
                                      longest, g))

    if missing:
        print()
        print("Python build finished, but the necessary bits to build "
              "these modules were not found:")
        print_three_column(missing)
        print("To find the necessary bits, look in setup.py in"
              " detect_modules() for the module's name.")
        print()

    if self.failed:
        failed = self.failed[:]
        print()
        print("Failed to build these modules:")
        print_three_column(failed)
        print()
def build_extension(self, ext):
    """Build one extension, then verify it can actually be imported.

    Build and import failures are recorded in self.failed rather than
    aborting the run; an extension that builds but fails to import is
    renamed with a "_failed" suffix (unless cross-compiling).
    """
    if ext.name == '_ctypes':
        if not self.configure_ctypes(ext):
            return

    try:
        build_ext.build_extension(self, ext)
    except (CCompilerError, DistutilsError) as why:
        self.announce('WARNING: building of extension "%s" failed: %s' %
                      (ext.name, sys.exc_info()[1]))
        self.failed.append(ext.name)
        return
    # Workaround for Mac OS X: The Carbon-based modules cannot be
    # reliably imported into a command-line Python
    if 'Carbon' in ext.extra_link_args:
        self.announce(
            'WARNING: skipping import check for Carbon-based "%s"' %
            ext.name)
        return

    if self.get_platform() == 'darwin' and (
            sys.maxsize > 2**32 and '-arch' in ext.extra_link_args):
        # Don't bother doing an import check when an extension was
        # build with an explicit '-arch' flag on OSX. That's currently
        # only used to build 32-bit only extensions in a 4-way
        # universal build and loading 32-bit code into a 64-bit
        # process will fail.
        self.announce(
            'WARNING: skipping import check for "%s"' %
            ext.name)
        return

    # Workaround for Cygwin: Cygwin currently has fork issues when many
    # modules have been imported
    if self.get_platform() == 'cygwin':
        self.announce('WARNING: skipping import check for Cygwin-based "%s"'
            % ext.name)
        return
    # Cross-compiled extensions cannot be imported on the build host.
    if cross_compile:
        return
    ext_filename = os.path.join(
        self.build_lib,
        self.get_ext_filename(self.get_ext_fullname(ext.name)))

    # If the build directory didn't exist when setup.py was
    # started, sys.path_importer_cache has a negative result
    # cached. Clear that cache before trying to import.
    sys.path_importer_cache.clear()

    try:
        imp.load_dynamic(ext.name, ext_filename)
    except ImportError as why:
        # NOTE(review): cross_compile is always False here because of the
        # early return above, so this branch is always taken on ImportError.
        if not cross_compile:
            self.announce('*** WARNING: renaming "%s" since importing it'
                          ' failed: %s' % (ext.name, why), level=3)
            assert not self.inplace
            basename, tail = os.path.splitext(ext_filename)
            newname = basename + "_failed" + tail
            if os.path.exists(newname):
                os.remove(newname)
            os.rename(ext_filename, newname)

            # XXX -- This relies on a Vile HACK in
            # distutils.command.build_ext.build_extension(). The
            # _built_objects attribute is stored there strictly for
            # use here.
            # If there is a failure, _built_objects may not be there,
            # so catch the AttributeError and move on.
            try:
                for filename in self._built_objects:
                    os.remove(filename)
            except AttributeError:
                self.announce('unable to remove files (ignored)')
        else:
            self.announce('WARNING: "%s" failed importing, but we leave it '
                          'because we are cross-compiling' %
                          ext.name)
    except:
        # Any other import-time error: record the failure and keep going.
        exc_type, why, tb = sys.exc_info()
        self.announce('*** WARNING: importing extension "%s" '
                      'failed with %s: %s' % (ext.name, exc_type, why),
                      level=3)
        self.failed.append(ext.name)
def get_platform(self):
# Get value of sys.platform
for platform in ['cygwin', 'darwin', 'osf1']:
if sys.platform.startswith(platform):
return platform
return sys.platform
def add_multiarch_paths(self):
    """Add Debian/Ubuntu multiarch library and include directories to
    the compiler search paths.

    See https://wiki.ubuntu.com/MultiarchSpec.  This is a no-op when the
    dpkg-architecture tool is not available.
    """
    if not find_executable('dpkg-architecture'):
        return
    if not os.path.exists(self.build_temp):
        os.makedirs(self.build_temp)
    tmpfile = os.path.join(self.build_temp, 'multiarch')
    # Ask dpkg for the multiarch triplet (e.g. x86_64-linux-gnu).
    ret = os.system(
        'dpkg-architecture -qDEB_HOST_MULTIARCH > %s 2> /dev/null' %
        tmpfile)
    try:
        if ret >> 8 != 0:
            return
        with open(tmpfile) as fp:
            triplet = fp.readline().strip()
        add_dir_to_list(self.compiler.library_dirs,
                        '/usr/lib/' + triplet)
        add_dir_to_list(self.compiler.include_dirs,
                        '/usr/include/' + triplet)
    finally:
        os.unlink(tmpfile)
def detect_modules(self):
# Ensure that /usr/local is always used, but the local build
# directories (i.e. '.' and 'Include') must be first. See issue
# 10520.
if not cross_compile:
add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib')
add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
self.add_multiarch_paths()
else:
add_dir_to_list(self.compiler.library_dirs,os.path.join(third_party_dir,'lib'))
add_dir_to_list(self.compiler.include_dirs,os.path.join(third_party_dir,'include'))
add_dir_to_list(self.compiler.include_dirs,os.path.join(third_party_dir,'include','ncurses'))
# Add paths specified in the environment variables LDFLAGS and
# CPPFLAGS for header and library files.
# We must get the values from the Makefile and not the environment
# directly since an inconsistently reproducible issue comes up where
# the environment variable is not set even though the value were passed
# into configure and stored in the Makefile (issue found on OS X 10.3).
for env_var, arg_name, dir_list in (
('LDFLAGS', '-R', self.compiler.runtime_library_dirs),
('LDFLAGS', '-L', self.compiler.library_dirs),
('CPPFLAGS', '-I', self.compiler.include_dirs)):
env_val = sysconfig.get_config_var(env_var)
if env_val:
# To prevent optparse from raising an exception about any
# options in env_val that it doesn't know about we strip out
# all double dashes and any dashes followed by a character
# that is not for the option we are dealing with.
#
# Please note that order of the regex is important! We must
# strip out double-dashes first so that we don't end up with
# substituting "--Long" to "-Long" and thus lead to "ong" being
# used for a library directory.
env_val = re.sub(r'(^|\s+)-(-|(?!%s))' % arg_name[1],
' ', env_val)
parser = optparse.OptionParser()
# Make sure that allowing args interspersed with options is
# allowed
parser.allow_interspersed_args = True
parser.error = lambda msg: None
parser.add_option(arg_name, dest="dirs", action="append")
options = parser.parse_args(env_val.split())[0]
if options.dirs:
for directory in reversed(options.dirs):
add_dir_to_list(dir_list, directory)
if os.path.normpath(sys.prefix) != '/usr' \
and not sysconfig.get_config_var('PYTHONFRAMEWORK'):
# OSX note: Don't add LIBDIR and INCLUDEDIR to building a framework
# (PYTHONFRAMEWORK is set) to avoid # linking problems when
# building a framework with different architectures than
# the one that is currently installed (issue #7473)
add_dir_to_list(self.compiler.library_dirs,
sysconfig.get_config_var("LIBDIR"))
add_dir_to_list(self.compiler.include_dirs,
sysconfig.get_config_var("INCLUDEDIR"))
# lib_dirs and inc_dirs are used to search for files;
# if a file is found in one of those directories, it can
# be assumed that no additional -I,-L directives are needed.
if cross_compile:
lib_dirs = [os.path.join(third_party_dir,'lib')] + self.compiler.library_dirs
else:
lib_dirs = self.compiler.library_dirs + [
'/lib64', '/usr/lib64',
'/lib', '/usr/lib',
]
if cross_compile:
third_include=os.path.join(third_party_dir,'include')
inc_dirs= [third_include,os.path.join(third_include,'ncurses')] + self.compiler.include_dirs
else:
inc_dirs = self.compiler.include_dirs + ['/usr/include']
exts = []
missing = []
config_h = sysconfig.get_config_h_filename()
with open(config_h) as file:
config_h_vars = sysconfig.parse_config_h(file)
platform = self.get_platform()
srcdir = sysconfig.get_config_var('srcdir')
# OSF/1 and Unixware have some stuff in /usr/ccs/lib (like -ldb)
if platform in ['osf1', 'unixware7', 'openunix8']:
lib_dirs += ['/usr/ccs/lib']
if platform == 'darwin':
# This should work on any unixy platform ;-)
# If the user has bothered specifying additional -I and -L flags
# in OPT and LDFLAGS we might as well use them here.
#
# NOTE: using shlex.split would technically be more correct, but
# also gives a bootstrap problem. Let's hope nobody uses
# directories with whitespace in the name to store libraries.
cflags, ldflags = sysconfig.get_config_vars(
'CFLAGS', 'LDFLAGS')
for item in cflags.split():
if item.startswith('-I'):
inc_dirs.append(item[2:])
for item in ldflags.split():
if item.startswith('-L'):
lib_dirs.append(item[2:])
# Check for MacOS X, which doesn't need libm.a at all
math_libs = ['m']
if platform == 'darwin':
math_libs = []
# XXX Omitted modules: gl, pure, dl, SGI-specific modules
#
# The following modules are all pretty straightforward, and compile
# on pretty much any POSIXish platform.
#
# array objects
exts.append( Extension('array', ['arraymodule.c']) )
# complex math library functions
exts.append( Extension('cmath', ['cmathmodule.c', '_math.c'],
depends=['_math.h'],
libraries=math_libs) )
# math library functions, e.g. sin()
exts.append( Extension('math', ['mathmodule.c', '_math.c'],
depends=['_math.h'],
libraries=math_libs) )
# time operations and variables
exts.append( Extension('time', ['timemodule.c', '_time.c'],
libraries=math_libs) )
exts.append( Extension('_datetime', ['_datetimemodule.c', '_time.c'],
libraries=math_libs) )
# random number generator implemented in C
exts.append( Extension("_random", ["_randommodule.c"]) )
# bisect
exts.append( Extension("_bisect", ["_bisectmodule.c"]) )
# heapq
exts.append( Extension("_heapq", ["_heapqmodule.c"]) )
# C-optimized pickle replacement
exts.append( Extension("_pickle", ["_pickle.c"]) )
# atexit
exts.append( Extension("atexit", ["atexitmodule.c"]) )
# _json speedups
exts.append( Extension("_json", ["_json.c"]) )
# Python C API test module
exts.append( Extension('_testcapi', ['_testcapimodule.c'],
depends=['testcapi_long.h']) )
# profiler (_lsprof is for cProfile.py)
exts.append( Extension('_lsprof', ['_lsprof.c', 'rotatingtree.c']) )
# static Unicode character database
exts.append( Extension('unicodedata', ['unicodedata.c']) )
# Modules with some UNIX dependencies -- on by default:
# (If you have a really backward UNIX, select and socket may not be
# supported...)
# fcntl(2) and ioctl(2)
libs = []
if (config_h_vars.get('FLOCK_NEEDS_LIBBSD', False)):
# May be necessary on AIX for flock function
libs = ['bsd']
exts.append( Extension('fcntl', ['fcntlmodule.c'], libraries=libs) )
# pwd(3)
exts.append( Extension('pwd', ['pwdmodule.c']) )
# grp(3)
exts.append( Extension('grp', ['grpmodule.c']) )
# spwd, shadow passwords
if (config_h_vars.get('HAVE_GETSPNAM', False) or
config_h_vars.get('HAVE_GETSPENT', False)):
exts.append( Extension('spwd', ['spwdmodule.c']) )
else:
missing.append('spwd')
# select(2); not on ancient System V
exts.append( Extension('select', ['selectmodule.c']) )
# Fred Drake's interface to the Python parser
exts.append( Extension('parser', ['parsermodule.c']) )
# Memory-mapped files (also works on Win32).
exts.append( Extension('mmap', ['mmapmodule.c']) )
# Lance Ellinghaus's syslog module
# syslog daemon interface
exts.append( Extension('syslog', ['syslogmodule.c']) )
#
# Here ends the simple stuff. From here on, modules need certain
# libraries, are platform-specific, or present other surprises.
#
# Multimedia modules
# These don't work for 64-bit platforms!!!
# These represent audio samples or images as strings:
# Operations on audio samples
# According to #993173, this one should actually work fine on
# 64-bit platforms.
exts.append( Extension('audioop', ['audioop.c']) )
# import pdb; pdb.set_trace()
# readline
do_readline = self.compiler.find_library_file(lib_dirs, 'readline')
readline_termcap_library = ""
curses_library = ""
# import pdb; pdb.set_trace()
# Determine if readline is already linked against curses or tinfo.
if do_readline and find_executable('ldd'):
# Cannot use os.popen here in py3k.
tmpfile = os.path.join(self.build_temp, 'readline_termcap_lib')
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
ret = os.system("ldd %s > %s" % (do_readline, tmpfile))
if ret >> 8 == 0:
with open(tmpfile) as fp:
for ln in fp:
if 'curses' in ln:
readline_termcap_library = re.sub(
r'.*lib(n?cursesw?)\.so.*', r'\1', ln
).rstrip()
break
# termcap interface split out from ncurses
if 'tinfo' in ln:
readline_termcap_library = 'tinfo'
break
os.unlink(tmpfile)
# Issue 7384: If readline is already linked against curses,
# use the same library for the readline and curses modules.
if 'curses' in readline_termcap_library:
curses_library = readline_termcap_library
elif self.compiler.find_library_file(lib_dirs, 'ncursesw'):
curses_library = 'ncursesw'
elif self.compiler.find_library_file(lib_dirs, 'ncurses'):
curses_library = 'ncurses'
elif self.compiler.find_library_file(lib_dirs, 'curses'):
curses_library = 'curses'
if platform == 'darwin':
os_release = int(os.uname()[2].split('.')[0])
dep_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
if dep_target and dep_target.split('.') < ['10', '5']:
os_release = 8
if os_release < 9:
# MacOSX 10.4 has a broken readline. Don't try to build
# the readline module unless the user has installed a fixed
# readline package
if find_file('readline/rlconf.h', inc_dirs, []) is None:
do_readline = False
if do_readline:
if platform == 'darwin' and os_release < 9:
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
# for dynamic libraries on the entire path.
# This way a staticly linked custom readline gets picked up
# before the (possibly broken) dynamic library in /usr/lib.
readline_extra_link_args = ('-Wl,-search_paths_first',)
else:
readline_extra_link_args = ()
readline_libs = ['readline']
if readline_termcap_library:
pass # Issue 7384: Already linked against curses or tinfo.
elif curses_library:
readline_libs.append(curses_library)
elif self.compiler.find_library_file(lib_dirs +
['/usr/lib/termcap'],
'termcap'):
readline_libs.append('termcap')
exts.append( Extension('readline', ['readline.c'],
library_dirs=['/usr/lib/termcap'],
extra_link_args=readline_extra_link_args,
libraries=readline_libs) )
else:
missing.append('readline')
# crypt module.
if self.compiler.find_library_file(lib_dirs, 'crypt'):
libs = ['crypt']
else:
libs = []
exts.append( Extension('crypt', ['cryptmodule.c'], libraries=libs) )
# CSV files
exts.append( Extension('_csv', ['_csv.c']) )
# POSIX subprocess module helper.
exts.append( Extension('_posixsubprocess', ['_posixsubprocess.c']) )
# socket(2)
exts.append( Extension('_socket', ['socketmodule.c'],
depends = ['socketmodule.h']) )
if cross_compile:
search_standard_path_incs = []
search_standard_path_libs = []
else:
search_standard_path_incs = inc_dirs
search_standard_path_libs = lib_dirs
# Detect SSL support for the socket module (via _ssl)
if cross_compile:
search_for_ssl_incs_in = [os.path.join(third_party_dir,'include')]
else:
search_for_ssl_incs_in = [
'/usr/local/ssl/include',
'/usr/contrib/ssl/include/'
]
ssl_incs = find_file('openssl/ssl.h', search_standard_path_incs,
search_for_ssl_incs_in
)
if ssl_incs is not None:
krb5_h = find_file('krb5.h', inc_dirs,
['/usr/kerberos/include'])
if krb5_h:
ssl_incs += krb5_h
if cross_compile:
search_for_ssl_libs_in = [os.path.join(third_party_dir,'lib')]
else:
search_for_ssl_libs_in = ['/usr/local/ssl/lib',
'/usr/contrib/ssl/lib/'
]
ssl_libs = find_library_file(self.compiler, 'ssl',search_standard_path_libs,search_for_ssl_libs_in )
print("openssl: ",ssl_incs,ssl_libs,inc_dirs)
if (ssl_incs is not None and
ssl_libs is not None):
exts.append( Extension('_ssl', ['_ssl.c'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto'],
depends = ['socketmodule.h']), )
else:
missing.append('_ssl')
# find out which version of OpenSSL we have
openssl_ver = 0
openssl_ver_re = re.compile(
'^\s*#\s*define\s+OPENSSL_VERSION_NUMBER\s+(0x[0-9a-fA-F]+)' )
# look for the openssl version header on the compiler search path.
opensslv_h = find_file('openssl/opensslv.h', [],
inc_dirs + search_for_ssl_incs_in)
if opensslv_h:
name = os.path.join(opensslv_h[0], 'openssl/opensslv.h')
if sys.platform == 'darwin' and is_macosx_sdk_path(name):
name = os.path.join(macosx_sdk_root(), name[1:])
try:
with open(name, 'r') as incfile:
for line in incfile:
m = openssl_ver_re.match(line)
if m:
openssl_ver = eval(m.group(1))
except IOError as msg:
print("IOError while reading opensshv.h:", msg)
pass
#print('openssl_ver = 0x%08x' % openssl_ver)
min_openssl_ver = 0x00907000
have_any_openssl = ssl_incs is not None and ssl_libs is not None
have_usable_openssl = (have_any_openssl and
openssl_ver >= min_openssl_ver)
if have_any_openssl:
if have_usable_openssl:
# The _hashlib module wraps optimized implementations
# of hash functions from the OpenSSL library.
exts.append( Extension('_hashlib', ['_hashopenssl.c'],
depends = ['hashlib.h'],
include_dirs = ssl_incs,
library_dirs = ssl_libs,
libraries = ['ssl', 'crypto']) )
else:
print("warning: openssl 0x%08x is too old for _hashlib" %
openssl_ver)
missing.append('_hashlib')
min_sha2_openssl_ver = 0x00908000
if COMPILED_WITH_PYDEBUG or openssl_ver < min_sha2_openssl_ver:
# OpenSSL doesn't do these until 0.9.8 so we'll bring our own hash
exts.append( Extension('_sha256', ['sha256module.c'],
depends=['hashlib.h']) )
exts.append( Extension('_sha512', ['sha512module.c'],
depends=['hashlib.h']) )
if COMPILED_WITH_PYDEBUG or not have_usable_openssl:
# no openssl at all, use our own md5 and sha1
exts.append( Extension('_md5', ['md5module.c'],
depends=['hashlib.h']) )
exts.append( Extension('_sha1', ['sha1module.c'],
depends=['hashlib.h']) )
# Modules that provide persistent dictionary-like semantics. You will
# probably want to arrange for at least one of them to be available on
# your machine, though none are defined by default because of library
# dependencies. The Python module dbm/__init__.py provides an
# implementation independent wrapper for these; dbm/dumb.py provides
# similar functionality (but slower of course) implemented in Python.
# Sleepycat^WOracle Berkeley DB interface.
# http://www.oracle.com/database/berkeley-db/db/index.html
#
# This requires the Sleepycat^WOracle DB code. The supported versions
# are set below. Visit the URL above to download
# a release. Most open source OSes come with one or more
# versions of BerkeleyDB already installed.
max_db_ver = (5, 1)
min_db_ver = (3, 3)
db_setup_debug = False # verbose debug prints from this script?
def allow_db_ver(db_ver):
"""Returns a boolean if the given BerkeleyDB version is acceptable.
Args:
db_ver: A tuple of the version to verify.
"""
if not (min_db_ver <= db_ver <= max_db_ver):
return False
return True
def gen_db_minor_ver_nums(major):
if major == 4:
for x in range(max_db_ver[1]+1):
if allow_db_ver((4, x)):
yield x
elif major == 3:
for x in (3,):
if allow_db_ver((3, x)):
yield x
else:
raise ValueError("unknown major BerkeleyDB version", major)
# construct a list of paths to look for the header file in on
# top of the normal inc_dirs.
db_inc_paths = [
'/usr/include/db4',
'/usr/local/include/db4',
'/opt/sfw/include/db4',
'/usr/include/db3',
'/usr/local/include/db3',
'/opt/sfw/include/db3',
# Fink defaults (http://fink.sourceforge.net/)
'/sw/include/db4',
'/sw/include/db3',
]
# 4.x minor number specific paths
for x in gen_db_minor_ver_nums(4):
db_inc_paths.append('/usr/include/db4%d' % x)
db_inc_paths.append('/usr/include/db4.%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.4.%d/include' % x)
db_inc_paths.append('/usr/local/include/db4%d' % x)
db_inc_paths.append('/pkg/db-4.%d/include' % x)
db_inc_paths.append('/opt/db-4.%d/include' % x)
# MacPorts default (http://www.macports.org/)
db_inc_paths.append('/opt/local/include/db4%d' % x)
# 3.x minor number specific paths
for x in gen_db_minor_ver_nums(3):
db_inc_paths.append('/usr/include/db3%d' % x)
db_inc_paths.append('/usr/local/BerkeleyDB.3.%d/include' % x)
db_inc_paths.append('/usr/local/include/db3%d' % x)
db_inc_paths.append('/pkg/db-3.%d/include' % x)
db_inc_paths.append('/opt/db-3.%d/include' % x)
# Add some common subdirectories for Sleepycat DB to the list,
# based on the standard include directories. This way DB3/4 gets
# picked up when it is installed in a non-standard prefix and
# the user has added that prefix into inc_dirs.
std_variants = []
for dn in inc_dirs:
std_variants.append(os.path.join(dn, 'db3'))
std_variants.append(os.path.join(dn, 'db4'))
for x in gen_db_minor_ver_nums(4):
std_variants.append(os.path.join(dn, "db4%d"%x))
std_variants.append(os.path.join(dn, "db4.%d"%x))
for x in gen_db_minor_ver_nums(3):
std_variants.append(os.path.join(dn, "db3%d"%x))
std_variants.append(os.path.join(dn, "db3.%d"%x))
db_inc_paths = std_variants + db_inc_paths
db_inc_paths = [p for p in db_inc_paths if os.path.exists(p)]
db_ver_inc_map = {}
if sys.platform == 'darwin':
sysroot = macosx_sdk_root()
class db_found(Exception): pass
try:
# See whether there is a Sleepycat header in the standard
# search path.
for d in inc_dirs + db_inc_paths:
f = os.path.join(d, "db.h")
if sys.platform == 'darwin' and is_macosx_sdk_path(d):
f = os.path.join(sysroot, d[1:], "db.h")
if db_setup_debug: print("db: looking for db.h in", f)
if os.path.exists(f):
with open(f, 'rb') as file:
f = file.read()
m = re.search(br"#define\WDB_VERSION_MAJOR\W(\d+)", f)
if m:
db_major = int(m.group(1))
m = re.search(br"#define\WDB_VERSION_MINOR\W(\d+)", f)
db_minor = int(m.group(1))
db_ver = (db_major, db_minor)
# Avoid 4.6 prior to 4.6.21 due to a BerkeleyDB bug
if db_ver == (4, 6):
m = re.search(br"#define\WDB_VERSION_PATCH\W(\d+)", f)
db_patch = int(m.group(1))
if db_patch < 21:
print("db.h:", db_ver, "patch", db_patch,
"being ignored (4.6.x must be >= 4.6.21)")
continue
if ( (db_ver not in db_ver_inc_map) and
allow_db_ver(db_ver) ):
# save the include directory with the db.h version
# (first occurrence only)
db_ver_inc_map[db_ver] = d
if db_setup_debug:
print("db.h: found", db_ver, "in", d)
else:
# we already found a header for this library version
if db_setup_debug: print("db.h: ignoring", d)
else:
# ignore this header, it didn't contain a version number
if db_setup_debug:
print("db.h: no version number version in", d)
db_found_vers = list(db_ver_inc_map.keys())
db_found_vers.sort()
while db_found_vers:
db_ver = db_found_vers.pop()
db_incdir = db_ver_inc_map[db_ver]
# check lib directories parallel to the location of the header
db_dirs_to_check = [
db_incdir.replace("include", 'lib64'),
db_incdir.replace("include", 'lib'),
]
if sys.platform != 'darwin':
db_dirs_to_check = list(filter(os.path.isdir, db_dirs_to_check))
else:
# Same as other branch, but takes OSX SDK into account
tmp = []
for dn in db_dirs_to_check:
if is_macosx_sdk_path(dn):
if os.path.isdir(os.path.join(sysroot, dn[1:])):
tmp.append(dn)
else:
if os.path.isdir(dn):
tmp.append(dn)
db_dirs_to_check = tmp
db_dirs_to_check = tmp
# Look for a version specific db-X.Y before an ambiguous dbX
# XXX should we -ever- look for a dbX name? Do any
# systems really not name their library by version and
# symlink to more general names?
for dblib in (('db-%d.%d' % db_ver),
('db%d%d' % db_ver),
('db%d' % db_ver[0])):
dblib_file = self.compiler.find_library_file(
db_dirs_to_check + lib_dirs, dblib )
if dblib_file:
dblib_dir = [ os.path.abspath(os.path.dirname(dblib_file)) ]
raise db_found
else:
if db_setup_debug: print("db lib: ", dblib, "not found")
except db_found:
if db_setup_debug:
print("bsddb using BerkeleyDB lib:", db_ver, dblib)
print("bsddb lib dir:", dblib_dir, " inc dir:", db_incdir)
db_incs = [db_incdir]
dblibs = [dblib]
else:
if db_setup_debug: print("db: no appropriate library found")
db_incs = None
dblibs = []
dblib_dir = None
# The sqlite interface
sqlite_setup_debug = False # verbose debug prints from this script?
# We hunt for #define SQLITE_VERSION "n.n.n"
# We need to find >= sqlite version 3.0.8
sqlite_incdir = sqlite_libdir = None
if cross_compile:
sqlite_inc_paths=[os.path.join(third_party_dir,'include')]
else:
sqlite_inc_paths = [ '/usr/include',
'/usr/include/sqlite',
'/usr/include/sqlite3',
'/usr/local/include',
'/usr/local/include/sqlite',
'/usr/local/include/sqlite3',
]
MIN_SQLITE_VERSION_NUMBER = (3, 0, 8)
MIN_SQLITE_VERSION = ".".join([str(x)
for x in MIN_SQLITE_VERSION_NUMBER])
# Scan the default include directories before the SQLite specific
# ones. This allows one to override the copy of sqlite on OSX,
# where /usr/include contains an old version of sqlite.
if sys.platform == 'darwin':
sysroot = macosx_sdk_root()
for d in inc_dirs + sqlite_inc_paths:
f = os.path.join(d, "sqlite3.h")
if sys.platform == 'darwin' and is_macosx_sdk_path(d):
f = os.path.join(sysroot, d[1:], "sqlite3.h")
if os.path.exists(f):
if sqlite_setup_debug: print("sqlite: found %s"%f)
with open(f) as file:
incf = file.read()
m = re.search(
r'\s*.*#\s*.*define\s.*SQLITE_VERSION\W*"(.*)"', incf)
if m:
sqlite_version = m.group(1)
sqlite_version_tuple = tuple([int(x)
for x in sqlite_version.split(".")])
if sqlite_version_tuple >= MIN_SQLITE_VERSION_NUMBER:
# we win!
if sqlite_setup_debug:
print("%s/sqlite3.h: version %s"%(d, sqlite_version))
sqlite_incdir = d
break
else:
if sqlite_setup_debug:
print("%s: version %d is too old, need >= %s"%(d,
sqlite_version, MIN_SQLITE_VERSION))
elif sqlite_setup_debug:
print("sqlite: %s had no SQLITE_VERSION"%(f,))
if sqlite_incdir:
print("sqlite: incdir=",sqlite_incdir)
if cross_compile:
sqlite_dirs_to_check = [os.path.join(third_party_dir,'lib')]
else:
sqlite_dirs_to_check = [
os.path.join(sqlite_incdir, '..', 'lib64'),
os.path.join(sqlite_incdir, '..', 'lib'),
os.path.join(sqlite_incdir, '..', '..', 'lib64'),
os.path.join(sqlite_incdir, '..', '..', 'lib'),
]
sqlite_libfile = self.compiler.find_library_file(
sqlite_dirs_to_check + lib_dirs, 'sqlite3')
print("sqlite_libfile=",sqlite_libfile,lib_dirs)
if sqlite_libfile:
sqlite_libdir = [os.path.abspath(os.path.dirname(sqlite_libfile))]
print("Sqlite: ",sqlite_incdir,sqlite_libdir,third_party_dir)
if sqlite_incdir and sqlite_libdir:
sqlite_srcs = ['_sqlite/cache.c',
'_sqlite/connection.c',
'_sqlite/cursor.c',
'_sqlite/microprotocols.c',
'_sqlite/module.c',
'_sqlite/prepare_protocol.c',
'_sqlite/row.c',
'_sqlite/statement.c',
'_sqlite/util.c', ]
sqlite_defines = []
if sys.platform != "win32":
sqlite_defines.append(('MODULE_NAME', '"sqlite3"'))
else:
sqlite_defines.append(('MODULE_NAME', '\\"sqlite3\\"'))
# Enable support for loadable extensions in the sqlite3 module
# if --enable-loadable-sqlite-extensions configure option is used.
if '--enable-loadable-sqlite-extensions' not in sysconfig.get_config_var("CONFIG_ARGS"):
sqlite_defines.append(("SQLITE_OMIT_LOAD_EXTENSION", "1"))
if sys.platform == 'darwin':
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
# for dynamic libraries on the entire path.
# This way a statically linked custom sqlite gets picked up
# before the dynamic library in /usr/lib.
sqlite_extra_link_args = ('-Wl,-search_paths_first',)
else:
sqlite_extra_link_args = ()
exts.append(Extension('_sqlite3', sqlite_srcs,
define_macros=sqlite_defines,
include_dirs=["Modules/_sqlite",
sqlite_incdir],
library_dirs=sqlite_libdir,
runtime_library_dirs=sqlite_libdir,
extra_link_args=sqlite_extra_link_args,
libraries=["sqlite3",]))
else:
missing.append('_sqlite3')
dbm_order = ['gdbm']
# The standard Unix dbm module:
if platform not in ['cygwin']:
config_args = [arg.strip("'")
for arg in sysconfig.get_config_var("CONFIG_ARGS").split()]
dbm_args = [arg for arg in config_args
if arg.startswith('--with-dbmliborder=')]
if dbm_args:
dbm_order = [arg.split('=')[-1] for arg in dbm_args][-1].split(":")
else:
dbm_order = "ndbm:gdbm:bdb".split(":")
dbmext = None
for cand in dbm_order:
if cand == "ndbm":
if find_file("ndbm.h", inc_dirs, []) is not None:
# Some systems have -lndbm, others don't
if self.compiler.find_library_file(lib_dirs,
'ndbm'):
ndbm_libs = ['ndbm']
else:
ndbm_libs = []
print("building dbm using ndbm")
dbmext = Extension('_dbm', ['_dbmmodule.c'],
define_macros=[
('HAVE_NDBM_H',None),
],
libraries=ndbm_libs)
break
elif cand == "gdbm":
if self.compiler.find_library_file(lib_dirs, 'gdbm'):
gdbm_libs = ['gdbm']
if self.compiler.find_library_file(lib_dirs,
'gdbm_compat'):
gdbm_libs.append('gdbm_compat')
if find_file("gdbm/ndbm.h", inc_dirs, []) is not None:
print("building dbm using gdbm")
dbmext = Extension(
'_dbm', ['_dbmmodule.c'],
define_macros=[
('HAVE_GDBM_NDBM_H', None),
],
libraries = gdbm_libs)
break
if find_file("gdbm-ndbm.h", inc_dirs, []) is not None:
print("building dbm using gdbm")
dbmext = Extension(
'_dbm', ['_dbmmodule.c'],
define_macros=[
('HAVE_GDBM_DASH_NDBM_H', None),
],
libraries = gdbm_libs)
break
elif cand == "bdb":
if db_incs is not None:
print("building dbm using bdb")
dbmext = Extension('_dbm', ['_dbmmodule.c'],
library_dirs=dblib_dir,
runtime_library_dirs=dblib_dir,
include_dirs=db_incs,
define_macros=[
('HAVE_BERKDB_H', None),
('DB_DBM_HSEARCH', None),
],
libraries=dblibs)
break
if dbmext is not None:
exts.append(dbmext)
else:
missing.append('_dbm')
# Anthony Baxter's gdbm module. GNU dbm(3) will require -lgdbm:
if ('gdbm' in dbm_order and
self.compiler.find_library_file(lib_dirs, 'gdbm')):
exts.append( Extension('_gdbm', ['_gdbmmodule.c'],
libraries = ['gdbm'] ) )
else:
missing.append('_gdbm')
# Unix-only modules
if platform != 'win32':
# Steen Lumholt's termios module
exts.append( Extension('termios', ['termios.c']) )
# Jeremy Hylton's rlimit interface
exts.append( Extension('resource', ['resource.c']) )
# Sun yellow pages. Some systems have the functions in libc.
if (platform not in ['cygwin', 'qnx6'] and
find_file('rpcsvc/yp_prot.h', inc_dirs, []) is not None):
if (self.compiler.find_library_file(lib_dirs, 'nsl')):
libs = ['nsl']
else:
libs = []
exts.append( Extension('nis', ['nismodule.c'],
libraries = libs) )
else:
missing.append('nis')
else:
missing.extend(['nis', 'resource', 'termios'])
# Curses support, requiring the System V version of curses, often
# provided by the ncurses library.
panel_library = 'panel'
if curses_library.startswith('ncurses'):
if curses_library == 'ncursesw':
# Bug 1464056: If _curses.so links with ncursesw,
# _curses_panel.so must link with panelw.
panel_library = 'panelw'
curses_libs = [curses_library]
exts.append( Extension('_curses', ['_cursesmodule.c'],
libraries = curses_libs) )
elif curses_library == 'curses' and platform != 'darwin':
# OSX has an old Berkeley curses, not good enough for
# the _curses module.
if (self.compiler.find_library_file(lib_dirs, 'terminfo')):
curses_libs = ['curses', 'terminfo']
elif (self.compiler.find_library_file(lib_dirs, 'termcap')):
curses_libs = ['curses', 'termcap']
else:
curses_libs = ['curses']
exts.append( Extension('_curses', ['_cursesmodule.c'],
libraries = curses_libs) )
else:
missing.append('_curses')
# If the curses module is enabled, check for the panel module
if (module_enabled(exts, '_curses') and
self.compiler.find_library_file(lib_dirs, panel_library)):
exts.append( Extension('_curses_panel', ['_curses_panel.c'],
libraries = [panel_library] + curses_libs) )
else:
missing.append('_curses_panel')
# Andrew Kuchling's zlib module. Note that some versions of zlib
# 1.1.3 have security problems. See CERT Advisory CA-2002-07:
# http://www.cert.org/advisories/CA-2002-07.html
#
# zlib 1.1.4 is fixed, but at least one vendor (RedHat) has decided to
# patch its zlib 1.1.3 package instead of upgrading to 1.1.4. For
# now, we still accept 1.1.3, because we think it's difficult to
# exploit this in Python, and we'd rather make it RedHat's problem
# than our problem <wink>.
#
# You can upgrade zlib to version 1.1.4 yourself by going to
# http://www.gzip.org/zlib/
print("zlib ",inc_dirs,lib_dirs)
zlib_inc = find_file('zlib.h', [], inc_dirs)
print("zlib_inc: ",zlib_inc)
have_zlib = False
if zlib_inc is not None:
zlib_h = zlib_inc[0] + '/zlib.h'
version = '"0.0.0"'
version_req = '"1.1.3"'
with open(zlib_h) as fp:
while 1:
line = fp.readline()
if not line:
break
if line.startswith('#define ZLIB_VERSION'):
version = line.split()[2]
break
if version >= version_req:
if (self.compiler.find_library_file(lib_dirs, 'z')):
if sys.platform == "darwin":
zlib_extra_link_args = ('-Wl,-search_paths_first',)
else:
zlib_extra_link_args = ()
exts.append( Extension('zlib', ['zlibmodule.c'],
libraries = ['z'],
extra_link_args = zlib_extra_link_args))
have_zlib = True
else:
missing.append('zlib')
else:
missing.append('zlib')
else:
missing.append('zlib')
# Helper module for various ascii-encoders. Uses zlib for an optimized
# crc32 if we have it. Otherwise binascii uses its own.
if have_zlib:
extra_compile_args = ['-DUSE_ZLIB_CRC32']
libraries = ['z']
extra_link_args = zlib_extra_link_args
else:
extra_compile_args = []
libraries = []
extra_link_args = []
exts.append( Extension('binascii', ['binascii.c'],
extra_compile_args = extra_compile_args,
libraries = libraries,
extra_link_args = extra_link_args) )
# Gustavo Niemeyer's bz2 module.
if (self.compiler.find_library_file(lib_dirs, 'bz2')):
if sys.platform == "darwin":
bz2_extra_link_args = ('-Wl,-search_paths_first',)
else:
bz2_extra_link_args = ()
exts.append( Extension('bz2', ['bz2module.c'],
libraries = ['bz2'],
extra_link_args = bz2_extra_link_args) )
else:
missing.append('bz2')
# Interface to the Expat XML parser
#
# Expat was written by James Clark and is now maintained by a group of
# developers on SourceForge; see www.libexpat.org for more information.
# The pyexpat module was written by Paul Prescod after a prototype by
# Jack Jansen. The Expat source is included in Modules/expat/. Usage
# of a system shared libexpat.so is possible with --with-system-expat
# configure option.
#
# More information on Expat can be found at www.libexpat.org.
#
if '--with-system-expat' in sysconfig.get_config_var("CONFIG_ARGS"):
expat_inc = []
define_macros = []
expat_lib = ['expat']
expat_sources = []
else:
expat_inc = [os.path.join(os.getcwd(), srcdir, 'Modules', 'expat')]
define_macros = [
('HAVE_EXPAT_CONFIG_H', '1'),
]
expat_lib = []
expat_sources = ['expat/xmlparse.c',
'expat/xmlrole.c',
'expat/xmltok.c']
exts.append(Extension('pyexpat',
define_macros = define_macros,
include_dirs = expat_inc,
libraries = expat_lib,
sources = ['pyexpat.c'] + expat_sources
))
# Fredrik Lundh's cElementTree module. Note that this also
# uses expat (via the CAPI hook in pyexpat).
if os.path.isfile(os.path.join(srcdir, 'Modules', '_elementtree.c')):
define_macros.append(('USE_PYEXPAT_CAPI', None))
exts.append(Extension('_elementtree',
define_macros = define_macros,
include_dirs = expat_inc,
libraries = expat_lib,
sources = ['_elementtree.c'],
))
else:
missing.append('_elementtree')
# Hye-Shik Chang's CJKCodecs modules.
exts.append(Extension('_multibytecodec',
['cjkcodecs/multibytecodec.c']))
for loc in ('kr', 'jp', 'cn', 'tw', 'hk', 'iso2022'):
exts.append(Extension('_codecs_%s' % loc,
['cjkcodecs/_codecs_%s.c' % loc]))
# Thomas Heller's _ctypes module
self.detect_ctypes(inc_dirs, lib_dirs)
# Richard Oudkerk's multiprocessing module
if platform == 'win32': # Windows
macros = dict()
libraries = ['ws2_32']
elif platform == 'darwin': # Mac OSX
macros = dict()
libraries = []
elif platform == 'cygwin': # Cygwin
macros = dict()
libraries = []
elif platform in ('freebsd4', 'freebsd5', 'freebsd6', 'freebsd7', 'freebsd8'):
# FreeBSD's P1003.1b semaphore support is very experimental
# and has many known problems. (as of June 2008)
macros = dict()
libraries = []
elif platform.startswith('openbsd'):
macros = dict()
libraries = []
elif platform.startswith('netbsd'):
macros = dict()
libraries = []
else: # Linux and other unices
macros = dict()
libraries = ['rt']
if platform == 'win32':
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
'_multiprocessing/semaphore.c',
'_multiprocessing/pipe_connection.c',
'_multiprocessing/socket_connection.c',
'_multiprocessing/win32_functions.c'
]
else:
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
'_multiprocessing/socket_connection.c'
]
if (sysconfig.get_config_var('HAVE_SEM_OPEN') and not
sysconfig.get_config_var('POSIX_SEMAPHORES_NOT_ENABLED')):
multiprocessing_srcs.append('_multiprocessing/semaphore.c')
if sysconfig.get_config_var('WITH_THREAD'):
exts.append ( Extension('_multiprocessing', multiprocessing_srcs,
define_macros=list(macros.items()),
include_dirs=["Modules/_multiprocessing"]))
else:
missing.append('_multiprocessing')
# End multiprocessing
# Platform-specific libraries
if (platform in ('linux2', 'freebsd4', 'freebsd5', 'freebsd6',
'freebsd7', 'freebsd8')
or platform.startswith("gnukfreebsd")):
exts.append( Extension('ossaudiodev', ['ossaudiodev.c']) )
else:
missing.append('ossaudiodev')
if sys.platform == 'darwin':
exts.append(
Extension('_gestalt', ['_gestalt.c'],
extra_link_args=['-framework', 'Carbon'])
)
exts.append(
Extension('_scproxy', ['_scproxy.c'],
extra_link_args=[
'-framework', 'SystemConfiguration',
'-framework', 'CoreFoundation',
]))
self.extensions.extend(exts)
# Call the method for detecting whether _tkinter can be compiled
self.detect_tkinter(inc_dirs, lib_dirs)
if '_tkinter' not in [e.name for e in self.extensions]:
missing.append('_tkinter')
return missing
def detect_tkinter_darwin(self, inc_dirs, lib_dirs):
    """Try to build _tkinter against the Mac OS X Tcl/Tk frameworks.

    Returns 1 (and appends the configured Extension to self.extensions)
    when both Tcl.framework and Tk.framework are found, 0 otherwise so
    the caller can fall back to the normal UNIX Tcl/Tk search.
    """
    # The _tkinter module, using frameworks. Since frameworks are quite
    # different the UNIX search logic is not sharable.
    from os.path import join, exists
    framework_dirs = [
        '/Library/Frameworks',
        '/System/Library/Frameworks/',
        # NOTE(review): join() discards the first component when the
        # second is absolute, so this entry evaluates to just
        # '/Library/Frameworks' again and HOME is ignored -- confirm
        # whether '~/Library/Frameworks' was intended.
        join(os.getenv('HOME'), '/Library/Frameworks')
    ]
    # Paths inside an SDK must be re-rooted under the SDK sysroot.
    sysroot = macosx_sdk_root()
    # Find the directory that contains the Tcl.framework and Tk.framework
    # bundles.
    # XXX distutils should support -F!
    for F in framework_dirs:
        # both Tcl.framework and Tk.framework should be present
        for fw in 'Tcl', 'Tk':
            if is_macosx_sdk_path(F):
                if not exists(join(sysroot, F[1:], fw + '.framework')):
                    break
            else:
                if not exists(join(F, fw + '.framework')):
                    break
        else:
            # ok, F is now a directory with both frameworks. Continue
            # building.
            break
    else:
        # Tk and Tcl frameworks not found. Normal "unix" tkinter search
        # will now resume.
        return 0
    # For 8.4a2, we must add -I options that point inside the Tcl and Tk
    # frameworks. In later release we should hopefully be able to pass
    # the -F option to gcc, which specifies a framework lookup path.
    #
    include_dirs = [
        join(F, fw + '.framework', H)
        for fw in ('Tcl', 'Tk')
        for H in ('Headers', 'Versions/Current/PrivateHeaders')
    ]
    # For 8.4a2, the X11 headers are not included. Rather than include a
    # complicated search, this is a hard-coded path. It could bail out
    # if X11 libs are not found...
    include_dirs.append('/usr/X11R6/include')
    frameworks = ['-framework', 'Tcl', '-framework', 'Tk']
    # All existing framework builds of Tcl/Tk don't support 64-bit
    # architectures: restrict the build to the architectures that the
    # installed Tk binary itself was built for (intersected with the
    # -arch flags already present in CFLAGS).
    cflags = sysconfig.get_config_vars('CFLAGS')[0]
    archs = re.findall('-arch\s+(\w+)', cflags)
    tmpfile = os.path.join(self.build_temp, 'tk.arch')
    if not os.path.exists(self.build_temp):
        os.makedirs(self.build_temp)
    # Note: cannot use os.popen or subprocess here, that
    # requires extensions that are not available here.
    if is_macosx_sdk_path(F):
        os.system("file %s/Tk.framework/Tk | grep 'for architecture' > %s"%(os.path.join(sysroot, F[1:]), tmpfile))
    else:
        os.system("file %s/Tk.framework/Tk | grep 'for architecture' > %s"%(F, tmpfile))
    # Each matching 'file' output line ends with an architecture name;
    # keep only those that CFLAGS also targets.
    with open(tmpfile) as fp:
        detected_archs = []
        for ln in fp:
            a = ln.split()[-1]
            if a in archs:
                detected_archs.append(ln.split()[-1])
    os.unlink(tmpfile)
    for a in detected_archs:
        frameworks.append('-arch')
        frameworks.append(a)
    # NOTE(review): frameworks[2:] passes '-framework Tk' plus the -arch
    # flags to the compile step -- presumably only the -arch flags matter
    # there; confirm.
    ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
                    define_macros=[('WITH_APPINIT', 1)],
                    include_dirs = include_dirs,
                    libraries = [],
                    extra_compile_args = frameworks[2:],
                    extra_link_args = frameworks,
                    )
    self.extensions.append(ext)
    return 1
def detect_tkinter(self, inc_dirs, lib_dirs):
    """Configure and register the _tkinter extension.

    On Darwin an AquaTk framework build is tried first; otherwise the
    Tcl/Tk libraries and headers are located the UNIX way.  On failure
    the method returns without adding the extension (the caller then
    reports _tkinter as missing).
    """
    # The _tkinter module.
    # Rather than complicate the code below, detecting and building
    # AquaTk is a separate method. Only one Tkinter will be built on
    # Darwin - either AquaTk, if it is found, or X11 based Tk.
    platform = self.get_platform()
    if (platform == 'darwin' and
        self.detect_tkinter_darwin(inc_dirs, lib_dirs)):
        return
    # Assume we haven't found any of the libraries or include files
    # The versions with dots are used on Unix, and the versions without
    # dots on Windows, for detection by cygwin.
    tcllib = tklib = tcl_includes = tk_includes = None
    # Newest version first, so the best available Tcl/Tk pair wins.
    for version in ['8.6', '86', '8.5', '85', '8.4', '84', '8.3', '83',
                    '8.2', '82', '8.1', '81', '8.0', '80']:
        tklib = self.compiler.find_library_file(lib_dirs,
                                                'tk' + version)
        tcllib = self.compiler.find_library_file(lib_dirs,
                                                 'tcl' + version)
        if tklib and tcllib:
            # Exit the loop when we've found the Tcl/Tk libraries
            break
    # Now check for the header files
    if tklib and tcllib:
        # Check for the include files on Debian and {Free,Open}BSD, where
        # they're put in /usr/include/{tcl,tk}X.Y
        dotversion = version
        if '.' not in dotversion and "bsd" in sys.platform.lower():
            # OpenBSD and FreeBSD use Tcl/Tk library names like libtcl83.a,
            # but the include subdirs are named like .../include/tcl8.3.
            dotversion = dotversion[:-1] + '.' + dotversion[-1]
        tcl_include_sub = []
        tk_include_sub = []
        for dir in inc_dirs:
            tcl_include_sub += [dir + os.sep + "tcl" + dotversion]
            tk_include_sub += [dir + os.sep + "tk" + dotversion]
        # tk.h may live next to tcl.h, so search the tcl subdirs as well.
        tk_include_sub += tcl_include_sub
        tcl_includes = find_file('tcl.h', inc_dirs, tcl_include_sub)
        tk_includes = find_file('tk.h', inc_dirs, tk_include_sub)
    if (tcllib is None or tklib is None or
        tcl_includes is None or tk_includes is None):
        self.announce("INFO: Can't locate Tcl/Tk libs and/or headers", 2)
        return
    # OK... everything seems to be present for Tcl/Tk.
    include_dirs = [] ; libs = [] ; defs = [] ; added_lib_dirs = []
    # Deduplicate while preserving search order.
    for dir in tcl_includes + tk_includes:
        if dir not in include_dirs:
            include_dirs.append(dir)
    # Check for various platform-specific directories
    if platform == 'sunos5':
        include_dirs.append('/usr/openwin/include')
        added_lib_dirs.append('/usr/openwin/lib')
    elif os.path.exists('/usr/X11R6/include'):
        include_dirs.append('/usr/X11R6/include')
        added_lib_dirs.append('/usr/X11R6/lib64')
        added_lib_dirs.append('/usr/X11R6/lib')
    elif os.path.exists('/usr/X11R5/include'):
        include_dirs.append('/usr/X11R5/include')
        added_lib_dirs.append('/usr/X11R5/lib')
    else:
        # Assume default location for X11
        include_dirs.append('/usr/X11/include')
        added_lib_dirs.append('/usr/X11/lib')
    # If Cygwin, then verify that X is installed before proceeding
    if platform == 'cygwin':
        x11_inc = find_file('X11/Xlib.h', [], include_dirs)
        if x11_inc is None:
            # No X11 headers: _tkinter cannot be built on Cygwin.
            return
    # Check for BLT extension
    if self.compiler.find_library_file(lib_dirs + added_lib_dirs,
                                       'BLT8.0'):
        defs.append( ('WITH_BLT', 1) )
        libs.append('BLT8.0')
    elif self.compiler.find_library_file(lib_dirs + added_lib_dirs,
                                         'BLT'):
        defs.append( ('WITH_BLT', 1) )
        libs.append('BLT')
    # Add the Tcl/Tk libraries
    libs.append('tk'+ version)
    libs.append('tcl'+ version)
    # AIX needs the loader library for dynamic linking support.
    if platform in ['aix3', 'aix4']:
        libs.append('ld')
    # Finally, link with the X11 libraries (not appropriate on cygwin)
    if platform != "cygwin":
        libs.append('X11')
    ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
                    define_macros=[('WITH_APPINIT', 1)] + defs,
                    include_dirs = include_dirs,
                    libraries = libs,
                    library_dirs = added_lib_dirs,
                    )
    self.extensions.append(ext)
def configure_ctypes_darwin(self, ext):
    """Attach the preconfigured Darwin libffi sources to *ext*.

    Darwin (OS X) ships ready-made libffi files under
    Modules/_ctypes/libffi_osx, so no configure step is needed; the
    sources and include directories are folded straight into the
    _ctypes Extension.  Always returns True.
    """
    base = sysconfig.get_config_var('srcdir')
    libffi_root = os.path.abspath(os.path.join(base, 'Modules',
                                               '_ctypes', 'libffi_osx'))
    # Relative paths of the preconfigured libffi inputs, in build order.
    relative_sources = ['ffi.c',
                        'x86/darwin64.S',
                        'x86/x86-darwin.S',
                        'x86/x86-ffi_darwin.c',
                        'x86/x86-ffi64.c',
                        'powerpc/ppc-darwin.S',
                        'powerpc/ppc-darwin_closure.S',
                        'powerpc/ppc-ffi_darwin.c',
                        'powerpc/ppc64-darwin_closure.S',
                        ]
    # Teach the compiler to accept preprocessed assembly (.S) inputs.
    self.compiler.src_extensions.append('.S')
    ext.include_dirs.extend([os.path.join(libffi_root, 'include'),
                             os.path.join(libffi_root, 'powerpc')])
    ext.sources.extend(os.path.join(libffi_root, name)
                       for name in relative_sources)
    return True
def configure_ctypes(self, ext):
    """Configure the bundled libffi for the _ctypes extension.

    Unless a system libffi is in use, this runs libffi's own configure
    script (its output is cached in the build tree as fficonfig.py) and
    folds the resulting sources, include dirs and cflags into *ext*.
    Returns False when the configure step fails, True otherwise.
    """
    if not self.use_system_libffi:
        if sys.platform == 'darwin':
            # Darwin ships preconfigured files; no configure run needed.
            return self.configure_ctypes_darwin(ext)
        srcdir = sysconfig.get_config_var('srcdir')
        ffi_builddir = os.path.join(self.build_temp, 'libffi')
        ffi_srcdir = os.path.abspath(os.path.join(srcdir, 'Modules',
                                     '_ctypes', 'libffi'))
        ffi_configfile = os.path.join(ffi_builddir, 'fficonfig.py')
        from distutils.dep_util import newer_group
        # Re-run configure when any top-level file in the libffi source
        # tree is newer than the cached fficonfig.py, or under --force.
        config_sources = [os.path.join(ffi_srcdir, fname)
                          for fname in os.listdir(ffi_srcdir)
                          if os.path.isfile(os.path.join(ffi_srcdir, fname))]
        if self.force or newer_group(config_sources,
                                     ffi_configfile):
            from distutils.dir_util import mkpath
            mkpath(ffi_builddir)
            if cross_compile:
                # Forward the host/target triplet etc. to libffi.
                config_args = sysconfig.get_config_var("CONFIG_ARGS")
            else:
                # Pass empty CFLAGS because we'll just append the resulting
                # CFLAGS to Python's; -g or -O2 is to be avoided.
                config_args = ""
            cmd = "cd %s && env CFLAGS='' '%s/configure' %s" \
                  % (ffi_builddir, ffi_srcdir, config_args)
            res = os.system(cmd)
            if res or not os.path.exists(ffi_configfile):
                print("Failed to configure _ctypes module")
                return False
        # fficonfig.py is generated by configure; executing it populates
        # the dict with ffi_sources / ffi_cflags definitions.
        fficonfig = {}
        with open(ffi_configfile) as f:
            exec(f.read(), globals(), fficonfig)
        # Add .S (preprocessed assembly) to C compiler source extensions.
        self.compiler.src_extensions.append('.S')
        include_dirs = [os.path.join(ffi_builddir, 'include'),
                        ffi_builddir,
                        os.path.join(ffi_srcdir, 'src')]
        extra_compile_args = fficonfig['ffi_cflags'].split()
        ext.sources.extend(os.path.join(ffi_srcdir, f) for f in
                           fficonfig['ffi_sources'])
        ext.include_dirs.extend(include_dirs)
        ext.extra_compile_args.extend(extra_compile_args)
    return True
def detect_ctypes(self, inc_dirs, lib_dirs):
    """Register the _ctypes and _ctypes_test extensions.

    By default the bundled libffi is used (configured later by
    configure_ctypes).  When configure was run with --with-system-ffi
    and a usable system ffi.h / libffi pair is found, those are wired
    into the extension instead and self.use_system_libffi is set.
    """
    self.use_system_libffi = False
    include_dirs = []
    extra_compile_args = []
    extra_link_args = []
    sources = ['_ctypes/_ctypes.c',
               '_ctypes/callbacks.c',
               '_ctypes/callproc.c',
               '_ctypes/stgdict.c',
               '_ctypes/cfield.c']
    depends = ['_ctypes/ctypes.h']
    if sys.platform == 'darwin':
        sources.append('_ctypes/malloc_closure.c')
        sources.append('_ctypes/darwin/dlfcn_simple.c')
        extra_compile_args.append('-DMACOSX')
        include_dirs.append('_ctypes/darwin')
        # XXX Is this still needed?
        ## extra_link_args.extend(['-read_only_relocs', 'warning'])
    elif sys.platform == 'sunos5':
        # XXX This shouldn't be necessary; it appears that some
        # of the assembler code is non-PIC (i.e. it has relocations
        # when it shouldn't. The proper fix would be to rewrite
        # the assembler code to be PIC.
        # This only works with GCC; the Sun compiler likely refuses
        # this option. If you want to compile ctypes with the Sun
        # compiler, please research a proper solution, instead of
        # finding some -z option for the Sun compiler.
        extra_link_args.append('-mimpure-text')
    elif sys.platform.startswith('hp-ux'):
        extra_link_args.append('-fPIC')
    ext = Extension('_ctypes',
                    include_dirs=include_dirs,
                    extra_compile_args=extra_compile_args,
                    extra_link_args=extra_link_args,
                    libraries=[],
                    sources=sources,
                    depends=depends)
    # Companion extension exercised by the ctypes test suite.
    ext_test = Extension('_ctypes_test',
                         sources=['_ctypes/_ctypes_test.c'])
    self.extensions.extend([ext, ext_test])
    if not '--with-system-ffi' in sysconfig.get_config_var("CONFIG_ARGS"):
        # Bundled libffi requested; nothing further to detect here.
        return
    if sys.platform == 'darwin':
        # OS X 10.5 comes with libffi.dylib; the include files are
        # in /usr/include/ffi
        inc_dirs.append('/usr/include/ffi')
    ffi_inc = [sysconfig.get_config_var("LIBFFI_INCLUDEDIR")]
    # NOTE(review): if LIBFFI_INCLUDEDIR is unset this list is [None],
    # which passes the '' check below and would break the '+' on ffi_h
    # later -- presumably the Makefile always defines it as a string;
    # confirm.
    if not ffi_inc or ffi_inc[0] == '':
        ffi_inc = find_file('ffi.h', [], inc_dirs)
    if ffi_inc is not None:
        ffi_h = ffi_inc[0] + '/ffi.h'
        # Sanity-check that the header really is libffi's by looking for
        # its include guard macro.
        with open(ffi_h) as fp:
            while 1:
                line = fp.readline()
                if not line:
                    # Hit EOF without seeing the guard: not usable.
                    ffi_inc = None
                    break
                if line.startswith('#define LIBFFI_H'):
                    break
    ffi_lib = None
    if ffi_inc is not None:
        # Prefer the convenience/pic variants when present.
        for lib_name in ('ffi_convenience', 'ffi_pic', 'ffi'):
            if (self.compiler.find_library_file(lib_dirs, lib_name)):
                ffi_lib = lib_name
                break
    if ffi_inc and ffi_lib:
        ext.include_dirs.extend(ffi_inc)
        ext.libraries.append(ffi_lib)
        self.use_system_libffi = True
class PyBuildInstall(install):
    """install command that silences the lib_dynload directory warning.

    During a Python build, modules are installed into a directory that
    is not yet on sys.path, which would normally trigger a warning from
    the stock install command; disabling warn_dir suppresses it.
    """

    def initialize_options(self):
        # Let the stock install command establish its defaults first,
        # then turn off the not-on-sys.path warning.
        super().initialize_options()
        self.warn_dir = 0
class PyBuildInstallLib(install_lib):
    """install_lib variant that normalizes modes on installed files.

    Behaves exactly like install_lib but afterwards chmods every
    installed file to 644 -- except shared libraries (files ending in
    the configured extension suffix), which get 755 -- and every
    installed directory to 755.
    """

    # Shared-library filename suffix, as configured for this build.
    so_ext = sysconfig.get_config_var("SO")

    def install(self):
        installed = install_lib.install(self)
        self.set_file_modes(installed, 0o644, 0o755)
        self.set_dir_modes(self.install_dir, 0o755)
        return installed

    def set_file_modes(self, files, defaultMode, sharedLibMode):
        """Chmod *files* to defaultMode, shared libs to sharedLibMode."""
        # Nothing to do without chmod support or without files.
        if not self.is_chmod_supported() or not files:
            return
        for path in files:
            # Leave symlinks untouched; chmod would follow them.
            if os.path.islink(path):
                continue
            mode = sharedLibMode if path.endswith(self.so_ext) else defaultMode
            log.info("changing mode of %s to %o", path, mode)
            if not self.dry_run:
                os.chmod(path, mode)

    def set_dir_modes(self, dirname, mode):
        """Chmod *dirname* and every directory below it to *mode*."""
        if not self.is_chmod_supported():
            return
        for dirpath, dirnames, fnames in os.walk(dirname):
            if os.path.islink(dirpath):
                continue
            log.info("changing mode of %s to %o", dirpath, mode)
            if not self.dry_run:
                os.chmod(dirpath, mode)

    def is_chmod_supported(self):
        """Return True when the platform's os module offers chmod()."""
        return hasattr(os, 'chmod')
class PyBuildScripts(build_scripts):
    """build_scripts variant that installs version-suffixed scripts.

    After the stock copy step, every script is renamed with a version
    suffix: '2to3' gets the full '-X.Y' suffix, everything else just
    the '.Y' minor suffix (e.g. pydoc3 -> pydoc3.Y).
    """

    def copy_scripts(self):
        outfiles, updated_files = build_scripts.copy_scripts(self)
        fullversion = '-{0[0]}.{0[1]}'.format(sys.version_info)
        minoronly = '.{0[1]}'.format(sys.version_info)
        renamed = []
        renamed_updated = []
        for oldpath in outfiles:
            # 2to3 keeps the whole 'major.minor' tag; other scripts
            # already carry the major version in their names.
            suffix = fullversion if oldpath.endswith('2to3') else minoronly
            newpath = oldpath + suffix
            log.info('renaming {} to {}'.format(oldpath, newpath))
            os.rename(oldpath, newpath)
            renamed.append(newpath)
            if oldpath in updated_files:
                renamed_updated.append(newpath)
        return renamed, renamed_updated
# Long description used as PyPI metadata for the Python distribution
# itself (consumed by setup() in main() below).
SUMMARY = """
Python is an interpreted, interactive, object-oriented programming
language. It is often compared to Tcl, Perl, Scheme or Java.
Python combines remarkable power with very clear syntax. It has
modules, classes, exceptions, very high level dynamic data types, and
dynamic typing. There are interfaces to many system calls and
libraries, as well as to various windowing systems (X11, Motif, Tk,
Mac, MFC). New built-in modules are easily written in C or C++. Python
is also usable as an extension language for applications that need a
programmable interface.
The Python implementation is portable: it runs on many brands of UNIX,
on Windows, DOS, OS/2, Mac, Amiga... If your favorite system isn't
listed here, it may still be supported, if there's a C compiler for
it. Ask around on comp.lang.python -- or just try compiling Python
yourself.
"""

# Trove classifiers for the PyPI metadata; split into a list in main().
CLASSIFIERS = """
Development Status :: 6 - Mature
License :: OSI Approved :: Python Software Foundation License
Natural Language :: English
Programming Language :: C
Programming Language :: Python
Topic :: Software Development
"""
def main():
    """Run distutils setup() with CPython's metadata and build commands."""
    # turn off warnings when deprecated modules are imported
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)

    # PyPI Metadata (PEP 301) plus the build configuration.
    args = dict(
        name="Python",
        version=sys.version.split()[0],
        url="http://www.python.org/%s" % sys.version[:3],
        maintainer="Guido van Rossum and the Python community",
        maintainer_email="python-dev@python.org",
        description="A high-level object-oriented programming language",
        long_description=SUMMARY.strip(),
        license="PSF license",
        classifiers=[line for line in CLASSIFIERS.split("\n") if line],
        platforms=["Many"],
        # Build info
        cmdclass={'build_ext': PyBuildExt,
                  'build_scripts': PyBuildScripts,
                  'install': PyBuildInstall,
                  'install_lib': PyBuildInstallLib},
        # The struct module is defined here, because build_ext won't be
        # called unless there's at least one extension module defined.
        ext_modules=[Extension('_struct', ['_struct.c'])],
        # If you change the scripts installed here, you also need to
        # check the PyBuildScripts command above, and change the links
        # created by the bininstall target in Makefile.pre.in
        scripts=["Tools/scripts/pydoc3", "Tools/scripts/idle3",
                 "Tools/scripts/2to3"],
    )
    setup(**args)
# --install-platlib
# Standard script entry point: only run setup() when executed directly.
if __name__ == '__main__':
    main()
| apache-2.0 |
2014c2g4/2015cda0623 | static/Brython3.1.0-20150301-090019/Lib/unittest/test/test_result.py | 788 | 19069 | import io
import sys
import textwrap
from test import support
import traceback
import unittest
class Test_TestResult(unittest.TestCase):
    """Tests for the basic ``unittest.TestResult`` contract.

    The quoted ``# "..."`` comments below cite the documented behavior
    that each test method verifies.
    """
    # Note: there are not separate tests for TestResult.wasSuccessful(),
    # TestResult.errors, TestResult.failures, TestResult.testsRun or
    # TestResult.shouldStop because these only have meaning in terms of
    # other TestResult methods.
    #
    # Accordingly, tests for the aforenamed attributes are incorporated
    # in with the tests for the defining methods.
    ################################################################
    def test_init(self):
        # A fresh TestResult starts successful, with no recorded outcomes,
        # no buffered streams, and shouldStop unset.
        result = unittest.TestResult()
        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 0)
        self.assertEqual(result.shouldStop, False)
        self.assertIsNone(result._stdout_buffer)
        self.assertIsNone(result._stderr_buffer)
    # "This method can be called to signal that the set of tests being
    # run should be aborted by setting the TestResult's shouldStop
    # attribute to True."
    def test_stop(self):
        result = unittest.TestResult()
        result.stop()
        self.assertEqual(result.shouldStop, True)
    # "Called when the test case test is about to be run. The default
    # implementation simply increments the instance's testsRun counter."
    def test_startTest(self):
        class Foo(unittest.TestCase):
            def test_1(self):
                pass
        test = Foo('test_1')
        result = unittest.TestResult()
        result.startTest(test)
        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)
        result.stopTest(test)
    # "Called after the test case test has been executed, regardless of
    # the outcome. The default implementation does nothing."
    def test_stopTest(self):
        class Foo(unittest.TestCase):
            def test_1(self):
                pass
        test = Foo('test_1')
        result = unittest.TestResult()
        result.startTest(test)
        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)
        result.stopTest(test)
        # Same tests as above; make sure nothing has changed
        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)
    # "Called before and after tests are run. The default implementation does nothing."
    def test_startTestRun_stopTestRun(self):
        result = unittest.TestResult()
        result.startTestRun()
        result.stopTestRun()
    # "addSuccess(test)"
    # ...
    # "Called when the test case test succeeds"
    # ...
    # "wasSuccessful() - Returns True if all tests run so far have passed,
    # otherwise returns False"
    # ...
    # "testsRun - The total number of tests run so far."
    # ...
    # "errors - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test which raised an
    # unexpected exception. Contains formatted
    # tracebacks instead of sys.exc_info() results."
    # ...
    # "failures - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test where a failure was
    # explicitly signalled using the TestCase.fail*() or TestCase.assert*()
    # methods. Contains formatted tracebacks instead
    # of sys.exc_info() results."
    def test_addSuccess(self):
        class Foo(unittest.TestCase):
            def test_1(self):
                pass
        test = Foo('test_1')
        result = unittest.TestResult()
        result.startTest(test)
        result.addSuccess(test)
        result.stopTest(test)
        self.assertTrue(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)
    # "addFailure(test, err)"
    # ...
    # "Called when the test case test signals a failure. err is a tuple of
    # the form returned by sys.exc_info(): (type, value, traceback)"
    # ...
    # "wasSuccessful() - Returns True if all tests run so far have passed,
    # otherwise returns False"
    # ...
    # "testsRun - The total number of tests run so far."
    # ...
    # "errors - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test which raised an
    # unexpected exception. Contains formatted
    # tracebacks instead of sys.exc_info() results."
    # ...
    # "failures - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test where a failure was
    # explicitly signalled using the TestCase.fail*() or TestCase.assert*()
    # methods. Contains formatted tracebacks instead
    # of sys.exc_info() results."
    def test_addFailure(self):
        class Foo(unittest.TestCase):
            def test_1(self):
                pass
        test = Foo('test_1')
        # Produce a real AssertionError so a genuine exc_info triple exists.
        try:
            test.fail("foo")
        except:
            exc_info_tuple = sys.exc_info()
        result = unittest.TestResult()
        result.startTest(test)
        result.addFailure(test, exc_info_tuple)
        result.stopTest(test)
        self.assertFalse(result.wasSuccessful())
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 1)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)
        test_case, formatted_exc = result.failures[0]
        self.assertTrue(test_case is test)
        self.assertIsInstance(formatted_exc, str)
    # "addError(test, err)"
    # ...
    # "Called when the test case test raises an unexpected exception err
    # is a tuple of the form returned by sys.exc_info():
    # (type, value, traceback)"
    # ...
    # "wasSuccessful() - Returns True if all tests run so far have passed,
    # otherwise returns False"
    # ...
    # "testsRun - The total number of tests run so far."
    # ...
    # "errors - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test which raised an
    # unexpected exception. Contains formatted
    # tracebacks instead of sys.exc_info() results."
    # ...
    # "failures - A list containing 2-tuples of TestCase instances and
    # formatted tracebacks. Each tuple represents a test where a failure was
    # explicitly signalled using the TestCase.fail*() or TestCase.assert*()
    # methods. Contains formatted tracebacks instead
    # of sys.exc_info() results."
    def test_addError(self):
        class Foo(unittest.TestCase):
            def test_1(self):
                pass
        test = Foo('test_1')
        # A non-assertion exception must be recorded as an error, not a failure.
        try:
            raise TypeError()
        except:
            exc_info_tuple = sys.exc_info()
        result = unittest.TestResult()
        result.startTest(test)
        result.addError(test, exc_info_tuple)
        result.stopTest(test)
        self.assertFalse(result.wasSuccessful())
        self.assertEqual(len(result.errors), 1)
        self.assertEqual(len(result.failures), 0)
        self.assertEqual(result.testsRun, 1)
        self.assertEqual(result.shouldStop, False)
        test_case, formatted_exc = result.errors[0]
        self.assertTrue(test_case is test)
        self.assertIsInstance(formatted_exc, str)
    def testGetDescriptionWithoutDocstring(self):
        # Without a docstring, getDescription() is just "method (module.Class)".
        result = unittest.TextTestResult(None, True, 1)
        self.assertEqual(
                result.getDescription(self),
                'testGetDescriptionWithoutDocstring (' + __name__ +
                '.Test_TestResult)')
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def testGetDescriptionWithOneLineDocstring(self):
        """Tests getDescription() for a method with a docstring."""
        result = unittest.TextTestResult(None, True, 1)
        self.assertEqual(
                result.getDescription(self),
               ('testGetDescriptionWithOneLineDocstring '
                '(' + __name__ + '.Test_TestResult)\n'
                'Tests getDescription() for a method with a docstring.'))
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def testGetDescriptionWithMultiLineDocstring(self):
        """Tests getDescription() for a method with a longer docstring.
        The second line of the docstring.
        """
        # Only the first docstring line should appear in the description.
        result = unittest.TextTestResult(None, True, 1)
        self.assertEqual(
                result.getDescription(self),
               ('testGetDescriptionWithMultiLineDocstring '
                '(' + __name__ + '.Test_TestResult)\n'
                'Tests getDescription() for a method with a longer '
                'docstring.'))
    def testStackFrameTrimming(self):
        # Frames whose globals define '__unittest' are considered part of
        # the unittest machinery and trimmed from reported tracebacks.
        class Frame(object):
            class tb_frame(object):
                f_globals = {}
        result = unittest.TestResult()
        self.assertFalse(result._is_relevant_tb_level(Frame))
        Frame.tb_frame.f_globals['__unittest'] = True
        self.assertTrue(result._is_relevant_tb_level(Frame))
    def testFailFast(self):
        # With failfast set, any error/failure/unexpected success stops the run.
        result = unittest.TestResult()
        result._exc_info_to_string = lambda *_: ''
        result.failfast = True
        result.addError(None, None)
        self.assertTrue(result.shouldStop)
        result = unittest.TestResult()
        result._exc_info_to_string = lambda *_: ''
        result.failfast = True
        result.addFailure(None, None)
        self.assertTrue(result.shouldStop)
        result = unittest.TestResult()
        result._exc_info_to_string = lambda *_: ''
        result.failfast = True
        result.addUnexpectedSuccess(None)
        self.assertTrue(result.shouldStop)
    def testFailFastSetByRunner(self):
        # The runner must propagate its failfast flag onto the result object.
        runner = unittest.TextTestRunner(stream=io.StringIO(), failfast=True)
        def test(result):
            self.assertTrue(result.failfast)
        result = runner.run(test)
# Build "OldResult": a TestResult work-alike mimicking pre-2.7 result
# objects. It is derived from TestResult's own method dict, minus the
# newer add* hooks and the buffering-aware constructor.
classDict = dict(unittest.TestResult.__dict__)
for m in ('addSkip', 'addExpectedFailure', 'addUnexpectedSuccess',
           '__init__'):
    del classDict[m]
def __init__(self, stream=None, descriptions=None, verbosity=None):
    # Minimal state that the remaining TestResult methods expect to find.
    self.failures = []
    self.errors = []
    self.testsRun = 0
    self.shouldStop = False
    self.buffer = False
classDict['__init__'] = __init__
OldResult = type('OldResult', (object,), classDict)
class Test_OldTestResult(unittest.TestCase):
    """Check that pre-2.7-style result objects (see OldResult above) still
    work: unittest warns about the missing add* methods but degrades
    gracefully instead of crashing."""
    def assertOldResultWarning(self, test, failures):
        # Running against OldResult must emit the RuntimeWarning and record
        # exactly `failures` failures.
        with support.check_warnings(("TestResult has no add.+ method,",
                                     RuntimeWarning)):
            result = OldResult()
            test.run(result)
            self.assertEqual(len(result.failures), failures)
    def testOldTestResult(self):
        class Test(unittest.TestCase):
            def testSkip(self):
                self.skipTest('foobar')
            @unittest.expectedFailure
            def testExpectedFail(self):
                raise TypeError
            @unittest.expectedFailure
            def testUnexpectedSuccess(self):
                pass
        # An unexpected success is downgraded to a plain failure on old
        # result objects; skips and expected failures pass.
        for test_name, should_pass in (('testSkip', True),
                                       ('testExpectedFail', True),
                                       ('testUnexpectedSuccess', False)):
            test = Test(test_name)
            self.assertOldResultWarning(test, int(not should_pass))
    def testOldTestTesultSetup(self):
        # Skipping from setUp() must also be tolerated by an old result.
        class Test(unittest.TestCase):
            def setUp(self):
                self.skipTest('no reason')
            def testFoo(self):
                pass
        self.assertOldResultWarning(Test('testFoo'), 0)
    def testOldTestResultClass(self):
        # A class-level skip decorator must be tolerated as well.
        @unittest.skip('no reason')
        class Test(unittest.TestCase):
            def testFoo(self):
                pass
        self.assertOldResultWarning(Test('testFoo'), 0)
    def testOldResultWithRunner(self):
        class Test(unittest.TestCase):
            def testFoo(self):
                pass
        runner = unittest.TextTestRunner(resultclass=OldResult,
                                          stream=io.StringIO())
        # This will raise an exception if TextTestRunner can't handle old
        # test result objects
        runner.run(Test('testFoo'))
class MockTraceback(object):
    """Stand-in for the ``traceback`` module used by the buffering tests.

    Its ``format_exception`` ignores whatever it is given and always yields
    a single, predictable line, so tests can assert on exact output.
    """
    @staticmethod
    def format_exception(*unused_args):
        # Fixed payload, independent of the (ignored) exc_info arguments.
        canned_line = 'A traceback'
        return [canned_line]
def restore_traceback():
    # Undo the MockTraceback monkey-patch: point unittest.result back at the
    # real traceback module (registered as an addCleanup in the tests).
    unittest.result.traceback = traceback
class TestOutputBuffering(unittest.TestCase):
    """Tests for TestResult's ``buffer = True`` mode, which swaps
    sys.stdout/sys.stderr for StringIO buffers during each test and only
    forwards captured output when the test errors or fails."""
    def setUp(self):
        # Remember the real streams so tearDown can always restore them,
        # even if a test leaves the buffered streams installed.
        self._real_out = sys.stdout
        self._real_err = sys.stderr
    def tearDown(self):
        sys.stdout = self._real_out
        sys.stderr = self._real_err
    def testBufferOutputOff(self):
        # With buffer left False, startTest must not touch the streams.
        real_out = self._real_out
        real_err = self._real_err
        result = unittest.TestResult()
        self.assertFalse(result.buffer)
        self.assertIs(real_out, sys.stdout)
        self.assertIs(real_err, sys.stderr)
        result.startTest(self)
        self.assertIs(real_out, sys.stdout)
        self.assertIs(real_err, sys.stderr)
    def testBufferOutputStartTestAddSuccess(self):
        # With buffer on, startTest installs distinct StringIO streams; on a
        # success the captured output is discarded and originals restored.
        real_out = self._real_out
        real_err = self._real_err
        result = unittest.TestResult()
        self.assertFalse(result.buffer)
        result.buffer = True
        self.assertIs(real_out, sys.stdout)
        self.assertIs(real_err, sys.stderr)
        result.startTest(self)
        self.assertIsNot(real_out, sys.stdout)
        self.assertIsNot(real_err, sys.stderr)
        self.assertIsInstance(sys.stdout, io.StringIO)
        self.assertIsInstance(sys.stderr, io.StringIO)
        self.assertIsNot(sys.stdout, sys.stderr)
        out_stream = sys.stdout
        err_stream = sys.stderr
        result._original_stdout = io.StringIO()
        result._original_stderr = io.StringIO()
        print('foo')
        print('bar', file=sys.stderr)
        self.assertEqual(out_stream.getvalue(), 'foo\n')
        self.assertEqual(err_stream.getvalue(), 'bar\n')
        self.assertEqual(result._original_stdout.getvalue(), '')
        self.assertEqual(result._original_stderr.getvalue(), '')
        result.addSuccess(self)
        result.stopTest(self)
        self.assertIs(sys.stdout, result._original_stdout)
        self.assertIs(sys.stderr, result._original_stderr)
        self.assertEqual(result._original_stdout.getvalue(), '')
        self.assertEqual(result._original_stderr.getvalue(), '')
        self.assertEqual(out_stream.getvalue(), '')
        self.assertEqual(err_stream.getvalue(), '')
    def getStartedResult(self):
        # Helper: a buffering result that has already entered a test.
        result = unittest.TestResult()
        result.buffer = True
        result.startTest(self)
        return result
    def testBufferOutputAddErrorOrFailure(self):
        # On error/failure the captured output must be appended to the
        # formatted traceback and echoed to the original streams.
        unittest.result.traceback = MockTraceback
        self.addCleanup(restore_traceback)
        for message_attr, add_attr, include_error in [
            ('errors', 'addError', True),
            ('failures', 'addFailure', False),
            ('errors', 'addError', True),
            ('failures', 'addFailure', False)
        ]:
            result = self.getStartedResult()
            buffered_out = sys.stdout
            buffered_err = sys.stderr
            result._original_stdout = io.StringIO()
            result._original_stderr = io.StringIO()
            print('foo', file=sys.stdout)
            if include_error:
                print('bar', file=sys.stderr)
            addFunction = getattr(result, add_attr)
            addFunction(self, (None, None, None))
            result.stopTest(self)
            result_list = getattr(result, message_attr)
            self.assertEqual(len(result_list), 1)
            test, message = result_list[0]
            expectedOutMessage = textwrap.dedent("""
                Stdout:
                foo
            """)
            expectedErrMessage = ''
            if include_error:
                expectedErrMessage = textwrap.dedent("""
                Stderr:
                bar
            """)
            expectedFullMessage = 'A traceback%s%s' % (expectedOutMessage, expectedErrMessage)
            self.assertIs(test, self)
            self.assertEqual(result._original_stdout.getvalue(), expectedOutMessage)
            self.assertEqual(result._original_stderr.getvalue(), expectedErrMessage)
            self.assertMultiLineEqual(message, expectedFullMessage)
    def testBufferSetupClass(self):
        # Failures inside setUpClass must be recorded even in buffer mode.
        result = unittest.TestResult()
        result.buffer = True
        class Foo(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                1/0
            def test_foo(self):
                pass
        suite = unittest.TestSuite([Foo('test_foo')])
        suite(result)
        self.assertEqual(len(result.errors), 1)
    def testBufferTearDownClass(self):
        # Same for tearDownClass.
        result = unittest.TestResult()
        result.buffer = True
        class Foo(unittest.TestCase):
            @classmethod
            def tearDownClass(cls):
                1/0
            def test_foo(self):
                pass
        suite = unittest.TestSuite([Foo('test_foo')])
        suite(result)
        self.assertEqual(len(result.errors), 1)
    def testBufferSetUpModule(self):
        # Same for a failing module-level setUpModule hook.
        result = unittest.TestResult()
        result.buffer = True
        class Foo(unittest.TestCase):
            def test_foo(self):
                pass
        class Module(object):
            @staticmethod
            def setUpModule():
                1/0
        Foo.__module__ = 'Module'
        sys.modules['Module'] = Module
        self.addCleanup(sys.modules.pop, 'Module')
        suite = unittest.TestSuite([Foo('test_foo')])
        suite(result)
        self.assertEqual(len(result.errors), 1)
    def testBufferTearDownModule(self):
        # Same for tearDownModule.
        result = unittest.TestResult()
        result.buffer = True
        class Foo(unittest.TestCase):
            def test_foo(self):
                pass
        class Module(object):
            @staticmethod
            def tearDownModule():
                1/0
        Foo.__module__ = 'Module'
        sys.modules['Module'] = Module
        self.addCleanup(sys.modules.pop, 'Module')
        suite = unittest.TestSuite([Foo('test_foo')])
        suite(result)
        self.assertEqual(len(result.errors), 1)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
singleyoungtao/myblog-flask | migrations/versions/d66f086b258_user_information.py | 127 | 1101 | """user information
Revision ID: d66f086b258
Revises: 56ed7d33de8d
Create Date: 2013-12-29 23:50:49.566954
"""
# revision identifiers, used by Alembic.
revision = 'd66f086b258'
down_revision = '56ed7d33de8d'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the extended user-profile columns to the ``users`` table."""
    ### commands auto generated by Alembic - please adjust! ###
    profile_columns = (
        sa.Column('about_me', sa.Text(), nullable=True),
        sa.Column('last_seen', sa.DateTime(), nullable=True),
        sa.Column('location', sa.String(length=64), nullable=True),
        sa.Column('member_since', sa.DateTime(), nullable=True),
        sa.Column('name', sa.String(length=64), nullable=True),
    )
    for column in profile_columns:
        op.add_column('users', column)
    ### end Alembic commands ###
def downgrade():
    """Remove the extended user-profile columns (reverse of upgrade)."""
    ### commands auto generated by Alembic - please adjust! ###
    # Drop in the same order the original migration used.
    for column_name in ('name', 'member_since', 'location',
                        'last_seen', 'about_me'):
        op.drop_column('users', column_name)
    ### end Alembic commands ###
| mit |
akurtakov/Pydev | plugins/org.python.pydev.jython/Lib/json/tests/test_scanstring.py | 97 | 3784 | import sys
from json.tests import PyTest, CTest
class TestScanstring(object):
    """Shared tests for json.decoder.scanstring; mixed into both the
    pure-Python and C implementations via the subclasses below.

    scanstring(s, end, encoding, strict) returns a (decoded_string,
    next_index) tuple; the assertions below pin both values.
    """
    def test_scanstring(self):
        scanstring = self.json.decoder.scanstring
        # Surrogate-pair escape must decode to a single astral character.
        self.assertEqual(
            scanstring('"z\\ud834\\udd20x"', 1, None, True),
            (u'z\U0001d120x', 16))
        # On narrow builds the astral char occupies two code units, so the
        # end index differs by one.
        if sys.maxunicode == 65535:
            self.assertEqual(
                scanstring(u'"z\U0001d120x"', 1, None, True),
                (u'z\U0001d120x', 6))
        else:
            self.assertEqual(
                scanstring(u'"z\U0001d120x"', 1, None, True),
                (u'z\U0001d120x', 5))
        self.assertEqual(
            scanstring('"\\u007b"', 1, None, True),
            (u'{', 8))
        self.assertEqual(
            scanstring('"A JSON payload should be an object or array, not a string."', 1, None, True),
            (u'A JSON payload should be an object or array, not a string.', 60))
        # The remaining cases scan a quoted string embedded in otherwise
        # malformed JSON; scanstring only cares about the string itself.
        self.assertEqual(
            scanstring('["Unclosed array"', 2, None, True),
            (u'Unclosed array', 17))
        self.assertEqual(
            scanstring('["extra comma",]', 2, None, True),
            (u'extra comma', 14))
        self.assertEqual(
            scanstring('["double extra comma",,]', 2, None, True),
            (u'double extra comma', 21))
        self.assertEqual(
            scanstring('["Comma after the close"],', 2, None, True),
            (u'Comma after the close', 24))
        self.assertEqual(
            scanstring('["Extra close"]]', 2, None, True),
            (u'Extra close', 14))
        self.assertEqual(
            scanstring('{"Extra comma": true,}', 2, None, True),
            (u'Extra comma', 14))
        self.assertEqual(
            scanstring('{"Extra value after close": true} "misplaced quoted value"', 2, None, True),
            (u'Extra value after close', 26))
        self.assertEqual(
            scanstring('{"Illegal expression": 1 + 2}', 2, None, True),
            (u'Illegal expression', 21))
        self.assertEqual(
            scanstring('{"Illegal invocation": alert()}', 2, None, True),
            (u'Illegal invocation', 21))
        self.assertEqual(
            scanstring('{"Numbers cannot have leading zeroes": 013}', 2, None, True),
            (u'Numbers cannot have leading zeroes', 37))
        self.assertEqual(
            scanstring('{"Numbers cannot be hex": 0x14}', 2, None, True),
            (u'Numbers cannot be hex', 24))
        self.assertEqual(
            scanstring('[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]', 21, None, True),
            (u'Too deep', 30))
        self.assertEqual(
            scanstring('{"Missing colon" null}', 2, None, True),
            (u'Missing colon', 16))
        self.assertEqual(
            scanstring('{"Double colon":: null}', 2, None, True),
            (u'Double colon', 15))
        self.assertEqual(
            scanstring('{"Comma instead of colon", null}', 2, None, True),
            (u'Comma instead of colon', 25))
        self.assertEqual(
            scanstring('["Colon instead of comma": false]', 2, None, True),
            (u'Colon instead of comma', 25))
        self.assertEqual(
            scanstring('["Bad value", truth]', 2, None, True),
            (u'Bad value', 12))
    def test_issue3623(self):
        # Non-string inputs must raise, not crash (see CPython issue 3623).
        self.assertRaises(ValueError, self.json.decoder.scanstring, b"xxx", 1,
                          "xxx")
        self.assertRaises(UnicodeDecodeError,
                          self.json.encoder.encode_basestring_ascii, b"xx\xff")
    def test_overflow(self):
        # An end index beyond the platform's maximum size must raise cleanly.
        with self.assertRaises(OverflowError):
            self.json.decoder.scanstring(b"xxx", sys.maxsize+1)
# Run the shared TestScanstring suite against both the pure-Python and the
# C-accelerated scanner implementations.
class TestPyScanstring(TestScanstring, PyTest): pass
class TestCScanstring(TestScanstring, CTest): pass
| epl-1.0 |
yeti-platform/yeti | core/web/api/export.py | 1 | 3616 | from __future__ import unicode_literals
import os
from flask import send_from_directory, make_response
from flask_classy import route
from mongoengine.errors import DoesNotExist
from core.web.api.crud import CrudApi
from core import exports
from core.web.api.api import render
from core.helpers import string_to_timedelta
from core.observables import Tag
from core.web.helpers import requires_permissions
class ExportTemplate(CrudApi):
    """CRUD API endpoints for export templates."""
    # HTML template used to render list responses.
    template = "export_template_api.html"
    # Mongoengine document class managed by this API.
    objectmanager = exports.ExportTemplate
class Export(CrudApi):
    """CRUD API for exports, plus content download, manual refresh and
    enable/disable toggling endpoints."""

    # HTML templates used to render list / single-object responses.
    template = "export_api.html"
    template_single = "export_api_single.html"
    # Mongoengine document class managed by this API.
    objectmanager = exports.Export

    @route("/<string:id>/content")
    @requires_permissions("read")
    def content(self, id):
        """Return export content

        Returns a given export's content.

        :query ObjectID id: Export ID
        :resheader X-Yeti-Export-MD5: The MD5 hash of the exported content. Use it to check the export's integrity
        """
        try:
            e = self.objectmanager.objects.get(id=id)
        except DoesNotExist:
            return render({"error": "No Export found for id {}".format(id)}), 404
        if e.output_dir.startswith("/"):
            # Absolute output directory: serve from it directly.
            d = e.output_dir
        else:
            # Relative output directory: resolve against the project root
            # (four directory levels up from this module).
            d = os.path.join(
                os.path.dirname(
                    os.path.dirname(
                        os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
                    )
                ),
                e.output_dir,
            )
        response = make_response(
            send_from_directory(
                d, e.name, as_attachment=True, attachment_filename=e.name
            )
        )
        # Expose the export's MD5 so clients can verify download integrity.
        response.headers["X-Yeti-Export-MD5"] = e.hash_md5
        return response

    @route("/<string:id>/refresh", methods=["POST"])
    @requires_permissions("refresh")
    def refresh(self, id):
        """Refresh an export

        Manually executes an export if it is not already exporting.

        :query ObjectID id: Export ID
        :>json ObjectID id: The export's ObjectID
        """
        # Fire-and-forget: the export runs asynchronously in a worker.
        exports.execute_export.delay(id)
        return render({"id": id})

    @route("/<string:id>/toggle", methods=["POST"])
    @requires_permissions("toggle")
    def toggle(self, id):
        """Toggle an export

        Toggles an export. A deactivated export will not execute when called (manually or scheduled)

        :query ObjectID id: Export ID
        :>json ObjectID id: The export's ObjectID
        :>json boolean status: The result of the toggle operation (``true`` means the export has been enabled, ``false`` means it has been disabled)
        """
        e = self.objectmanager.objects.get(id=id)
        e.enabled = not e.enabled
        e.save()
        return render({"id": id, "status": e.enabled})

    @staticmethod
    def _parse_tags(field):
        """Resolve a comma-separated string of tag names into Tag objects.

        Blank entries are skipped. A missing tag raises DoesNotExist, same
        as the previous inline parsing did.
        """
        return [
            Tag.objects.get(name=name.strip())
            for name in field.split(",")
            if name.strip()
        ]

    def _parse_request(self, json):
        """Normalize a create/update JSON payload in place.

        Converts ``frequency`` to a timedelta, resolves the three tag-list
        fields to Tag objects, and resolves ``template`` to its
        ExportTemplate document. Returns the mutated dict.
        """
        params = json
        params["frequency"] = string_to_timedelta(params.get("frequency", "1:00:00"))
        # The three tag fields share identical parsing; previously this
        # comprehension was duplicated three times inline.
        params["ignore_tags"] = self._parse_tags(params["ignore_tags"])
        params["include_tags"] = self._parse_tags(params["include_tags"])
        params["exclude_tags"] = self._parse_tags(params["exclude_tags"])
        params["template"] = exports.ExportTemplate.objects.get(name=params["template"])
        return params
| apache-2.0 |
AnishShah/tensorflow | tensorflow/python/training/checkpoint_management.py | 9 | 29040 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Save and restore variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os.path
import re
import time
from google.protobuf import text_format
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training_util
from tensorflow.python.training.checkpoint_state_pb2 import CheckpointState
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
def _GetCheckpointFilename(save_dir, latest_filename):
  """Composes the path of the file that stores the CheckpointState proto.

  Args:
    save_dir: The directory for saving and restoring checkpoints.
    latest_filename: Name of the state file inside `save_dir`, or None to
      use the default name "checkpoint".

  Returns:
    The path of the file that contains the CheckpointState proto.
  """
  basename = "checkpoint" if latest_filename is None else latest_filename
  return os.path.join(save_dir, basename)
@tf_export("train.generate_checkpoint_state_proto")
def generate_checkpoint_state_proto(save_dir,
                                    model_checkpoint_path,
                                    all_model_checkpoint_paths=None,
                                    all_model_checkpoint_timestamps=None,
                                    last_preserved_timestamp=None):
  """Generates a checkpoint state proto.
  Note: `all_model_checkpoint_paths`, when passed in, may be mutated in
  place: the current checkpoint path is appended if missing, and entries
  are rewritten to be relative to `save_dir` when `save_dir` is relative.
  Args:
    save_dir: Directory where the model was saved.
    model_checkpoint_path: The checkpoint file.
    all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted
      checkpoints, sorted from oldest to newest. If this is a non-empty list,
      the last element must be equal to model_checkpoint_path. These paths
      are also saved in the CheckpointState proto.
    all_model_checkpoint_timestamps: A list of floats, indicating the number of
      seconds since the Epoch when each checkpoint was generated.
    last_preserved_timestamp: A float, indicating the number of seconds since
      the Epoch when the last preserved checkpoint was written, e.g. due to a
      `keep_checkpoint_every_n_hours` parameter (see
      `tf.contrib.checkpoint.CheckpointManager` for an implementation).
  Returns:
    CheckpointState proto with model_checkpoint_path and
    all_model_checkpoint_paths updated to either absolute paths or
    relative paths to the current save_dir.
  Raises:
    ValueError: If `all_model_checkpoint_timestamps` was provided but its length
      does not match `all_model_checkpoint_paths`.
  """
  if all_model_checkpoint_paths is None:
    all_model_checkpoint_paths = []
  # Ensure the current checkpoint is always the last (newest) entry.
  if (not all_model_checkpoint_paths or
      all_model_checkpoint_paths[-1] != model_checkpoint_path):
    logging.info("%s is not in all_model_checkpoint_paths. Manually adding it.",
                 model_checkpoint_path)
    all_model_checkpoint_paths.append(model_checkpoint_path)
  if (all_model_checkpoint_timestamps
      and (len(all_model_checkpoint_timestamps)
           != len(all_model_checkpoint_paths))):
    raise ValueError(
        ("Checkpoint timestamps, if provided, must match checkpoint paths (got "
         "paths %s and timestamps %s)")
        % (all_model_checkpoint_paths, all_model_checkpoint_timestamps))
  # Relative paths need to be rewritten to be relative to the "save_dir"
  # if model_checkpoint_path already contains "save_dir".
  if not os.path.isabs(save_dir):
    if not os.path.isabs(model_checkpoint_path):
      model_checkpoint_path = os.path.relpath(model_checkpoint_path, save_dir)
    for i in range(len(all_model_checkpoint_paths)):
      p = all_model_checkpoint_paths[i]
      if not os.path.isabs(p):
        all_model_checkpoint_paths[i] = os.path.relpath(p, save_dir)
  coord_checkpoint_proto = CheckpointState(
      model_checkpoint_path=model_checkpoint_path,
      all_model_checkpoint_paths=all_model_checkpoint_paths,
      all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,
      last_preserved_timestamp=last_preserved_timestamp)
  return coord_checkpoint_proto
@tf_export("train.update_checkpoint_state")
def update_checkpoint_state(save_dir,
                            model_checkpoint_path,
                            all_model_checkpoint_paths=None,
                            latest_filename=None,
                            all_model_checkpoint_timestamps=None,
                            last_preserved_timestamp=None):
  """Updates the content of the 'checkpoint' file.
  This updates the checkpoint file containing a CheckpointState
  proto.
  Args:
    save_dir: Directory where the model was saved.
    model_checkpoint_path: The checkpoint file.
    all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted
      checkpoints, sorted from oldest to newest. If this is a non-empty list,
      the last element must be equal to model_checkpoint_path. These paths
      are also saved in the CheckpointState proto.
    latest_filename: Optional name of the checkpoint file. Default to
      'checkpoint'.
    all_model_checkpoint_timestamps: Optional list of timestamps (floats,
      seconds since the Epoch) indicating when the checkpoints in
      `all_model_checkpoint_paths` were created.
    last_preserved_timestamp: A float, indicating the number of seconds since
      the Epoch when the last preserved checkpoint was written, e.g. due to a
      `keep_checkpoint_every_n_hours` parameter (see
      `tf.contrib.checkpoint.CheckpointManager` for an implementation).
  Raises:
    RuntimeError: If any of the model checkpoint paths conflict with the file
      containing CheckpointSate.
  """
  # Public wrapper: always writes absolute paths; the internal variant also
  # supports save_relative_paths=True.
  update_checkpoint_state_internal(
      save_dir=save_dir,
      model_checkpoint_path=model_checkpoint_path,
      all_model_checkpoint_paths=all_model_checkpoint_paths,
      latest_filename=latest_filename,
      save_relative_paths=False,
      all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,
      last_preserved_timestamp=last_preserved_timestamp)
def update_checkpoint_state_internal(save_dir,
                                     model_checkpoint_path,
                                     all_model_checkpoint_paths=None,
                                     latest_filename=None,
                                     save_relative_paths=False,
                                     all_model_checkpoint_timestamps=None,
                                     last_preserved_timestamp=None):
  """Updates the content of the 'checkpoint' file.
  This updates the checkpoint file containing a CheckpointState
  proto.
  Args:
    save_dir: Directory where the model was saved.
    model_checkpoint_path: The checkpoint file.
    all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted
      checkpoints, sorted from oldest to newest. If this is a non-empty list,
      the last element must be equal to model_checkpoint_path. These paths
      are also saved in the CheckpointState proto.
    latest_filename: Optional name of the checkpoint file. Default to
      'checkpoint'.
    save_relative_paths: If `True`, will write relative paths to the checkpoint
      state file.
    all_model_checkpoint_timestamps: Optional list of timestamps (floats,
      seconds since the Epoch) indicating when the checkpoints in
      `all_model_checkpoint_paths` were created.
    last_preserved_timestamp: A float, indicating the number of seconds since
      the Epoch when the last preserved checkpoint was written, e.g. due to a
      `keep_checkpoint_every_n_hours` parameter (see
      `tf.contrib.checkpoint.CheckpointManager` for an implementation).
  Raises:
    RuntimeError: If any of the model checkpoint paths conflict with the file
      containing CheckpointSate.
  """
  # Writes the "checkpoint" file for the coordinator for later restoration.
  coord_checkpoint_filename = _GetCheckpointFilename(save_dir, latest_filename)
  if save_relative_paths:
    # Convert every absolute path to be relative to save_dir before building
    # the proto; already-relative paths are passed through unchanged.
    if os.path.isabs(model_checkpoint_path):
      rel_model_checkpoint_path = os.path.relpath(
          model_checkpoint_path, save_dir)
    else:
      rel_model_checkpoint_path = model_checkpoint_path
    rel_all_model_checkpoint_paths = []
    for p in all_model_checkpoint_paths:
      if os.path.isabs(p):
        rel_all_model_checkpoint_paths.append(os.path.relpath(p, save_dir))
      else:
        rel_all_model_checkpoint_paths.append(p)
    ckpt = generate_checkpoint_state_proto(
        save_dir,
        rel_model_checkpoint_path,
        all_model_checkpoint_paths=rel_all_model_checkpoint_paths,
        all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,
        last_preserved_timestamp=last_preserved_timestamp)
  else:
    ckpt = generate_checkpoint_state_proto(
        save_dir,
        model_checkpoint_path,
        all_model_checkpoint_paths=all_model_checkpoint_paths,
        all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,
        last_preserved_timestamp=last_preserved_timestamp)
  # Refuse to let a checkpoint overwrite the state file itself.
  if coord_checkpoint_filename == ckpt.model_checkpoint_path:
    raise RuntimeError("Save path '%s' conflicts with path used for "
                       "checkpoint state.  Please use a different save path." %
                       model_checkpoint_path)
  # Preventing potential read/write race condition by *atomically* writing to a
  # file.
  file_io.atomic_write_string_to_file(coord_checkpoint_filename,
                                      text_format.MessageToString(ckpt))
@tf_export("train.get_checkpoint_state")
def get_checkpoint_state(checkpoint_dir, latest_filename=None):
  """Returns CheckpointState proto from the "checkpoint" file.
  If the "checkpoint" file contains a valid CheckpointState
  proto, returns it.
  Args:
    checkpoint_dir: The directory of checkpoints.
    latest_filename: Optional name of the checkpoint file.  Default to
      'checkpoint'.
  Returns:
    A CheckpointState if the state was available, None
    otherwise.
  Raises:
    ValueError: if the checkpoint read doesn't have model_checkpoint_path set.
  """
  # Fix: the previous version kept a never-assigned file handle `f = None`
  # and a dead `finally: if f: f.close()` block -- all reads go through
  # file_io.read_file_to_string, so no handle is ever opened. Removed.
  ckpt = None
  coord_checkpoint_filename = _GetCheckpointFilename(checkpoint_dir,
                                                     latest_filename)
  try:
    # Check that the file exists before opening it to avoid
    # many lines of errors from colossus in the logs.
    if file_io.file_exists(coord_checkpoint_filename):
      file_content = file_io.read_file_to_string(
          coord_checkpoint_filename)
      ckpt = CheckpointState()
      text_format.Merge(file_content, ckpt)
      if not ckpt.model_checkpoint_path:
        raise ValueError("Invalid checkpoint state loaded from "
                         + checkpoint_dir)
      # For relative model_checkpoint_path and all_model_checkpoint_paths,
      # prepend checkpoint_dir.
      if not os.path.isabs(ckpt.model_checkpoint_path):
        ckpt.model_checkpoint_path = os.path.join(checkpoint_dir,
                                                  ckpt.model_checkpoint_path)
      for i, p in enumerate(ckpt.all_model_checkpoint_paths):
        if not os.path.isabs(p):
          ckpt.all_model_checkpoint_paths[i] = os.path.join(checkpoint_dir, p)
  except errors.OpError as e:
    # It's ok if the file cannot be read
    logging.warning("%s: %s", type(e).__name__, e)
    logging.warning("%s: Checkpoint ignored", coord_checkpoint_filename)
    return None
  except text_format.ParseError as e:
    logging.warning("%s: %s", type(e).__name__, e)
    logging.warning("%s: Checkpoint ignored", coord_checkpoint_filename)
    return None
  return ckpt
def _prefix_to_checkpoint_path(prefix, format_version):
  """Returns the pathname of a checkpoint file, given the checkpoint prefix.

  For V1 checkpoint, simply returns the prefix itself (the data file).  For
  V2, returns the pathname to the index file.

  Args:
    prefix: a string, the prefix of a checkpoint.
    format_version: the checkpoint format version that corresponds to the
      prefix.
  Returns:
    The pathname of a checkpoint file, taking into account the checkpoint
    format version.
  """
  if format_version != saver_pb2.SaverDef.V2:
    # V1: the prefix is already the data file.
    return prefix
  # V2: the index file is what identifies a checkpoint on disk.
  return prefix + ".index"
@tf_export("train.latest_checkpoint")
def latest_checkpoint(checkpoint_dir, latest_filename=None):
  """Finds the filename of latest saved checkpoint file.

  Args:
    checkpoint_dir: Directory where the variables were saved.
    latest_filename: Optional name for the protocol buffer file that
      contains the list of most recent checkpoint filenames.
      See the corresponding argument to `Saver.save()`.

  Returns:
    The full path to the latest checkpoint or `None` if no checkpoint was
    found.
  """
  # Pick the latest checkpoint based on checkpoint state.
  state = get_checkpoint_state(checkpoint_dir, latest_filename)
  if not state or not state.model_checkpoint_path:
    return None
  prefix = state.model_checkpoint_path
  # Look on disk for either a V2 path or a V1 path, preferring V2.
  for version in (saver_pb2.SaverDef.V2, saver_pb2.SaverDef.V1):
    pathname = _prefix_to_checkpoint_path(prefix, version)
    if file_io.get_matching_files(pathname):
      return prefix
  logging.error("Couldn't match files for checkpoint %s", prefix)
  return None
@tf_export("train.checkpoint_exists")
def checkpoint_exists(checkpoint_prefix):
  """Checks whether a V1 or V2 checkpoint exists with the specified prefix.

  This is the recommended way to check if a checkpoint exists, since it takes
  into account the naming difference between V1 and V2 formats.

  Args:
    checkpoint_prefix: the prefix of a V1 or V2 checkpoint, with V2 taking
      priority.  Typically the result of `Saver.save()` or that of
      `tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or
      V1/V2.
  Returns:
    A bool, true iff a checkpoint referred to by `checkpoint_prefix` exists.
  """
  # Check the V2 index file first, then fall back to a V1 data-file match.
  v2_pathname = _prefix_to_checkpoint_path(checkpoint_prefix,
                                           saver_pb2.SaverDef.V2)
  return bool(file_io.get_matching_files(v2_pathname) or
              file_io.get_matching_files(checkpoint_prefix))
@tf_export("train.get_checkpoint_mtimes")
def get_checkpoint_mtimes(checkpoint_prefixes):
  """Returns the mtimes (modification timestamps) of the checkpoints.

  Globs for the checkpoints pointed to by `checkpoint_prefixes`.  If the files
  exist, collect their mtime.  Both V2 and V1 checkpoints are considered, in
  that priority.

  This is the recommended way to get the mtimes, since it takes into account
  the naming difference between V1 and V2 formats.

  Args:
    checkpoint_prefixes: a list of checkpoint paths, typically the results of
      `Saver.save()` or those of `tf.train.latest_checkpoint()`, regardless of
      sharded/non-sharded or V1/V2.
  Returns:
    A list of mtimes (as floats, in seconds since the Epoch) of the found
    checkpoints.
  """
  mtimes = []

  def match_maybe_append(pathname):
    # Appends the mtime of the first glob match (if any); returns whether a
    # match was found.
    fnames = file_io.get_matching_files(pathname)
    if fnames:
      # stat() reports nanoseconds; convert to seconds since the Epoch.
      mtimes.append(file_io.stat(fnames[0]).mtime_nsec / 1e9)
      return True
    return False

  for checkpoint_prefix in checkpoint_prefixes:
    # Tries V2's metadata file first.
    pathname = _prefix_to_checkpoint_path(checkpoint_prefix,
                                          saver_pb2.SaverDef.V2)
    if match_maybe_append(pathname):
      continue
    # Otherwise, tries V1, where the prefix is the complete pathname.
    match_maybe_append(checkpoint_prefix)
  return mtimes
@tf_export("train.remove_checkpoint")
def remove_checkpoint(checkpoint_prefix,
                      checkpoint_format_version=saver_pb2.SaverDef.V2,
                      meta_graph_suffix="meta"):
  """Removes a checkpoint given by `checkpoint_prefix`.

  Args:
    checkpoint_prefix: The prefix of a V1 or V2 checkpoint. Typically the
      result of `Saver.save()` or that of `tf.train.latest_checkpoint()`,
      regardless of sharded/non-sharded or V1/V2.
    checkpoint_format_version: `SaverDef.CheckpointFormatVersion`, defaults to
      `SaverDef.V2`.
    meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.
  """
  # The MetaGraphDef file is removed regardless of format version.
  _delete_file_if_exists(
      meta_graph_filename(checkpoint_prefix, meta_graph_suffix))
  if checkpoint_format_version == saver_pb2.SaverDef.V2:
    # V2 has a metadata (index) file plus one or more sharded data files.
    for suffix in (".index", ".data-?????-of-?????"):
      _delete_file_if_exists(checkpoint_prefix + suffix)
  else:
    # V1, Legacy.  Exact match on the data file.
    _delete_file_if_exists(checkpoint_prefix)
def _delete_file_if_exists(filespec):
  """Deletes every file whose name matches the glob pattern `filespec`."""
  matches = file_io.get_matching_files(filespec)
  for pathname in matches:
    file_io.delete_file(pathname)
def meta_graph_filename(checkpoint_filename, meta_graph_suffix="meta"):
  """Returns the meta graph filename.

  Args:
    checkpoint_filename: Name of the checkpoint file.
    meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.

  Returns:
    MetaGraph file name.
  """
  # A sharded checkpoint name looks like model.ckpt-step#-?????-of-shard#,
  # e.g. model.ckpt-123456-?????-of-00005 or model.ckpt-123456-00001-of-00002.
  # Strip any such shard suffix before appending the meta graph suffix.
  stem = re.sub(r"-[\d\?]+-of-\d+$", "", checkpoint_filename)
  return "%s.%s" % (stem, meta_graph_suffix)
# TODO(allenl): Allow tf.keras.Model instances in the constructor directly?
class CheckpointManager(object):
  """Deletes old checkpoints.

  Example usage:
  ```python
  import tensorflow as tf
  checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
  manager = tf.contrib.checkpoint.CheckpointManager(
      checkpoint, directory="/tmp/model", max_to_keep=5)
  status = checkpoint.restore(manager.latest_checkpoint)
  while True:
    # train
    manager.save()
  ```

  `CheckpointManager` preserves its own state across instantiations (see the
  `__init__` documentation for details). Only one should be active in a
  particular directory at a time.
  """

  def __init__(self, checkpoint, directory,
               max_to_keep, keep_checkpoint_every_n_hours=None):
    """Configure a `CheckpointManager` for use in `directory`.

    If a `CheckpointManager` was previously used in `directory`, its
    state will be restored. This includes the list of managed checkpoints and
    the timestamp bookkeeping necessary to support
    `keep_checkpoint_every_n_hours`. The behavior of the new `CheckpointManager`
    will be the same as the previous `CheckpointManager`, including cleaning up
    existing checkpoints if appropriate.

    Checkpoints are only considered for deletion just after a new checkpoint has
    been added. At that point, `max_to_keep` checkpoints will remain in an
    "active set". Once a checkpoint is preserved by
    `keep_checkpoint_every_n_hours` it will not be deleted by this
    `CheckpointManager` or any future `CheckpointManager` instantiated in
    `directory` (regardless of the new setting of
    `keep_checkpoint_every_n_hours`). The `max_to_keep` checkpoints in the
    active set may be deleted by this `CheckpointManager` or a future
    `CheckpointManager` instantiated in `directory` (subject to its
    `max_to_keep` and `keep_checkpoint_every_n_hours` settings).

    Args:
      checkpoint: The `tf.train.Checkpoint` instance to save and manage
        checkpoints for.
      directory: The path to a directory in which to write checkpoints. A
        special file named "checkpoint" is also written to this directory (in a
        human-readable text format) which contains the state of the
        `CheckpointManager`.
      max_to_keep: An integer, the number of checkpoints to keep. Unless
        preserved by `keep_checkpoint_every_n_hours`, checkpoints will be
        deleted from the active set, oldest first, until only `max_to_keep`
        checkpoints remain. If `None`, no checkpoints are deleted and everything
        stays in the active set. Note that `max_to_keep=None` will keep all
        checkpoint paths in memory and in the checkpoint state protocol buffer
        on disk.
      keep_checkpoint_every_n_hours: Upon removal from the active set, a
        checkpoint will be preserved if it has been at least
        `keep_checkpoint_every_n_hours` since the last preserved checkpoint. The
        default setting of `None` does not preserve any checkpoints in this way.

    Raises:
      ValueError: If `max_to_keep` is not a positive integer.
    """
    self._checkpoint = checkpoint
    self._save_counter_assign = None
    if max_to_keep is not None and max_to_keep <= 0:
      raise ValueError(
          ("Expected a positive integer or `None` for `max_to_keep`, "
           "got %d.")
          % (max_to_keep,))
    self._max_to_keep = max_to_keep
    self._keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours
    self._directory = directory
    self._checkpoint_prefix = os.path.join(directory, "ckpt")
    recovered_state = get_checkpoint_state(directory)
    current_clock = time.time()
    # Ordered oldest-first; maps checkpoint path -> creation timestamp.
    self._maybe_delete = collections.OrderedDict()
    if recovered_state is None:
      self._latest_checkpoint = None
      # Set the clock back slightly to avoid race conditions when quickly
      # re-creating a CheckpointManager.
      self._last_preserved_timestamp = current_clock - 1.
    else:
      self._latest_checkpoint = recovered_state.model_checkpoint_path
      self._last_preserved_timestamp = recovered_state.last_preserved_timestamp
      if current_clock < self._last_preserved_timestamp:
        # Time seems to have reversed itself. In addition to this warning, we'll
        # min() saved checkpoint timestamps with the current time to ensure that
        # old checkpoints don't get deleted accidentally.
        logging.warning(
            ("time.time() returned a value %f seconds behind the last "
             "preserved checkpoint timestamp.")
            % (self._last_preserved_timestamp - current_clock,))
        self._last_preserved_timestamp = current_clock
      all_timestamps = recovered_state.all_model_checkpoint_timestamps
      all_paths = recovered_state.all_model_checkpoint_paths
      del recovered_state  # Uses modified values from now on
      if not all_timestamps:
        all_timestamps = [self._last_preserved_timestamp] * len(all_paths)
      for filename, timestamp in zip(all_paths, all_timestamps):
        timestamp = min(timestamp, current_clock)
        if timestamp > self._last_preserved_timestamp:
          self._maybe_delete[filename] = timestamp

  @property
  def latest_checkpoint(self):
    """The prefix of the most recent checkpoint in `directory`.

    Equivalent to `tf.train.latest_checkpoint(directory)` where `directory` is
    the constructor argument to `CheckpointManager`.

    Suitable for passing to `tf.train.Checkpoint.restore` to resume training.

    Returns:
      The checkpoint prefix. If there are no checkpoints, returns `None`.
    """
    return self._latest_checkpoint

  @property
  def checkpoints(self):
    """A list of managed checkpoints.

    Note that checkpoints saved due to `keep_checkpoint_every_n_hours` will not
    show up in this list (to avoid ever-growing filename lists).

    Returns:
      A list of filenames, sorted from oldest to newest.
    """
    return list(self._maybe_delete.keys())

  def _sweep(self):
    """Deletes or preserves managed checkpoints."""
    if not self._max_to_keep:
      # Does not update self._last_preserved_timestamp, since everything is kept
      # in the active set.
      return
    while len(self._maybe_delete) > self._max_to_keep:
      filename, timestamp = self._maybe_delete.popitem(last=False)
      # Even if we're keeping this checkpoint due to
      # keep_checkpoint_every_n_hours, we won't reference it to avoid
      # infinitely-growing CheckpointState protos.
      if (self._keep_checkpoint_every_n_hours
          and (timestamp - self._keep_checkpoint_every_n_hours * 3600.
               >= self._last_preserved_timestamp)):
        self._last_preserved_timestamp = timestamp
        continue
      remove_checkpoint(filename)

  def _record_state(self):
    """Saves the `CheckpointManager`'s state in `directory`."""
    filenames, timestamps = zip(*self._maybe_delete.items())
    update_checkpoint_state_internal(
        self._directory,
        model_checkpoint_path=self.latest_checkpoint,
        all_model_checkpoint_paths=filenames,
        all_model_checkpoint_timestamps=timestamps,
        last_preserved_timestamp=self._last_preserved_timestamp,
        save_relative_paths=True)

  @property
  def _prefix(self):
    """A common prefix for all checkpoints saved with this manager.

    For example, if `directory` (a constructor argument) were `"/tmp/tf-model"`,
    `prefix` would be `"/tmp/tf-model/ckpt"` and checkpoints would generally be
    numbered `"/tmp/tf-model/ckpt-1"`, `"/tmp/tf-model/ckpt-2"`, and so on. Each
    checkpoint has several associated files
    (e.g. `"/tmp/tf-model/ckpt-2.index"`).

    Returns:
      A string prefix.
    """
    return self._checkpoint_prefix

  def save(self, session=None, checkpoint_number=None):
    """Creates a new checkpoint and manages it.

    Args:
      session: The session to evaluate variables in. Ignored when executing
        eagerly. If not provided when graph building, the default session is
        used.
      checkpoint_number: An optional integer, or an integer-dtype `Variable` or
        `Tensor`, used to number the checkpoint. If `None` (default),
        checkpoints are numbered using `checkpoint.save_counter`. Even if
        `checkpoint_number` is provided, `save_counter` is still incremented. A
        user-provided `checkpoint_number` is not incremented even if it is a
        `Variable`.

    Returns:
      The path to the new checkpoint. It is also recorded in the `checkpoints`
      and `latest_checkpoint` properties.
    """
    # Save counter logic duplicated from tf.train.Checkpoint, soon to diverge
    # slightly with a custom numbering option.
    if context.executing_eagerly():
      save_counter = self._checkpoint.save_counter
      save_counter.assign_add(1)
    else:
      if session is None:
        session = ops.get_default_session()

      def _initializing_creator(next_creator, **kwargs):
        """Initialize the save counter if it has been newly created."""
        v = next_creator(**kwargs)
        session.run(v.initializer)
        return v

      with variable_scope.variable_creator_scope(_initializing_creator):
        save_counter = self._checkpoint.save_counter
      if self._save_counter_assign is None:
        self._save_counter_assign = save_counter.assign_add(1, read_value=False)
      session.run(self._save_counter_assign)
    if checkpoint_number is None:
      checkpoint_number = save_counter
    if not isinstance(checkpoint_number, compat.integral_types):
      checkpoint_number = training_util.global_step(
          sess=session, global_step_tensor=checkpoint_number)
    prefix = "%s-%d" % (self._prefix, checkpoint_number)
    save_path = self._checkpoint.write(prefix)
    timestamp = time.time()
    # If this is an overwritten checkpoint we were previously tracking, delete
    # and reinsert it to make sure it goes to the end of the queue.
    if save_path in self._maybe_delete:
      del self._maybe_delete[save_path]
    self._maybe_delete[save_path] = timestamp
    self._latest_checkpoint = save_path
    self._sweep()
    self._record_state()
    return save_path
| apache-2.0 |
selfcommit/gaedav | pyxml/dom/html/HTMLAreaElement.py | 4 | 2963 | ########################################################################
#
# File Name: HTMLAreaElement
#
#
### This file is automatically generated by GenerateHtml.py.
### DO NOT EDIT!
"""
WWW: http://4suite.com/4DOM e-mail: support@4suite.com
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
import string
from pyxml.dom import Node
from pyxml.dom.html.HTMLElement import HTMLElement
class HTMLAreaElement(HTMLElement):
    """DOM HTMLAreaElement: a client-side image-map AREA element.

    Exposes the AREA attributes (accesskey, alt, coords, href, nohref,
    shape, tabindex, target) as computed properties through the
    _readComputedAttrs/_writeComputedAttrs tables below.
    """

    def __init__(self, ownerDocument, nodeName="AREA"):
        HTMLElement.__init__(self, ownerDocument, nodeName)
    ### Attribute Methods ###
    # Each _get_/_set_ pair maps a DOM property to the underlying uppercase
    # HTML attribute on this element.
    def _get_accessKey(self):
        return self.getAttribute("ACCESSKEY")
    def _set_accessKey(self, value):
        self.setAttribute("ACCESSKEY", value)
    def _get_alt(self):
        return self.getAttribute("ALT")
    def _set_alt(self, value):
        self.setAttribute("ALT", value)
    def _get_coords(self):
        return self.getAttribute("COORDS")
    def _set_coords(self, value):
        self.setAttribute("COORDS", value)
    def _get_href(self):
        return self.getAttribute("HREF")
    def _set_href(self, value):
        self.setAttribute("HREF", value)
    # NOHREF is a boolean attribute: presence means true, absence false.
    def _get_noHref(self):
        return self.hasAttribute("NOHREF")
    def _set_noHref(self, value):
        if value:
            self.setAttribute("NOHREF", "NOHREF")
        else:
            self.removeAttribute("NOHREF")
    # The stored SHAPE value is normalized to capitalized form on read.
    def _get_shape(self):
        return string.capitalize(self.getAttribute("SHAPE"))
    def _set_shape(self, value):
        self.setAttribute("SHAPE", value)
    # TABINDEX is exposed as an int; missing/empty attribute reads as 0.
    def _get_tabIndex(self):
        value = self.getAttribute("TABINDEX")
        if value:
            return int(value)
        return 0
    def _set_tabIndex(self, value):
        self.setAttribute("TABINDEX", str(value))
    def _get_target(self):
        return self.getAttribute("TARGET")
    def _set_target(self, value):
        self.setAttribute("TARGET", value)

    ### Attribute Access Mappings ###
    # Dispatch tables mapping property names to the accessor functions above;
    # HTMLElement uses them to implement computed attribute access.
    _readComputedAttrs = HTMLElement._readComputedAttrs.copy()
    _readComputedAttrs.update({
         "accessKey" : _get_accessKey,
         "alt" : _get_alt,
         "coords" : _get_coords,
         "href" : _get_href,
         "noHref" : _get_noHref,
         "shape" : _get_shape,
         "tabIndex" : _get_tabIndex,
         "target" : _get_target
      })
    _writeComputedAttrs = HTMLElement._writeComputedAttrs.copy()
    _writeComputedAttrs.update({
         "accessKey" : _set_accessKey,
         "alt" : _set_alt,
         "coords" : _set_coords,
         "href" : _set_href,
         "noHref" : _set_noHref,
         "shape" : _set_shape,
         "tabIndex" : _set_tabIndex,
         "target" : _set_target
      })
    # Python 2 idioms (list-returning filter, dict.has_key, list + keys()):
    # read-only attributes are the inherited/read ones without a setter here.
    _readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: not m.has_key(k),
                     HTMLElement._readOnlyAttrs + _readComputedAttrs.keys())
| lgpl-2.1 |
akarki15/mozillians | vendor-local/lib/python/tablib/packages/odf/dc.py | 100 | 2223 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import DCNS
from element import Element
# Autogenerated
def Creator(**args):
    """Build a Dublin Core <dc:creator> element."""
    return Element(qname=(DCNS, 'creator'), **args)
def Date(**args):
    """Build a Dublin Core <dc:date> element."""
    return Element(qname=(DCNS, 'date'), **args)
def Description(**args):
    """Build a Dublin Core <dc:description> element."""
    return Element(qname=(DCNS, 'description'), **args)
def Language(**args):
    """Build a Dublin Core <dc:language> element."""
    return Element(qname=(DCNS, 'language'), **args)
def Subject(**args):
    """Build a Dublin Core <dc:subject> element."""
    return Element(qname=(DCNS, 'subject'), **args)
def Title(**args):
    """Build a Dublin Core <dc:title> element."""
    return Element(qname=(DCNS, 'title'), **args)
# The following complete the Dublin Core elements, but there is no
# guarantee a compliant implementation of OpenDocument will preserve
# these elements
#def Contributor(**args):
# return Element(qname = (DCNS,'contributor'), **args)
#def Coverage(**args):
# return Element(qname = (DCNS,'coverage'), **args)
#def Format(**args):
# return Element(qname = (DCNS,'format'), **args)
#def Identifier(**args):
# return Element(qname = (DCNS,'identifier'), **args)
#def Publisher(**args):
# return Element(qname = (DCNS,'publisher'), **args)
#def Relation(**args):
# return Element(qname = (DCNS,'relation'), **args)
#def Rights(**args):
# return Element(qname = (DCNS,'rights'), **args)
#def Source(**args):
# return Element(qname = (DCNS,'source'), **args)
#def Type(**args):
# return Element(qname = (DCNS,'type'), **args)
| bsd-3-clause |
igemsoftware2017/USTC-Software-2017 | tests/notices/test_send.py | 1 | 1024 | from rest_framework.test import APITestCase
from biohub.notices import tool
from biohub.accounts.models import User
class Test(APITestCase):
    """Exercise the notices Dispatcher's template rendering and sending."""

    def setUp(self):
        # Two users to act as notice recipients.
        self.me = User.objects.create_test_user('me')
        self.you = User.objects.create_test_user('you')
        # Dispatcher bound to the 'test' notice category.
        self.dispatcher = tool.Dispatcher('test')
    def test_basic_send(self):
        # `user` and `category` template variables are filled in by send().
        notice = self.dispatcher.send(
            self.me,
            'User {{user.username}} {{category}}')
        self.assertEqual(notice.message, 'User %s test' % self.me.username)
        self.assertEqual('test', notice.category)
    def test_url(self):
        # The `url` template filter renders as [[title]]((url-name))((arg)).
        notice = self.dispatcher.send(
            self.me,
            '{{"title"|url:user}}')
        self.assertEqual('[[title]]((user))((%s))' % self.me.username, notice.message)
    def test_group_send(self):
        # group_send renders the same template once per recipient, in order.
        notices = self.dispatcher.group_send(
            [self.me, self.you],
            '{{user.username}}')
        self.assertListEqual(['me', 'you'], [x.message for x in notices])
eerwitt/tensorflow | tensorflow/contrib/ndlstm/python/lstm2d.py | 69 | 6062 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A small library of functions dealing with LSTMs applied to images.
Tensors in this library generally have the shape (num_images, height, width,
depth).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.ndlstm.python import lstm1d
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope
def _shape(tensor):
"""Get the shape of a tensor as an int list."""
return tensor.get_shape().as_list()
def images_to_sequence(tensor):
  """Convert a batch of images into a batch of sequences.

  Args:
    tensor: a (num_images, height, width, depth) tensor

  Returns:
    (width, num_images*height, depth) sequence tensor
  """
  batches, height, width, depth = _shape(tensor)
  # Bring the width axis to the front, then fold height into the batch axis.
  width_major = array_ops.transpose(tensor, [2, 0, 1, 3])
  return array_ops.reshape(width_major, [width, batches * height, depth])
def sequence_to_images(tensor, num_image_batches):
  """Convert a batch of sequences into a batch of images.

  Args:
    tensor: (num_steps, num_batches, depth) sequence tensor
    num_image_batches: the number of image batches

  Returns:
    (num_images, height, width, depth) tensor
  """
  width, num_batches, depth = _shape(tensor)
  # Unfold the combined batch axis back into (image batch, height).
  height = num_batches // num_image_batches
  unfolded = array_ops.reshape(tensor,
                               [width, num_image_batches, height, depth])
  return array_ops.transpose(unfolded, [1, 2, 0, 3])
def horizontal_lstm(images, num_filters_out, scope=None):
  """Run an LSTM bidirectionally over all the rows of each image.

  Args:
    images: (num_images, height, width, depth) tensor
    num_filters_out: output depth
    scope: optional scope name

  Returns:
    (num_images, height, width, num_filters_out) tensor, where
    num_steps is width and new num_batches is num_image_batches * height
  """
  with variable_scope.variable_scope(scope, "HorizontalLstm", [images]):
    batch_size = _shape(images)[0]
    sequence = images_to_sequence(images)
    # Split the output depth between the two scan directions.
    half_depth = num_filters_out // 2
    with variable_scope.variable_scope("lr"):
      forward = lstm1d.ndlstm_base(sequence, half_depth)
    with variable_scope.variable_scope("rl"):
      backward = lstm1d.ndlstm_base(
          sequence, num_filters_out - half_depth, reverse=1)
    combined = array_ops.concat([forward, backward], 2)
    output = sequence_to_images(combined, batch_size)
  return output
def separable_lstm(images, num_filters_out, nhidden=None, scope=None):
  """Run bidirectional LSTMs first horizontally then vertically.

  Args:
    images: (num_images, height, width, depth) tensor
    num_filters_out: output layer depth
    nhidden: hidden layer depth (defaults to num_filters_out)
    scope: optional scope name

  Returns:
    (num_images, height, width, num_filters_out) tensor
  """
  with variable_scope.variable_scope(scope, "SeparableLstm", [images]):
    if nhidden is None:
      nhidden = num_filters_out
    horizontal = horizontal_lstm(images, nhidden)
    with variable_scope.variable_scope("vertical"):
      # Swap height and width so the second pass scans the columns.
      swapped = array_ops.transpose(horizontal, [0, 2, 1, 3])
      vertical = horizontal_lstm(swapped, num_filters_out)
    # Swap the axes back to (num_images, height, width, depth) order.
    output = array_ops.transpose(vertical, [0, 2, 1, 3])
  return output
def reduce_to_sequence(images, num_filters_out, scope=None):
  """Reduce an image to a sequence by scanning an LSTM vertically.

  Args:
    images: (num_images, height, width, depth) tensor
    num_filters_out: output layer depth
    scope: optional scope name

  Returns:
    A (width, num_images, num_filters_out) sequence.
  """
  with variable_scope.variable_scope(scope, "ReduceToSequence", [images]):
    batch_size, height, width, depth = _shape(images)
    # Make height the leading (time) axis and fold width into the batch.
    rows_first = array_ops.transpose(images, [1, 0, 2, 3])
    sequence = array_ops.reshape(rows_first,
                                 [height, batch_size * width, depth])
    final_states = lstm1d.sequence_to_final(sequence, num_filters_out)
    output = array_ops.reshape(final_states,
                               [batch_size, width, num_filters_out])
  return output
def reduce_to_final(images, num_filters_out, nhidden=None, scope=None):
  """Reduce an image to a final state by running two LSTMs.

  Args:
    images: (num_images, height, width, depth) tensor
    num_filters_out: output layer depth
    nhidden: hidden layer depth (defaults to num_filters_out)
    scope: optional scope name

  Returns:
    A (num_images, num_filters_out) batch.
  """
  with variable_scope.variable_scope(scope, "ReduceToFinal", [images]):
    nhidden = nhidden or num_filters_out
    batch_size, height, width, depth = _shape(images)
    # First pass: scan vertically over each column of each image.
    rows_first = array_ops.transpose(images, [1, 0, 2, 3])
    sequence = array_ops.reshape(rows_first,
                                 [height, batch_size * width, depth])
    with variable_scope.variable_scope("reduce1"):
      vertical_final = lstm1d.sequence_to_final(sequence, nhidden)
    # Second pass: scan horizontally over the per-column summaries.
    columns = array_ops.reshape(vertical_final,
                                [batch_size, width, nhidden])
    hidden = array_ops.transpose(columns, [1, 0, 2])
    with variable_scope.variable_scope("reduce2"):
      output = lstm1d.sequence_to_final(hidden, num_filters_out)
  return output
| apache-2.0 |
strahlex/machinekit | share/gscreen/skins/industrial/industrial_handler.py | 20 | 17237 | import hal
import gtk
import gladevcp.makepins # needed for the dialog's calulator widget
import pango
_MAN = 0;_MDI = 1;_AUTO = 2;_LOCKTOGGLE = 1
# This is a handler file for using Gscreen's infrastructure
# to load a completely custom glade screen
# The only things that really matters is that it's saved as a GTK builder project,
# the toplevel window is caller window1 (The default name) and you connect a destroy
# window signal else you can't close down linuxcnc
class HandlerClass:
# This will be pretty standard to gain access to everything
# emc is for control and status of linuxcnc
# data is important data from gscreen and linuxcnc
# widgets is all the widgets from the glade files
# gscreen is for access to gscreens methods
    def __init__(self, halcomp,builder,useropts,gscreen):
        # Keep shortcuts to Gscreen's main objects:
        #   emc     - control and status of linuxcnc
        #   data    - important shared data from gscreen and linuxcnc
        #   widgets - all the widgets from the glade files
        #   gscreen - access to gscreen's methods
        self.emc = gscreen.emc
        self.data = gscreen.data
        self.widgets = gscreen.widgets
        self.gscreen = gscreen
# This is a new method that calls a gscreen method to toggle the DRO units
# Gscreen's regular unit button saves the state
# for startup, This one just changes it for the session
def on_metric_select_clicked(self,widget):
data = (self.data.dro_units -1) * -1
self.gscreen.set_dro_units(data,False)
for i in ("1","2","3"):
for letter in self.data.axis_list:
axis = "dro_%s%s"% (letter,i)
try:
self.widgets[axis].set_property("display_units_mm",data)
except:
pass
    def on_diameter_mode_pressed(self, widget):
        """Switch the X-axis DRO widgets between radius and diameter mode."""
        # The toggle widget's active state selects diameter (True) or
        # radius (False) display.
        data = widget.get_active()
        print "switch diam mode",data
        self.gscreen.set_diameter_mode(data)
        for i in ("1","2","3"):
            axis = "dro_x%s"% (i)
            if data:
                self.widgets[axis].set_to_diameter()
            else:
                self.widgets[axis].set_to_radius()
# This is a new method for our button
# we selected this method name in the glade file as a signal callback
    def on_estop_clicked(self,*args):
        """Toggle the estop state.

        If currently estopped, reset the estop; otherwise power the machine
        off and estop it.  Returns True to stop further signal handling.
        """
        print "estop"
        if self.data.estopped:
            self.emc.estop_reset(1)
            self.gscreen.add_alarm_entry("Machine Estop Reset")
        else:
            self.emc.machine_off(1)
            self.emc.estop(1)
            self.widgets.on_label.set_text("Machine Off")
            self.gscreen.add_alarm_entry("Machine Estopped and Powered Off")
        return True
# This is a new method for our new button
# we selected this method name in the glade file as a signal callback
    def on_machine_state_clicked(self,widget):
        """Toggle machine power on/off; does nothing while estopped."""
        if self.data.estopped:
            # Cannot power the machine while estopped.
            return
        elif not self.data.machine_on:
            self.emc.machine_on(1)
            self.widgets.on_label.set_text("Machine On")
            self.gscreen.add_alarm_entry("Machine powered on")
        else:
            self.emc.machine_off(1)
            self.widgets.on_label.set_text("Machine Off")
            self.gscreen.add_alarm_entry("Machine powered off")
# display the main tab and set the mode to setup
    def on_setup_button_clicked(self,widget):
        """Display the main tab and make manual (setup) the active mode."""
        self.widgets.notebook_main.set_current_page(0)
        # mode_order's first entry becomes the current mode.
        self.data.mode_order = _MAN,_MDI,_AUTO
        label = self.data.mode_labels
        self.widgets.button_mode.set_label(label[self.data.mode_order[0]])
        self.gscreen.mode_changed(self.data.mode_order[0])
        self.toggle_modes(widget)
# display the main tab and set the mode to run
    def on_run_button_clicked(self,widget):
        """Display the main tab and make auto (run) the active mode."""
        self.widgets.notebook_main.set_current_page(0)
        # mode_order's first entry becomes the current mode.
        self.data.mode_order = _AUTO,_MAN,_MDI
        label = self.data.mode_labels
        self.widgets.button_mode.set_label(label[self.data.mode_order[0]])
        self.gscreen.mode_changed(self.data.mode_order[0])
        self.toggle_modes(widget)
# display the main tab and set the mode to MDI
    def on_mdi_button_clicked(self,widget):
        """Display the main tab and make MDI the active mode."""
        self.widgets.notebook_main.set_current_page(0)
        # mode_order's first entry becomes the current mode.
        self.data.mode_order = _MDI,_MAN,_AUTO
        label = self.data.mode_labels
        self.widgets.button_mode.set_label(label[self.data.mode_order[0]])
        self.gscreen.mode_changed(self.data.mode_order[0])
        self.toggle_modes(widget)
# This is called when the system button is toggled
# If the page is not showing it displays a unlock code dialog
# unless you have already unlocked the page.
# if that returns true then the page is shown
# otherwise the button is untoggled and the page is not shown
# if you press the system buttonn when the system page is already showing
# it will relock the page
def on_system_button_clicked(self,widget):
if self.widgets.notebook_main.get_current_page() == 4:
self.gscreen.block("system_button")
widget.set_active(True)
self.gscreen.unblock("system_button")
global _LOCKTOGGLE
_LOCKTOGGLE=1
self.widgets.system_button.set_label(" System\n(Locked)")
self.gscreen.add_alarm_entry("System page re-locked")
self.on_setup_button_clicked(self.widgets.setup_button)
return
if not self.system_dialog():
self.gscreen.block("system_button")
widget.set_active(False)
self.gscreen.unblock("system_button")
return
self.widgets.notebook_main.set_current_page(4)
self.toggle_modes(widget)
self.widgets.system_button.set_label(" System\n ")
# Display the tooledit tab
def on_tooledit_button_clicked(self,widget):
self.widgets.notebook_main.set_current_page(3)
self.toggle_modes(widget)
# Display the offsetpage tab
def on_offsetpage_button_clicked(self,widget):
self.widgets.notebook_main.set_current_page(2)
self.toggle_modes(widget)
# This toggles the buttons so only one is presses at any one time
def toggle_modes(self,widget):
temp = "setup_button","mdi_button","run_button","tooledit_button","system_button","offsetpage_button"
for i in temp:
state = False
if self.widgets[i] == widget: state = True
self.gscreen.block(i)
self.widgets[i].set_active(state)
self.gscreen.unblock(i)
def on_button_edit_clicked(self,widget):
state = widget.get_active()
if not state:
self.gscreen.edited_gcode_check()
self.widgets.notebook_main.set_current_page(0)
self.widgets.notebook_main.set_show_tabs(not (state))
self.gscreen.edit_mode(state)
if not state and self.widgets.button_full_view.get_active():
self.gscreen.set_full_graphics_view(True)
if self.data.edit_mode:
self.widgets.mode_select_box.hide()
self.widgets.search_box.show()
self.widgets.button_edit.set_label("Exit\nEdit")
else:
self.widgets.mode_select_box.show()
self.widgets.search_box.hide()
self.widgets.button_edit.set_label("Edit")
self.widgets.notebook_main.set_show_tabs(False)
# This dialog is for unlocking the system tab
# The unlock code number is defined at the top of the page
def system_dialog(self):
global _LOCKTOGGLE
if _LOCKTOGGLE == 0: return True
dialog = gtk.Dialog("Enter System Unlock Code",
self.widgets.window1,
gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,
gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))
label = gtk.Label("Enter System Unlock Code")
label.modify_font(pango.FontDescription("sans 20"))
calc = gladevcp.Calculator()
dialog.vbox.pack_start(label)
dialog.vbox.add(calc)
calc.set_value("")
calc.set_property("font","sans 20")
calc.set_editable(True)
calc.entry.connect("activate", lambda w : dialog.emit('response',gtk.RESPONSE_ACCEPT))
dialog.parse_geometry("400x400")
dialog.set_decorated(False)
dialog.show_all()
calc.num_pad_only(True)
calc.integer_entry_only(True)
response = dialog.run()
code = calc.get_value()
dialog.destroy()
if response == gtk.RESPONSE_ACCEPT:
if code == int(self.data.unlock_code):
self.gscreen.add_alarm_entry("System page unlocked")
_LOCKTOGGLE = 0
return True
_LOCKTOGGLE = 1
return False
def on_abs_colorbutton_color_set(self,widget):
self.gscreen.set_abs_color()
color = self.data.abs_color
fg_color = pango.AttrForeground(color[0],color[1],color[2], 0, 11)
for i in self.data.axis_list:
axis = "dro_%s1"% i
attr = self.widgets[axis].get_attributes()
attr.insert(fg_color)
self.widgets[axis].set_attributes(attr)
def on_rel_colorbutton_color_set(self,widget):
self.gscreen.set_rel_color()
color = self.data.rel_color
fg_color = pango.AttrForeground(color[0],color[1],color[2], 0, 11)
for i in self.data.axis_list:
axis = "dro_%s2"% i
attr = self.widgets[axis].get_attributes()
attr.insert(fg_color)
self.widgets[axis].set_attributes(attr)
def on_dtg_colorbutton_color_set(self,widget):
self.gscreen.set_dtg_color()
color = self.data.dtg_color
fg_color = pango.AttrForeground(color[0],color[1],color[2], 0, 11)
for i in self.data.axis_list:
axis = "dro_%s3"% i
attr = self.widgets[axis].get_attributes()
attr.insert(fg_color)
self.widgets[axis].set_attributes(attr)
def on_hal_status_not_all_homed(self,widget,data):
temp =[]
for letter in self.data.axis_list:
axnum = "xyzabcuvws".index(letter)
if str(axnum) in data:
self.widgets["home_%s"%letter].set_text(" ")
temp.append(" %s"%letter.upper())
self.gscreen.add_alarm_entry(_("There are unhomed axes: %s"%temp))
def on_hal_status_axis_homed(self,widget,data):
for letter in self.data.axis_list:
axnum = "xyzabcuvws".index(letter)
if str(axnum) in data:
self.widgets["home_%s"%letter].set_text("*")
else:
self.widgets["home_%s"%letter].set_text(" ")
def on_show_dtg_pressed(self, widget):
data = widget.get_active()
self.widgets.dtg_vbox.set_visible(data)
self.gscreen.set_show_dtg(data)
# Connect to gscreens regular signals and add a couple more
def connect_signals(self,handlers):
self.gscreen.connect_signals(handlers)
# connect to handler file callbacks:
self.gscreen.widgets.metric_select.connect("clicked", self.on_metric_select_clicked)
self.gscreen.widgets.diameter_mode.connect("clicked", self.on_diameter_mode_pressed)
temp = "setup_button","mdi_button","run_button","tooledit_button","system_button","offsetpage_button"
for cb in temp:
i = "_sighandler_%s"% (cb)
self.data[i] = int(self.widgets[cb].connect("toggled", self["on_%s_clicked"%cb]))
self.widgets.hal_status.connect("not-all-homed",self.on_hal_status_not_all_homed)
self.widgets.hal_status.connect("homed",self.on_hal_status_axis_homed)
self.widgets.abs_colorbutton.connect("color-set", self.on_abs_colorbutton_color_set)
self.widgets.rel_colorbutton.connect("color-set", self.on_rel_colorbutton_color_set)
self.widgets.dtg_colorbutton.connect("color-set", self.on_dtg_colorbutton_color_set)
self.widgets.unlock_number.connect("value-changed",self.gscreen.on_unlock_number_value_changed)
self.widgets.show_dtg.connect("clicked", self.on_show_dtg_pressed)
# We don't want Gscreen to initialize ALL it's regular widgets because this custom
# screen doesn't have them all -just most of them. So we call the ones we want
def initialize_widgets(self):
self.gscreen.init_show_windows()
self.gscreen.init_dynamic_tabs()
#self.gscreen.init_axis_frames()
#self.gscreen.init_dro_colors()
self.gscreen.init_screen2()
self.gscreen.init_fullscreen1()
self.gscreen.init_gremlin()
self.gscreen.init_manual_spindle_controls()
self.gscreen.init_dro_colors()
self.init_dro() # local function
self.gscreen.init_audio()
self.gscreen.init_desktop_notify()
self.gscreen.init_statusbar()
self.gscreen.init_entry()
self.gscreen.init_tooleditor()
self.gscreen.init_offsetpage()
self.gscreen.init_embeded_terminal()
self.gscreen.init_themes()
self.gscreen.init_screen1_geometry()
self.gscreen.init_running_options()
self.gscreen.init_hide_cursor()
#self.gscreen.init_mode()
self.gscreen.mode_changed(self.data.mode_order[0])
self.gscreen.init_sensitive_on_off()
self.gscreen.init_sensitive_run_idle()
self.gscreen.init_sensitive_all_homed()
self.gscreen.init_sensitive_override_mode()
self.gscreen.init_sensitive_graphics_mode()
self.gscreen.init_sensitive_origin_mode()
self.init_sensitive_edit_mode() # local function
for i in ("setup_button","mdi_button","run_button","tooledit_button","offsetpage_button","button_index_tool"):
self.data.sensitive_override_mode.append(i)
self.data.sensitive_graphics_mode.append(i)
self.data.sensitive_origin_mode.append(i)
self.widgets["spindle-at-speed"].set_property("on_color","black")
self.gscreen.init_unlock_code()
self.gscreen.init_state()
for i in self.data.axis_list:
self.widgets["dro_%s1"%i].show()
self.widgets["dro_%s2"%i].show()
self.widgets["dro_%s3"%i].show()
self.widgets["axis_%s"%i].show()
self.widgets["home_%s"%i].show()
#self.widgets.offsetpage1.set_highlight_color("lightblue")
self.widgets.offsetpage1.set_font("sans 18")
self.widgets.offsetpage1.set_row_visible("1",False)
self.widgets.tooledit1.set_font("sans 18")
if self.data.embedded_keyboard:
self.gscreen.launch_keyboard()
def init_sensitive_edit_mode(self):
self.data.sensitive_edit_mode = ["button_graphics","button_override","button_restart","button_cycle_start","button_single_step",
"run_button","setup_button","mdi_button","system_button","tooledit_button","ignore_limits",
"offsetpage_button"]
def init_dro(self):
self.on_abs_colorbutton_color_set(None)
self.on_rel_colorbutton_color_set(None)
self.on_dtg_colorbutton_color_set(None)
self.widgets.show_dtg.set_active(self.data.show_dtg)
self.on_show_dtg_pressed(self.widgets.show_dtg)
self.gscreen.init_dro()
data = self.data.dro_units
for i in ("1","2","3"):
for letter in self.data.axis_list:
axis = "dro_%s%s"% (letter,i)
try:
self.widgets[axis].set_property("display_units_mm",data)
except:
pass
# every 100 milli seconds this gets called
# we add calls to the regular functions for the widgets we are using.
# and add any extra calls/code
def periodic(self):
self.update_mdi_spindle_button() # local method
self.gscreen.update_spindle_bar()
#self.gscreen.update_dro()
self.gscreen.update_active_gcodes()
self.gscreen.update_active_mcodes()
self.gscreen.update_aux_coolant_pins()
self.gscreen.update_feed_speed_label()
self.gscreen.update_tool_label()
self.gscreen.update_coolant_leds()
self.gscreen.update_estop_led()
self.gscreen.update_machine_on_led()
self.gscreen.update_limit_override()
self.gscreen.update_override_label()
self.gscreen.update_jog_rate_label()
self.gscreen.update_mode_label()
self.gscreen.update_units_button_label()
# spindle controls
def update_mdi_spindle_button(self):
self.widgets.at_speed_label.set_label(_("%d RPM"%abs(self.data.spindle_speed)))
label = self.widgets.spindle_control.get_label()
speed = self.data.spindle_speed
if speed == 0 and not label == _("Start"):
temp = _("Start")
self.widgets["spindle-at-speed"].set_property("on_color","black")
elif speed and not label == _("Stop"):
temp = _("Stop")
self.widgets["spindle-at-speed"].set_property("on_color","green")
else: return
self.widgets.spindle_control.set_label(temp)
def __getitem__(self, item):
return getattr(self, item)
def __setitem__(self, item, value):
return setattr(self, item, value)
# standard handler call
def get_handlers(halcomp,builder,useropts,gscreen):
return [HandlerClass(halcomp,builder,useropts,gscreen)]
| lgpl-2.1 |
Universal-Model-Converter/UMC3.0a | data/Python/x86/Lib/site-packages/PyQt4/uic/port_v3/proxy_base.py | 4 | 1499 | #############################################################################
##
## Copyright (c) 2013 Riverbank Computing Limited <info@riverbankcomputing.com>
##
## This file is part of PyQt.
##
## This file may be used under the terms of the GNU General Public
## License versions 2.0 or 3.0 as published by the Free Software
## Foundation and appearing in the files LICENSE.GPL2 and LICENSE.GPL3
## included in the packaging of this file. Alternatively you may (at
## your option) use any later version of the GNU General Public
## License if such license has been publicly approved by Riverbank
## Computing Limited (or its successors, if any) and the KDE Free Qt
## Foundation. In addition, as a special exception, Riverbank gives you
## certain additional rights. These rights are described in the Riverbank
## GPL Exception version 1.1, which can be found in the file
## GPL_EXCEPTION.txt in this package.
##
## If you are unsure which license is appropriate for your use, please
## contact the sales department at sales@riverbankcomputing.com.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
from PyQt4.uic.Compiler.proxy_metaclass import ProxyMetaclass
class ProxyBase(metaclass=ProxyMetaclass):
""" A base class for proxies using Python v3 syntax for setting the
meta-class.
"""
| mit |
was4444/chromium.src | third_party/typ/typ/fakes/tests/test_result_server_fake_test.py | 81 | 1298 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typ.fakes import test_result_server_fake
from typ import Host
class TestResultServerFakeTest(unittest.TestCase):
def test_basic_upload(self):
host = Host()
server = None
posts = []
try:
server = test_result_server_fake.start()
url = 'http://%s:%d/testfile/upload' % server.server_address
if server:
resp = host.fetch(url, 'foo=bar')
finally:
if server:
posts = server.stop()
self.assertEqual(posts, [('post', '/testfile/upload',
'foo=bar'.encode('utf8'))])
self.assertNotEqual(server.log.getvalue(), '')
| bsd-3-clause |
GoogleCloudPlatform/python-compat-runtime | appengine-compat/exported_appengine_sdk/google/appengine/tools/devappserver2/url_handler_test.py | 8 | 12827 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for google.appengine.tools.devappserver2.url_handler."""
import re
import unittest
import wsgiref.util
from google.appengine.api import appinfo
from google.appengine.tools.devappserver2 import constants
from google.appengine.tools.devappserver2 import url_handler
from google.appengine.tools.devappserver2 import wsgi_test_utils
COOKIE = 'dev_appserver_login=johnny@example.com:False:115914779145204185301'
COOKIE_ADMIN = ('dev_appserver_login=johnny@example.com:True:'
'115914779145204185301')
class TestURLHandler(unittest.TestCase):
"""Tests URLHandler base class functionality."""
def test_match(self):
handler = url_handler.URLHandler(re.compile('/(foo|bar).*'))
self.assertTrue(handler.match('/foo'))
self.assertTrue(handler.match('/bar'))
self.assertTrue(handler.match('/foo/baz'))
self.assertTrue(handler.match('/bar/baz'))
self.assertFalse(handler.match('/baz'))
self.assertFalse(handler.match('/baz/baz'))
class TestAuthorization(wsgi_test_utils.WSGITestCase):
"""Tests authorization functionality in UserConfiguredURLHandler."""
def setUp(self):
self.environ = {}
wsgiref.util.setup_testing_defaults(self.environ)
# Have a different SERVER_NAME to HTTP_HOST to we can test that the server
# is using the right one.
self.environ['SERVER_NAME'] = '127.0.0.1'
self.environ['HTTP_HOST'] = 'localhost:8080'
self.environ['PATH_INFO'] = '/my/album/of/pictures'
self.environ['QUERY_STRING'] = 'with=some&query=parameters'
def test_optional(self):
"""Test page with no login requirement, and no cookie."""
url_map = appinfo.URLMap(url='/')
h = url_handler.UserConfiguredURLHandler(url_map, '/$')
def start_response(unused_status, unused_response_headers,
unused_exc_info=None):
# Successful authorization should not call start_response
self.fail('start_response was called')
r = h.handle_authorization(self.environ, start_response)
self.assertEqual(None, r)
def test_required_redirect_no_login(self):
"""Test page with login: required; redirect, and no cookie."""
url_map = appinfo.URLMap(url='/',
login='required')
h = url_handler.UserConfiguredURLHandler(url_map, '/$')
expected_status = '302 Requires login'
expected_location = (
'http://localhost:8080/_ah/login?continue=http%3A//localhost%3A8080'
'/my/album/of/pictures%3Fwith%3Dsome%26query%3Dparameters')
expected_headers = {'Location': expected_location}
self.assertResponse(expected_status, expected_headers, '',
h.handle_authorization, self.environ)
def test_required_unauthorized_no_login(self):
"""Test page with login: required; unauthorized, and no cookie."""
url_map = appinfo.URLMap(url='/',
login='required',
auth_fail_action='unauthorized')
h = url_handler.UserConfiguredURLHandler(url_map, '/$')
expected_status = '401 Not authorized'
expected_headers = {'Content-Type': 'text/html',
'Cache-Control': 'no-cache'}
expected_content = 'Login required to view page.'
self.assertResponse(expected_status, expected_headers, expected_content,
h.handle_authorization, self.environ)
def test_required_succeed(self):
"""Test page with login: required, and a valid cookie."""
url_map = appinfo.URLMap(url='/',
login='required')
h = url_handler.UserConfiguredURLHandler(url_map, '/$')
self.environ['HTTP_COOKIE'] = COOKIE
def start_response(unused_status, unused_response_headers,
unused_exc_info=None):
# Successful authorization should not call start_response
self.fail('start_response was called')
r = h.handle_authorization(self.environ, start_response)
self.assertEqual(None, r)
def test_required_no_login_fake_is_admin(self):
"""Test page with login: required, no cookie, with fake-is-admin header."""
# This should FAIL, because fake-is-admin only applies to login: admin, not
# login: required.
url_map = appinfo.URLMap(url='/',
login='required')
h = url_handler.UserConfiguredURLHandler(url_map, '/$')
self.environ[constants.FAKE_IS_ADMIN_HEADER] = '1'
expected_status = '302 Requires login'
expected_location = (
'http://localhost:8080/_ah/login?continue=http%3A//localhost%3A8080'
'/my/album/of/pictures%3Fwith%3Dsome%26query%3Dparameters')
expected_headers = {'Location': expected_location}
self.assertResponse(expected_status, expected_headers, '',
h.handle_authorization, self.environ)
def test_admin_no_login_fake_logged_in(self):
"""Tests page with login: admin, no cookie with fake login header."""
# This should FAIL, because a fake login does not imply admin privileges.
url_map = appinfo.URLMap(url='/',
login='admin')
h = url_handler.UserConfiguredURLHandler(url_map, '/$')
self.environ[constants.FAKE_LOGGED_IN_HEADER] = '1'
expected_status = '401 Not authorized'
expected_headers = {'Content-Type': 'text/html',
'Cache-Control': 'no-cache'}
expected_content = ('Current logged in user Fake User is not authorized '
'to view this page.')
self.assertResponse(expected_status, expected_headers, expected_content,
h.handle_authorization, self.environ)
def test_required_succeed_fake_is_admin(self):
"""Test with login: required, and a valid cookie, with fake-is-admin."""
url_map = appinfo.URLMap(url='/',
login='required')
h = url_handler.UserConfiguredURLHandler(url_map, '/$')
self.environ['HTTP_COOKIE'] = COOKIE
self.environ[constants.FAKE_IS_ADMIN_HEADER] = '1'
def start_response(unused_status, unused_response_headers,
unused_exc_info=None):
# Successful authorization should not call start_response
self.fail('start_response was called')
r = h.handle_authorization(self.environ, start_response)
self.assertEqual(None, r)
def test_admin_redirect_no_login(self):
"""Test page with login: admin; redirect, and no cookie."""
url_map = appinfo.URLMap(url='/',
login='admin')
h = url_handler.UserConfiguredURLHandler(url_map, '/$')
expected_status = '302 Requires login'
expected_location = (
'http://localhost:8080/_ah/login?continue=http%3A//localhost%3A8080'
'/my/album/of/pictures%3Fwith%3Dsome%26query%3Dparameters')
expected_headers = {'Location': expected_location}
self.assertResponse(expected_status, expected_headers, '',
h.handle_authorization, self.environ)
def test_admin_unauthorized_no_login(self):
"""Test page with login: admin; unauthorized, and no cookie."""
url_map = appinfo.URLMap(url='/',
login='admin',
auth_fail_action='unauthorized')
h = url_handler.UserConfiguredURLHandler(url_map, '/$')
expected_status = '401 Not authorized'
expected_headers = {'Content-Type': 'text/html',
'Cache-Control': 'no-cache'}
expected_content = 'Login required to view page.'
self.assertResponse(expected_status, expected_headers, expected_content,
h.handle_authorization, self.environ)
def test_admin_no_admin(self):
"""Test page with login: admin, and a non-admin cookie."""
url_map = appinfo.URLMap(url='/',
login='admin')
h = url_handler.UserConfiguredURLHandler(url_map, '/$')
self.environ['HTTP_COOKIE'] = COOKIE
expected_status = '401 Not authorized'
expected_headers = {'Content-Type': 'text/html',
'Cache-Control': 'no-cache'}
expected_content = ('Current logged in user johnny@example.com is not '
'authorized to view this page.')
self.assertResponse(expected_status, expected_headers, expected_content,
h.handle_authorization, self.environ)
def test_admin_succeed(self):
"""Test page with login: admin, and a valid admin cookie."""
url_map = appinfo.URLMap(url='/',
login='admin')
h = url_handler.UserConfiguredURLHandler(url_map, '/$')
self.environ['HTTP_COOKIE'] = COOKIE_ADMIN
def start_response(unused_status, unused_response_headers,
unused_exc_info=None):
# Successful authorization should not call start_response
self.fail('start_response was called')
r = h.handle_authorization(self.environ, start_response)
self.assertEqual(None, r)
def test_admin_no_login_fake_is_admin(self):
"""Test page with login: admin, and no cookie, with fake-is-admin."""
url_map = appinfo.URLMap(url='/',
login='admin')
h = url_handler.UserConfiguredURLHandler(url_map, '/$')
self.environ[constants.FAKE_IS_ADMIN_HEADER] = '1'
def start_response(unused_status, unused_response_headers,
unused_exc_info=None):
# Successful authorization should not call start_response
self.fail('start_response was called')
r = h.handle_authorization(self.environ, start_response)
self.assertEqual(None, r)
def test_admin_no_admin_fake_is_admin(self):
"""Test with login: admin, and a non-admin cookie, with fake-is-admin."""
url_map = appinfo.URLMap(url='/',
login='admin')
h = url_handler.UserConfiguredURLHandler(url_map, '/$')
self.environ['HTTP_COOKIE'] = COOKIE
self.environ[constants.FAKE_IS_ADMIN_HEADER] = '1'
def start_response(unused_status, unused_response_headers,
unused_exc_info=None):
# Successful authorization should not call start_response
self.fail('start_response was called')
r = h.handle_authorization(self.environ, start_response)
self.assertEqual(None, r)
def test_admin_succeed_fake_is_admin(self):
"""Test with login: admin, and valid admin cookie, with fake-is-admin."""
url_map = appinfo.URLMap(url='/',
login='admin')
h = url_handler.UserConfiguredURLHandler(url_map, '/$')
self.environ['HTTP_COOKIE'] = COOKIE_ADMIN
self.environ[constants.FAKE_IS_ADMIN_HEADER] = '1'
def start_response(unused_status, unused_response_headers,
unused_exc_info=None):
# Successful authorization should not call start_response
self.fail('start_response was called')
r = h.handle_authorization(self.environ, start_response)
self.assertEqual(None, r)
def test_admin_no_login_fake_is_admin_header(self):
"""Test page with login: admin, and no cookie, with fake-is-admin header."""
url_map = appinfo.URLMap(url='/',
login='admin')
h = url_handler.UserConfiguredURLHandler(url_map, '/$')
self.environ[constants.FAKE_IS_ADMIN_HEADER] = '1'
def start_response(unused_status, unused_response_headers,
unused_exc_info=None):
# Successful authorization should not call start_response
self.fail('start_response was called')
r = h.handle_authorization(self.environ, start_response)
self.assertEqual(None, r)
def test_login_required_no_login_fake_logged_in_header(self):
"""Test page with login: required with fake-login-required."""
url_map = appinfo.URLMap(url='/',
login='required')
h = url_handler.UserConfiguredURLHandler(url_map, '/$')
self.environ[constants.FAKE_LOGGED_IN_HEADER] = '1'
def start_response(unused_status, unused_response_headers,
unused_exc_info=None):
# Successful authorization should not call start_response
self.fail('start_response was called')
r = h.handle_authorization(self.environ, start_response)
self.assertEqual(None, r)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
Joni-Aaltonen/pebble-owner-info | .waf-1.7.0-9334f7e963bee5410f4fa28728feffdd/waflib/Tools/c_tests.py | 1 | 4186 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
from waflib import Task
from waflib.Configure import conf
from waflib.TaskGen import feature,before_method,after_method
import sys
LIB_CODE='''
#ifdef _MSC_VER
#define testEXPORT __declspec(dllexport)
#else
#define testEXPORT
#endif
testEXPORT int lib_func(void) { return 9; }
'''
MAIN_CODE='''
#ifdef _MSC_VER
#define testEXPORT __declspec(dllimport)
#else
#define testEXPORT
#endif
testEXPORT int lib_func(void);
int main(void) {return !(lib_func() == 9);}
'''
@feature('link_lib_test')
@before_method('process_source')
def link_lib_test_fun(self):
def write_test_file(task):
task.outputs[0].write(task.generator.code)
rpath=[]
if getattr(self,'add_rpath',False):
rpath=[self.bld.path.get_bld().abspath()]
mode=self.mode
m='%s %s'%(mode,mode)
ex=self.test_exec and'test_exec'or''
bld=self.bld
bld(rule=write_test_file,target='test.'+mode,code=LIB_CODE)
bld(rule=write_test_file,target='main.'+mode,code=MAIN_CODE)
bld(features='%sshlib'%m,source='test.'+mode,target='test')
bld(features='%sprogram %s'%(m,ex),source='main.'+mode,target='app',use='test',rpath=rpath)
@conf
def check_library(self,mode=None,test_exec=True):
if not mode:
mode='c'
if self.env.CXX:
mode='cxx'
self.check(compile_filename=[],features='link_lib_test',msg='Checking for libraries',mode=mode,test_exec=test_exec,)
INLINE_CODE='''
typedef int foo_t;
static %s foo_t static_foo () {return 0; }
%s foo_t foo () {
return 0;
}
'''
INLINE_VALUES=['inline','__inline__','__inline']
@conf
def check_inline(self,**kw):
self.start_msg('Checking for inline')
if not'define_name'in kw:
kw['define_name']='INLINE_MACRO'
if not'features'in kw:
if self.env.CXX:
kw['features']=['cxx']
else:
kw['features']=['c']
for x in INLINE_VALUES:
kw['fragment']=INLINE_CODE%(x,x)
try:
self.check(**kw)
except self.errors.ConfigurationError:
continue
else:
self.end_msg(x)
if x!='inline':
self.define('inline',x,quote=False)
return x
self.fatal('could not use inline functions')
LARGE_FRAGMENT='#include <unistd.h>\nint main() { return !(sizeof(off_t) >= 8); }\n'
@conf
def check_large_file(self,**kw):
if not'define_name'in kw:
kw['define_name']='HAVE_LARGEFILE'
if not'execute'in kw:
kw['execute']=True
if not'features'in kw:
if self.env.CXX:
kw['features']=['cxx','cxxprogram']
else:
kw['features']=['c','cprogram']
kw['fragment']=LARGE_FRAGMENT
kw['msg']='Checking for large file support'
ret=True
try:
if self.env.DEST_BINFMT!='pe':
ret=self.check(**kw)
except self.errors.ConfigurationError:
pass
else:
if ret:
return True
kw['msg']='Checking for -D_FILE_OFFSET_BITS=64'
kw['defines']=['_FILE_OFFSET_BITS=64']
try:
ret=self.check(**kw)
except self.errors.ConfigurationError:
pass
else:
self.define('_FILE_OFFSET_BITS',64)
return ret
self.fatal('There is no support for large files')
ENDIAN_FRAGMENT='''
short int ascii_mm[] = { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 };
short int ascii_ii[] = { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 };
int use_ascii (int i) {
return ascii_mm[i] + ascii_ii[i];
}
short int ebcdic_ii[] = { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 };
short int ebcdic_mm[] = { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 };
int use_ebcdic (int i) {
return ebcdic_mm[i] + ebcdic_ii[i];
}
extern int foo;
'''
class grep_for_endianness(Task.Task):
color='PINK'
def run(self):
txt=self.inputs[0].read(flags='rb').decode('iso8859-1')
if txt.find('LiTTleEnDian')>-1:
self.generator.tmp.append('little')
elif txt.find('BIGenDianSyS')>-1:
self.generator.tmp.append('big')
else:
return-1
@feature('grep_for_endianness')
@after_method('process_source')
def grep_for_endianness_fun(self):
self.create_task('grep_for_endianness',self.compiled_tasks[0].outputs[0])
@conf
def check_endianness(self):
tmp=[]
def check_msg(self):
return tmp[0]
self.check(fragment=ENDIAN_FRAGMENT,features='c grep_for_endianness',msg="Checking for endianness",define='ENDIANNESS',tmp=tmp,okmsg=check_msg)
return tmp[0]
| gpl-2.0 |
be-cloud-be/horizon-addons | server-tools/base_export_manager/tests/test_ir_exports.py | 13 | 1833 | # -*- coding: utf-8 -*-
# © 2015 Antiun Ingeniería S.L. - Jairo Llopis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp.exceptions import ValidationError
from openerp.tests.common import TransactionCase
class TestIrExportsCase(TransactionCase):
def test_create_with_basic_data(self):
"""Emulate creation from original form.
This form is handled entirely client-side, and lacks some required
field values.
"""
# Emulate creation from JsonRpc, without model_id and field#_id
data = {
"name": u"Test éxport",
"resource": "ir.exports",
"export_fields": [
[0, 0, {"name": "export_fields"}],
[0, 0, {"name": "export_fields/create_uid"}],
[0, 0, {"name": "export_fields/create_date"}],
[0, 0, {"name": "export_fields/field1_id"}],
],
}
record = self.env["ir.exports"].create(data)
self.assertEqual(record.model_id.model, data["resource"])
def test_create_without_model(self):
"""Creating a record without ``model_id`` and ``resource`` fails."""
IrExports = self.env["ir.exports"]
model = IrExports._get_model_id("res.partner")
# Creating with resource
record = IrExports.create({
"name": "some",
"resource": model.model,
})
self.assertEqual(record.model_id, model)
# Creating with model_id
record = IrExports.create({
"name": "some",
"model_id": model.id,
})
self.assertEqual(record.resource, model.model)
# Creating without anyone
with self.assertRaises(ValidationError):
IrExports.create({
"name": "some",
})
| agpl-3.0 |
jaimeantena4040/MiSitioWeb | meta/lib/oe/tests/test_license.py | 12 | 2047 | import unittest
import oe.license
class SeenVisitor(oe.license.LicenseVisitor):
def __init__(self):
self.seen = []
oe.license.LicenseVisitor.__init__(self)
def visit_Str(self, node):
self.seen.append(node.s)
class TestSingleLicense(unittest.TestCase):
licenses = [
"GPLv2",
"LGPL-2.0",
"Artistic",
"MIT",
"GPLv3+",
"FOO_BAR",
]
invalid_licenses = ["GPL/BSD"]
@staticmethod
def parse(licensestr):
visitor = SeenVisitor()
visitor.visit_string(licensestr)
return visitor.seen
def test_single_licenses(self):
for license in self.licenses:
licenses = self.parse(license)
self.assertListEqual(licenses, [license])
def test_invalid_licenses(self):
for license in self.invalid_licenses:
with self.assertRaises(oe.license.InvalidLicense) as cm:
self.parse(license)
self.assertEqual(cm.exception.license, license)
class TestSimpleCombinations(unittest.TestCase):
tests = {
"FOO&BAR": ["FOO", "BAR"],
"BAZ & MOO": ["BAZ", "MOO"],
"ALPHA|BETA": ["ALPHA"],
"BAZ&MOO|FOO": ["FOO"],
"FOO&BAR|BAZ": ["FOO", "BAR"],
}
preferred = ["ALPHA", "FOO", "BAR"]
def test_tests(self):
def choose(a, b):
if all(lic in self.preferred for lic in b):
return b
else:
return a
for license, expected in self.tests.items():
licenses = oe.license.flattened_licenses(license, choose)
self.assertListEqual(licenses, expected)
class TestComplexCombinations(TestSimpleCombinations):
tests = {
"FOO & (BAR | BAZ)&MOO": ["FOO", "BAR", "MOO"],
"(ALPHA|(BETA&THETA)|OMEGA)&DELTA": ["OMEGA", "DELTA"],
"((ALPHA|BETA)&FOO)|BAZ": ["BETA", "FOO"],
"(GPL-2.0|Proprietary)&BSD-4-clause&MIT": ["GPL-2.0", "BSD-4-clause", "MIT"],
}
preferred = ["BAR", "OMEGA", "BETA", "GPL-2.0"]
| gpl-2.0 |
homologus/Pandoras-Toolbox-for-Bioinformatics | src/SPAdes/ext/src/python_libs/pyyaml2/cyaml.py | 537 | 3290 |
__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
'CBaseDumper', 'CSafeDumper', 'CDumper']
from _yaml import CParser, CEmitter
from constructor import *
from serializer import *
from representer import *
from resolver import *
class CBaseLoader(CParser, BaseConstructor, BaseResolver):
    """Loader built on the libyaml CParser with the base (tag-passthrough)
    constructor and resolver."""
    def __init__(self, stream):
        CParser.__init__(self, stream)
        BaseConstructor.__init__(self)
        BaseResolver.__init__(self)
class CSafeLoader(CParser, SafeConstructor, Resolver):
    """Loader built on the libyaml CParser that only constructs basic
    Python objects (the 'safe' constructor)."""
    def __init__(self, stream):
        CParser.__init__(self, stream)
        SafeConstructor.__init__(self)
        Resolver.__init__(self)
class CLoader(CParser, Constructor, Resolver):
    """Loader built on the libyaml CParser with the full constructor
    (can instantiate arbitrary Python objects — not safe on untrusted input)."""
    def __init__(self, stream):
        CParser.__init__(self, stream)
        Constructor.__init__(self)
        Resolver.__init__(self)
class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
    """Dumper built on the libyaml CEmitter.

    NOTE(review): although the declared bases are BaseRepresenter and
    BaseResolver, the constructor initialises Representer and Resolver.
    This mirrors upstream PyYAML's cyaml.py; confirm it is intentional
    before changing.
    """
    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        # CEmitter (from the _yaml C extension) handles both emitting and
        # serialization options.
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        Representer.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
    """Dumper built on the libyaml CEmitter that only represents basic
    Python objects (the 'safe' representer)."""
    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        SafeRepresenter.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
class CDumper(CEmitter, Serializer, Representer, Resolver):
    """Full dumper built on the libyaml CEmitter.

    NOTE(review): Serializer appears in the MRO but Serializer.__init__ is
    never invoked here — serialization appears to be handled by CEmitter.
    This matches upstream PyYAML; confirm before relying on Serializer state.
    """
    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        Representer.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
| gpl-3.0 |
switchboardOp/ansible | lib/ansible/modules/network/panos/panos_check.py | 78 | 4114 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module documentation consumed by Ansible's doc tooling.
DOCUMENTATION = '''
---
module: panos_check
short_description: check if PAN-OS device is ready for configuration
description:
    - Check if PAN-OS device is ready for being configured (no pending jobs).
    - The check could be done once or multiple times until the device is ready.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
    - pan-python
options:
    ip_address:
        description:
            - IP address (or hostname) of PAN-OS device
        required: true
    password:
        description:
            - password for authentication
        required: true
    username:
        description:
            - username for authentication
        required: false
        default: "admin"
    timeout:
        description:
            - timeout of API calls
        required: false
        default: "0"
    interval:
        description:
            - time waited between checks
        required: false
        default: "0"
'''
EXAMPLES = '''
# single check on 192.168.1.1 with credentials admin/admin
- name: check if ready
  panos_check:
    ip_address: "192.168.1.1"
    password: "admin"
# check for 10 times, every 30 seconds, if device 192.168.1.1
# is ready, using credentials admin/admin
- name: wait for reboot
  panos_check:
    ip_address: "192.168.1.1"
    password: "admin"
  register: result
  until: not result|failed
  retries: 10
  delay: 30
'''
# PEP 8 fix: spaces around '=' (was RETURN='''...).
RETURN = '''
# Default return values
'''
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
import time
try:
import pan.xapi
HAS_LIB = True
except ImportError:
HAS_LIB = False
def check_jobs(jobs, module):
    """Return True only when *jobs* is non-empty and every job element
    has a <status> descendant whose text is 'FIN' (finished).

    *module* is accepted for interface compatibility but not used.
    """
    if not jobs:
        return False
    for job in jobs:
        status_node = job.find('.//status')
        if status_node is None or status_node.text != 'FIN':
            return False
    return True
def main():
    """Poll the firewall's job queue until every job reports FIN, then
    exit successfully; fail with 'Timeout' once *timeout* seconds elapse."""
    argument_spec = dict(
        ip_address=dict(required=True),
        password=dict(required=True, no_log=True),
        username=dict(default='admin'),
        timeout=dict(default=0, type='int'),
        interval=dict(default=0, type='int')
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    if not HAS_LIB:
        module.fail_json(msg='pan-python is required for this module')
    ip_address = module.params["ip_address"]
    password = module.params["password"]
    username = module.params['username']
    timeout = module.params['timeout']
    interval = module.params['interval']
    xapi = pan.xapi.PanXapi(
        hostname=ip_address,
        api_username=username,
        api_password=password,
        timeout=60
    )
    # Absolute deadline after which we give up polling.
    checkpnt = time.time() + timeout
    while True:
        try:
            xapi.op(cmd="show jobs all", cmd_xml=True)
        except Exception:
            # Best-effort: the device may still be rebooting, so keep
            # polling.  (Fix: was a bare `except:`, which also swallowed
            # SystemExit and KeyboardInterrupt.)
            pass
        else:
            jobs = xapi.element_root.findall('.//job')
            if check_jobs(jobs, module):
                module.exit_json(changed=True, msg="okey dokey")
        if time.time() > checkpnt:
            break
        time.sleep(interval)
    module.fail_json(msg="Timeout")
if __name__ == '__main__':
main()
| gpl-3.0 |
purcoin/PurpleCoin | contrib/testgen/base58.py | 2139 | 2818 | '''
Bitcoin base58 encoding and decoding.
Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
    """Thin stand-in mirroring the PyCrypto-style `SHA256.new()` API on
    top of hashlib, so the code below reads like the original it was
    ported from."""
    new = hashlib.sha256
if str != bytes:
    # Python 3.x
    # On py3, iterating bytes already yields ints and bytes are built from
    # int sequences, so redefine module-level ord/chr shims that let the
    # py2-oriented byte-twiddling code below run unchanged.
    def ord(c):
        return c
    def chr(n):
        return bytes( (n,) )
# Base58 alphabet: no 0, O, I or l (visually ambiguous characters).
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
    """Encode v, a string of bytes, to base58.

    Returns a str of base58 alphabet characters.
    """
    # Interpret v as a big-endian base-256 integer.  `ord` is the
    # module-level shim, so this works for py2 str and py3 bytes alike.
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        long_value += (256**i) * ord(c)
    result = ''
    while long_value >= __b58base:
        div, mod = divmod(long_value, __b58base)
        result = __b58chars[mod] + result
        long_value = div
    result = __b58chars[long_value] + result
    # Bitcoin does a little leading-zero-compression:
    # leading 0-bytes in the input become leading-1s
    nPad = 0
    for c in v:
        # BUG FIX: on Python 3 iterating bytes yields ints, so the previous
        # `c == '\0'` comparison was always False and the padding was
        # silently dropped.  The shimmed `ord` normalises both versions
        # to an int.
        if ord(c) == 0:
            nPad += 1
        else:
            break
    return (__b58chars[0]*nPad) + result
def b58decode(v, length = None):
    """ decode v into a string of len bytes
    """
    # Accumulate the base58 digits into one big integer.
    # NOTE(review): str.find returns -1 for characters outside the
    # alphabet, which silently corrupts the value instead of raising.
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        long_value += __b58chars.find(c) * (__b58base**i)
    # Emit the integer as big-endian bytes; `chr` is the module shim, so
    # this builds bytes on py3 and str on py2.
    result = bytes()
    while long_value >= 256:
        div, mod = divmod(long_value, 256)
        result = chr(mod) + result
        long_value = div
    result = chr(long_value) + result
    # Each leading '1' in the input represents a leading zero byte.
    nPad = 0
    for c in v:
        if c == __b58chars[0]: nPad += 1
        else: break
    result = chr(0)*nPad + result
    # Optional length check: reject results of the wrong size.
    if length is not None and len(result) != length:
        return None
    return result
def checksum(v):
    """First four bytes (32 bits) of double-SHA256 of v."""
    inner = SHA256.new(v).digest()
    return SHA256.new(inner).digest()[:4]
def b58encode_chk(v):
    """Base58-encode v with its 4-byte double-SHA256 checksum appended."""
    payload = v + checksum(v)
    return b58encode(payload)
def b58decode_chk(v):
    """Decode a base58 string, verify and strip its 4-byte checksum.

    Returns the payload bytes, or None if decoding fails or the
    checksum does not match.
    """
    result = b58decode(v)
    if result is None:
        return None
    payload, check = result[:-4], result[-4:]
    # Fix: the original computed checksum(result[:-4]) into a local (h3)
    # and then recomputed it in the comparison; compute it once.
    if check == checksum(payload):
        return payload
    return None
def get_bcaddress_version(strAddress):
    """Return the integer version byte of a Base58Check-encoded address,
    or None when the address fails decoding or has the wrong length."""
    addr = b58decode_chk(strAddress)
    # A valid payload is exactly 21 bytes: 1 version byte + 20-byte hash.
    if addr is None or len(addr) != 21:
        return None
    return ord(addr[0])
if __name__ == '__main__':
    # Self-test (from http://gitorious.org/bitcoin/python-base58.git).
    # Fix: compare with `== 0` instead of `is 0` — identity comparison
    # against an int literal is a CPython implementation detail and a
    # SyntaxWarning since Python 3.8.
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
    _ohai = 'o hai'.encode('ascii')
    _tmp = b58encode(_ohai)
    assert _tmp == 'DYB3oMS'
    assert b58decode(_tmp, 5) == _ohai
    print("Tests passed")
| mit |
Alexoner/mooc | coursera/nlpintro-001/Assignment2/solutionsA.py | 1 | 9462 | import math
import nltk
import time
import sys
# Constants to be used by you when you fill the functions
START_SYMBOL = '*'
STOP_SYMBOL = 'STOP'
MINUS_INFINITY_SENTENCE_LOG_PROB = -1000


def log2(x):
    """Base-2 logarithm.

    A def rather than a lambda assignment (PEP 8 E731); the callable
    interface is unchanged.
    """
    return math.log(x, 2)
# TODO: IMPLEMENT THIS FUNCTION
# Calculates unigram, bigram, and trigram probabilities given a training corpus
# training_corpus: is a list of the sentences. Each sentence is a string with tokens separated by spaces, ending in a newline character.
# This function outputs three python dictionaries, where the keys are
# tuples expressing the ngram and the value is the log probability of that
# ngram
def calc_probabilities(training_corpus):
"""
this is docstring
"""
# unigram_tuples = []
# bigram_tuples = []
# trigram_tuples = []
unigram_count = {}
bigram_count = {}
trigram_count = {}
unigram_count_pnodes = {}
bigram_count_pnodes = {}
trigram_count_pnodes = {}
unigram_total = 0
bigram_total = 0
trigram_total = 0
print 'total {} sentences'.format(len(training_corpus))
for i in xrange(0, len(training_corpus)):
if i % 3000 == 0:
print 'processing ', i, 'th sentence...'
training_corpus[i] = START_SYMBOL + ' ' + training_corpus[i]
training_corpus[i] = training_corpus[i] + ' ' + STOP_SYMBOL
# training_corpus[i].replace('.',' ' + STOP_SYMBOL)
tokens = training_corpus[i].split()
unigram_tuples_i = list((token,) for token in tokens)
bigram_tuples_i = list(nltk.bigrams(tokens))
trigram_tuples_i = list(nltk.trigrams(tokens))
unigram_total += len(unigram_tuples_i)
bigram_total += len(bigram_tuples_i)
trigram_total += len(trigram_tuples_i)
for item in unigram_tuples_i:
if item in [(START_SYMBOL,)]:
continue
unigram_count.setdefault(item, 0)
unigram_count_pnodes.setdefault(item[0:-1], 0)
unigram_count[item] = unigram_count[item] + 1
unigram_count_pnodes[
item[0:-1]] = unigram_count_pnodes[item[0:-1]] + 1
for item in bigram_tuples_i:
bigram_count.setdefault(item, 0)
bigram_count_pnodes.setdefault(item[0:-1], 0)
bigram_count[item] = bigram_count[item] + 1
bigram_count_pnodes[
item[0:-1]] = bigram_count_pnodes[item[0:-1]] + 1
for item in trigram_tuples_i:
trigram_count.setdefault(item, 0)
trigram_count_pnodes.setdefault(item[0:-1], 0)
trigram_count[item] = trigram_count[item] + 1
trigram_count_pnodes[
item[0:-1]] = trigram_count_pnodes[item[0:-1]] + 1
unigram_p = {
item: math.log(
unigram_count[item],
2) -
math.log(
unigram_count_pnodes[
item[
0:-
1]],
2) for item in set(unigram_count)}
bigram_p = {
item: math.log(
bigram_count[item],
2) -
math.log(
bigram_count_pnodes[
item[
0:-
1]],
2) for item in set(bigram_count)}
trigram_p = {
item: math.log(
trigram_count[item],
2) -
math.log(
trigram_count_pnodes[
item[
0:-
1]],
2) for item in set(trigram_count)}
print "calc_probabilities finished!"
return unigram_p, bigram_p, trigram_p
# Prints the output for q1
# Each input is a python dictionary where keys are a tuple expressing the
# ngram, and the value is the log probability of that ngram
def q1_output(unigrams, bigrams, trigrams, filename):
    """Write 'UNIGRAM/BIGRAM/TRIGRAM <tokens> <logprob>' lines to
    *filename*, each table sorted by its ngram tuples."""
    labeled_tables = (('UNIGRAM', unigrams),
                      ('BIGRAM', bigrams),
                      ('TRIGRAM', trigrams))
    outfile = open(filename, 'w')
    for label, table in labeled_tables:
        for gram in sorted(table.keys()):
            outfile.write(label + ' ' + ' '.join(gram) + ' ' +
                          str(table[gram]) + '\n')
        outfile.flush()
    outfile.close()
# TODO: IMPLEMENT THIS FUNCTION
# Calculates scores (log probabilities) for every sentence
# ngram_p: python dictionary of probabilities of uni-, bi- and trigrams.
# n: size of the ngram you want to use to compute probabilities
# corpus: list of sentences to score. Each sentence is a string with tokens separated by spaces, ending in a newline character.
# This function must return a python list of scores, where the first
# element is the score of the first sentence, etc.
def score(ngram_p, n, corpus):
    """Return one log2 probability per sentence under the given n-gram
    model; sentences containing an unseen ngram score
    MINUS_INFINITY_SENTENCE_LOG_PROB."""
    print "scoring corpus for ", n, "-grams"
    scores = []
    for i, sentence in enumerate(corpus):
        ngram_tuples = None
        score_i = 0
        if i % 10000 == 0:
            print 'scoring ', i, 'th sentence...'
        tokens = sentence.split()
        if n == 1:
            ngram_tuples = list([(token,) for token in tokens])
        elif n == 2:
            ngram_tuples = list(nltk.bigrams(tokens))
        elif n == 3:
            ngram_tuples = list(nltk.trigrams(tokens))
        try:
            # Sum the log probs of all ngrams except the start marker.
            score_i = sum([ngram_p[gram] for gram in ngram_tuples
                           if gram not in [(START_SYMBOL,)]])
        except KeyError as error:
            score_i = MINUS_INFINITY_SENTENCE_LOG_PROB
            # NOTE(review): `gram` and `error.message` only exist on
            # Python 2 — the list-comprehension variable leaks into the
            # enclosing scope there, and `.message` was removed in py3.
            print 'ngram_tuple ', gram, ' not in dict ', error.message
        scores.append(score_i)
    return scores
# Outputs a score to a file
# scores: list of scores
# filename: is the output file name
def score_output(scores, filename):
    """Write one sentence score per line to *filename*."""
    with open(filename, 'w') as handle:
        handle.writelines(str(value) + '\n' for value in scores)
# TODO: IMPLEMENT THIS FUNCTION
# Calculates scores (log probabilities) for every sentence with a linearly interpolated model
# Each ngram argument is a python dictionary where the keys are tuples that express an ngram and the value is the log probability of that ngram
# Like score(), this function returns a python list of scores
# TODO: `EM` algorithm to find the optimal weights.
def linearscore(unigrams, bigrams, trigrams, corpus):
    """Score each sentence with a linearly interpolated trigram model
    using fixed equal weights (1/3 each); unseen ngrams drop the whole
    sentence to MINUS_INFINITY_SENTENCE_LOG_PROB."""
    scores = []
    # Equal interpolation weights for trigram/bigram/unigram components.
    weights = (1. / 3, 1. / 3, 1. / 3,)
    for i, sentence in enumerate(corpus):
        if i % 3000 == 0:
            print 'linearscore ', i, 'th sentence...'
        score_i = 0
        tokens = sentence.split()
        trigram_tuples = list(nltk.trigrams(tokens))
        try:
            for trigram in trigram_tuples:
                # Interpolate in probability space (2**logprob), then
                # take log2 of the weighted sum.
                score_i += log2(sum([weights[0] * 2 ** trigrams[trigram[0:]],
                                     weights[1] * 2 ** bigrams[trigram[1:]],
                                     weights[2] * 2 ** unigrams[trigram[2:]],
                                     ]))
        except KeyError as e:
            score_i = MINUS_INFINITY_SENTENCE_LOG_PROB
            # NOTE(review): `e.message` is Python-2-only (removed in py3).
            print i, 'th sentence', 'ngram ', trigram, ' not in dict', e.message
        scores.append(score_i)
    return scores
# Input corpora and output locations for the assignment driver below.
DATA_PATH = 'data/'
OUTPUT_PATH = 'output/'
# DO NOT MODIFY THE MAIN FUNCTION
def main():
    """Assignment driver: train the n-gram models on the Brown corpus,
    score the training corpus and the two sample files, and write all
    outputs under OUTPUT_PATH."""
    # start timer
    # NOTE(review): time.clock() was removed in Python 3.8; this script
    # targets Python 2 (see the print statements above).
    time.clock()
    # get data
    infile = open(DATA_PATH + 'Brown_train.txt', 'r')
    corpus = infile.readlines()
    infile.close()
    # calculate ngram probabilities (question 1)
    unigrams, bigrams, trigrams = calc_probabilities(corpus)
    # question 1 output
    q1_output(unigrams, bigrams, trigrams, OUTPUT_PATH + 'A1.txt')
    # score sentences (question 2)
    uniscores = score(unigrams, 1, corpus)
    biscores = score(bigrams, 2, corpus)
    triscores = score(trigrams, 3, corpus)
    # question 2 output
    score_output(uniscores, OUTPUT_PATH + 'A2.uni.txt')
    score_output(biscores, OUTPUT_PATH + 'A2.bi.txt')
    score_output(triscores, OUTPUT_PATH + 'A2.tri.txt')
    # linear interpolation (question 3)
    linearscores = linearscore(unigrams, bigrams, trigrams, corpus)
    # question 3 output
    score_output(linearscores, OUTPUT_PATH + 'A3.txt')
    # open Sample1 and Sample2 (question 5)
    infile = open(DATA_PATH + 'Sample1.txt', 'r')
    sample1 = infile.readlines()
    infile.close()
    infile = open(DATA_PATH + 'Sample2.txt', 'r')
    sample2 = infile.readlines()
    infile.close()
    # score the samples
    sample1scores = linearscore(unigrams, bigrams, trigrams, sample1)
    sample2scores = linearscore(unigrams, bigrams, trigrams, sample2)
    # question 5 output
    score_output(sample1scores, OUTPUT_PATH + 'Sample1_scored.txt')
    score_output(sample2scores, OUTPUT_PATH + 'Sample2_scored.txt')
    # print total time to run Part A
    print("Part A time: " + str(time.clock()) + ' sec')
if __name__ == "__main__":
main()
| apache-2.0 |
dbckz/ansible | test/units/playbook/test_base.py | 34 | 23930 | # (c) 2016, Adrian Likins <alikins@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.errors import AnsibleParserError
from ansible.module_utils.six import string_types
from ansible.playbook.attribute import FieldAttribute
from ansible.template import Templar
from ansible.playbook import base
from units.mock.loader import DictDataLoader
class TestBase(unittest.TestCase):
    """Exercise ansible.playbook.base.Base: load_data/post_validate round
    trips, copy/serialize/deserialize, and `vars` edge cases.

    Fix: the deprecated `assertEquals` alias (removed in Python 3.12) is
    replaced by `assertEqual` throughout.
    """
    ClassUnderTest = base.Base

    def setUp(self):
        self.assorted_vars = {'var_2_key': 'var_2_value',
                              'var_1_key': 'var_1_value',
                              'a_list': ['a_list_1', 'a_list_2'],
                              'a_dict': {'a_dict_key': 'a_dict_value'},
                              'a_set': set(['set_1', 'set_2']),
                              'a_int': 42,
                              'a_float': 37.371,
                              'a_bool': True,
                              'a_none': None,
                              }
        self.b = self.ClassUnderTest()

    def _base_validate(self, ds):
        # Build an instance with a parent dep chain, load the data
        # structure, then post_validate it against an empty templar.
        bsc = self.ClassUnderTest()
        parent = ExampleParentBaseSubClass()
        bsc._parent = parent
        bsc._dep_chain = [parent]
        parent._dep_chain = None
        bsc.load_data(ds)
        fake_loader = DictDataLoader({})
        templar = Templar(loader=fake_loader)
        bsc.post_validate(templar)
        return bsc

    def test(self):
        self.assertIsInstance(self.b, base.Base)
        self.assertIsInstance(self.b, self.ClassUnderTest)

    # dump_me doesn't return anything or change anything so not much to assert
    def test_dump_me_empty(self):
        self.b.dump_me()

    def test_dump_me(self):
        ds = {'environment': [],
              'vars': {'var_2_key': 'var_2_value',
                       'var_1_key': 'var_1_value'}}
        b = self._base_validate(ds)
        b.dump_me()

    def _assert_copy(self, orig, copy):
        # A copy must be the same type and carry the same attrs/_ds.
        self.assertIsInstance(copy, self.ClassUnderTest)
        self.assertIsInstance(copy, base.Base)
        self.assertEqual(len(orig._valid_attrs),
                         len(copy._valid_attrs))
        sentinel = 'Empty DS'
        self.assertEqual(getattr(orig, '_ds', sentinel),
                         getattr(copy, '_ds', sentinel))

    def test_copy_empty(self):
        copy = self.b.copy()
        self._assert_copy(self.b, copy)

    def test_copy_with_vars(self):
        ds = {'vars': self.assorted_vars}
        b = self._base_validate(ds)
        copy = b.copy()
        self._assert_copy(b, copy)

    def test_serialize(self):
        ds = {}
        ds = {'environment': [],
              'vars': self.assorted_vars
              }
        b = self._base_validate(ds)
        ret = b.serialize()
        self.assertIsInstance(ret, dict)

    def test_deserialize(self):
        data = {}
        d = self.ClassUnderTest()
        d.deserialize(data)
        self.assertIn('run_once', d._attributes)
        self.assertIn('check_mode', d._attributes)
        data = {'no_log': False,
                'remote_user': None,
                'vars': self.assorted_vars,
                # 'check_mode': False,
                'always_run': False,
                'environment': [],
                'run_once': False,
                'connection': None,
                'ignore_errors': False,
                'port': 22,
                'a_sentinel_with_an_unlikely_name': ['sure, a list']}
        d = self.ClassUnderTest()
        d.deserialize(data)
        # Unknown keys are dropped; known attributes survive.
        self.assertNotIn('a_sentinel_with_an_unlikely_name', d._attributes)
        self.assertIn('run_once', d._attributes)
        self.assertIn('check_mode', d._attributes)

    def test_serialize_then_deserialize(self):
        ds = {'environment': [],
              'vars': self.assorted_vars}
        b = self._base_validate(ds)
        copy = b.copy()
        ret = b.serialize()
        b.deserialize(ret)
        c = self.ClassUnderTest()
        c.deserialize(ret)
        # TODO: not a great test, but coverage...
        self.maxDiff = None
        self.assertDictEqual(b.serialize(), copy.serialize())
        self.assertDictEqual(c.serialize(), copy.serialize())

    def test_post_validate_empty(self):
        fake_loader = DictDataLoader({})
        templar = Templar(loader=fake_loader)
        ret = self.b.post_validate(templar)
        self.assertIsNone(ret)

    def test_get_ds_none(self):
        ds = self.b.get_ds()
        self.assertIsNone(ds)

    def test_load_data_ds_is_none(self):
        self.assertRaises(AssertionError, self.b.load_data, None)

    def test_load_data_invalid_attr(self):
        ds = {'not_a_valid_attr': [],
              'other': None}
        self.assertRaises(AnsibleParserError, self.b.load_data, ds)

    def test_load_data_invalid_attr_type(self):
        ds = {'environment': True}
        # environment is supposed to be a list. This
        # seems like it shouldn't work?
        ret = self.b.load_data(ds)
        self.assertEqual(True, ret._attributes['environment'])

    def test_post_validate(self):
        ds = {'environment': [],
              'port': 443}
        b = self._base_validate(ds)
        self.assertEqual(b.port, 443)
        self.assertEqual(b.environment, [])

    def test_post_validate_invalid_attr_types(self):
        ds = {'environment': [],
              'port': 'some_port'}
        b = self._base_validate(ds)
        self.assertEqual(b.port, 'some_port')

    def test_squash(self):
        data = self.b.serialize()
        self.b.squash()
        squashed_data = self.b.serialize()
        # TODO: assert something
        self.assertFalse(data['squashed'])
        self.assertTrue(squashed_data['squashed'])

    def test_vars(self):
        # vars as a dict.
        ds = {'environment': [],
              'vars': {'var_2_key': 'var_2_value',
                       'var_1_key': 'var_1_value'}}
        b = self._base_validate(ds)
        self.assertEqual(b.vars['var_1_key'], 'var_1_value')

    def test_vars_list_of_dicts(self):
        ds = {'environment': [],
              'vars': [{'var_2_key': 'var_2_value'},
                       {'var_1_key': 'var_1_value'}]
              }
        b = self._base_validate(ds)
        self.assertEqual(b.vars['var_1_key'], 'var_1_value')

    def test_vars_not_dict_or_list(self):
        ds = {'environment': [],
              'vars': 'I am a string, not a dict or a list of dicts'}
        self.assertRaises(AnsibleParserError, self.b.load_data, ds)

    def test_vars_not_valid_identifier(self):
        ds = {'environment': [],
              'vars': [{'var_2_key': 'var_2_value'},
                       {'1an-invalid identifer': 'var_1_value'}]
              }
        self.assertRaises(AnsibleParserError, self.b.load_data, ds)

    def test_vars_is_list_but_not_of_dicts(self):
        ds = {'environment': [],
              'vars': ['foo', 'bar', 'this is a string not a dict']
              }
        self.assertRaises(AnsibleParserError, self.b.load_data, ds)

    def test_vars_is_none(self):
        # If vars is None, we should get an empty dict back
        ds = {'environment': [],
              'vars': None
              }
        b = self._base_validate(ds)
        self.assertEqual(b.vars, {})

    def test_validate_empty(self):
        self.b.validate()
        self.assertTrue(self.b._validated)

    def test_getters(self):
        # not sure why these exist, but here are tests anyway
        loader = self.b.get_loader()
        variable_manager = self.b.get_variable_manager()
        self.assertEqual(loader, self.b._loader)
        self.assertEqual(variable_manager, self.b._variable_manager)
# TODO/FIXME: test methods for each of the compares would be more precise
class TestExtendValue(unittest.TestCase):
    """Directly exercise Base._extend_value list-merging semantics.

    Fix: deprecated `assertEquals` alias (removed in Python 3.12)
    replaced by `assertEqual`.
    """
    def test_extend_value(self):
        # _extend_value could be a module or staticmethod but since its
        # not, the test is here.
        b = base.Base()
        value_list = ['first', 'second']
        new_value_list = ['new_first', 'new_second']
        ret = b._extend_value(value_list, new_value_list)
        self.assertEqual(value_list + new_value_list, ret)
        ret_prepend = b._extend_value(value_list, new_value_list, prepend=True)
        self.assertEqual(new_value_list + value_list, ret_prepend)
        ret = b._extend_value(new_value_list, value_list)
        self.assertEqual(new_value_list + value_list, ret)
        ret = b._extend_value(new_value_list, value_list, prepend=True)
        self.assertEqual(value_list + new_value_list, ret)
        # Scalars are promoted to single-element lists before merging.
        some_string = 'some string'
        ret = b._extend_value(some_string, new_value_list)
        self.assertEqual([some_string] + new_value_list, ret)
        new_value_string = 'this is the new values'
        ret = b._extend_value(some_string, new_value_string)
        self.assertEqual([some_string, new_value_string], ret)
        ret = b._extend_value(value_list, new_value_string)
        self.assertEqual(value_list + [new_value_string], ret)

    def test_extend_value_none(self):
        b = base.Base()
        # None on both sides yields an empty (falsy) result.
        ret = b._extend_value(None, None)
        self.assertEqual(len(ret), 0)
        self.assertFalse(ret)
        ret = b._extend_value(None, ['foo'])
        self.assertEqual(ret, ['foo'])
class ExampleException(Exception):
    """Raised by custom attribute validators in the fixture classes below."""
    pass
# naming fails me...
class ExampleParentBaseSubClass(base.Base):
    """Minimal Base subclass used as the parent in TestBase._base_validate;
    exposes an explicit dep-chain accessor."""
    _test_attr_parent_string = FieldAttribute(isa='string', default='A string attr for a class that may be a parent for testing')
    def __init__(self):
        super(ExampleParentBaseSubClass, self).__init__()
        self._dep_chain = None
    def get_dep_chain(self):
        return self._dep_chain
class ExampleSubClass(base.Base):
    """Base subclass with a non-inheriting, always-post-validated attribute;
    delegates dep-chain lookup to its parent when one is attached."""
    _test_attr_blip = FieldAttribute(isa='string', default='example sub class test_attr_blip',
                                     inherit=False,
                                     always_post_validate=True)
    def __init__(self):
        super(ExampleSubClass, self).__init__()
    def get_dep_chain(self):
        # Walk up to the parent's chain when attached; otherwise no chain.
        if self._parent:
            return self._parent.get_dep_chain()
        else:
            return None
class BaseSubClass(base.Base):
    """Fixture subclass declaring a FieldAttribute for every isa the
    attribute framework handles, plus validator/getter/post-validate hooks,
    exercised by TestBaseSubClass."""
    _name = FieldAttribute(isa='string', default='', always_post_validate=True)
    _test_attr_bool = FieldAttribute(isa='bool', always_post_validate=True)
    _test_attr_int = FieldAttribute(isa='int', always_post_validate=True)
    _test_attr_float = FieldAttribute(isa='float', default=3.14159, always_post_validate=True)
    _test_attr_list = FieldAttribute(isa='list', listof=string_types, always_post_validate=True)
    _test_attr_list_no_listof = FieldAttribute(isa='list', always_post_validate=True)
    _test_attr_list_required = FieldAttribute(isa='list', listof=string_types, required=True,
                                              default=[], always_post_validate=True)
    _test_attr_barelist = FieldAttribute(isa='barelist', always_post_validate=True)
    _test_attr_string = FieldAttribute(isa='string', default='the_test_attr_string_default_value')
    _test_attr_string_required = FieldAttribute(isa='string', required=True,
                                                default='the_test_attr_string_default_value')
    _test_attr_percent = FieldAttribute(isa='percent', always_post_validate=True)
    # NOTE(review): set()/dict defaults are mutable objects shared across
    # instances; assumed intentional for these fixtures — confirm
    # FieldAttribute copies defaults before relying on this elsewhere.
    _test_attr_set = FieldAttribute(isa='set', default=set(), always_post_validate=True)
    _test_attr_dict = FieldAttribute(isa='dict', default={'a_key': 'a_value'}, always_post_validate=True)
    _test_attr_class = FieldAttribute(isa='class', class_type=ExampleSubClass)
    _test_attr_class_post_validate = FieldAttribute(isa='class', class_type=ExampleSubClass,
                                                    always_post_validate=True)
    _test_attr_unknown_isa = FieldAttribute(isa='not_a_real_isa', always_post_validate=True)
    _test_attr_example = FieldAttribute(isa='string', default='the_default',
                                        always_post_validate=True)
    _test_attr_none = FieldAttribute(isa='string',
                                     always_post_validate=True)
    _test_attr_preprocess = FieldAttribute(isa='string', default='the default for preprocess')
    _test_attr_method = FieldAttribute(isa='string', default='some attr with a getter',
                                       always_post_validate=True)
    _test_attr_method_missing = FieldAttribute(isa='string', default='some attr with a missing getter',
                                               always_post_validate=True)
    def _preprocess_data_basesubclass(self, ds):
        # Hook called via preprocess_data; returns ds unchanged.
        return ds
    def preprocess_data(self, ds):
        return super(BaseSubClass, self).preprocess_data(ds)
    def _get_attr_test_attr_method(self):
        # Custom getter picked up by the attribute framework.
        return 'foo bar'
    def _validate_test_attr_example(self, attr, name, value):
        # Load-time validator: rejects non-str values.
        if not isinstance(value, str):
            raise ExampleException('_test_attr_example is not a string: %s type=%s' % (value, type(value)))
    def _post_validate_test_attr_example(self, attr, value, templar):
        # Post-validate hook: template the value.
        after_template_value = templar.template(value)
        return after_template_value
    def _post_validate_test_attr_none(self, attr, value, templar):
        # Post-validate hook that discards the value entirely.
        return None
    def _get_parent_attribute(self, attr, extend=False, prepend=False):
        # Prefer our own attribute; fall back to (or extend with) the
        # parent's value when ours is None or extension was requested.
        value = None
        try:
            value = self._attributes[attr]
            if self._parent and (value is None or extend):
                parent_value = getattr(self._parent, attr, None)
                if extend:
                    value = self._extend_value(value, parent_value, prepend)
                else:
                    value = parent_value
        except KeyError:
            pass
        return value
# terrible name, but it is a TestBase subclass for testing subclasses of Base
class TestBaseSubClass(TestBase):
ClassUnderTest = BaseSubClass
def _base_validate(self, ds):
ds['test_attr_list_required'] = []
return super(TestBaseSubClass, self)._base_validate(ds)
def test_attr_bool(self):
ds = {'test_attr_bool': True}
bsc = self._base_validate(ds)
self.assertEquals(bsc.test_attr_bool, True)
def test_attr_int(self):
MOST_RANDOM_NUMBER = 37
ds = {'test_attr_int': MOST_RANDOM_NUMBER}
bsc = self._base_validate(ds)
self.assertEquals(bsc.test_attr_int, MOST_RANDOM_NUMBER)
def test_attr_int_del(self):
MOST_RANDOM_NUMBER = 37
ds = {'test_attr_int': MOST_RANDOM_NUMBER}
bsc = self._base_validate(ds)
del bsc.test_attr_int
self.assertNotIn('test_attr_int', bsc._attributes)
def test_attr_float(self):
roughly_pi = 4.0
ds = {'test_attr_float': roughly_pi}
bsc = self._base_validate(ds)
self.assertEquals(bsc.test_attr_float, roughly_pi)
def test_attr_percent(self):
percentage = '90%'
percentage_float = 90.0
ds = {'test_attr_percent': percentage}
bsc = self._base_validate(ds)
self.assertEquals(bsc.test_attr_percent, percentage_float)
# This method works hard and gives it its all and everything it's got. It doesn't
# leave anything on the field. It deserves to pass. It has earned it.
def test_attr_percent_110_percent(self):
percentage = '110.11%'
percentage_float = 110.11
ds = {'test_attr_percent': percentage}
bsc = self._base_validate(ds)
self.assertEquals(bsc.test_attr_percent, percentage_float)
# This method is just here for the paycheck.
def test_attr_percent_60_no_percent_sign(self):
percentage = '60'
percentage_float = 60.0
ds = {'test_attr_percent': percentage}
bsc = self._base_validate(ds)
self.assertEquals(bsc.test_attr_percent, percentage_float)
def test_attr_set(self):
test_set = set(['first_string_in_set', 'second_string_in_set'])
ds = {'test_attr_set': test_set}
bsc = self._base_validate(ds)
self.assertEquals(bsc.test_attr_set, test_set)
def test_attr_set_string(self):
test_data = ['something', 'other']
test_value = ','.join(test_data)
ds = {'test_attr_set': test_value}
bsc = self._base_validate(ds)
self.assertEquals(bsc.test_attr_set, set(test_data))
def test_attr_set_not_string_or_list(self):
test_value = 37.1
ds = {'test_attr_set': test_value}
bsc = self._base_validate(ds)
self.assertEquals(bsc.test_attr_set, set([test_value]))
def test_attr_dict(self):
test_dict = {'a_different_key': 'a_different_value'}
ds = {'test_attr_dict': test_dict}
bsc = self._base_validate(ds)
self.assertEquals(bsc.test_attr_dict, test_dict)
def test_attr_dict_string(self):
test_value = 'just_some_random_string'
ds = {'test_attr_dict': test_value}
self.assertRaisesRegexp(AnsibleParserError, 'is not a dictionary', self._base_validate, ds)
def test_attr_class(self):
esc = ExampleSubClass()
ds = {'test_attr_class': esc}
bsc = self._base_validate(ds)
self.assertIs(bsc.test_attr_class, esc)
def test_attr_class_wrong_type(self):
not_a_esc = ExampleSubClass
ds = {'test_attr_class': not_a_esc}
bsc = self._base_validate(ds)
self.assertIs(bsc.test_attr_class, not_a_esc)
def test_attr_class_post_validate(self):
esc = ExampleSubClass()
ds = {'test_attr_class_post_validate': esc}
bsc = self._base_validate(ds)
self.assertIs(bsc.test_attr_class_post_validate, esc)
def test_attr_class_post_validate_class_not_instance(self):
not_a_esc = ExampleSubClass
ds = {'test_attr_class_post_validate': not_a_esc}
self.assertRaisesRegexp(AnsibleParserError, 'is not a valid.*got a.*Meta.*instead',
self._base_validate, ds)
def test_attr_class_post_validate_wrong_class(self):
not_a_esc = 37
ds = {'test_attr_class_post_validate': not_a_esc}
self.assertRaisesRegexp(AnsibleParserError, 'is not a valid.*got a.*int.*instead',
self._base_validate, ds)
def test_attr_remote_user(self):
ds = {'remote_user': 'testuser'}
bsc = self._base_validate(ds)
# TODO: attemp to verify we called parent gettters etc
self.assertEquals(bsc.remote_user, 'testuser')
def test_attr_example_undefined(self):
ds = {'test_attr_example': '{{ some_var_that_shouldnt_exist_to_test_omit }}'}
exc_regex_str = 'test_attr_example.*which appears to include a variable that is undefined.*some_var_that_shouldnt'
self.assertRaisesRegexp(AnsibleParserError, exc_regex_str, self._base_validate, ds)
def test_attr_name_undefined(self):
ds = {'name': '{{ some_var_that_shouldnt_exist_to_test_omit }}'}
bsc = self._base_validate(ds)
# the attribute 'name' is special cases in post_validate
self.assertEquals(bsc.name, '{{ some_var_that_shouldnt_exist_to_test_omit }}')
def test_subclass_validate_method(self):
ds = {'test_attr_list': ['string_list_item_1', 'string_list_item_2'],
'test_attr_example': 'the_test_attr_example_value_string'}
# Not throwing an exception here is the test
bsc = self._base_validate(ds)
self.assertEquals(bsc.test_attr_example, 'the_test_attr_example_value_string')
    def test_subclass_validate_method_invalid(self):
        """A subclass-provided _validate_* hook rejects bad values with its own error."""
        ds = {'test_attr_example': [None]}
        self.assertRaises(ExampleException, self._base_validate, ds)
def test_attr_none(self):
ds = {'test_attr_none': 'foo'}
bsc = self._base_validate(ds)
self.assertEquals(bsc.test_attr_none, None)
def test_attr_string(self):
the_string_value = "the new test_attr_string_value"
ds = {'test_attr_string': the_string_value}
bsc = self._base_validate(ds)
self.assertEquals(bsc.test_attr_string, the_string_value)
    def test_attr_string_invalid_list(self):
        """A list for a string attribute is rejected at validation time."""
        ds = {'test_attr_string': ['The new test_attr_string', 'value, however in a list']}
        self.assertRaises(AnsibleParserError, self._base_validate, ds)
def test_attr_string_required(self):
the_string_value = "the new test_attr_string_required_value"
ds = {'test_attr_string_required': the_string_value}
bsc = self._base_validate(ds)
self.assertEquals(bsc.test_attr_string_required, the_string_value)
    def test_attr_list_invalid(self):
        """A dict for a list attribute is rejected at validation time."""
        ds = {'test_attr_list': {}}
        self.assertRaises(AnsibleParserError, self._base_validate, ds)
def test_attr_list(self):
string_list = ['foo', 'bar']
ds = {'test_attr_list': string_list}
bsc = self._base_validate(ds)
self.assertEquals(string_list, bsc._attributes['test_attr_list'])
def test_attr_list_none(self):
ds = {'test_attr_list': None}
bsc = self._base_validate(ds)
self.assertEquals(None, bsc._attributes['test_attr_list'])
def test_attr_list_no_listof(self):
test_list = ['foo', 'bar', 123]
ds = {'test_attr_list_no_listof': test_list}
bsc = self._base_validate(ds)
self.assertEquals(test_list, bsc._attributes['test_attr_list_no_listof'])
def test_attr_list_required(self):
string_list = ['foo', 'bar']
ds = {'test_attr_list_required': string_list}
bsc = self.ClassUnderTest()
bsc.load_data(ds)
fake_loader = DictDataLoader({})
templar = Templar(loader=fake_loader)
bsc.post_validate(templar)
self.assertEquals(string_list, bsc._attributes['test_attr_list_required'])
    def test_attr_list_required_empty_string(self):
        """A required list containing only an empty string fails post_validate."""
        string_list = [""]
        ds = {'test_attr_list_required': string_list}
        bsc = self.ClassUnderTest()
        bsc.load_data(ds)
        fake_loader = DictDataLoader({})
        templar = Templar(loader=fake_loader)
        self.assertRaisesRegexp(AnsibleParserError, 'cannot have empty values',
                                bsc.post_validate, templar)
def test_attr_barelist(self):
ds = {'test_attr_barelist': 'comma,separated,values'}
bsc = self._base_validate(ds)
self.assertEquals(['comma', 'separated', 'values'], bsc._attributes['test_attr_barelist'])
def test_attr_unknown(self):
a_list = ['some string']
ds = {'test_attr_unknown_isa': a_list}
bsc = self._base_validate(ds)
self.assertEquals(bsc.test_attr_unknown_isa, a_list)
def test_attr_method(self):
ds = {'test_attr_method': 'value from the ds'}
bsc = self._base_validate(ds)
# The value returned by the subclasses _get_attr_test_attr_method
self.assertEquals(bsc.test_attr_method, 'foo bar')
def test_attr_method_missing(self):
a_string = 'The value set from the ds'
ds = {'test_attr_method_missing': a_string}
bsc = self._base_validate(ds)
self.assertEquals(bsc.test_attr_method_missing, a_string)
| gpl-3.0 |
alex/tablib | tablib/packages/yaml/events.py | 985 | 2445 |
# Abstract classes.
class Event(object):
    """Base class for all YAML stream events.

    Every event optionally carries start/end marks that point back into
    the source stream (used for error reporting).
    """
    def __init__(self, start_mark=None, end_mark=None):
        self.start_mark = start_mark
        self.end_mark = end_mark
    def __repr__(self):
        """Render the class name plus whichever well-known fields this
        event instance actually defines (anchor/tag/implicit/value)."""
        parts = []
        for name in ['anchor', 'tag', 'implicit', 'value']:
            if hasattr(self, name):
                parts.append('%s=%r' % (name, getattr(self, name)))
        return '%s(%s)' % (self.__class__.__name__, ', '.join(parts))
class NodeEvent(Event):
    """Event tied to a single node; adds the node's anchor (may be None)."""
    def __init__(self, anchor, start_mark=None, end_mark=None):
        # Delegate mark bookkeeping to the base class.
        Event.__init__(self, start_mark, end_mark)
        self.anchor = anchor
class CollectionStartEvent(NodeEvent):
    """Start of a collection (sequence or mapping) node.

    flow_style hints the emitter: True for flow style, False for block
    style, None for no preference.
    """
    def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
            flow_style=None):
        # Anchor and marks are handled by NodeEvent.
        NodeEvent.__init__(self, anchor, start_mark, end_mark)
        self.tag = tag
        self.implicit = implicit
        self.flow_style = flow_style
class CollectionEndEvent(Event):
    # Marks the end of a sequence or mapping; carries no extra data.
    pass
# Implementations.
class StreamStartEvent(Event):
    """First event of every stream; records the stream encoding, if known."""
    def __init__(self, start_mark=None, end_mark=None, encoding=None):
        Event.__init__(self, start_mark, end_mark)
        self.encoding = encoding
class StreamEndEvent(Event):
    # Final event of every stream; carries no extra data.
    pass
class DocumentStartEvent(Event):
    """Start of a YAML document.

    explicit records whether the '---' marker was present; version and
    tags hold any %YAML / %TAG directive values (None when absent).
    """
    def __init__(self, start_mark=None, end_mark=None,
            explicit=None, version=None, tags=None):
        Event.__init__(self, start_mark, end_mark)
        self.explicit = explicit
        self.version = version
        self.tags = tags
class DocumentEndEvent(Event):
    """End of a YAML document; explicit records whether '...' was present."""
    def __init__(self, start_mark=None, end_mark=None,
            explicit=None):
        Event.__init__(self, start_mark, end_mark)
        self.explicit = explicit
class AliasEvent(NodeEvent):
    # An alias (*anchor) reference to a previously anchored node;
    # the inherited 'anchor' attribute names the target.
    pass
class ScalarEvent(NodeEvent):
    """A scalar node with its text value.

    implicit flags whether the tag may be omitted (presumably a pair of
    booleans for plain/quoted resolution — confirm against the parser);
    style is the requested scalar style, None meaning "choose".
    """
    def __init__(self, anchor, tag, implicit, value,
            start_mark=None, end_mark=None, style=None):
        # Anchor and marks are handled by NodeEvent.
        NodeEvent.__init__(self, anchor, start_mark, end_mark)
        self.tag = tag
        self.implicit = implicit
        self.value = value
        self.style = style
class SequenceStartEvent(CollectionStartEvent):
    # Start of a sequence node; all data lives on CollectionStartEvent.
    pass
class SequenceEndEvent(CollectionEndEvent):
    # End of a sequence node; carries no extra data.
    pass
class MappingStartEvent(CollectionStartEvent):
    # Start of a mapping node; all data lives on CollectionStartEvent.
    pass
class MappingEndEvent(CollectionEndEvent):
    # End of a mapping node; carries no extra data.
    pass
| mit |
pschmitt/home-assistant | tests/components/mqtt/test_cover.py | 5 | 62666 | """The tests for the MQTT cover platform."""
import pytest
from homeassistant.components import cover
from homeassistant.components.cover import (
ATTR_CURRENT_POSITION,
ATTR_CURRENT_TILT_POSITION,
ATTR_POSITION,
ATTR_TILT_POSITION,
)
from homeassistant.components.mqtt.cover import MqttCover
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_ENTITY_ID,
SERVICE_CLOSE_COVER,
SERVICE_CLOSE_COVER_TILT,
SERVICE_OPEN_COVER,
SERVICE_OPEN_COVER_TILT,
SERVICE_SET_COVER_POSITION,
SERVICE_SET_COVER_TILT_POSITION,
SERVICE_STOP_COVER,
SERVICE_TOGGLE,
SERVICE_TOGGLE_COVER_TILT,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
STATE_UNKNOWN,
)
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.common import async_fire_mqtt_message
DEFAULT_CONFIG = {
cover.DOMAIN: {"platform": "mqtt", "name": "test", "state_topic": "test-topic"}
}
async def test_state_via_state_topic(hass, mqtt_mock):
    """Test the controlling state via topic."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "qos": 0,
                "payload_open": "OPEN",
                "payload_close": "CLOSE",
                "payload_stop": "STOP",
            }
        },
    )
    await hass.async_block_till_done()
    # Before any MQTT message arrives the cover is unknown and not assumed.
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
    assert not state.attributes.get(ATTR_ASSUMED_STATE)
    # A "closed" payload on the state topic drives the entity closed.
    async_fire_mqtt_message(hass, "state-topic", STATE_CLOSED)
    state = hass.states.get("cover.test")
    assert state.state == STATE_CLOSED
    # An "open" payload drives it open again.
    async_fire_mqtt_message(hass, "state-topic", STATE_OPEN)
    state = hass.states.get("cover.test")
    assert state.state == STATE_OPEN
async def test_opening_and_closing_state_via_custom_state_payload(hass, mqtt_mock):
"""Test the controlling opening and closing state via a custom payload."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"state_opening": "34",
"state_closing": "--43",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "state-topic", "34")
state = hass.states.get("cover.test")
assert state.state == STATE_OPENING
async_fire_mqtt_message(hass, "state-topic", "--43")
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSING
async_fire_mqtt_message(hass, "state-topic", STATE_CLOSED)
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSED
async def test_open_closed_state_from_position_optimistic(hass, mqtt_mock):
"""Test the state after setting the position using optimistic mode."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"position_topic": "position-topic",
"set_position_topic": "set-position-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"optimistic": True,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
await hass.services.async_call(
cover.DOMAIN,
SERVICE_SET_COVER_POSITION,
{ATTR_ENTITY_ID: "cover.test", ATTR_POSITION: 0},
blocking=True,
)
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSED
assert state.attributes.get(ATTR_ASSUMED_STATE)
await hass.services.async_call(
cover.DOMAIN,
SERVICE_SET_COVER_POSITION,
{ATTR_ENTITY_ID: "cover.test", ATTR_POSITION: 100},
blocking=True,
)
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
assert state.attributes.get(ATTR_ASSUMED_STATE)
async def test_position_via_position_topic(hass, mqtt_mock):
"""Test the controlling state via topic."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"position_topic": "get-position-topic",
"position_open": 100,
"position_closed": 0,
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "get-position-topic", "0")
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSED
async_fire_mqtt_message(hass, "get-position-topic", "100")
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
async def test_state_via_template(hass, mqtt_mock):
"""Test the controlling state via topic."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"value_template": "\
{% if (value | multiply(0.01) | int) == 0 %}\
closed\
{% else %}\
open\
{% endif %}",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
async_fire_mqtt_message(hass, "state-topic", "10000")
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
async_fire_mqtt_message(hass, "state-topic", "99")
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSED
async def test_position_via_template(hass, mqtt_mock):
"""Test the controlling state via topic."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"position_topic": "get-position-topic",
"command_topic": "command-topic",
"qos": 0,
"value_template": "{{ (value | multiply(0.01)) | int }}",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
async_fire_mqtt_message(hass, "get-position-topic", "10000")
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
async_fire_mqtt_message(hass, "get-position-topic", "5000")
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
async_fire_mqtt_message(hass, "get-position-topic", "99")
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSED
async def test_optimistic_state_change(hass, mqtt_mock):
"""Test changing state optimistically."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "command-topic",
"qos": 0,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
assert state.attributes.get(ATTR_ASSUMED_STATE)
await hass.services.async_call(
cover.DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "OPEN", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
await hass.services.async_call(
cover.DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "CLOSE", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("cover.test")
assert STATE_CLOSED == state.state
await hass.services.async_call(
cover.DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "OPEN", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("cover.test")
assert STATE_OPEN == state.state
await hass.services.async_call(
cover.DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "CLOSE", 0, False)
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSED
async def test_optimistic_state_change_with_position(hass, mqtt_mock):
"""Test changing state optimistically."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"optimistic": True,
"command_topic": "command-topic",
"position_topic": "position-topic",
"qos": 0,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("cover.test")
assert state.state == STATE_UNKNOWN
assert state.attributes.get(ATTR_ASSUMED_STATE)
assert state.attributes.get(ATTR_CURRENT_POSITION) is None
await hass.services.async_call(
cover.DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "OPEN", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("cover.test")
assert state.state == STATE_OPEN
assert state.attributes.get(ATTR_CURRENT_POSITION) == 100
await hass.services.async_call(
cover.DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "CLOSE", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("cover.test")
assert STATE_CLOSED == state.state
assert state.attributes.get(ATTR_CURRENT_POSITION) == 0
await hass.services.async_call(
cover.DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "OPEN", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("cover.test")
assert STATE_OPEN == state.state
assert state.attributes.get(ATTR_CURRENT_POSITION) == 100
await hass.services.async_call(
cover.DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with("command-topic", "CLOSE", 0, False)
state = hass.states.get("cover.test")
assert state.state == STATE_CLOSED
assert state.attributes.get(ATTR_CURRENT_POSITION) == 0
async def test_send_open_cover_command(hass, mqtt_mock):
    """Test the sending of open_cover."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "qos": 2,
            }
        },
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
    await hass.services.async_call(
        cover.DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
    )
    # Publish uses the configured QoS (2); with a state_topic present the
    # entity is non-optimistic, so the state stays unknown until feedback.
    mqtt_mock.async_publish.assert_called_once_with("command-topic", "OPEN", 2, False)
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
async def test_send_close_cover_command(hass, mqtt_mock):
    """Test the sending of close_cover."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "qos": 2,
            }
        },
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
    await hass.services.async_call(
        cover.DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
    )
    # Publish uses the configured QoS (2); non-optimistic entity keeps
    # reporting unknown until device feedback arrives on the state topic.
    mqtt_mock.async_publish.assert_called_once_with("command-topic", "CLOSE", 2, False)
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
# NOTE(review): double underscore in the test name looks like a typo
# (test_send_stop__cover_command); renaming would change the collected
# test id, so it is only flagged here.
async def test_send_stop__cover_command(hass, mqtt_mock):
    """Test the sending of stop_cover."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "qos": 2,
            }
        },
    )
    await hass.async_block_till_done()
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
    await hass.services.async_call(
        cover.DOMAIN, SERVICE_STOP_COVER, {ATTR_ENTITY_ID: "cover.test"}, blocking=True
    )
    # Stop publishes the default STOP payload at the configured QoS.
    mqtt_mock.async_publish.assert_called_once_with("command-topic", "STOP", 2, False)
    state = hass.states.get("cover.test")
    assert state.state == STATE_UNKNOWN
async def test_current_cover_position(hass, mqtt_mock):
"""Test the current cover position."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"position_topic": "get-position-topic",
"command_topic": "command-topic",
"position_open": 100,
"position_closed": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
}
},
)
await hass.async_block_till_done()
state_attributes_dict = hass.states.get("cover.test").attributes
assert not (ATTR_CURRENT_POSITION in state_attributes_dict)
assert not (ATTR_CURRENT_TILT_POSITION in state_attributes_dict)
assert not (4 & hass.states.get("cover.test").attributes["supported_features"] == 4)
async_fire_mqtt_message(hass, "get-position-topic", "0")
current_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_cover_position == 0
async_fire_mqtt_message(hass, "get-position-topic", "50")
current_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_cover_position == 50
async_fire_mqtt_message(hass, "get-position-topic", "non-numeric")
current_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_cover_position == 50
async_fire_mqtt_message(hass, "get-position-topic", "101")
current_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_cover_position == 100
async def test_current_cover_position_inverted(hass, mqtt_mock):
"""Test the current cover position."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"position_topic": "get-position-topic",
"command_topic": "command-topic",
"position_open": 0,
"position_closed": 100,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
}
},
)
await hass.async_block_till_done()
state_attributes_dict = hass.states.get("cover.test").attributes
assert not (ATTR_CURRENT_POSITION in state_attributes_dict)
assert not (ATTR_CURRENT_TILT_POSITION in state_attributes_dict)
assert not (4 & hass.states.get("cover.test").attributes["supported_features"] == 4)
async_fire_mqtt_message(hass, "get-position-topic", "100")
current_percentage_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_percentage_cover_position == 0
assert hass.states.get("cover.test").state == STATE_CLOSED
async_fire_mqtt_message(hass, "get-position-topic", "0")
current_percentage_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_percentage_cover_position == 100
assert hass.states.get("cover.test").state == STATE_OPEN
async_fire_mqtt_message(hass, "get-position-topic", "50")
current_percentage_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_percentage_cover_position == 50
assert hass.states.get("cover.test").state == STATE_OPEN
async_fire_mqtt_message(hass, "get-position-topic", "non-numeric")
current_percentage_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_percentage_cover_position == 50
assert hass.states.get("cover.test").state == STATE_OPEN
async_fire_mqtt_message(hass, "get-position-topic", "101")
current_percentage_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_percentage_cover_position == 0
assert hass.states.get("cover.test").state == STATE_CLOSED
async def test_optimistic_position(hass, mqtt_mock):
    """Test optimistic position is not supported."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "command_topic": "command-topic",
                "set_position_topic": "set-position-topic",
            }
        },
    )
    await hass.async_block_till_done()
    # set_position_topic without a position_topic is an invalid config,
    # so the entity is never created.
    state = hass.states.get("cover.test")
    assert state is None
async def test_position_update(hass, mqtt_mock):
"""Test cover position update from received MQTT message."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"position_topic": "get-position-topic",
"command_topic": "command-topic",
"set_position_topic": "set-position-topic",
"position_open": 100,
"position_closed": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
}
},
)
await hass.async_block_till_done()
state_attributes_dict = hass.states.get("cover.test").attributes
assert not (ATTR_CURRENT_POSITION in state_attributes_dict)
assert not (ATTR_CURRENT_TILT_POSITION in state_attributes_dict)
assert 4 & hass.states.get("cover.test").attributes["supported_features"] == 4
async_fire_mqtt_message(hass, "get-position-topic", "22")
state_attributes_dict = hass.states.get("cover.test").attributes
assert ATTR_CURRENT_POSITION in state_attributes_dict
assert not (ATTR_CURRENT_TILT_POSITION in state_attributes_dict)
current_cover_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_POSITION
]
assert current_cover_position == 22
async def test_set_position_templated(hass, mqtt_mock):
"""Test setting cover position via template."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"position_topic": "get-position-topic",
"command_topic": "command-topic",
"position_open": 100,
"position_closed": 0,
"set_position_topic": "set-position-topic",
"set_position_template": "{{100-62}}",
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
}
},
)
await hass.async_block_till_done()
await hass.services.async_call(
cover.DOMAIN,
SERVICE_SET_COVER_POSITION,
{ATTR_ENTITY_ID: "cover.test", ATTR_POSITION: 100},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with(
"set-position-topic", "38", 0, False
)
async def test_set_position_untemplated(hass, mqtt_mock):
"""Test setting cover position via template."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"position_topic": "state-topic",
"command_topic": "command-topic",
"set_position_topic": "position-topic",
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
}
},
)
await hass.async_block_till_done()
await hass.services.async_call(
cover.DOMAIN,
SERVICE_SET_COVER_POSITION,
{ATTR_ENTITY_ID: "cover.test", ATTR_POSITION: 62},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with("position-topic", 62, 0, False)
async def test_set_position_untemplated_custom_percentage_range(hass, mqtt_mock):
"""Test setting cover position via template."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"position_topic": "state-topic",
"command_topic": "command-topic",
"set_position_topic": "position-topic",
"position_open": 0,
"position_closed": 100,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
}
},
)
await hass.async_block_till_done()
await hass.services.async_call(
cover.DOMAIN,
SERVICE_SET_COVER_POSITION,
{ATTR_ENTITY_ID: "cover.test", ATTR_POSITION: 38},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with("position-topic", 62, 0, False)
async def test_no_command_topic(hass, mqtt_mock):
    """Test with no command topic."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "qos": 0,
                "payload_open": "OPEN",
                "payload_close": "CLOSE",
                "payload_stop": "STOP",
                "tilt_command_topic": "tilt-command",
                "tilt_status_topic": "tilt-status",
            }
        },
    )
    await hass.async_block_till_done()
    # 240 — presumably the tilt-only feature bits (open/close/stop/set
    # tilt); confirm against cover's SUPPORT_* constants.
    assert hass.states.get("cover.test").attributes["supported_features"] == 240
async def test_no_payload_stop(hass, mqtt_mock):
    """Test with no stop payload."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "command_topic": "command-topic",
                "qos": 0,
                "payload_open": "OPEN",
                "payload_close": "CLOSE",
                "payload_stop": None,
            }
        },
    )
    await hass.async_block_till_done()
    # 3 — presumably just the open + close feature bits, since
    # payload_stop is explicitly disabled; confirm against SUPPORT_*.
    assert hass.states.get("cover.test").attributes["supported_features"] == 3
async def test_with_command_topic_and_tilt(hass, mqtt_mock):
    """Test with command topic and tilt config."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "command_topic": "test",
                "platform": "mqtt",
                "name": "test",
                "qos": 0,
                "payload_open": "OPEN",
                "payload_close": "CLOSE",
                "payload_stop": "STOP",
                "tilt_command_topic": "tilt-command",
                "tilt_status_topic": "tilt-status",
            }
        },
    )
    await hass.async_block_till_done()
    # 251 — presumably open/close/stop plus all tilt bits (no set-position,
    # since no set_position_topic); confirm against SUPPORT_* constants.
    assert hass.states.get("cover.test").attributes["supported_features"] == 251
async def test_tilt_defaults(hass, mqtt_mock):
    """Test the defaults."""
    assert await async_setup_component(
        hass,
        cover.DOMAIN,
        {
            cover.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "qos": 0,
                "payload_open": "OPEN",
                "payload_close": "CLOSE",
                "payload_stop": "STOP",
                "tilt_command_topic": "tilt-command",
                "tilt_status_topic": "tilt-status",
            }
        },
    )
    await hass.async_block_till_done()
    state_attributes_dict = hass.states.get("cover.test").attributes
    assert ATTR_CURRENT_TILT_POSITION in state_attributes_dict
    current_cover_position = hass.states.get("cover.test").attributes[
        ATTR_CURRENT_TILT_POSITION
    ]
    # NOTE(review): comparing a tilt *position* attribute to the literal
    # "unknown" state string is odd, but it is the current behavior the
    # test pins down before any tilt status message arrives.
    assert current_cover_position == STATE_UNKNOWN
async def test_tilt_via_invocation_defaults(hass, mqtt_mock):
"""Test tilt defaults on close/open."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
}
},
)
await hass.async_block_till_done()
await hass.services.async_call(
cover.DOMAIN,
SERVICE_OPEN_COVER_TILT,
{ATTR_ENTITY_ID: "cover.test"},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with("tilt-command-topic", 100, 0, False)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
cover.DOMAIN,
SERVICE_CLOSE_COVER_TILT,
{ATTR_ENTITY_ID: "cover.test"},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with("tilt-command-topic", 0, 0, False)
mqtt_mock.async_publish.reset_mock()
# Close tilt status would be received from device when non-optimistic
async_fire_mqtt_message(hass, "tilt-status-topic", "0")
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 0
await hass.services.async_call(
cover.DOMAIN,
SERVICE_TOGGLE_COVER_TILT,
{ATTR_ENTITY_ID: "cover.test"},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with("tilt-command-topic", 100, 0, False)
mqtt_mock.async_publish.reset_mock()
# Open tilt status would be received from device when non-optimistic
async_fire_mqtt_message(hass, "tilt-status-topic", "100")
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 100
await hass.services.async_call(
cover.DOMAIN,
SERVICE_TOGGLE_COVER_TILT,
{ATTR_ENTITY_ID: "cover.test"},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with("tilt-command-topic", 0, 0, False)
async def test_tilt_given_value(hass, mqtt_mock):
"""Test tilting to a given value."""
assert await async_setup_component(
hass,
cover.DOMAIN,
{
cover.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "state-topic",
"command_topic": "command-topic",
"qos": 0,
"payload_open": "OPEN",
"payload_close": "CLOSE",
"payload_stop": "STOP",
"tilt_command_topic": "tilt-command-topic",
"tilt_status_topic": "tilt-status-topic",
"tilt_opened_value": 80,
"tilt_closed_value": 25,
}
},
)
await hass.async_block_till_done()
await hass.services.async_call(
cover.DOMAIN,
SERVICE_OPEN_COVER_TILT,
{ATTR_ENTITY_ID: "cover.test"},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with("tilt-command-topic", 80, 0, False)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
cover.DOMAIN,
SERVICE_CLOSE_COVER_TILT,
{ATTR_ENTITY_ID: "cover.test"},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with("tilt-command-topic", 25, 0, False)
mqtt_mock.async_publish.reset_mock()
# Close tilt status would be received from device when non-optimistic
async_fire_mqtt_message(hass, "tilt-status-topic", "25")
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 25
await hass.services.async_call(
cover.DOMAIN,
SERVICE_TOGGLE_COVER_TILT,
{ATTR_ENTITY_ID: "cover.test"},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with("tilt-command-topic", 80, 0, False)
mqtt_mock.async_publish.reset_mock()
# Open tilt status would be received from device when non-optimistic
async_fire_mqtt_message(hass, "tilt-status-topic", "80")
current_cover_tilt_position = hass.states.get("cover.test").attributes[
ATTR_CURRENT_TILT_POSITION
]
assert current_cover_tilt_position == 80
await hass.services.async_call(
cover.DOMAIN,
SERVICE_TOGGLE_COVER_TILT,
{ATTR_ENTITY_ID: "cover.test"},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with("tilt-command-topic", 25, 0, False)
async def test_tilt_given_value_optimistic(hass, mqtt_mock):
    """Test tilting to a given value with optimistic state updates."""
    config = {
        cover.DOMAIN: dict(
            platform="mqtt",
            name="test",
            state_topic="state-topic",
            command_topic="command-topic",
            qos=0,
            payload_open="OPEN",
            payload_close="CLOSE",
            payload_stop="STOP",
            tilt_command_topic="tilt-command-topic",
            tilt_status_topic="tilt-status-topic",
            tilt_opened_value=80,
            tilt_closed_value=25,
            tilt_optimistic=True,
        )
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    target = {ATTR_ENTITY_ID: "cover.test"}
    await hass.services.async_call(
        cover.DOMAIN, SERVICE_OPEN_COVER_TILT, target, blocking=True
    )
    # Optimistic mode: state updates without a status message from the device.
    state = hass.states.get("cover.test")
    assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 80
    mqtt_mock.async_publish.assert_called_once_with("tilt-command-topic", 80, 0, False)
    mqtt_mock.async_publish.reset_mock()

    await hass.services.async_call(
        cover.DOMAIN, SERVICE_CLOSE_COVER_TILT, target, blocking=True
    )
    state = hass.states.get("cover.test")
    assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 25
    mqtt_mock.async_publish.assert_called_once_with("tilt-command-topic", 25, 0, False)
async def test_tilt_given_value_altered_range(hass, mqtt_mock):
    """Test tilting to a given value with a non-default tilt range."""
    config = {
        cover.DOMAIN: dict(
            platform="mqtt",
            name="test",
            state_topic="state-topic",
            command_topic="command-topic",
            qos=0,
            payload_open="OPEN",
            payload_close="CLOSE",
            payload_stop="STOP",
            tilt_command_topic="tilt-command-topic",
            tilt_status_topic="tilt-status-topic",
            tilt_opened_value=25,
            tilt_closed_value=0,
            tilt_min=0,
            tilt_max=50,
            tilt_optimistic=True,
        )
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    target = {ATTR_ENTITY_ID: "cover.test"}
    await hass.services.async_call(
        cover.DOMAIN, SERVICE_OPEN_COVER_TILT, target, blocking=True
    )
    # Raw value 25 in range [0, 50] reads back as 50 percent.
    state = hass.states.get("cover.test")
    assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 50
    mqtt_mock.async_publish.assert_called_once_with("tilt-command-topic", 25, 0, False)
    mqtt_mock.async_publish.reset_mock()

    await hass.services.async_call(
        cover.DOMAIN, SERVICE_CLOSE_COVER_TILT, target, blocking=True
    )
    state = hass.states.get("cover.test")
    assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 0
    mqtt_mock.async_publish.assert_called_once_with("tilt-command-topic", 0, 0, False)
    mqtt_mock.async_publish.reset_mock()

    await hass.services.async_call(
        cover.DOMAIN, SERVICE_TOGGLE_COVER_TILT, target, blocking=True
    )
    state = hass.states.get("cover.test")
    assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 50
    mqtt_mock.async_publish.assert_called_once_with("tilt-command-topic", 25, 0, False)
async def test_tilt_via_topic(hass, mqtt_mock):
    """Test tilt by updating status via MQTT."""
    config = {
        cover.DOMAIN: dict(
            platform="mqtt",
            name="test",
            state_topic="state-topic",
            command_topic="command-topic",
            qos=0,
            payload_open="OPEN",
            payload_close="CLOSE",
            payload_stop="STOP",
            tilt_command_topic="tilt-command-topic",
            tilt_status_topic="tilt-status-topic",
        )
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    async_fire_mqtt_message(hass, "tilt-status-topic", "0")
    state = hass.states.get("cover.test")
    assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 0

    async_fire_mqtt_message(hass, "tilt-status-topic", "50")
    state = hass.states.get("cover.test")
    assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 50
async def test_tilt_via_topic_template(hass, mqtt_mock):
    """Test tilt by updating status via MQTT and template."""
    config = {
        cover.DOMAIN: dict(
            platform="mqtt",
            name="test",
            state_topic="state-topic",
            command_topic="command-topic",
            qos=0,
            payload_open="OPEN",
            payload_close="CLOSE",
            payload_stop="STOP",
            tilt_command_topic="tilt-command-topic",
            tilt_status_topic="tilt-status-topic",
            tilt_status_template="{{ (value | multiply(0.01)) | int }}",
            tilt_opened_value=400,
            tilt_closed_value=125,
        )
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    # Template scales the payload by 0.01 before it becomes the position.
    async_fire_mqtt_message(hass, "tilt-status-topic", "99")
    state = hass.states.get("cover.test")
    assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 0

    async_fire_mqtt_message(hass, "tilt-status-topic", "5000")
    state = hass.states.get("cover.test")
    assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 50
async def test_tilt_via_topic_altered_range(hass, mqtt_mock):
    """Test tilt status via MQTT with altered tilt range."""
    config = {
        cover.DOMAIN: dict(
            platform="mqtt",
            name="test",
            state_topic="state-topic",
            command_topic="command-topic",
            qos=0,
            payload_open="OPEN",
            payload_close="CLOSE",
            payload_stop="STOP",
            tilt_command_topic="tilt-command-topic",
            tilt_status_topic="tilt-status-topic",
            tilt_min=0,
            tilt_max=50,
        )
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    # Raw device values in [0, 50] scale to percentages in [0, 100].
    for raw, percent in (("0", 0), ("50", 100), ("25", 50)):
        async_fire_mqtt_message(hass, "tilt-status-topic", raw)
        state = hass.states.get("cover.test")
        assert state.attributes[ATTR_CURRENT_TILT_POSITION] == percent
async def test_tilt_via_topic_template_altered_range(hass, mqtt_mock):
    """Test tilt status via MQTT and template with altered tilt range."""
    config = {
        cover.DOMAIN: dict(
            platform="mqtt",
            name="test",
            state_topic="state-topic",
            command_topic="command-topic",
            qos=0,
            payload_open="OPEN",
            payload_close="CLOSE",
            payload_stop="STOP",
            tilt_command_topic="tilt-command-topic",
            tilt_status_topic="tilt-status-topic",
            tilt_status_template="{{ (value | multiply(0.01)) | int }}",
            tilt_opened_value=400,
            tilt_closed_value=125,
            tilt_min=0,
            tilt_max=50,
        )
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    # Payload is first scaled by the template (x0.01), then mapped from the
    # [0, 50] tilt range onto a 0-100 percentage.
    for raw, percent in (("99", 0), ("5000", 100), ("2500", 50)):
        async_fire_mqtt_message(hass, "tilt-status-topic", raw)
        state = hass.states.get("cover.test")
        assert state.attributes[ATTR_CURRENT_TILT_POSITION] == percent
async def test_tilt_position(hass, mqtt_mock):
    """Test tilt via method invocation."""
    config = {
        cover.DOMAIN: dict(
            platform="mqtt",
            name="test",
            state_topic="state-topic",
            command_topic="command-topic",
            qos=0,
            payload_open="OPEN",
            payload_close="CLOSE",
            payload_stop="STOP",
            tilt_command_topic="tilt-command-topic",
            tilt_status_topic="tilt-status-topic",
        )
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    service_data = {ATTR_ENTITY_ID: "cover.test", ATTR_TILT_POSITION: 50}
    await hass.services.async_call(
        cover.DOMAIN, SERVICE_SET_COVER_TILT_POSITION, service_data, blocking=True
    )
    mqtt_mock.async_publish.assert_called_once_with("tilt-command-topic", 50, 0, False)
async def test_tilt_position_altered_range(hass, mqtt_mock):
    """Test tilt via method invocation with altered range."""
    config = {
        cover.DOMAIN: dict(
            platform="mqtt",
            name="test",
            state_topic="state-topic",
            command_topic="command-topic",
            qos=0,
            payload_open="OPEN",
            payload_close="CLOSE",
            payload_stop="STOP",
            tilt_command_topic="tilt-command-topic",
            tilt_status_topic="tilt-status-topic",
            tilt_opened_value=400,
            tilt_closed_value=125,
            tilt_min=0,
            tilt_max=50,
        )
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    # 50 percent of the [0, 50] tilt range is the raw value 25.
    service_data = {ATTR_ENTITY_ID: "cover.test", ATTR_TILT_POSITION: 50}
    await hass.services.async_call(
        cover.DOMAIN, SERVICE_SET_COVER_TILT_POSITION, service_data, blocking=True
    )
    mqtt_mock.async_publish.assert_called_once_with("tilt-command-topic", 25, 0, False)
async def test_find_percentage_in_range_defaults(hass, mqtt_mock):
    """Test find percentage in range with default range."""
    config = dict(
        name="cover.test",
        state_topic="state-topic",
        get_position_topic=None,
        command_topic="command-topic",
        availability_topic=None,
        tilt_command_topic="tilt-command-topic",
        tilt_status_topic="tilt-status-topic",
        qos=0,
        retain=False,
        state_open="OPEN",
        state_closed="CLOSE",
        position_open=100,
        position_closed=0,
        payload_open="OPEN",
        payload_close="CLOSE",
        payload_stop="STOP",
        payload_available=None,
        payload_not_available=None,
        optimistic=False,
        value_template=None,
        tilt_open_position=100,
        tilt_closed_position=0,
        tilt_min=0,
        tilt_max=100,
        tilt_optimistic=False,
        tilt_invert_state=False,
        set_position_topic=None,
        set_position_template=None,
        unique_id=None,
        device_config=None,
    )
    entity = MqttCover(config, None, None)
    # With the default 0-100 range a raw value maps straight to its percentage.
    assert entity.find_percentage_in_range(44) == 44
    assert entity.find_percentage_in_range(44, "cover") == 44
async def test_find_percentage_in_range_altered(hass, mqtt_mock):
    """Test find percentage in range with altered range."""
    config = dict(
        name="cover.test",
        state_topic="state-topic",
        get_position_topic=None,
        command_topic="command-topic",
        availability_topic=None,
        tilt_command_topic="tilt-command-topic",
        tilt_status_topic="tilt-status-topic",
        qos=0,
        retain=False,
        state_open="OPEN",
        state_closed="CLOSE",
        position_open=180,
        position_closed=80,
        payload_open="OPEN",
        payload_close="CLOSE",
        payload_stop="STOP",
        payload_available=None,
        payload_not_available=None,
        optimistic=False,
        value_template=None,
        tilt_open_position=180,
        tilt_closed_position=80,
        tilt_min=80,
        tilt_max=180,
        tilt_optimistic=False,
        tilt_invert_state=False,
        set_position_topic=None,
        set_position_template=None,
        unique_id=None,
        device_config=None,
    )
    entity = MqttCover(config, None, None)
    # 120 sits 40 units into the [80, 180] range, i.e. 40 percent.
    assert entity.find_percentage_in_range(120) == 40
    assert entity.find_percentage_in_range(120, "cover") == 40
async def test_find_percentage_in_range_defaults_inverted(hass, mqtt_mock):
    """Test find percentage in range with default range but inverted."""
    config = dict(
        name="cover.test",
        state_topic="state-topic",
        get_position_topic=None,
        command_topic="command-topic",
        availability_topic=None,
        tilt_command_topic="tilt-command-topic",
        tilt_status_topic="tilt-status-topic",
        qos=0,
        retain=False,
        state_open="OPEN",
        state_closed="CLOSE",
        position_open=0,
        position_closed=100,
        payload_open="OPEN",
        payload_close="CLOSE",
        payload_stop="STOP",
        payload_available=None,
        payload_not_available=None,
        optimistic=False,
        value_template=None,
        tilt_open_position=100,
        tilt_closed_position=0,
        tilt_min=0,
        tilt_max=100,
        tilt_optimistic=False,
        tilt_invert_state=True,
        set_position_topic=None,
        set_position_template=None,
        unique_id=None,
        device_config=None,
    )
    entity = MqttCover(config, None, None)
    # Inverted default range: 44 maps to 100 - 44 = 56 percent.
    assert entity.find_percentage_in_range(44) == 56
    assert entity.find_percentage_in_range(44, "cover") == 56
async def test_find_percentage_in_range_altered_inverted(hass, mqtt_mock):
    """Test find percentage in range with altered range and inverted."""
    config = dict(
        name="cover.test",
        state_topic="state-topic",
        get_position_topic=None,
        command_topic="command-topic",
        availability_topic=None,
        tilt_command_topic="tilt-command-topic",
        tilt_status_topic="tilt-status-topic",
        qos=0,
        retain=False,
        state_open="OPEN",
        state_closed="CLOSE",
        position_open=80,
        position_closed=180,
        payload_open="OPEN",
        payload_close="CLOSE",
        payload_stop="STOP",
        payload_available=None,
        payload_not_available=None,
        optimistic=False,
        value_template=None,
        tilt_open_position=180,
        tilt_closed_position=80,
        tilt_min=80,
        tilt_max=180,
        tilt_optimistic=False,
        tilt_invert_state=True,
        set_position_topic=None,
        set_position_template=None,
        unique_id=None,
        device_config=None,
    )
    entity = MqttCover(config, None, None)
    # 120 is 40 percent into [80, 180]; inversion makes it 60 percent.
    assert entity.find_percentage_in_range(120) == 60
    assert entity.find_percentage_in_range(120, "cover") == 60
async def test_find_in_range_defaults(hass, mqtt_mock):
    """Test find in range with default range."""
    config = dict(
        name="cover.test",
        state_topic="state-topic",
        get_position_topic=None,
        command_topic="command-topic",
        availability_topic=None,
        tilt_command_topic="tilt-command-topic",
        tilt_status_topic="tilt-status-topic",
        qos=0,
        retain=False,
        state_open="OPEN",
        state_closed="CLOSE",
        position_open=100,
        position_closed=0,
        payload_open="OPEN",
        payload_close="CLOSE",
        payload_stop="STOP",
        payload_available=None,
        payload_not_available=None,
        optimistic=False,
        value_template=None,
        tilt_open_position=100,
        tilt_closed_position=0,
        tilt_min=0,
        tilt_max=100,
        tilt_optimistic=False,
        tilt_invert_state=False,
        set_position_topic=None,
        set_position_template=None,
        unique_id=None,
        device_config=None,
    )
    entity = MqttCover(config, None, None)
    # Default 0-100 range: a percentage maps straight to the raw value.
    assert entity.find_in_range_from_percent(44) == 44
    assert entity.find_in_range_from_percent(44, "cover") == 44
async def test_find_in_range_altered(hass, mqtt_mock):
    """Test find in range with altered range."""
    config = dict(
        name="cover.test",
        state_topic="state-topic",
        get_position_topic=None,
        command_topic="command-topic",
        availability_topic=None,
        tilt_command_topic="tilt-command-topic",
        tilt_status_topic="tilt-status-topic",
        qos=0,
        retain=False,
        state_open="OPEN",
        state_closed="CLOSE",
        position_open=180,
        position_closed=80,
        payload_open="OPEN",
        payload_close="CLOSE",
        payload_stop="STOP",
        payload_available=None,
        payload_not_available=None,
        optimistic=False,
        value_template=None,
        tilt_open_position=180,
        tilt_closed_position=80,
        tilt_min=80,
        tilt_max=180,
        tilt_optimistic=False,
        tilt_invert_state=False,
        set_position_topic=None,
        set_position_template=None,
        unique_id=None,
        device_config=None,
    )
    entity = MqttCover(config, None, None)
    # 40 percent of the [80, 180] range is the raw value 120.
    assert entity.find_in_range_from_percent(40) == 120
    assert entity.find_in_range_from_percent(40, "cover") == 120
async def test_find_in_range_defaults_inverted(hass, mqtt_mock):
    """Test find in range with default range but inverted."""
    config = dict(
        name="cover.test",
        state_topic="state-topic",
        get_position_topic=None,
        command_topic="command-topic",
        availability_topic=None,
        tilt_command_topic="tilt-command-topic",
        tilt_status_topic="tilt-status-topic",
        qos=0,
        retain=False,
        state_open="OPEN",
        state_closed="CLOSE",
        position_open=0,
        position_closed=100,
        payload_open="OPEN",
        payload_close="CLOSE",
        payload_stop="STOP",
        payload_available=None,
        payload_not_available=None,
        optimistic=False,
        value_template=None,
        tilt_open_position=100,
        tilt_closed_position=0,
        tilt_min=0,
        tilt_max=100,
        tilt_optimistic=False,
        tilt_invert_state=True,
        set_position_topic=None,
        set_position_template=None,
        unique_id=None,
        device_config=None,
    )
    entity = MqttCover(config, None, None)
    # Inverted default range: 56 percent maps to the raw value 44.
    assert entity.find_in_range_from_percent(56) == 44
    assert entity.find_in_range_from_percent(56, "cover") == 44
async def test_find_in_range_altered_inverted(hass, mqtt_mock):
    """Test find in range with altered range and inverted."""
    config = dict(
        name="cover.test",
        state_topic="state-topic",
        get_position_topic=None,
        command_topic="command-topic",
        availability_topic=None,
        tilt_command_topic="tilt-command-topic",
        tilt_status_topic="tilt-status-topic",
        qos=0,
        retain=False,
        state_open="OPEN",
        state_closed="CLOSE",
        position_open=80,
        position_closed=180,
        payload_open="OPEN",
        payload_close="CLOSE",
        payload_stop="STOP",
        payload_available=None,
        payload_not_available=None,
        optimistic=False,
        value_template=None,
        tilt_open_position=180,
        tilt_closed_position=80,
        tilt_min=80,
        tilt_max=180,
        tilt_optimistic=False,
        tilt_invert_state=True,
        set_position_topic=None,
        set_position_template=None,
        unique_id=None,
        device_config=None,
    )
    entity = MqttCover(config, None, None)
    # 60 percent inverted within [80, 180] resolves to the raw value 120.
    assert entity.find_in_range_from_percent(60) == 120
    assert entity.find_in_range_from_percent(60, "cover") == 120
async def test_availability_when_connection_lost(hass, mqtt_mock):
    """Test availability after MQTT disconnection."""
    domain, config = cover.DOMAIN, DEFAULT_CONFIG
    await help_test_availability_when_connection_lost(
        hass, mqtt_mock, domain, config
    )
async def test_availability_without_topic(hass, mqtt_mock):
    """Test availability without defined availability topic."""
    domain, config = cover.DOMAIN, DEFAULT_CONFIG
    await help_test_availability_without_topic(hass, mqtt_mock, domain, config)
async def test_default_availability_payload(hass, mqtt_mock):
    """Test availability by default payload with defined topic."""
    domain, config = cover.DOMAIN, DEFAULT_CONFIG
    await help_test_default_availability_payload(hass, mqtt_mock, domain, config)
async def test_custom_availability_payload(hass, mqtt_mock):
    """Test availability by custom payload with defined topic."""
    domain, config = cover.DOMAIN, DEFAULT_CONFIG
    await help_test_custom_availability_payload(hass, mqtt_mock, domain, config)
async def test_valid_device_class(hass, mqtt_mock):
    """Test the setting of a valid sensor class."""
    config = {
        cover.DOMAIN: dict(
            platform="mqtt",
            name="test",
            device_class="garage",
            state_topic="test-topic",
        )
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    state = hass.states.get("cover.test")
    assert state.attributes.get("device_class") == "garage"
async def test_invalid_device_class(hass, mqtt_mock):
    """Test the setting of an invalid sensor class."""
    config = {
        cover.DOMAIN: dict(
            platform="mqtt",
            name="test",
            device_class="abc123",
            state_topic="test-topic",
        )
    }
    assert await async_setup_component(hass, cover.DOMAIN, config)
    await hass.async_block_till_done()

    # Config validation rejects the bogus device class, so no entity appears.
    assert hass.states.get("cover.test") is None
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
    """Test the setting of attribute via MQTT with JSON payload."""
    domain, config = cover.DOMAIN, DEFAULT_CONFIG
    await help_test_setting_attribute_via_mqtt_json_message(
        hass, mqtt_mock, domain, config
    )
async def test_setting_attribute_with_template(hass, mqtt_mock):
    """Test the setting of attribute via MQTT with JSON payload."""
    domain, config = cover.DOMAIN, DEFAULT_CONFIG
    await help_test_setting_attribute_with_template(hass, mqtt_mock, domain, config)
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
    """Test attributes get extracted from a JSON result."""
    domain, config = cover.DOMAIN, DEFAULT_CONFIG
    await help_test_update_with_json_attrs_not_dict(
        hass, mqtt_mock, caplog, domain, config
    )
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
    """Test attributes get extracted from a JSON result."""
    domain, config = cover.DOMAIN, DEFAULT_CONFIG
    await help_test_update_with_json_attrs_bad_JSON(
        hass, mqtt_mock, caplog, domain, config
    )
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
    """Test update of discovered MQTTAttributes."""
    domain, config = cover.DOMAIN, DEFAULT_CONFIG
    await help_test_discovery_update_attr(hass, mqtt_mock, caplog, domain, config)
async def test_unique_id(hass, mqtt_mock):
    """Test unique_id option only creates one cover per id."""
    # Two configs sharing one unique_id must collapse into a single entity.
    config = {
        cover.DOMAIN: [
            dict(
                platform="mqtt",
                name=name,
                state_topic="test-topic",
                unique_id="TOTALLY_UNIQUE",
            )
            for name in ("Test 1", "Test 2")
        ]
    }
    await help_test_unique_id(hass, mqtt_mock, cover.DOMAIN, config)
async def test_discovery_removal_cover(hass, mqtt_mock, caplog):
    """Test removal of discovered cover."""
    payload = '{ "name": "test", "command_topic": "test_topic" }'
    await help_test_discovery_removal(hass, mqtt_mock, caplog, cover.DOMAIN, payload)
async def test_discovery_update_cover(hass, mqtt_mock, caplog):
    """Test update of discovered cover."""
    before = '{ "name": "Beer", "command_topic": "test_topic" }'
    after = '{ "name": "Milk", "command_topic": "test_topic" }'
    await help_test_discovery_update(
        hass, mqtt_mock, caplog, cover.DOMAIN, before, after
    )
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
    """Test handling of bad discovery message."""
    # First payload has an invalid topic (trailing '#'); second one is valid.
    broken = '{ "name": "Beer", "command_topic": "test_topic#" }'
    fixed = '{ "name": "Milk", "command_topic": "test_topic" }'
    await help_test_discovery_broken(
        hass, mqtt_mock, caplog, cover.DOMAIN, broken, fixed
    )
async def test_entity_device_info_with_connection(hass, mqtt_mock):
    """Test MQTT cover device registry integration."""
    domain, config = cover.DOMAIN, DEFAULT_CONFIG
    await help_test_entity_device_info_with_connection(
        hass, mqtt_mock, domain, config
    )
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
    """Test MQTT cover device registry integration."""
    domain, config = cover.DOMAIN, DEFAULT_CONFIG
    await help_test_entity_device_info_with_identifier(
        hass, mqtt_mock, domain, config
    )
async def test_entity_device_info_update(hass, mqtt_mock):
    """Test device registry update."""
    domain, config = cover.DOMAIN, DEFAULT_CONFIG
    await help_test_entity_device_info_update(hass, mqtt_mock, domain, config)
async def test_entity_device_info_remove(hass, mqtt_mock):
    """Test device registry remove."""
    domain, config = cover.DOMAIN, DEFAULT_CONFIG
    await help_test_entity_device_info_remove(hass, mqtt_mock, domain, config)
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
    """Test MQTT subscriptions are managed when entity_id is updated."""
    domain, config = cover.DOMAIN, DEFAULT_CONFIG
    await help_test_entity_id_update_subscriptions(hass, mqtt_mock, domain, config)
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
    """Test MQTT discovery update when entity_id is updated."""
    domain, config = cover.DOMAIN, DEFAULT_CONFIG
    await help_test_entity_id_update_discovery_update(
        hass, mqtt_mock, domain, config
    )
async def test_entity_debug_info_message(hass, mqtt_mock):
    """Test MQTT debug info."""
    domain, config = cover.DOMAIN, DEFAULT_CONFIG
    await help_test_entity_debug_info_message(hass, mqtt_mock, domain, config)
# License: apache-2.0
# Source: ishay2b/tensorflow — tensorflow/python/training/learning_rate_decay.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various learning rate decay functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
def exponential_decay(learning_rate, global_step, decay_steps, decay_rate,
                      staircase=False, name=None):
  """Applies exponential decay to the learning rate.

  The decayed learning rate is computed as:

  ```python
  decayed_learning_rate = learning_rate *
                          decay_rate ^ (global_step / decay_steps)
  ```

  With `staircase=True` the exponent is floored, so the rate drops in
  discrete steps every `decay_steps` steps rather than continuously.

  Example: decay every 100000 steps with a base of 0.96:

  ```python
  ...
  global_step = tf.Variable(0, trainable=False)
  starter_learning_rate = 0.1
  learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
                                             100000, 0.96, staircase=True)
  # Passing global_step to minimize() will increment it at each step.
  learning_step = (
      tf.train.GradientDescentOptimizer(learning_rate)
      .minimize(...my loss..., global_step=global_step)
  )
  ```

  Args:
    learning_rate: A scalar `float32` or `float64` `Tensor` or a Python
      number. The initial learning rate.
    global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
      The current training step; must not be negative.
    decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
      Number of steps per full application of `decay_rate`; must be positive.
    decay_rate: A scalar `float32` or `float64` `Tensor` or a Python number.
      The multiplicative decay factor.
    staircase: Boolean. If `True` decay the learning rate at discrete
      intervals.
    name: String. Optional name of the operation. Defaults to
      'ExponentialDecay'.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate`. The decayed
    learning rate.

  Raises:
    ValueError: if `global_step` is not supplied.
  """
  if global_step is None:
    raise ValueError("global_step is required for exponential_decay.")
  with ops.name_scope(name, "ExponentialDecay",
                      [learning_rate, global_step,
                       decay_steps, decay_rate]) as name:
    learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
    dtype = learning_rate.dtype
    # Cast every operand to the learning rate's dtype before the math.
    step = math_ops.cast(global_step, dtype)
    steps = math_ops.cast(decay_steps, dtype)
    rate = math_ops.cast(decay_rate, dtype)
    exponent = step / steps
    if staircase:
      # Floor the exponent so the rate only changes every `decay_steps`.
      exponent = math_ops.floor(exponent)
    return math_ops.multiply(learning_rate, math_ops.pow(rate, exponent),
                             name=name)
def piecewise_constant(x, boundaries, values, name=None):
  """Piecewise constant from boundaries and interval values.

  Example: use a learning rate that's 1.0 for the first 100000 steps, 0.5
  for steps 100001 to 110000, and 0.1 for any additional steps.

  ```python
  global_step = tf.Variable(0, trainable=False)
  boundaries = [100000, 110000]
  values = [1.0, 0.5, 0.1]
  learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)

  # Later, whenever we perform an optimization step, we increment global_step.
  ```

  Args:
    x: A 0-D scalar `Tensor`. Must be one of the following types: `float32`,
      `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`.
    boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
      increasing entries, and with all elements having the same type as `x`.
    values: A list of `Tensor`s or `float`s or `int`s that specifies the
      values for the intervals defined by `boundaries`. It should have one
      more element than `boundaries`, and all elements should have the same
      type.
    name: A string. Optional name of the operation. Defaults to
      'PiecewiseConstant'.

  Returns:
    A 0-D Tensor. Its value is `values[0]` when `x <= boundaries[0]`,
    `values[1]` when `x > boundaries[0]` and `x <= boundaries[1]`, ...,
    and values[-1] when `x > boundaries[-1]`.

  Raises:
    ValueError: if types of `x` and `boundaries` do not match, or types of
      all `values` do not match.
  """
  with ops.name_scope(name, "PiecewiseConstant",
                      [x, boundaries, values, name]) as name:
    x = ops.convert_to_tensor(x)
    # Avoid explicit conversion to x's dtype. This could result in faulty
    # comparisons, for example if floats are converted to integers.
    boundaries = ops.convert_n_to_tensor(boundaries)
    for boundary in boundaries:
      if boundary.dtype.base_dtype != x.dtype.base_dtype:
        raise ValueError(
            "Boundaries (%s) must have the same dtype as x (%s)." % (
                boundary.dtype.base_dtype, x.dtype.base_dtype))
    # TODO(rdipietro): Ensure that boundaries' elements are strictly increasing.
    values = ops.convert_n_to_tensor(values)
    first_dtype = values[0].dtype.base_dtype
    for value in values[1:]:
      if value.dtype.base_dtype != first_dtype:
        raise ValueError(
            "Values must have elements all with the same dtype (%s vs %s)." % (
                first_dtype, value.dtype.base_dtype))
    # Predicates for the two open-ended intervals at either extreme.
    pred_fn_pairs = {
        x <= boundaries[0]: lambda: values[0],
        x > boundaries[-1]: lambda: values[-1],
    }
    for low, high, v in zip(boundaries[:-1], boundaries[1:], values[1:-1]):
      # Bind v via a default argument so each lambda captures its own value.
      pred_fn_pairs[(x > low) & (x <= high)] = lambda v=v: v
    # The predicates are mutually exclusive and exhaustive, but tf.case
    # still requires a default branch.
    return control_flow_ops.case(pred_fn_pairs, lambda: values[0],
                                 exclusive=True)
def polynomial_decay(learning_rate, global_step, decay_steps,
                     end_learning_rate=0.0001, power=1.0,
                     cycle=False, name=None):
  """Applies a polynomial decay to the learning rate.

  The learning rate decays polynomially from `learning_rate` down to
  `end_learning_rate` over `decay_steps` steps:

  ```python
  global_step = min(global_step, decay_steps)
  decayed_learning_rate = (learning_rate - end_learning_rate) *
                          (1 - global_step / decay_steps) ^ (power) +
                          end_learning_rate
  ```

  If `cycle` is True then a multiple of `decay_steps` is used, the first one
  that is bigger than `global_steps`:

  ```python
  decay_steps = decay_steps * ceil(global_step / decay_steps)
  decayed_learning_rate = (learning_rate - end_learning_rate) *
                          (1 - global_step / decay_steps) ^ (power) +
                          end_learning_rate
  ```

  Example: decay from 0.1 to 0.01 in 10000 steps using sqrt (i.e. power=0.5):

  ```python
  ...
  global_step = tf.Variable(0, trainable=False)
  starter_learning_rate = 0.1
  end_learning_rate = 0.01
  decay_steps = 10000
  learning_rate = tf.train.polynomial_decay(starter_learning_rate, global_step,
                                            decay_steps, end_learning_rate,
                                            power=0.5)
  # Passing global_step to minimize() will increment it at each step.
  learning_step = (
      tf.train.GradientDescentOptimizer(learning_rate)
      .minimize(...my loss..., global_step=global_step)
  )
  ```

  Args:
    learning_rate: A scalar `float32` or `float64` `Tensor` or a Python
      number. The initial learning rate.
    global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
      Global step to use for the decay computation. Must not be negative.
    decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
      Must be positive. See the decay computation above.
    end_learning_rate: A scalar `float32` or `float64` `Tensor` or a Python
      number. The minimal end learning rate.
    power: A scalar `float32` or `float64` `Tensor` or a Python number.
      The power of the polynomial. Defaults to linear, 1.0.
    cycle: A boolean, whether or not it should cycle beyond decay_steps.
    name: String. Optional name of the operation. Defaults to
      'PolynomialDecay'.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate`. The decayed
    learning rate.

  Raises:
    ValueError: if `global_step` is not supplied.
  """
  if global_step is None:
    raise ValueError("global_step is required for polynomial_decay.")
  with ops.name_scope(name, "PolynomialDecay",
                      [learning_rate, global_step,
                       decay_steps, end_learning_rate, power]) as name:
    learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
    dtype = learning_rate.dtype
    global_step = math_ops.cast(global_step, dtype)
    decay_steps = math_ops.cast(decay_steps, dtype)
    end_learning_rate = math_ops.cast(end_learning_rate, dtype)
    power = math_ops.cast(power, dtype)
    if cycle:
      # Find the first multiple of decay_steps that is bigger than
      # global_step.
      decay_steps = math_ops.multiply(
          decay_steps, math_ops.ceil(global_step / decay_steps))
    else:
      # Clamp global_step so the decayed rate bottoms out at
      # end_learning_rate.
      global_step = math_ops.minimum(global_step, decay_steps)
    fraction = math_ops.div(global_step, decay_steps)
    decayed = math_ops.multiply(learning_rate - end_learning_rate,
                                math_ops.pow(1 - fraction, power))
    return math_ops.add(decayed, end_learning_rate, name=name)
def natural_exp_decay(learning_rate, global_step, decay_steps, decay_rate,
                      staircase=False, name=None):
  """Applies natural exponential decay to the initial learning rate.

  When training a model, it is often recommended to lower the learning rate as
  the training progresses. This function applies an exponential decay function
  to a provided initial learning rate. It requires an `global_step` value to
  compute the decayed learning rate. You can just pass a TensorFlow variable
  that you increment at each training step.

  The function returns the decayed learning rate. It is computed as:

  ```python
  decayed_learning_rate = learning_rate * exp(-decay_rate * global_step /
                                              decay_steps)
  ```

  If `staircase` is True, `global_step / decay_steps` is floored so the
  decayed learning rate follows a staircase function.

  Example: decay exponentially with a rate of 0.5:

  ```python
  ...
  global_step = tf.Variable(0, trainable=False)
  learning_rate = 0.1
  decay_steps = 1
  k = 0.5
  learning_rate = tf.train.natural_exp_decay(learning_rate, global_step,
                                             decay_steps, k)

  # Passing global_step to minimize() will increment it at each step.
  learning_step = (
      tf.train.GradientDescentOptimizer(learning_rate)
      .minimize(...my loss..., global_step=global_step)
  )
  ```

  Args:
    learning_rate: A scalar `float32` or `float64` `Tensor` or a
      Python number. The initial learning rate.
    global_step: A Python number.
      Global step to use for the decay computation. Must not be negative.
    decay_steps: How often to apply decay.
    decay_rate: A Python number. The decay rate.
    staircase: Whether to apply decay in a discrete staircase, as opposed to
      continuous, fashion.
    name: String. Optional name of the operation. Defaults to
      'NaturalExpDecay'.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate`. The decayed
    learning rate.

  Raises:
    ValueError: if `global_step` is not supplied.
  """
  if global_step is None:
    raise ValueError("global_step is required for natural_exp_decay.")
  with ops.name_scope(name, "NaturalExpDecay",
                      [learning_rate, global_step, decay_rate]) as name:
    learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
    # All arithmetic is done in the learning rate's dtype.
    dtype = learning_rate.dtype
    global_step = math_ops.cast(global_step, dtype)
    decay_steps = math_ops.cast(decay_steps, dtype)
    decay_rate = math_ops.cast(decay_rate, dtype)
    # Fraction of decay periods elapsed; floored to a whole period count
    # when staircase decay is requested.
    p = global_step / decay_steps
    if staircase:
      p = math_ops.floor(p)
    exponent = math_ops.exp(math_ops.multiply(math_ops.negative(decay_rate), p))
    return math_ops.multiply(learning_rate, exponent, name=name)
def inverse_time_decay(learning_rate, global_step, decay_steps, decay_rate,
                       staircase=False, name=None):
  """Applies inverse time decay to the initial learning rate.

  When training a model, it is often recommended to lower the learning rate as
  the training progresses. This function applies an inverse decay function
  to a provided initial learning rate. It requires an `global_step` value to
  compute the decayed learning rate. You can just pass a TensorFlow variable
  that you increment at each training step.

  The function returns the decayed learning rate. It is computed as:

  ```python
  decayed_learning_rate = learning_rate / (1 + decay_rate * global_step /
                                           decay_steps)
  ```

  If `staircase` is True, `global_step / decay_steps` is floored so the
  decayed learning rate follows a staircase function.

  Example: decay 1/t with a rate of 0.5:

  ```python
  ...
  global_step = tf.Variable(0, trainable=False)
  learning_rate = 0.1
  decay_steps = 1
  k = 0.5
  learning_rate = tf.train.inverse_time_decay(learning_rate, global_step,
                                              decay_steps, k)

  # Passing global_step to minimize() will increment it at each step.
  learning_step = (
      tf.train.GradientDescentOptimizer(learning_rate)
      .minimize(...my loss..., global_step=global_step)
  )
  ```

  Args:
    learning_rate: A scalar `float32` or `float64` `Tensor` or a
      Python number. The initial learning rate.
    global_step: A Python number.
      Global step to use for the decay computation. Must not be negative.
    decay_steps: How often to apply decay.
    decay_rate: A Python number. The decay rate.
    staircase: Whether to apply decay in a discrete staircase, as opposed to
      continuous, fashion.
    name: String. Optional name of the operation. Defaults to
      'InverseTimeDecay'.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate`. The decayed
    learning rate.

  Raises:
    ValueError: if `global_step` is not supplied.
  """
  if global_step is None:
    raise ValueError("global_step is required for inverse_time_decay.")
  with ops.name_scope(name, "InverseTimeDecay",
                      [learning_rate, global_step, decay_rate]) as name:
    learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
    # All arithmetic is done in the learning rate's dtype.
    dtype = learning_rate.dtype
    global_step = math_ops.cast(global_step, dtype)
    decay_steps = math_ops.cast(decay_steps, dtype)
    decay_rate = math_ops.cast(decay_rate, dtype)
    # Fraction of decay periods elapsed; floored to a whole period count
    # when staircase decay is requested.
    p = global_step / decay_steps
    if staircase:
      p = math_ops.floor(p)
    const = math_ops.cast(constant_op.constant(1), learning_rate.dtype)
    denom = math_ops.add(const, math_ops.multiply(decay_rate, p))
    return math_ops.div(learning_rate, denom, name=name)
| apache-2.0 |
amousset/ansible | lib/ansible/plugins/connections/local.py | 58 | 5547 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import traceback
import os
import shutil
import subprocess
import select
import fcntl
import ansible.constants as C
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.plugins.connections import ConnectionBase
class Connection(ConnectionBase):
    ''' Local based connections '''

    @property
    def transport(self):
        ''' used to identify this connection object '''
        return 'local'

    def _connect(self, port=None):
        ''' connect to the local host; nothing to do here '''
        # `port` is accepted for interface compatibility with remote
        # connection plugins but is meaningless for local execution.
        if not self._connected:
            self._display.vvv("ESTABLISH LOCAL CONNECTION FOR USER: {0}".format(self._play_context.remote_user, host=self._play_context.remote_addr))
            self._connected = True
        return self

    def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True):
        ''' run a command on the local host '''
        super(Connection, self).exec_command(cmd, tmp_path, in_data=in_data, sudoable=sudoable)

        self._display.debug("in local.exec_command()")
        if in_data:
            raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
        executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None

        self._display.vvv("{0} EXEC {1}".format(self._play_context.remote_addr, cmd))
        # FIXME: cwd= needs to be set to the basedir of the playbook
        self._display.debug("opening command with Popen()")
        # NOTE: `basestring` means this file targets Python 2. A string cmd
        # is run through the shell; a list cmd is exec'd directly.
        p = subprocess.Popen(
            cmd,
            shell=isinstance(cmd, basestring),
            executable=executable, #cwd=...
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        self._display.debug("done running command with Popen()")

        if self._play_context.prompt and self._play_context.become_pass and sudoable:
            # Privilege escalation may print a password prompt. Make both
            # pipes non-blocking so the prompt can be read as it appears.
            fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
            fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
            become_output = ''
            # Accumulate output until we either see the become success
            # marker or the password prompt.
            while not self.check_become_success(become_output) and not self.check_password_prompt(become_output):
                rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout, p.stderr], self._play_context.timeout)
                if p.stdout in rfd:
                    chunk = p.stdout.read()
                elif p.stderr in rfd:
                    chunk = p.stderr.read()
                else:
                    # select() timed out with neither pipe readable.
                    stdout, stderr = p.communicate()
                    raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + become_output)
                if not chunk:
                    # EOF before we ever saw a prompt or success marker.
                    stdout, stderr = p.communicate()
                    raise AnsibleError('privilege output closed while waiting for password prompt:\n' + become_output)
                become_output += chunk
            if not self.check_become_success(become_output):
                # The loop exited on the password prompt: feed the password.
                p.stdin.write(self._play_context.become_pass + '\n')
            # Restore blocking mode before the final communicate().
            fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
            fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)

        self._display.debug("getting output with communicate()")
        stdout, stderr = p.communicate()
        self._display.debug("done communicating")

        self._display.debug("done with local.exec_command()")
        return (p.returncode, '', stdout, stderr)

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to local '''
        super(Connection, self).put_file(in_path, out_path)

        self._display.vvv("{0} PUT {1} TO {2}".format(self._play_context.remote_addr, in_path, out_path))
        if not os.path.exists(in_path):
            raise AnsibleFileNotFound("file or module does not exist: {0}".format(in_path))
        try:
            shutil.copyfile(in_path, out_path)
        except shutil.Error:
            # shutil.copyfile raises shutil.Error when src and dst are the
            # same file.
            raise AnsibleError("failed to copy: {0} and {1} are the same".format(in_path, out_path))
        except IOError as e:
            raise AnsibleError("failed to transfer file to {0}: {1}".format(out_path, e))

    def fetch_file(self, in_path, out_path):
        ''' fetch a file from local to local -- for compatibility '''
        super(Connection, self).fetch_file(in_path, out_path)

        self._display.vvv("{0} FETCH {1} TO {2}".format(self._play_context.remote_addr, in_path, out_path))
        # Fetching locally is just a copy in the other direction.
        self.put_file(in_path, out_path)

    def close(self):
        ''' terminate the connection; nothing to do here '''
        self._connected = False
| gpl-3.0 |
mozilla/kitsune | kitsune/questions/tests/test_api.py | 1 | 28190 | import json
from datetime import datetime, timedelta
from unittest import mock
import actstream.actions
from actstream.models import Follow
from nose.tools import eq_, ok_, raises
from rest_framework.test import APIClient
from rest_framework.exceptions import APIException
from taggit.models import Tag
from kitsune.sumo.tests import TestCase
from kitsune.questions import api
from kitsune.questions.models import Question, Answer
from kitsune.questions.tests import (
tags_eq,
QuestionFactory,
AnswerFactory,
QuestionVoteFactory,
AnswerVoteFactory,
)
from kitsune.products.tests import ProductFactory, TopicFactory
from kitsune.sumo.urlresolvers import reverse
from kitsune.tags.tests import TagFactory
from kitsune.users.templatetags.jinja_helpers import profile_avatar
from kitsune.users.models import Profile
from kitsune.users.tests import UserFactory, add_permission
class TestQuestionSerializerDeserialization(TestCase):
    """Validation and save behavior of QuestionSerializer on input data."""

    def setUp(self):
        # A minimal valid payload plus a mocked request carrying the user,
        # which the serializer reads from its context.
        self.user = UserFactory()
        self.product = ProductFactory()
        self.topic = TopicFactory(product=self.product)
        self.request = mock.Mock()
        self.request.user = self.user
        self.context = {
            "request": self.request,
        }
        self.data = {
            "creator": self.user.profile,
            "title": "How do I test programs?",
            "content": "Help, I don't know what to do.",
            "product": self.product.slug,
            "topic": self.topic.slug,
        }

    def test_it_works(self):
        serializer = api.QuestionSerializer(context=self.context, data=self.data)
        serializer.is_valid(raise_exception=True)

    def test_automatic_creator(self):
        # With no explicit creator, the requesting user becomes the creator.
        del self.data["creator"]
        serializer = api.QuestionSerializer(context=self.context, data=self.data)
        serializer.is_valid(raise_exception=True)
        obj = serializer.save()
        eq_(obj.creator, self.user)

    def test_product_required(self):
        del self.data["product"]
        serializer = api.QuestionSerializer(context=self.context, data=self.data)
        ok_(not serializer.is_valid())
        eq_(
            serializer.errors,
            {
                "product": ["This field is required."],
                "topic": ["A product must be specified to select a topic."],
            },
        )

    def test_topic_required(self):
        del self.data["topic"]
        serializer = api.QuestionSerializer(context=self.context, data=self.data)
        ok_(not serializer.is_valid())
        eq_(
            serializer.errors,
            {
                "topic": ["This field is required."],
            },
        )

    def test_topic_disambiguation(self):
        # First make another product, and a colliding topic.
        # It has the same slug, but a different product.
        new_product = ProductFactory()
        TopicFactory(product=new_product, slug=self.topic.slug)
        serializer = api.QuestionSerializer(context=self.context, data=self.data)
        serializer.is_valid(raise_exception=True)
        obj = serializer.save()
        # The topic belonging to the requested product must win.
        eq_(obj.topic, self.topic)

    def test_solution_is_readonly(self):
        q = QuestionFactory()
        a = AnswerFactory(question=q)
        self.data["solution"] = a.id
        serializer = api.QuestionSerializer(context=self.context, data=self.data, instance=q)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        # The read-only solution field must be ignored on save.
        eq_(q.solution, None)
class TestQuestionSerializerSerialization(TestCase):
    """Output fields of QuestionSerializer: votes, involved users, solution,
    creator and tags."""

    def setUp(self):
        self.asker = UserFactory()
        self.helper1 = UserFactory()
        self.helper2 = UserFactory()
        self.question = QuestionFactory(creator=self.asker)

    def _names(self, *users):
        """Expected 'involved' entries for the given users, sorted by
        username to match the serializer's stable ordering."""
        return sorted(
            (
                {
                    "username": u.username,
                    "display_name": Profile.objects.get(user=u).name,
                    "avatar": profile_avatar(u),
                }
                for u in users
            ),
            key=lambda d: d["username"],
        )

    def _answer(self, user):
        """Attach an answer by `user` to the question under test."""
        return AnswerFactory(question=self.question, creator=user)

    def test_no_votes(self):
        serializer = api.QuestionSerializer(instance=self.question)
        eq_(serializer.data["num_votes"], 0)

    def test_with_votes(self):
        QuestionVoteFactory(question=self.question)
        QuestionVoteFactory(question=self.question)
        # A vote on an unrelated question must not be counted.
        QuestionVoteFactory()
        serializer = api.QuestionSerializer(instance=self.question)
        eq_(serializer.data["num_votes"], 2)

    def test_just_asker(self):
        serializer = api.QuestionSerializer(instance=self.question)
        eq_(serializer.data["involved"], self._names(self.asker))

    def test_one_answer(self):
        self._answer(self.helper1)
        serializer = api.QuestionSerializer(instance=self.question)
        eq_(
            sorted(serializer.data["involved"], key=lambda d: d["username"]),
            self._names(self.asker, self.helper1),
        )

    def test_asker_and_response(self):
        # The asker answering their own question must not be listed twice.
        self._answer(self.helper1)
        self._answer(self.asker)
        serializer = api.QuestionSerializer(instance=self.question)
        eq_(
            sorted(serializer.data["involved"], key=lambda d: d["username"]),
            self._names(self.asker, self.helper1),
        )

    def test_asker_and_two_answers(self):
        self._answer(self.helper1)
        self._answer(self.asker)
        self._answer(self.helper2)
        serializer = api.QuestionSerializer(instance=self.question)
        eq_(
            sorted(serializer.data["involved"], key=lambda d: d["username"]),
            self._names(self.asker, self.helper1, self.helper2),
        )

    def test_solution_is_id(self):
        # The solution is serialized as the answer's id, not a nested object.
        a = self._answer(self.helper1)
        self.question.solution = a
        self.question.save()
        serializer = api.QuestionSerializer(instance=self.question)
        eq_(serializer.data["solution"], a.id)

    def test_creator_is_object(self):
        serializer = api.QuestionSerializer(instance=self.question)
        eq_(
            serializer.data["creator"],
            {
                "username": self.question.creator.username,
                "display_name": Profile.objects.get(user=self.question.creator).display_name,
                "avatar": profile_avatar(self.question.creator),
            },
        )

    def test_with_tags(self):
        self.question.tags.add("tag1")
        self.question.tags.add("tag2")
        serializer = api.QuestionSerializer(instance=self.question)
        eq_(
            serializer.data["tags"],
            [
                {"name": "tag1", "slug": "tag1"},
                {"name": "tag2", "slug": "tag2"},
            ],
        )
class TestQuestionViewSet(TestCase):
    """API-level tests of the question endpoints: create/delete, solving,
    voting, taking, following, tagging, filtering and ordering."""

    def setUp(self):
        self.client = APIClient()

    def test_create(self):
        u = UserFactory()
        p = ProductFactory()
        t = TopicFactory(product=p)
        self.client.force_authenticate(user=u)
        data = {
            "title": "How do I start Firefox?",
            "content": "Seriously, what do I do?",
            "product": p.slug,
            "topic": t.slug,
        }
        eq_(Question.objects.count(), 0)
        res = self.client.post(reverse("question-list"), data)
        eq_(res.status_code, 201)
        eq_(Question.objects.count(), 1)
        q = Question.objects.all()[0]
        eq_(q.title, data["title"])
        eq_(q.content, data["content"])
        # The API returns the parsed (rendered) content.
        eq_(q.content_parsed, res.data["content"])

    def test_delete_permissions(self):
        u1 = UserFactory()
        u2 = UserFactory()
        q = QuestionFactory(creator=u1)

        # Anonymous user can't delete
        self.client.force_authenticate(user=None)
        res = self.client.delete(reverse("question-detail", args=[q.id]))
        eq_(res.status_code, 401)  # Unauthorized

        # Non-owner can't delete
        self.client.force_authenticate(user=u2)
        res = self.client.delete(reverse("question-detail", args=[q.id]))
        eq_(res.status_code, 403)  # Forbidden

        # Owner can delete
        self.client.force_authenticate(user=u1)
        res = self.client.delete(reverse("question-detail", args=[q.id]))
        eq_(res.status_code, 204)  # No content

    def test_solve(self):
        q = QuestionFactory()
        a = AnswerFactory(question=q)
        self.client.force_authenticate(user=q.creator)
        res = self.client.post(reverse("question-solve", args=[q.id]), data={"answer": a.id})
        eq_(res.status_code, 204)
        q = Question.objects.get(id=q.id)
        eq_(q.solution, a)

    def test_filter_is_taken_true(self):
        q1 = QuestionFactory()
        q2 = QuestionFactory()
        q2.take(q1.creator)
        url = reverse("question-list") + "?is_taken=1"
        res = self.client.get(url)
        eq_(res.status_code, 200)
        eq_(res.data["count"], 1)
        eq_(res.data["results"][0]["id"], q2.id)

    def test_filter_is_taken_false(self):
        q1 = QuestionFactory()
        q2 = QuestionFactory()
        q2.take(q1.creator)
        url = reverse("question-list") + "?is_taken=0"
        res = self.client.get(url)
        eq_(res.status_code, 200)
        eq_(res.data["count"], 1)
        eq_(res.data["results"][0]["id"], q1.id)

    def test_filter_is_taken_expired(self):
        q = QuestionFactory()
        # "take" the question, but with an expired timer.
        q.taken_by = UserFactory()
        q.taken_until = datetime.now() - timedelta(seconds=60)
        # Fix: persist the expired-taken state. The original never saved,
        # so the database row was simply untaken and the assertion below
        # passed vacuously without exercising the expiry logic.
        q.save()
        url = reverse("question-list") + "?is_taken=1"
        res = self.client.get(url)
        eq_(res.status_code, 200)
        eq_(res.data["count"], 0)

    def test_filter_taken_by_username(self):
        q1 = QuestionFactory()
        q2 = QuestionFactory()
        q2.take(q1.creator)
        url = reverse("question-list") + "?taken_by=" + q1.creator.username
        res = self.client.get(url)
        eq_(res.status_code, 200)
        eq_(res.data["count"], 1)
        eq_(res.data["results"][0]["id"], q2.id)

    def test_helpful(self):
        q = QuestionFactory()
        u = UserFactory()
        self.client.force_authenticate(user=u)
        res = self.client.post(reverse("question-helpful", args=[q.id]))
        eq_(res.status_code, 200)
        eq_(res.data, {"num_votes": 1})
        eq_(Question.objects.get(id=q.id).num_votes, 1)

    def test_helpful_double_vote(self):
        q = QuestionFactory()
        u = UserFactory()
        QuestionVoteFactory(question=q, creator=u)
        self.client.force_authenticate(user=u)
        res = self.client.post(reverse("question-helpful", args=[q.id]))
        eq_(res.status_code, 409)
        # It's 1, not 0, because one was created above. The failure cause is
        # if the number of votes is 2, one from above and one from the api call.
        eq_(Question.objects.get(id=q.id).num_votes, 1)

    def test_helpful_question_not_editable(self):
        q = QuestionFactory(is_locked=True)
        u = UserFactory()
        self.client.force_authenticate(user=u)
        res = self.client.post(reverse("question-helpful", args=[q.id]))
        eq_(res.status_code, 403)
        eq_(Question.objects.get(id=q.id).num_votes, 0)

    def test_ordering(self):
        q1 = QuestionFactory()
        q2 = QuestionFactory()

        # Default ordering is newest first.
        res = self.client.get(reverse("question-list"))
        eq_(res.data["results"][0]["id"], q2.id)
        eq_(res.data["results"][1]["id"], q1.id)

        res = self.client.get(reverse("question-list") + "?ordering=id")
        eq_(res.data["results"][0]["id"], q1.id)
        eq_(res.data["results"][1]["id"], q2.id)

        res = self.client.get(reverse("question-list") + "?ordering=-id")
        eq_(res.data["results"][0]["id"], q2.id)
        eq_(res.data["results"][1]["id"], q1.id)

    def test_filter_product_with_slug(self):
        p1 = ProductFactory()
        p2 = ProductFactory()
        q1 = QuestionFactory(product=p1)
        QuestionFactory(product=p2)

        querystring = "?product={0}".format(p1.slug)
        res = self.client.get(reverse("question-list") + querystring)
        eq_(len(res.data["results"]), 1)
        eq_(res.data["results"][0]["id"], q1.id)

    def test_filter_creator_with_username(self):
        q1 = QuestionFactory()
        QuestionFactory()

        querystring = "?creator={0}".format(q1.creator.username)
        res = self.client.get(reverse("question-list") + querystring)
        eq_(res.status_code, 200)
        eq_(len(res.data["results"]), 1)
        eq_(res.data["results"][0]["id"], q1.id)

    def test_filter_involved(self):
        q1 = QuestionFactory()
        a1 = AnswerFactory(question=q1)
        q2 = QuestionFactory(creator=a1.creator)

        querystring = "?involved={0}".format(q1.creator.username)
        res = self.client.get(reverse("question-list") + querystring)
        eq_(res.status_code, 200)
        eq_(len(res.data["results"]), 1)
        eq_(res.data["results"][0]["id"], q1.id)

        querystring = "?involved={0}".format(q2.creator.username)
        res = self.client.get(reverse("question-list") + querystring)
        eq_(res.status_code, 200)
        eq_(len(res.data["results"]), 2)
        # The API has a default sort, so ordering will be consistent.
        eq_(res.data["results"][0]["id"], q2.id)
        eq_(res.data["results"][1]["id"], q1.id)

    def test_is_taken(self):
        q = QuestionFactory()
        u = UserFactory()
        q.take(u)
        url = reverse("question-detail", args=[q.id])
        res = self.client.get(url)
        eq_(res.status_code, 200)
        eq_(res.data["taken_by"]["username"], u.username)

    def test_take(self):
        q = QuestionFactory()
        u = UserFactory()
        self.client.force_authenticate(user=u)
        res = self.client.post(reverse("question-take", args=[q.id]))
        eq_(res.status_code, 204)
        q = Question.objects.get(id=q.id)
        eq_(q.taken_by, u)

    def test_take_by_owner(self):
        # Askers may not take their own question.
        q = QuestionFactory()
        self.client.force_authenticate(user=q.creator)
        res = self.client.post(reverse("question-take", args=[q.id]))
        eq_(res.status_code, 400)
        q = Question.objects.get(id=q.id)
        eq_(q.taken_by, None)

    def test_take_conflict(self):
        # A question already taken (with a live timer) can't be re-taken.
        u1 = UserFactory()
        u2 = UserFactory()
        taken_until = datetime.now() + timedelta(seconds=30)
        q = QuestionFactory(taken_until=taken_until, taken_by=u1)
        self.client.force_authenticate(user=u2)
        res = self.client.post(reverse("question-take", args=[q.id]))
        eq_(res.status_code, 409)
        q = Question.objects.get(id=q.id)
        eq_(q.taken_by, u1)

    def test_follow(self):
        q = QuestionFactory()
        u = UserFactory()
        self.client.force_authenticate(user=u)
        res = self.client.post(reverse("question-follow", args=[q.id]))
        eq_(res.status_code, 204)
        f = Follow.objects.get(user=u)
        eq_(f.follow_object, q)
        eq_(f.actor_only, False)

    def test_unfollow(self):
        q = QuestionFactory()
        u = UserFactory()
        actstream.actions.follow(u, q, actor_only=False)
        eq_(Follow.objects.filter(user=u).count(), 1)  # pre-condition
        self.client.force_authenticate(user=u)
        res = self.client.post(reverse("question-unfollow", args=[q.id]))
        eq_(res.status_code, 204)
        eq_(Follow.objects.filter(user=u).count(), 0)

    def test_add_tags(self):
        q = QuestionFactory()
        eq_(0, q.tags.count())

        u = UserFactory()
        add_permission(u, Tag, "add_tag")
        self.client.force_authenticate(user=u)

        res = self.client.post(
            reverse("question-add-tags", args=[q.id]),
            content_type="application/json",
            data=json.dumps({"tags": ["test", "more", "tags"]}),
        )
        eq_(res.status_code, 200)
        eq_(3, q.tags.count())

    def test_remove_tags(self):
        q = QuestionFactory()
        q.tags.add("test")
        q.tags.add("more")
        q.tags.add("tags")
        eq_(3, q.tags.count())

        u = UserFactory()
        self.client.force_authenticate(user=u)

        res = self.client.post(
            reverse("question-remove-tags", args=[q.id]),
            content_type="application/json",
            data=json.dumps({"tags": ["more", "tags"]}),
        )
        eq_(res.status_code, 204)
        eq_(1, q.tags.count())

    def test_bleaching(self):
        """Tests whether question content is bleached."""
        q = QuestionFactory(content="<unbleached>Cupcakes are the best</unbleached>")
        url = reverse("question-detail", args=[q.id])
        res = self.client.get(url)
        eq_(res.status_code, 200)
        assert "<unbleached>" not in res.data["content"]

    def test_auto_tagging(self):
        """Test that questions created via the API are auto-tagged."""
        TagFactory(name="desktop")
        q = QuestionFactory()
        self.client.force_authenticate(user=q.creator)
        tags_eq(q, [])

        # Setting metadata alone must not tag the question.
        res = self.client.post(
            reverse("question-set-metadata", args=[q.id]),
            content_type="application/json",
            data=json.dumps({"name": "product", "value": "desktop"}),
        )
        eq_(res.status_code, 200)
        tags_eq(q, [])

        # The explicit auto-tag endpoint applies tags from metadata.
        res = self.client.post(
            reverse("question-auto-tag", args=[q.id]), content_type="application/json"
        )
        eq_(res.status_code, 204)
        tags_eq(q, ["desktop"])
class TestAnswerSerializerDeserialization(TestCase):
    """Vote counters exposed by AnswerSerializer."""

    def test_no_votes(self):
        answer = AnswerFactory()
        data = api.AnswerSerializer(instance=answer).data
        eq_(data["num_helpful_votes"], 0)
        eq_(data["num_unhelpful_votes"], 0)

    def test_with_votes(self):
        answer = AnswerFactory()
        # Two helpful votes and one unhelpful vote on this answer...
        for was_helpful in (True, True, False):
            AnswerVoteFactory(answer=answer, helpful=was_helpful)
        # ...plus a vote on an unrelated answer, which must not be counted.
        AnswerVoteFactory()
        data = api.AnswerSerializer(instance=answer).data
        eq_(data["num_helpful_votes"], 2)
        eq_(data["num_unhelpful_votes"], 1)
class TestAnswerViewSet(TestCase):
    """API-level tests of the answer endpoints: create/delete, voting,
    following and content bleaching."""

    def setUp(self):
        self.client = APIClient()

    def test_create(self):
        q = QuestionFactory()
        u = UserFactory()
        self.client.force_authenticate(user=u)
        data = {
            "question": q.id,
            "content": "You just need to click the fox.",
        }
        eq_(Answer.objects.count(), 0)
        res = self.client.post(reverse("answer-list"), data)
        eq_(res.status_code, 201)
        eq_(Answer.objects.count(), 1)
        a = Answer.objects.all()[0]
        eq_(a.content, data["content"])
        # The API returns the parsed (rendered) content.
        eq_(a.content_parsed, res.data["content"])
        eq_(a.question, q)

    def test_delete_permissions(self):
        u1 = UserFactory()
        u2 = UserFactory()
        a = AnswerFactory(creator=u1)

        # Anonymous user can't delete
        self.client.force_authenticate(user=None)
        res = self.client.delete(reverse("answer-detail", args=[a.id]))
        eq_(res.status_code, 401)  # Unauthorized

        # Non-owner can't delete
        self.client.force_authenticate(user=u2)
        res = self.client.delete(reverse("answer-detail", args=[a.id]))
        eq_(res.status_code, 403)  # Forbidden

        # Owner can delete
        self.client.force_authenticate(user=u1)
        res = self.client.delete(reverse("answer-detail", args=[a.id]))
        eq_(res.status_code, 204)  # No content

    def test_ordering(self):
        a1 = AnswerFactory()
        a2 = AnswerFactory()

        # Default ordering is newest first.
        res = self.client.get(reverse("answer-list"))
        eq_(res.data["results"][0]["id"], a2.id)
        eq_(res.data["results"][1]["id"], a1.id)

        res = self.client.get(reverse("answer-list") + "?ordering=id")
        eq_(res.data["results"][0]["id"], a1.id)
        eq_(res.data["results"][1]["id"], a2.id)

        res = self.client.get(reverse("answer-list") + "?ordering=-id")
        eq_(res.data["results"][0]["id"], a2.id)
        eq_(res.data["results"][1]["id"], a1.id)

    def test_helpful(self):
        a = AnswerFactory()
        u = UserFactory()
        self.client.force_authenticate(user=u)
        res = self.client.post(reverse("answer-helpful", args=[a.id]))
        eq_(res.status_code, 200)
        eq_(res.data, {"num_helpful_votes": 1, "num_unhelpful_votes": 0})
        eq_(Answer.objects.get(id=a.id).num_votes, 1)

    def test_helpful_double_vote(self):
        a = AnswerFactory()
        u = UserFactory()
        AnswerVoteFactory(answer=a, creator=u)
        self.client.force_authenticate(user=u)
        res = self.client.post(reverse("answer-helpful", args=[a.id]))
        eq_(res.status_code, 409)
        # It's 1, not 0, because one was created above. The failure cause is
        # if the number of votes is 2, one from above and one from the api call.
        eq_(Answer.objects.get(id=a.id).num_votes, 1)

    def test_helpful_answer_not_editable(self):
        # Votes on answers to locked questions are rejected.
        q = QuestionFactory(is_locked=True)
        a = AnswerFactory(question=q)
        u = UserFactory()
        self.client.force_authenticate(user=u)
        res = self.client.post(reverse("answer-helpful", args=[a.id]))
        eq_(res.status_code, 403)
        eq_(Answer.objects.get(id=a.id).num_votes, 0)

    def test_follow(self):
        a = AnswerFactory()
        u = UserFactory()
        self.client.force_authenticate(user=u)
        eq_(Follow.objects.filter(user=u).count(), 0)  # pre-condition
        res = self.client.post(reverse("answer-follow", args=[a.id]))
        eq_(res.status_code, 204)
        f = Follow.objects.get(user=u)
        eq_(f.follow_object, a)
        eq_(f.actor_only, False)

    def test_unfollow(self):
        a = AnswerFactory()
        u = UserFactory()
        actstream.actions.follow(u, a, actor_only=False)
        eq_(Follow.objects.filter(user=u).count(), 1)  # pre-condition
        self.client.force_authenticate(user=u)
        res = self.client.post(reverse("answer-unfollow", args=[a.id]))
        eq_(res.status_code, 204)
        eq_(Follow.objects.filter(user=u).count(), 0)

    def test_bleaching(self):
        """Tests whether answer content is bleached."""
        a = AnswerFactory(content="<unbleached>Cupcakes are the best</unbleached>")
        url = reverse("answer-detail", args=[a.id])
        res = self.client.get(url)
        eq_(res.status_code, 200)
        assert "<unbleached>" not in res.data["content"]
class TestQuestionFilter(TestCase):
def setUp(self):
self.filter_instance = api.QuestionFilter()
self.queryset = Question.objects.all()
def filter(self, filter_data):
return self.filter_instance.filter_metadata(
self.queryset, "metadata", json.dumps(filter_data)
)
def test_filter_involved(self):
q1 = QuestionFactory()
a1 = AnswerFactory(question=q1)
q2 = QuestionFactory(creator=a1.creator)
qs = self.filter_instance.filter_involved(
self.queryset, "filter_involved", q1.creator.username
)
eq_(list(qs), [q1])
qs = self.filter_instance.filter_involved(
self.queryset, "filter_involved", q2.creator.username
)
# The filter does not have a strong order.
qs = sorted(qs, key=lambda q: q.id)
eq_(qs, [q1, q2])
def test_filter_is_solved(self):
q1 = QuestionFactory()
a1 = AnswerFactory(question=q1)
q1.solution = a1
q1.save()
q2 = QuestionFactory()
qs = self.filter_instance.filter_is_solved(self.queryset, "is_solved", True)
eq_(list(qs), [q1])
qs = self.filter_instance.filter_is_solved(self.queryset, "is_solved", False)
eq_(list(qs), [q2])
def test_filter_solved_by(self):
q1 = QuestionFactory()
a1 = AnswerFactory(question=q1)
q1.solution = a1
q1.save()
q2 = QuestionFactory()
AnswerFactory(question=q2, creator=a1.creator)
q3 = QuestionFactory()
a3 = AnswerFactory(question=q3)
q3.solution = a3
q3.save()
qs = self.filter_instance.filter_solved_by(self.queryset, "solved_by", a1.creator.username)
eq_(list(qs), [q1])
qs = self.filter_instance.filter_solved_by(self.queryset, "solved_by", a3.creator.username)
eq_(list(qs), [q3])
@raises(APIException)
def test_metadata_not_json(self):
self.filter_instance.filter_metadata(self.queryset, "metadata", "not json")
@raises(APIException)
def test_metadata_bad_json(self):
self.filter_instance.filter_metadata(self.queryset, "metadata", "not json")
def test_single_filter_match(self):
q1 = QuestionFactory(metadata={"os": "Linux"})
QuestionFactory(metadata={"os": "OSX"})
res = self.filter({"os": "Linux"})
eq_(list(res), [q1])
def test_single_filter_no_match(self):
QuestionFactory(metadata={"os": "Linux"})
QuestionFactory(metadata={"os": "OSX"})
res = self.filter({"os": "Windows 8"})
eq_(list(res), [])
def test_multi_filter_is_and(self):
q1 = QuestionFactory(metadata={"os": "Linux", "category": "troubleshooting"})
QuestionFactory(metadata={"os": "OSX", "category": "troubleshooting"})
res = self.filter({"os": "Linux", "category": "troubleshooting"})
eq_(list(res), [q1])
def test_list_value_is_or(self):
q1 = QuestionFactory(metadata={"os": "Linux"})
q2 = QuestionFactory(metadata={"os": "OSX"})
QuestionFactory(metadata={"os": "Windows 7"})
res = self.filter({"os": ["Linux", "OSX"]})
eq_(sorted(res, key=lambda q: q.id), [q1, q2])
def test_none_value_is_missing(self):
q1 = QuestionFactory(metadata={})
QuestionFactory(metadata={"os": "Linux"})
res = self.filter({"os": None})
eq_(list(res), [q1])
def test_list_value_with_none(self):
q1 = QuestionFactory(metadata={"os": "Linux"})
q2 = QuestionFactory(metadata={})
QuestionFactory(metadata={"os": "Windows 7"})
res = self.filter({"os": ["Linux", None]})
eq_(sorted(res, key=lambda q: q.id), [q1, q2])
def test_is_taken(self):
u = UserFactory()
taken_until = datetime.now() + timedelta(seconds=30)
q = QuestionFactory(taken_by=u, taken_until=taken_until)
QuestionFactory()
res = self.filter_instance.filter_is_taken(self.queryset, "is_taken", True)
eq_(list(res), [q])
def test_is_not_taken(self):
u = UserFactory()
taken_until = datetime.now() + timedelta(seconds=30)
QuestionFactory(taken_by=u, taken_until=taken_until)
q = QuestionFactory()
res = self.filter_instance.filter_is_taken(self.queryset, "is_taken", False)
eq_(list(res), [q])
def test_is_taken_expired(self):
u = UserFactory()
taken_until = datetime.now() - timedelta(seconds=30)
QuestionFactory(taken_by=u, taken_until=taken_until)
res = self.filter_instance.filter_is_taken(self.queryset, "is_taken", True)
eq_(list(res), [])
def test_is_not_taken_expired(self):
u = UserFactory()
taken_until = datetime.now() - timedelta(seconds=30)
q = QuestionFactory(taken_by=u, taken_until=taken_until)
res = self.filter_instance.filter_is_taken(self.queryset, "is_taken", False)
eq_(list(res), [q])
    def test_it_works_with_users_who_have_gotten_first_contrib_emails(self):
        """Regression: the question list renders for users with the first-answer flag."""
        # This flag caused a regression, tracked in bug 1163855.
        # The error was that the help text on the field was a str instead of a
        # unicode. Yes, really, that matters apparently.
        u = UserFactory(profile__first_answer_email_sent=True)
        QuestionFactory(creator=u)
        url = reverse("question-list")
        res = self.client.get(url)
        eq_(res.status_code, 200)
| bsd-3-clause |
soumyanishan/azure-linux-extensions | VMAccess/vmaccess.py | 1 | 18922 | #!/usr/bin/env python
#
# VMAccess extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import platform
import re
import shutil
import sys
import tempfile
import time
import traceback
import Utils.HandlerUtil as Util
from waagentloader import load_waagent
waagent = load_waagent()
# Define global variables
ExtensionShortName = 'VMAccess'
BeginCertificateTag = '-----BEGIN CERTIFICATE-----'
EndCertificateTag = '-----END CERTIFICATE-----'
OutputSplitter = ';'
SshdConfigPath = '/etc/ssh/sshd_config'
def main():
    """Entry point: dispatch to the handler matching the CLI operation flag.

    waagent invokes this script with one of install/enable/disable/uninstall/
    update; any unhandled exception is logged rather than propagated.
    """
    waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout')
    waagent.Log("%s started to handle." % (ExtensionShortName))
    waagent.MyDistro = waagent.GetMyDistro()
    try:
        for a in sys.argv[1:]:
            # Flags may arrive as "-disable", "--disable", "/disable" or "disable".
            if re.match("^([-/]*)(disable)", a):
                disable()
            elif re.match("^([-/]*)(uninstall)", a):
                uninstall()
            elif re.match("^([-/]*)(install)", a):
                install()
            elif re.match("^([-/]*)(enable)", a):
                enable()
            elif re.match("^([-/]*)(update)", a):
                update()
    except Exception as e:
        err_msg = "Failed with error: {0}, {1}".format(e, traceback.format_exc())
        waagent.Error(err_msg)
def install():
    """Handle the 'install' operation: parse the handler context, report success.

    No actual installation work is needed; the agent lays down the files.
    """
    hutil = Util.HandlerUtility(waagent.Log, waagent.Error)
    # Bug fix: this previously passed 'Uninstall', mislabeling install runs
    # in the handler context/logs. All sibling handlers pass their own op name.
    hutil.do_parse_context('Install')
    hutil.do_exit(0, 'Install', 'success', '0', 'Install Succeeded')
def enable():
    """Handle the 'enable' operation.

    Depending on the protected settings this may: force CHAP auth off, open
    the SSH port, reset sshd_config, remove a user, create/update a user's
    password or SSH key, and run a disk check/repair. Exits the handler
    process with a status in every path.
    """
    hutil = Util.HandlerUtility(waagent.Log, waagent.Error)
    hutil.do_parse_context('Enable')
    try:
        _forcibly_reset_chap(hutil)

        reset_ssh = None
        remove_user = None
        protect_settings = hutil.get_protected_settings()
        if protect_settings:
            reset_ssh = protect_settings.get('reset_ssh')
            remove_user = protect_settings.get('remove_user')

        # Removing a user while also rewriting sshd_config is rejected as an
        # ambiguous request.
        if remove_user and _is_sshd_config_modified(protect_settings):
            hutil.error("Cannot reset sshd_config and remove a user in one operation.")
            waagent.AddExtensionEvent(name=hutil.get_name(),
                                      op=waagent.WALAEventOperation.Enable,
                                      isSuccess=False,
                                      message="(03002)Argument error, conflicting operations")
            hutil.do_exit(1, 'Enable', 'error', '0', 'Enable failed.')

        # check port each time the VM boots up
        if reset_ssh:
            _open_ssh_port()
            hutil.log("Succeeded in check and open ssh port.")

        # Skip the remainder if this sequence number was already processed.
        hutil.exit_if_enabled()

        # NOTE(review): _is_sshd_config_modified dereferences protect_settings;
        # if no protected settings are supplied this would raise — presumably
        # the agent always provides them for enable. Confirm.
        if _is_sshd_config_modified(protect_settings):
            _backup_sshd_config(SshdConfigPath)

        if reset_ssh:
            waagent.AddExtensionEvent(name=hutil.get_name(), op="scenario", isSuccess=True, message="reset-ssh")
            _reset_sshd_config(SshdConfigPath)
            hutil.log("Succeeded in reset sshd_config.")

        if remove_user:
            waagent.AddExtensionEvent(name=hutil.get_name(), op="scenario", isSuccess=True, message="remove-user")
            _remove_user_account(remove_user, hutil)

        _set_user_account_pub_key(protect_settings, hutil)

        if _is_sshd_config_modified(protect_settings):
            waagent.MyDistro.restartSshService()

        check_and_repair_disk(hutil)
        hutil.do_exit(0, 'Enable', 'success', '0', 'Enable succeeded.')
    except Exception as e:
        hutil.error(("Failed to enable the extension with error: {0}, "
                     "stack trace: {1}").format(str(e), traceback.format_exc()))
        hutil.do_exit(1, 'Enable', 'error', '0', 'Enable failed.')
def _forcibly_reset_chap(hutil):
    """Force ChallengeResponseAuthentication to 'no' in sshd_config.

    If it is already disabled only a telemetry event is emitted; otherwise the
    config is backed up, rewritten atomically, and sshd is restarted.
    """
    name = "ChallengeResponseAuthentication"
    config = waagent.GetFileContents(SshdConfigPath).split("\n")
    for i in range(0, len(config)):
        if config[i].startswith(name) and "no" in config[i].lower():
            # Already disabled; record the fact and leave the file untouched.
            waagent.AddExtensionEvent(name=hutil.get_name(), op="sshd", isSuccess=True, message="ChallengeResponseAuthentication no")
            return

    waagent.AddExtensionEvent(name=hutil.get_name(), op="sshd", isSuccess=True, message="ChallengeResponseAuthentication yes")
    _backup_sshd_config(SshdConfigPath)
    _set_sshd_config(config, name, "no")
    waagent.ReplaceFileContentsAtomic(SshdConfigPath, "\n".join(config))
    waagent.MyDistro.restartSshService()
def _is_sshd_config_modified(protected_settings):
result = protected_settings.get('reset_ssh') or protected_settings.get('password')
return result is not None
def uninstall():
    """Handle the 'uninstall' operation: nothing to clean up, report success."""
    handler = Util.HandlerUtility(waagent.Log, waagent.Error)
    handler.do_parse_context('Uninstall')
    handler.do_exit(0, 'Uninstall', 'success', '0', 'Uninstall succeeded')
def disable():
    """Handle the 'disable' operation: no state to change, report success."""
    handler = Util.HandlerUtility(waagent.Log, waagent.Error)
    handler.do_parse_context('Disable')
    handler.do_exit(0, 'Disable', 'success', '0', 'Disable Succeeded')
def update():
    """Handle the 'update' operation: no migration needed, report success."""
    handler = Util.HandlerUtility(waagent.Log, waagent.Error)
    handler.do_parse_context('Update')
    handler.do_exit(0, 'Update', 'success', '0', 'Update Succeeded')
def _remove_user_account(user_name, hutil):
    """Delete a local user account while preserving other users' sudoers lines.

    Emits a telemetry event on success; re-raises (after telemetry) on failure.
    """
    hutil.log("Removing user account")
    try:
        # Snapshot sudoers entries NOT owned by this user first, then restore
        # them afterwards — presumably DeleteAccount rewrites the waagent
        # sudoers drop-in; confirm against the distro implementation.
        sudoers = _get_other_sudoers(user_name)
        waagent.MyDistro.DeleteAccount(user_name)
        _save_other_sudoers(sudoers)
    except Exception as e:
        waagent.AddExtensionEvent(name=hutil.get_name(),
                                  op=waagent.WALAEventOperation.Enable,
                                  isSuccess=False,
                                  message="(02102)Failed to remove user.")
        raise Exception("Failed to remove user {0}".format(e))
    waagent.AddExtensionEvent(name=hutil.get_name(),
                              op=waagent.WALAEventOperation.Enable,
                              isSuccess=True,
                              message="Successfully removed user")
def _set_user_account_pub_key(protect_settings, hutil):
    """Create/update a user account and deploy its SSH public key.

    Reads the username, optional password, optional ssh_key (PEM cert or
    raw "ssh-rsa ..." line) and optional expiration from the protected
    settings. Falls back to the public keys in ovf-env.xml when no key is
    supplied. No-op when no username is present.
    """
    ovf_xml = waagent.GetFileContents('/var/lib/waagent/ovf-env.xml')
    ovf_env = waagent.OvfEnv().Parse(ovf_xml)

    # user name must be provided if set ssh key or password
    # Bug fix: dict.has_key() is Python-2-only; the 'in' operator is
    # equivalent and works on both Python 2 and 3.
    if not protect_settings or 'username' not in protect_settings:
        return

    user_name = protect_settings['username']
    user_pass = protect_settings.get('password')
    cert_txt = protect_settings.get('ssh_key')
    expiration = protect_settings.get('expiration')
    no_convert = False
    if not user_pass and not cert_txt and not ovf_env.SshPublicKeys:
        raise Exception("No password or ssh_key is specified.")
    if user_pass is not None and len(user_pass) == 0:
        user_pass = None
        hutil.log("empty passwords are not allowed, ignoring password reset")

    # Reset user account and password, password could be empty
    sudoers = _get_other_sudoers(user_name)
    error_string = waagent.MyDistro.CreateAccount(
        user_name, user_pass, expiration, None)
    _save_other_sudoers(sudoers)

    if error_string is not None:
        err_msg = "Failed to create the account or set the password"
        waagent.AddExtensionEvent(name=hutil.get_name(),
                                  op=waagent.WALAEventOperation.Enable,
                                  isSuccess=False,
                                  message="(02101)" + err_msg)
        raise Exception(err_msg + " with " + error_string)
    hutil.log("Succeeded in create the account or set the password.")

    # Allow password authentication if user_pass is provided
    if user_pass is not None:
        waagent.AddExtensionEvent(name=hutil.get_name(), op="scenario", isSuccess=True, message="create-user-with-password")
        _allow_password_auth()

    # Reset ssh key with the new public key passed in or reuse old public key.
    if cert_txt or len(ovf_env.SshPublicKeys) > 0:
        # A raw "ssh-rsa ..." line can be appended verbatim; a PEM cert must
        # first be converted to a public key via openssl.
        if cert_txt and cert_txt.strip().lower().startswith("ssh-rsa"):
            no_convert = True
        try:
            pub_path = os.path.join('/home/', user_name, '.ssh',
                                    'authorized_keys')
            ovf_env.UserName = user_name
            if no_convert:
                if cert_txt:
                    pub_path = ovf_env.PrepareDir(pub_path)
                    final_cert_txt = cert_txt
                    if not cert_txt.endswith("\n"):
                        final_cert_txt = final_cert_txt + "\n"
                    waagent.AppendFileContents(pub_path, final_cert_txt)
                    waagent.MyDistro.setSelinuxContext(pub_path,
                                                       'unconfined_u:object_r:ssh_home_t:s0')
                    waagent.ChangeOwner(pub_path, user_name)
                    waagent.AddExtensionEvent(name=hutil.get_name(), op="scenario", isSuccess=True, message="create-user")
                    hutil.log("Succeeded in resetting ssh_key.")
                else:
                    err_msg = "Failed to reset ssh key because the cert content is empty."
                    waagent.AddExtensionEvent(name=hutil.get_name(),
                                              op=waagent.WALAEventOperation.Enable,
                                              isSuccess=False,
                                              message="(02100)" + err_msg)
            else:
                if cert_txt:
                    _save_cert_str_as_file(cert_txt, 'temp.crt')
                else:
                    # No key in settings: reuse the first fingerprinted key
                    # from the provisioning OVF environment.
                    for pkey in ovf_env.SshPublicKeys:
                        if pkey[1]:
                            shutil.copy(
                                os.path.join(waagent.LibDir, pkey[0] + '.crt'),
                                os.path.join(os.getcwd(), 'temp.crt'))
                            break
                pub_path = ovf_env.PrepareDir(pub_path)
                retcode = waagent.Run(waagent.Openssl + " x509 -in temp.crt -noout -pubkey > temp.pub")
                if retcode > 0:
                    raise Exception("Failed to generate public key file.")
                waagent.MyDistro.sshDeployPublicKey('temp.pub', pub_path)
                waagent.MyDistro.setSelinuxContext(pub_path,
                                                   'unconfined_u:object_r:ssh_home_t:s0')
                waagent.ChangeOwner(pub_path, user_name)
                os.remove('temp.pub')
                os.remove('temp.crt')
                waagent.AddExtensionEvent(name=hutil.get_name(), op="scenario", isSuccess=True, message="create-user")
                hutil.log("Succeeded in resetting ssh_key.")
        except Exception as e:
            hutil.log(str(e))
            waagent.AddExtensionEvent(name=hutil.get_name(),
                                      op=waagent.WALAEventOperation.Enable,
                                      isSuccess=False,
                                      message="(02100)Failed to reset ssh key.")
def _get_other_sudoers(userName):
sudoersFile = '/etc/sudoers.d/waagent'
if not os.path.isfile(sudoersFile):
return None
sudoers = waagent.GetFileContents(sudoersFile).split("\n")
pattern = '^{0}\s'.format(userName)
sudoers = filter(lambda x : re.match(pattern, x) is None, sudoers)
return sudoers
def _save_other_sudoers(sudoers):
    """Append preserved sudoers lines back to the waagent drop-in.

    No-op when sudoers is None (the drop-in file did not exist).
    """
    if sudoers is None:
        return
    sudoersFile = '/etc/sudoers.d/waagent'
    waagent.AppendFileContents(sudoersFile, "\n".join(sudoers))
    # 0440 (read-only, root/group) is what sudo expects for drop-in files.
    os.chmod("/etc/sudoers.d/waagent", 0o440)
def _allow_password_auth():
    """Enable PasswordAuthentication in sshd_config (atomic rewrite)."""
    lines = waagent.GetFileContents(SshdConfigPath).split("\n")
    _set_sshd_config(lines, "PasswordAuthentication", "yes")
    waagent.ReplaceFileContentsAtomic(SshdConfigPath, "\n".join(lines))
def _set_sshd_config(config, name, val):
notfound = True
for i in range(0, len(config)):
if config[i].startswith(name):
config[i] = "{0} {1}".format(name, val)
notfound = False
elif config[i].startswith("Match"):
# Match block must be put in the end of sshd config
break
if notfound:
config.insert(i, "{0} {1}".format(name, val))
return config
def _reset_sshd_config(sshd_file_path):
    """Replace sshd_config with the distro-specific template from ./resources.

    Template lookup order: '<distro>_<version>', '<distro>_default', then
    'default'. CoreOS is special-cased because sshd there is socket-activated
    and configured through coreos-cloudinit.
    """
    # NOTE(review): platform.dist() is deprecated and removed in Python 3.8;
    # acceptable in the Python 2 waagent environment this targets — confirm.
    distro = platform.dist()
    distro_name = distro[0]
    version = distro[1]
    config_file_path = os.path.join(os.getcwd(), 'resources', '%s_%s' % (distro_name, version))
    if not(os.path.exists(config_file_path)):
        config_file_path = os.path.join(os.getcwd(), 'resources', '%s_%s' % (distro_name, 'default'))
    if not(os.path.exists(config_file_path)):
        config_file_path = os.path.join(os.getcwd(), 'resources', 'default')

    if distro_name == "CoreOS":
        # Parse sshd port from config_file_path
        sshd_port = 22
        regex = re.compile(r"^Port\s+(\d+)", re.VERBOSE)
        with open(config_file_path) as f:
            for line in f:
                match = regex.match(line)
                if match:
                    sshd_port = match.group(1)
                    break

        # Prepare cloud init config for coreos-cloudinit
        f = tempfile.NamedTemporaryFile(delete=False)
        f.close()
        cfg_tempfile = f.name
        cfg_content = "#cloud-config\n\n"

        # Overwrite /etc/ssh/sshd_config with the template's content.
        cfg_content += "write_files:\n"
        cfg_content += "  - path: {0}\n".format(sshd_file_path)
        cfg_content += "    permissions: 0600\n"
        cfg_content += "    owner: root:root\n"
        cfg_content += "    content: |\n"
        for line in waagent.GetFileContents(config_file_path).split('\n'):
            cfg_content += "      {0}\n".format(line)

        # Change the sshd port in /etc/systemd/system/sshd.socket and restart.
        cfg_content += "\ncoreos:\n"
        cfg_content += "  units:\n"
        cfg_content += "  - name: sshd.socket\n"
        cfg_content += "    command: restart\n"
        cfg_content += "    content: |\n"
        cfg_content += "      [Socket]\n"
        cfg_content += "      ListenStream={0}\n".format(sshd_port)
        cfg_content += "      Accept=yes\n"

        waagent.SetFileContents(cfg_tempfile, cfg_content)
        waagent.Run("coreos-cloudinit -from-file " + cfg_tempfile, chk_err=False)
        os.remove(cfg_tempfile)
    else:
        shutil.copyfile(config_file_path, sshd_file_path)
        waagent.MyDistro.restartSshService()
def _backup_sshd_config(sshd_file_path):
if os.path.exists(sshd_file_path):
backup_file_name = '%s_%s' % (
sshd_file_path, time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()))
shutil.copyfile(sshd_file_path, backup_file_name)
def _save_cert_str_as_file(cert_txt, file_name):
    """Normalize a certificate string and write it to file_name.

    Anything before the BEGIN marker and after the END marker is discarded,
    surrounding whitespace is trimmed, and the payload is re-wrapped with
    canonical PEM markers plus a trailing newline.
    """
    begin_idx = cert_txt.find(BeginCertificateTag)
    if begin_idx >= 0:
        cert_txt = cert_txt[begin_idx + len(BeginCertificateTag):]
    end_idx = cert_txt.find(EndCertificateTag)
    if end_idx >= 0:
        cert_txt = cert_txt[:end_idx]
    body = cert_txt.strip()
    normalized = "{0}\n{1}\n{2}\n".format(BeginCertificateTag, body, EndCertificateTag)
    waagent.SetFileContents(file_name, normalized)
def _open_ssh_port():
    """Make sure iptables permits SSH traffic on port 22 in both directions.

    Removes DROP/REJECT rules that would block port 22 and inserts ACCEPT
    rules at the top of the INPUT and OUTPUT chains.
    """
    _del_rule_if_exists('INPUT -p tcp -m tcp --dport 22 -j DROP')
    _del_rule_if_exists('INPUT -p tcp -m tcp --dport 22 -j REJECT')
    _del_rule_if_exists('INPUT -p -j DROP')
    _del_rule_if_exists('INPUT -p -j REJECT')
    _insert_rule_if_not_exists('INPUT -p tcp -m tcp --dport 22 -j ACCEPT')

    _del_rule_if_exists('OUTPUT -p tcp -m tcp --sport 22 -j DROP')
    _del_rule_if_exists('OUTPUT -p tcp -m tcp --sport 22 -j REJECT')
    _del_rule_if_exists('OUTPUT -p -j DROP')
    _del_rule_if_exists('OUTPUT -p -j REJECT')
    # Bug fix: outbound SSH replies have *source* port 22. The previous rule
    # matched --dport 22, which does not cover the return traffic and was
    # inconsistent with the --sport DROP/REJECT rules deleted above.
    _insert_rule_if_not_exists('OUTPUT -p tcp -m tcp --sport 22 -j ACCEPT')
def _del_rule_if_exists(rule_string):
    """Delete every iptables rule matching rule_string.

    Loops because the same rule may appear multiple times; re-reads the
    ruleset after each deletion until no occurrence is left.
    """
    current = waagent.RunGetOutput("iptables-save")
    while current[0] == 0 and rule_string in current[1]:
        waagent.Run("iptables -D %s" % rule_string)
        current = waagent.RunGetOutput("iptables-save")
def _insert_rule_if_not_exists(rule_string):
    """Insert an iptables rule at the top of its chain unless already present."""
    retcode, ruleset = waagent.RunGetOutput("iptables-save")
    if retcode == 0 and rule_string not in ruleset:
        waagent.Run("iptables -I %s" % rule_string)
def check_and_repair_disk(hutil):
    """Run an fsck check or repair according to the extension's public settings.

    'check_disk' and 'repair_disk' are mutually exclusive; specifying both is
    a fatal argument error. Returns the fsck return code (check) or the mount
    output (repair); returns None when neither is requested.
    """
    public_settings = hutil.get_public_settings()
    if public_settings:
        check_disk = public_settings.get('check_disk')
        repair_disk = public_settings.get('repair_disk')
        disk_name = public_settings.get('disk_name')

        if check_disk and repair_disk:
            err_msg = ("check_disk and repair_disk was both specified."
                       "Only one of them can be specified")
            hutil.error(err_msg)
            hutil.do_exit(1, 'Enable', 'error', '0', 'Enable failed.')

        if check_disk:
            waagent.AddExtensionEvent(name=hutil.get_name(), op="scenario", isSuccess=True, message="check_disk")
            outretcode = _fsck_check(hutil)
            hutil.log("Successfully checked disk")
            return outretcode

        if repair_disk:
            waagent.AddExtensionEvent(name=hutil.get_name(), op="scenario", isSuccess=True, message="repair_disk")
            outdata = _fsck_repair(hutil, disk_name)
            hutil.log("Repaired and remounted disk")
            return outdata
def _fsck_check(hutil):
    """Run 'fsck -As -y' over fstab filesystems; exit the handler on failure.

    Returns the fsck return code (0) on success.
    """
    try:
        retcode = waagent.Run("fsck -As -y")
        if retcode > 0:
            # NOTE(review): retcode is an int; hutil.log presumably tolerates
            # non-string arguments — confirm.
            hutil.log(retcode)
            raise Exception("Disk check was not successful")
        else:
            return retcode
    except Exception as e:
        hutil.error("Failed to run disk check with error: {0}, {1}".format(
            str(e), traceback.format_exc()))
        hutil.do_exit(1, 'Check', 'error', '0', 'Check failed.')
def _fsck_repair(hutil, disk_name):
    """Unmount disk_name, run 'fsck -AR -y', and return the mount table output.

    Exits the handler with an error status when fsck fails.
    """
    # first unmount disks and loop devices lazy + forced
    try:
        cmd_result = waagent.Run("umount -f /%s" % disk_name)
        if cmd_result != 0:
            # Fail fast
            hutil.log("Failed to unmount disk: %s" % disk_name)
        # run repair
        retcode = waagent.Run("fsck -AR -y")
        hutil.log("Ran fsck with return code: %d" % retcode)
        if retcode == 0:
            retcode, output = waagent.RunGetOutput("mount")
            hutil.log(output)
            return output
        else:
            # NOTE(review): this message is misleading — the branch fires when
            # fsck (not mount) fails; kept as-is since callers/logs may match it.
            raise Exception("Failed to mount disks")
    except Exception as e:
        hutil.error("{0}, {1}".format(str(e), traceback.format_exc()))
        hutil.do_exit(1, 'Repair','error','0', 'Repair failed.')
# Script entry point: waagent runs this module directly with an operation flag.
if __name__ == '__main__' :
    main()
| apache-2.0 |
seet61/one | v2/requests/packages/chardet/latin1prober.py | 950 | 5241 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe
from .compat import wrap_ord
# Number of frequency categories tracked per character transition (see
# Latin1ClassModel: 0 = illegal .. 3 = very likely).
FREQ_CAT_NUM = 4

# Character classes used to bucket each Latin-1 byte before scoring.
UDF = 0  # undefined
OTH = 1  # other
ASC = 2  # ascii capital letter
ASS = 3  # ascii small letter
ACV = 4  # accent capital vowel
ACO = 5  # accent capital other
ASV = 6  # accent small vowel
ASO = 7  # accent small other
CLASS_NUM = 8  # total classes
Latin1_CharToClass = (
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F
OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57
ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F
OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77
ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F
OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH, # 80 - 87
OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF, # 88 - 8F
UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 90 - 97
OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO, # 98 - 9F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A0 - A7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A8 - AF
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B8 - BF
ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO, # C0 - C7
ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # C8 - CF
ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH, # D0 - D7
ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO, # D8 - DF
ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO, # E0 - E7
ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # E8 - EF
ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH, # F0 - F7
ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO, # F8 - FF
)
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
Latin1ClassModel = (
# UDF OTH ASC ASS ACV ACO ASV ASO
0, 0, 0, 0, 0, 0, 0, 0, # UDF
0, 3, 3, 3, 3, 3, 3, 3, # OTH
0, 3, 3, 3, 3, 3, 3, 3, # ASC
0, 3, 3, 3, 1, 1, 3, 3, # ASS
0, 3, 3, 3, 1, 2, 1, 2, # ACV
0, 3, 3, 3, 3, 3, 3, 3, # ACO
0, 3, 1, 3, 1, 1, 1, 3, # ASV
0, 3, 1, 3, 1, 1, 3, 3, # ASO
)
class Latin1Prober(CharSetProber):
    """Charset prober for Latin-1 / windows-1252 text.

    Scores character-pair transitions against Latin1ClassModel: an illegal
    transition rules the charset out; otherwise the distribution of
    'very likely' vs 'very unlikely' transitions drives the confidence.
    """

    def __init__(self):
        CharSetProber.__init__(self)
        self.reset()

    def reset(self):
        # Class of the previously seen character; OTH is a neutral start state.
        self._mLastCharClass = OTH
        # Histogram of transition frequency categories (indices 0..3).
        self._mFreqCounter = [0] * FREQ_CAT_NUM
        CharSetProber.reset(self)

    def get_charset_name(self):
        return "windows-1252"

    def feed(self, aBuf):
        """Consume a byte buffer and update the internal state/histogram."""
        aBuf = self.filter_with_english_letters(aBuf)
        for c in aBuf:
            charClass = Latin1_CharToClass[wrap_ord(c)]
            # Look up the frequency category for the (previous, current) pair.
            freq = Latin1ClassModel[(self._mLastCharClass * CLASS_NUM)
                                    + charClass]
            if freq == 0:
                # Illegal transition: this cannot be Latin-1 text.
                self._mState = eNotMe
                break
            self._mFreqCounter[freq] += 1
            self._mLastCharClass = charClass
        return self.get_state()

    def get_confidence(self):
        if self.get_state() == eNotMe:
            return 0.01
        total = sum(self._mFreqCounter)
        # total < 0.01 means no transitions were scored (total is 0).
        if total < 0.01:
            confidence = 0.0
        else:
            # Likely transitions raise confidence; very-unlikely ones are
            # penalized heavily (x20).
            confidence = ((self._mFreqCounter[3] / total)
                          - (self._mFreqCounter[1] * 20.0 / total))
        if confidence < 0.0:
            confidence = 0.0
        # lower the confidence of latin1 so that other more accurate
        # detector can take priority.
        confidence = confidence * 0.5
        return confidence
| bsd-2-clause |
sql-analytics/openvstorage | ovs/extensions/snmp/ovssnmpserver.py | 2 | 26954 | # Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OVS SNMP bootstrap module
"""
from ovs.extensions.snmp.server import SNMPServer
from ovs.extensions.storage.persistentfactory import PersistentFactory
from ovs.extensions.storage.exceptions import KeyNotFoundException
from ovs.plugin.provider.configuration import Configuration
from ovs.dal.dataobjectlist import DataObjectList
import signal, time
STORAGE_PREFIX = "ovs_snmp"
NAMING_SCHEME = "1.3.6.1.4.1.29961.%s.%s.%s"
class OVSSNMPServer():
"""
Bootstrap the SNMP Server, hook into ovs
"""
    def __init__(self):
        """Initialize the SNMP bootstrap: bind to this storagerouter's IP,
        connect the persistent key-value store, and load known SNMP users.
        """
        # SIGTERM handler — presumably defined elsewhere on this class; confirm.
        signal.signal(signal.SIGTERM, self.SIGTERM)
        # Imported lazily to avoid a module-level dependency cycle (assumed).
        from ovs.extensions.generic.system import System
        my_storagerouter = System.get_my_storagerouter()
        self.host = my_storagerouter.ip
        # 161 is the standard SNMP agent port.
        self.port = 161

        self.persistent = PersistentFactory.get_client()
        self.users = self.get_users()

        # Load from model: class_id -> list of registered object guids.
        self.assigned_oids = {}
        # Per-class instance counter used to build instance OIDs.
        self.instance_oid = 0
        # Book-keeping: guids of all model objects exposed over SNMP.
        self.model_oids = set()
def get_users(self):
"""
Returns all saved users from the database
"""
user_prefix = "{}_user_".format(STORAGE_PREFIX)
users = self.persistent.prefix(user_prefix)
return [self.persistent.get(user) for user in users]
def add_user(self, username, password, privatekey):
"""
Adds an snmp v3 user to the database
"""
storage_key = "{}_user_{}".format(STORAGE_PREFIX, username)
value = (username, password, privatekey, 'authPriv')
self.persistent.set(storage_key, value)
def configure(self, group, key, value):
"""
Store/Update a key in persistent storage
e.g "dal", "enabled", True
"""
storage_key = "{}_config_{}_{}".format(STORAGE_PREFIX, group, key)
self.persistent.set(storage_key, value)
def _save_model_oid(self, guid, oid, attribute):
"""
Store the mapping between oid and object guid
"""
key = "{}_dal2oid_{}_{}".format(STORAGE_PREFIX, guid, attribute)
self.persistent.set(key, oid)
def _get_model_oid(self, guid, attribute):
"""
Return the oid for a specific guid/attribute
"""
key = "{}_dal2oid_{}_{}".format(STORAGE_PREFIX, guid, attribute)
try:
return self.persistent.get(key)
except KeyNotFoundException:
return None
    def get_mappings(self, guid):
        """Return {oid: attribute_name} for every attribute stored for guid.

        Reconstructs the attribute name by stripping the storage prefix,
        the '_dal2oid_' marker, and the guid from each persisted key.
        """
        mapping = {}
        key = "{}_dal2oid_{}_".format(STORAGE_PREFIX, guid)
        keys = self.persistent.prefix(key)
        # NOTE(review): the loop rebinds both `key` and the `guid` parameter;
        # intentional here, but fragile if the method grows.
        for key in keys:
            oid = self.persistent.get(key)
            # Key layout: "<STORAGE_PREFIX>_dal2oid_<guid>_<attribute>".
            attr_name = key.replace(STORAGE_PREFIX, '').replace('_dal2oid_', '')
            guid = attr_name.split('_')[0]
            attr_name = attr_name.replace('{}_'.format(guid), '')
            mapping[oid] = attr_name
        return mapping
def _check_added(self, model_object):
for class_id in self.assigned_oids:
if model_object.guid in self.assigned_oids[class_id]:
return True
return False
    def _register_dal_model(self, class_id, model_object, attribute, attrb_oid, key=None, func=None, atype=str):
        """
        Register a DAL model as OID
        class_id is the unique id of the type
        an unique id for the instance will be generated
        attrb_oid: an unique id for the attribute (hardcoded)
        together they will form oid that will be stored in the model

        key: optional sub-key/attribute to read from the attribute's value.
        func: optional callable(model_object) that overrides attribute access.
        atype: type the resolved value is coerced to before returning.
        """
        self.model_oids.add(model_object.guid)
        if not class_id in self.assigned_oids:
            self.assigned_oids[class_id] = []
            # NOTE(review): resets the shared instance counter whenever a new
            # class id appears — assumes callers register one class at a time;
            # confirm this cannot collide when classes are interleaved.
            self.instance_oid = 0
        if not model_object.guid in self.assigned_oids[class_id]:
            self.assigned_oids[class_id].append(model_object.guid)

        def get_function():
            # Closure over model_object/attribute/key/func: evaluated on each
            # SNMP GET so values are read live from the model.
            print('[DEBUG] Get function for %s %s %s' % (model_object.guid, attribute, str(key)))
            if func:
                print('[DEBUG] Calling lambda function %s' % func)
                return func(model_object)
            try:
                value = getattr(model_object, attribute)
                if key and isinstance(value, dict):
                    value = value[key]
                elif key:
                    value = getattr(value, key)
                elif not key and (isinstance(value, list) or isinstance(value, DataObjectList)):
                    # Collections are exposed as their element count.
                    value = len(value)
            except Exception as ex:
                # Fall back to a type-appropriate sentinel so the SNMP GET
                # still answers instead of erroring out.
                print('[EXCEPTION] %s' % (str(ex)))
                if atype == int:
                    value = -1
                elif atype == str:
                    value = 'N/A'
            try:
                return atype(value)
            except Exception as ex:
                print('[EXCEPTION 2] %s' % (str(ex)))
                return 0

        oid = self.server.register_custom_oid(class_id, self.instance_oid, attrb_oid, get_function, atype)
        self._save_model_oid(model_object.guid, oid, "{}_{}".format(attribute, key) if key else attribute)
        return oid
def _bootstrap_dal_models(self):
"""
Load/hook dal models as snmp oids
"""
_guids = set()
enabled_key = "{}_config_dal_enabled".format(STORAGE_PREFIX)
self.instance_oid = 0
try:
enabled = self.persistent.get(enabled_key)
except KeyNotFoundException:
enabled = True # Enabled by default, can be disabled by setting the key
if enabled:
from ovs.dal.lists.vdisklist import VDiskList
from ovs.dal.lists.storagerouterlist import StorageRouterList
from ovs.dal.lists.pmachinelist import PMachineList
from ovs.dal.lists.vmachinelist import VMachineList
from ovs.dal.lists.vpoollist import VPoolList
from ovs.dal.lists.storagedriverlist import StorageDriverList
for storagerouter in StorageRouterList.get_storagerouters():
_guids.add(storagerouter.guid)
if not self._check_added(storagerouter):
self._register_dal_model(10, storagerouter, 'guid', "0")
self._register_dal_model(10, storagerouter, 'name', "1")
self._register_dal_model(10, storagerouter, 'pmachine', "3", key = 'host_status')
self._register_dal_model(10, storagerouter, 'description', "4")
self._register_dal_model(10, storagerouter, 'devicename', "5")
self._register_dal_model(10, storagerouter, 'failover_mode', "6")
self._register_dal_model(10, storagerouter, 'ip', "8")
self._register_dal_model(10, storagerouter, 'machineid', "9")
self._register_dal_model(10, storagerouter, 'status', "10")
self._register_dal_model(10, storagerouter, '#vdisks', "11",
func = lambda storagerouter: len([vdisk for vpool_vdisks in [storagedriver.vpool.vdisks for storagedriver in storagerouter.storagedrivers] for vdisk in vpool_vdisks if vdisk.storagedriver_id == storagedriver.storagedriver_id]),
atype = int)
self._register_dal_model(10, storagerouter, '#vmachines', "12",
func = lambda storagerouter: len(set([vdisk.vmachine.guid for vpool_vdisks in [storagedriver.vpool.vdisks for storagedriver in storagerouter.storagedrivers] for vdisk in vpool_vdisks if vdisk.storagedriver_id == storagedriver.storagedriver_id])),
atype = int)
self._register_dal_model(10, storagerouter, '#stored_data', "13",
func = lambda storagerouter: sum([vdisk.vmachine.stored_data for vpool_vdisks in [storagedriver.vpool.vdisks for storagedriver in storagerouter.storagedrivers] for vdisk in vpool_vdisks if vdisk.storagedriver_id == storagedriver.storagedriver_id]),
atype = int)
self.instance_oid += 1
for vm in VMachineList.get_vmachines():
_guids.add(vm.guid)
if not self._check_added(vm):
if vm.is_vtemplate:
self._register_dal_model(11, vm, 'guid', "0")
self._register_dal_model(11, vm, 'name', "1")
def _children(vmt):
children = 0
disks = [vd.guid for vd in vmt.vdisks]
for vdisk in [vdisk.parent_vdisk_guid for item in [vm.vdisks for vm in VMachineList.get_vmachines() if not vm.is_vtemplate] for vdisk in item]:
for disk in disks:
if vdisk == disk:
children += 1
return children
self._register_dal_model(11, vm, '#children', 2, func = _children, atype = int)
self.instance_oid += 1
for vm in VMachineList.get_vmachines():
_guids.add(vm.guid)
if not self._check_added(vm):
if not vm.is_vtemplate:
self._register_dal_model(0, vm, 'guid', "0")
self._register_dal_model(0, vm, 'name', "1")
self._register_dal_model(0, vm, 'statistics', "2.0", key = "operations", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.1", key = "cluster_cache_misses_ps", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.2", key = "data_read", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.3", key = "sco_cache_misses", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.4", key = "sco_cache_hits_ps", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.5", key = "sco_cache_hits", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.6", key = "write_operations", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.7", key = "cluster_cache_misses", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.8", key = "read_operations_ps", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.9", key = "sco_cache_misses_ps", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.10", key = "backend_write_operations", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.11", key = "backend_data_read", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.12", key = "cache_hits", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.13", key = "backend_write_operations_ps", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.14", key = "metadata_store_hits_ps", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.15", key = "metadata_store_misses", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.16", key = "backend_data_written", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.17", key = "data_read_ps", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.18", key = "read_operations", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.19", key = "cluster_cache_hits", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.20", key = "data_written_ps", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.21", key = "cluster_cache_hits_ps", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.22", key = "cache_hits_ps", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.23", key = "timestamp", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.24", key = "metadata_store_misses_ps", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.25", key = "backend_data_written_ps", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.26", key = "backend_read_operations", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.27", key = "data_written", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.28", key = "metadata_store_hits", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.29", key = "backend_data_read_ps", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.30", key = "operations_ps", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.31", key = "backend_read_operations_ps", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.32", key = "data_transferred_ps", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.33", key = "write_operations_ps", atype = int)
self._register_dal_model(0, vm, 'statistics', "2.34", key = "data_transferred", atype = int)
self._register_dal_model(0, vm, 'stored_data', "3", atype = int)
self._register_dal_model(0, vm, 'description', "4")
self._register_dal_model(0, vm, 'devicename', "5")
self._register_dal_model(0, vm, 'failover_mode', "6")
self._register_dal_model(0, vm, 'hypervisorid', "7")
self._register_dal_model(0, vm, 'ip', "8")
self._register_dal_model(0, vm, 'status', "10")
self._register_dal_model(0, vm, 'stored_data', "10", atype = int)
self._register_dal_model(0, vm, 'snapshots', "11", atype = int)
self._register_dal_model(0, vm, 'vdisks', "12", atype = int)
self._register_dal_model(0, vm, 'FOC', '13',
func = lambda vm: 'DEGRADED' if all(item == 'DEGRADED' for item in [vd.info['failover_mode'] for vd in vm.vdisks]) else 'OK')
self.instance_oid += 1
for vd in VDiskList.get_vdisks():
_guids.add(vd.guid)
if not self._check_added(vd):
self._register_dal_model(1, vd, 'guid', "0")
self._register_dal_model(1, vd, 'name', "1")
self._register_dal_model(1, vd, 'statistics', "2.0", key = "operations", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.1", key = "data_written_ps", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.2", key = "data_read", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.3", key = "sco_cache_misses", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.4", key = "sco_cache_hits_ps", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.5", key = "sco_cache_hits", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.6", key = "write_operations", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.7", key = "cluster_cache_misses", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.8", key = "read_operations_ps", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.9", key = "sco_cache_misses_ps", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.10", key = "backend_write_operations", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.11", key = "backend_data_read", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.12", key = "cache_hits", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.13", key = "backend_write_operations_ps", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.14", key = "metadata_store_hits_ps", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.15", key = "metadata_store_misses", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.16", key = "backend_data_written", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.17", key = "data_read_ps", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.18", key = "read_operations", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.19", key = "cluster_cache_hits", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.20", key = "cluster_cache_misses_ps", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.21", key = "cluster_cache_hits_ps", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.22", key = "cache_hits_ps", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.23", key = "timestamp", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.24", key = "metadata_store_misses_ps", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.25", key = "backend_data_written_ps", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.26", key = "backend_read_operations", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.27", key = "data_written", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.28", key = "metadata_store_hits", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.29", key = "backend_data_read_ps", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.30", key = "operations_ps", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.31", key = "backend_read_operations_ps", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.32", key = "data_transferred_ps", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.33", key = "write_operations_ps", atype = int)
self._register_dal_model(1, vd, 'statistics', "2.34", key = "data_transferred", atype = int)
self._register_dal_model(1, vd, 'info', "3", key = 'stored', atype = int)
self._register_dal_model(1, vd, 'info', "4", key = 'failover_mode', atype = int)
self._register_dal_model(1, vd, 'snapshots', "5", atype = int)
self.instance_oid += 1
for pm in PMachineList.get_pmachines():
_guids.add(pm.guid)
if not self._check_added(pm):
self._register_dal_model(2, pm, 'guid', "0")
self._register_dal_model(2, pm, 'name', "1")
self._register_dal_model(2, pm, 'host_status', "2")
self.instance_oid += 1
for vp in VPoolList.get_vpools():
_guids.add(vp.guid)
if not self._check_added(vp):
self._register_dal_model(3, vp, 'guid', "0")
self._register_dal_model(3, vp, 'name', "1")
self._register_dal_model(3, vp, 'statistics', "2.0", key = "operations", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.1", key = "cluster_cache_misses_ps", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.2", key = "data_read", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.3", key = "sco_cache_misses", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.4", key = "sco_cache_hits_ps", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.5", key = "sco_cache_hits", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.6", key = "write_operations", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.7", key = "cluster_cache_misses", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.8", key = "read_operations_ps", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.9", key = "sco_cache_misses_ps", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.10", key = "backend_write_operations", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.11", key = "backend_data_read", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.12", key = "cache_hits", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.13", key = "backend_write_operations_ps", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.14", key = "metadata_store_hits_ps", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.15", key = "metadata_store_misses", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.16", key = "backend_data_written", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.17", key = "data_read_ps", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.18", key = "read_operations", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.19", key = "cluster_cache_hits", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.20", key = "data_written_ps", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.21", key = "cluster_cache_hits_ps", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.22", key = "cache_hits_ps", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.23", key = "timestamp", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.24", key = "metadata_store_misses_ps", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.25", key = "backend_data_written_ps", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.26", key = "backend_read_operations", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.27", key = "data_written", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.28", key = "metadata_store_hits", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.29", key = "backend_data_read_ps", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.30", key = "operations_ps", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.31", key = "backend_read_operations_ps", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.32", key = "data_transferred_ps", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.33", key = "write_operations_ps", atype = int)
self._register_dal_model(3, vp, 'statistics', "2.34", key = "data_transferred", atype = int)
self._register_dal_model(3, vp, 'status', "3")
self._register_dal_model(3, vp, 'description', "4")
self._register_dal_model(3, vp, 'vdisks', "5", atype = int)
self._register_dal_model(3, vp, '#vmachines', "6",
func = lambda vp: len(set([vd.vmachine.guid for vd in vp.vdisks])),
atype = int)
self.instance_oid += 1
for storagedriver in StorageDriverList.get_storagedrivers():
_guids.add(storagedriver.guid)
if not self._check_added(storagedriver):
self._register_dal_model(4, storagedriver, 'guid', "0")
self._register_dal_model(4, storagedriver, 'name', "1")
self._register_dal_model(4, storagedriver, 'stored_data', "2", atype = int)
self.instance_oid += 1
reload = False
for object_guid in list(self.model_oids):
if not object_guid in _guids:
self.model_oids.remove(object_guid)
reload = True
if reload:
self._reload_snmp()
def _polling_functions(self):
def _poll(timestamp_float):
start = time.time()
print('[POLLING] %s' % (str(timestamp_float)))
self._bootstrap_dal_models()
print('[DONE POLLING] %s' % (time.time() - start))
self.server.register_polling_function(_poll, 300) #5 minutes
    def _reload_snmp(self):
        """
        Restart the OVS SNMP service.

        The restart is scheduled via ``at now`` rather than executed inline,
        presumably so that the restart does not kill this process while it is
        still inside this call -- TODO confirm.
        """
        print('[SNMP] Reload started')
        import os
        os.system('echo "service ovs-snmp restart" | at now')
    def start(self):
        """
        Build the SNMP server, register the periodic polling hook, and begin
        serving requests.
        """
        self.server = SNMPServer(host = self.host,
                                 port = self.port,
                                 users = self.users,
                                 naming_scheme = NAMING_SCHEME)
        self._polling_functions()
        self.server.start()
    def SIGTERM(self, signum, frame):
        """
        Signal handler: stop the SNMP server cleanly on SIGTERM.

        :param signum: signal number (unused)
        :param frame:  current stack frame (unused)
        """
        print('Got sigterm...')
        self.server.stop()
if __name__ == '__main__':
    # Entry point: run the SNMP server in the foreground.
    server = OVSSNMPServer()
    server.start()
| apache-2.0 |
p0psicles/SickRage | lib/requests/packages/chardet/mbcssm.py | 1783 | 19590 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart, eError, eItsMe
# BIG5
BIG5_cls = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
4,4,4,4,4,4,4,4, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
4,3,3,3,3,3,3,3, # a0 - a7
3,3,3,3,3,3,3,3, # a8 - af
3,3,3,3,3,3,3,3, # b0 - b7
3,3,3,3,3,3,3,3, # b8 - bf
3,3,3,3,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
BIG5_st = (
eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,#08-0f
eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart#10-17
)
# Bytes consumed per character class (index = class, 0 = error/illegal).
Big5CharLenTable = (0, 1, 1, 2, 0)
# State-machine description consumed by the coding state machine for Big5.
Big5SMModel = {'classTable': BIG5_cls,
               'classFactor': 5,
               'stateTable': BIG5_st,
               'charLenTable': Big5CharLenTable,
               'name': 'Big5'}
# CP949
CP949_cls = (
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0, # 00 - 0f
1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1, # 10 - 1f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 20 - 2f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 30 - 3f
1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4, # 40 - 4f
4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 50 - 5f
1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5, # 60 - 6f
5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 70 - 7f
0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 80 - 8f
6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 90 - 9f
6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8, # a0 - af
7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7, # b0 - bf
7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2, # c0 - cf
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # d0 - df
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # e0 - ef
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0, # f0 - ff
)
CP949_st = (
#cls= 0 1 2 3 4 5 6 7 8 9 # previous state =
eError,eStart, 3,eError,eStart,eStart, 4, 5,eError, 6, # eStart
eError,eError,eError,eError,eError,eError,eError,eError,eError,eError, # eError
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe, # eItsMe
eError,eError,eStart,eStart,eError,eError,eError,eStart,eStart,eStart, # 3
eError,eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 4
eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 5
eError,eStart,eStart,eStart,eStart,eError,eError,eStart,eStart,eStart, # 6
)
# Bytes consumed per character class (index = class, 0 = error/illegal).
CP949CharLenTable = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)
# State-machine description consumed by the coding state machine for CP949.
CP949SMModel = {'classTable': CP949_cls,
                'classFactor': 10,
                'stateTable': CP949_st,
                'charLenTable': CP949CharLenTable,
                'name': 'CP949'}
# EUC-JP
EUCJP_cls = (
4,4,4,4,4,4,4,4, # 00 - 07
4,4,4,4,4,4,5,5, # 08 - 0f
4,4,4,4,4,4,4,4, # 10 - 17
4,4,4,5,4,4,4,4, # 18 - 1f
4,4,4,4,4,4,4,4, # 20 - 27
4,4,4,4,4,4,4,4, # 28 - 2f
4,4,4,4,4,4,4,4, # 30 - 37
4,4,4,4,4,4,4,4, # 38 - 3f
4,4,4,4,4,4,4,4, # 40 - 47
4,4,4,4,4,4,4,4, # 48 - 4f
4,4,4,4,4,4,4,4, # 50 - 57
4,4,4,4,4,4,4,4, # 58 - 5f
4,4,4,4,4,4,4,4, # 60 - 67
4,4,4,4,4,4,4,4, # 68 - 6f
4,4,4,4,4,4,4,4, # 70 - 77
4,4,4,4,4,4,4,4, # 78 - 7f
5,5,5,5,5,5,5,5, # 80 - 87
5,5,5,5,5,5,1,3, # 88 - 8f
5,5,5,5,5,5,5,5, # 90 - 97
5,5,5,5,5,5,5,5, # 98 - 9f
5,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,0,5 # f8 - ff
)
EUCJP_st = (
3, 4, 3, 5,eStart,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eStart,eError,eStart,eError,eError,eError,#10-17
eError,eError,eStart,eError,eError,eError, 3,eError,#18-1f
3,eError,eError,eError,eStart,eStart,eStart,eStart#20-27
)
# Bytes consumed per character class (index = class, 0 = error/illegal).
EUCJPCharLenTable = (2, 2, 2, 3, 1, 0)
# State-machine description consumed by the coding state machine for EUC-JP.
EUCJPSMModel = {'classTable': EUCJP_cls,
                'classFactor': 6,
                'stateTable': EUCJP_st,
                'charLenTable': EUCJPCharLenTable,
                'name': 'EUC-JP'}
# EUC-KR
EUCKR_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,3,3,3, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,3,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,0 # f8 - ff
)
EUCKR_st = (
eError,eStart, 3,eError,eError,eError,eError,eError,#00-07
eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,eStart #08-0f
)
# Bytes consumed per character class (index = class, 0 = error/illegal).
EUCKRCharLenTable = (0, 1, 2, 0)
# State-machine description consumed by the coding state machine for EUC-KR.
EUCKRSMModel = {'classTable': EUCKR_cls,
                'classFactor': 4,
                'stateTable': EUCKR_st,
                'charLenTable': EUCKRCharLenTable,
                'name': 'EUC-KR'}
# EUC-TW
EUCTW_cls = (
2,2,2,2,2,2,2,2, # 00 - 07
2,2,2,2,2,2,0,0, # 08 - 0f
2,2,2,2,2,2,2,2, # 10 - 17
2,2,2,0,2,2,2,2, # 18 - 1f
2,2,2,2,2,2,2,2, # 20 - 27
2,2,2,2,2,2,2,2, # 28 - 2f
2,2,2,2,2,2,2,2, # 30 - 37
2,2,2,2,2,2,2,2, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,2, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,6,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,3,4,4,4,4,4,4, # a0 - a7
5,5,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,3,1,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
EUCTW_st = (
eError,eError,eStart, 3, 3, 3, 4,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eStart,eError,#10-17
eStart,eStart,eStart,eError,eError,eError,eError,eError,#18-1f
5,eError,eError,eError,eStart,eError,eStart,eStart,#20-27
eStart,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
# Bytes consumed per character class (index = class, 0 = error/illegal).
EUCTWCharLenTable = (0, 0, 1, 2, 2, 2, 3)
# State-machine description consumed by the coding state machine for EUC-TW.
EUCTWSMModel = {'classTable': EUCTW_cls,
                'classFactor': 7,
                'stateTable': EUCTW_st,
                'charLenTable': EUCTWCharLenTable,
                'name': 'x-euc-tw'}
# GB2312
GB2312_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
3,3,3,3,3,3,3,3, # 30 - 37
3,3,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,4, # 78 - 7f
5,6,6,6,6,6,6,6, # 80 - 87
6,6,6,6,6,6,6,6, # 88 - 8f
6,6,6,6,6,6,6,6, # 90 - 97
6,6,6,6,6,6,6,6, # 98 - 9f
6,6,6,6,6,6,6,6, # a0 - a7
6,6,6,6,6,6,6,6, # a8 - af
6,6,6,6,6,6,6,6, # b0 - b7
6,6,6,6,6,6,6,6, # b8 - bf
6,6,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
6,6,6,6,6,6,6,6, # e0 - e7
6,6,6,6,6,6,6,6, # e8 - ef
6,6,6,6,6,6,6,6, # f0 - f7
6,6,6,6,6,6,6,0 # f8 - ff
)
GB2312_st = (
eError,eStart,eStart,eStart,eStart,eStart, 3,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,#10-17
4,eError,eStart,eStart,eError,eError,eError,eError,#18-1f
eError,eError, 5,eError,eError,eError,eItsMe,eError,#20-27
eError,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
# To be accurate, the length of class 6 can be either 2 or 4.
# But it is not necessary to discriminate between the two since
# it is used for frequency analysis only, and we are validating
# each code range there as well. So it is safe to set it to be
# 2 here.
# Bytes consumed per character class (index = class, 0 = error/illegal).
GB2312CharLenTable = (0, 1, 1, 1, 1, 1, 2)
# State-machine description consumed by the coding state machine for GB2312.
GB2312SMModel = {'classTable': GB2312_cls,
                 'classFactor': 7,
                 'stateTable': GB2312_st,
                 'charLenTable': GB2312CharLenTable,
                 'name': 'GB2312'}
# Shift_JIS
SJIS_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
3,3,3,3,3,2,2,3, # 80 - 87
3,3,3,3,3,3,3,3, # 88 - 8f
3,3,3,3,3,3,3,3, # 90 - 97
3,3,3,3,3,3,3,3, # 98 - 9f
#0xa0 is illegal in sjis encoding, but some pages do
#contain such bytes. We need to be more forgiving of errors.
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,4,4,4, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,0,0,0) # f8 - ff
SJIS_st = (
eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eError,eError,eStart,eStart,eStart,eStart #10-17
)
# Bytes consumed per character class (index = class, 0 = error/illegal).
SJISCharLenTable = (0, 1, 1, 2, 0, 0)
# State-machine description consumed by the coding state machine for Shift_JIS.
SJISSMModel = {'classTable': SJIS_cls,
               'classFactor': 6,
               'stateTable': SJIS_st,
               'charLenTable': SJISCharLenTable,
               'name': 'Shift_JIS'}
# UCS2-BE
UCS2BE_cls = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2BE_st = (
5, 7, 7,eError, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 6, 6, 6, 6,eError,eError,#10-17
6, 6, 6, 6, 6,eItsMe, 6, 6,#18-1f
6, 6, 6, 6, 5, 7, 7,eError,#20-27
5, 8, 6, 6,eError, 6, 6, 6,#28-2f
6, 6, 6, 6,eError,eError,eStart,eStart #30-37
)
# Bytes consumed per character class (index = class, 0 = error/illegal).
UCS2BECharLenTable = (2, 2, 2, 0, 2, 2)
# State-machine description consumed by the coding state machine for UTF-16BE.
UCS2BESMModel = {'classTable': UCS2BE_cls,
                 'classFactor': 6,
                 'stateTable': UCS2BE_st,
                 'charLenTable': UCS2BECharLenTable,
                 'name': 'UTF-16BE'}
# UCS2-LE
UCS2LE_cls = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2LE_st = (
6, 6, 7, 6, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 5, 5, 5,eError,eItsMe,eError,#10-17
5, 5, 5,eError, 5,eError, 6, 6,#18-1f
7, 6, 8, 8, 5, 5, 5,eError,#20-27
5, 5, 5,eError,eError,eError, 5, 5,#28-2f
5, 5, 5,eError, 5,eError,eStart,eStart #30-37
)
# Bytes consumed per character class (index = class, 0 = error/illegal).
UCS2LECharLenTable = (2, 2, 2, 2, 2, 2)
# State-machine description consumed by the coding state machine for UTF-16LE.
UCS2LESMModel = {'classTable': UCS2LE_cls,
                 'classFactor': 6,
                 'stateTable': UCS2LE_st,
                 'charLenTable': UCS2LECharLenTable,
                 'name': 'UTF-16LE'}
# UTF-8
UTF8_cls = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
2,2,2,2,3,3,3,3, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
5,5,5,5,5,5,5,5, # a0 - a7
5,5,5,5,5,5,5,5, # a8 - af
5,5,5,5,5,5,5,5, # b0 - b7
5,5,5,5,5,5,5,5, # b8 - bf
0,0,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
7,8,8,8,8,8,8,8, # e0 - e7
8,8,8,8,8,9,8,8, # e8 - ef
10,11,11,11,11,11,11,11, # f0 - f7
12,13,13,13,14,15,0,0 # f8 - ff
)
UTF8_st = (
eError,eStart,eError,eError,eError,eError, 12, 10,#00-07
9, 11, 8, 7, 6, 5, 4, 3,#08-0f
eError,eError,eError,eError,eError,eError,eError,eError,#10-17
eError,eError,eError,eError,eError,eError,eError,eError,#18-1f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#20-27
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#28-2f
eError,eError, 5, 5, 5, 5,eError,eError,#30-37
eError,eError,eError,eError,eError,eError,eError,eError,#38-3f
eError,eError,eError, 5, 5, 5,eError,eError,#40-47
eError,eError,eError,eError,eError,eError,eError,eError,#48-4f
eError,eError, 7, 7, 7, 7,eError,eError,#50-57
eError,eError,eError,eError,eError,eError,eError,eError,#58-5f
eError,eError,eError,eError, 7, 7,eError,eError,#60-67
eError,eError,eError,eError,eError,eError,eError,eError,#68-6f
eError,eError, 9, 9, 9, 9,eError,eError,#70-77
eError,eError,eError,eError,eError,eError,eError,eError,#78-7f
eError,eError,eError,eError,eError, 9,eError,eError,#80-87
eError,eError,eError,eError,eError,eError,eError,eError,#88-8f
eError,eError, 12, 12, 12, 12,eError,eError,#90-97
eError,eError,eError,eError,eError,eError,eError,eError,#98-9f
eError,eError,eError,eError,eError, 12,eError,eError,#a0-a7
eError,eError,eError,eError,eError,eError,eError,eError,#a8-af
eError,eError, 12, 12, 12,eError,eError,eError,#b0-b7
eError,eError,eError,eError,eError,eError,eError,eError,#b8-bf
eError,eError,eStart,eStart,eStart,eStart,eError,eError,#c0-c7
eError,eError,eError,eError,eError,eError,eError,eError #c8-cf
)
# Bytes consumed per character class (index = class, 0 = error/illegal).
UTF8CharLenTable = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)
# State-machine description consumed by the coding state machine for UTF-8.
UTF8SMModel = {'classTable': UTF8_cls,
               'classFactor': 16,
               'stateTable': UTF8_st,
               'charLenTable': UTF8CharLenTable,
               'name': 'UTF-8'}
| gpl-3.0 |
firebase/firebase-admin-python | setup.py | 1 | 2584 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup file for distribution artifacts."""
from __future__ import print_function
from os import path
import sys
from setuptools import setup
# Refuse to install on anything other than Python 3.6+ (rejects both
# Python 2 and any non-3 major version).
(major, minor) = (sys.version_info.major, sys.version_info.minor)
if major != 3 or minor < 6:
    print('firebase_admin requires python >= 3.6', file=sys.stderr)
    sys.exit(1)
# Read in the package metadata per recommendations from:
# https://packaging.python.org/guides/single-sourcing-package-version/
about_path = path.join(path.dirname(path.abspath(__file__)), 'firebase_admin', '__about__.py')
about = {}
with open(about_path) as fp:
    # exec on our own, trusted metadata file; populates `about` with
    # __title__, __version__, __url__, __author__, __license__.
    exec(fp.read(), about) # pylint: disable=exec-used
long_description = ('The Firebase Admin Python SDK enables server-side (backend) Python developers '
                    'to integrate Firebase into their services and applications.')
# Runtime dependencies. The PEP 508 environment markers skip packages that
# do not work under PyPy.
install_requires = [
    'cachecontrol>=0.12.6',
    'google-api-core[grpc] >= 1.22.1, < 2.0.0dev; platform.python_implementation != "PyPy"',
    'google-api-python-client >= 1.7.8',
    'google-cloud-firestore>=2.1.0; platform.python_implementation != "PyPy"',
    'google-cloud-storage>=1.37.1',
]
# All identifying metadata comes from firebase_admin/__about__.py (see above),
# so the version is single-sourced.
setup(
    name=about['__title__'],
    version=about['__version__'],
    description='Firebase Admin Python SDK',
    long_description=long_description,
    url=about['__url__'],
    author=about['__author__'],
    license=about['__license__'],
    keywords='firebase cloud development',
    install_requires=install_requires,
    packages=['firebase_admin'],
    python_requires='>=3.6',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'License :: OSI Approved :: Apache Software License',
    ],
)
| apache-2.0 |
pasiegel/SickGear | lib/rtorrent/compat.py | 180 | 1258 | # Copyright (c) 2013 Chris Lucas, <chris@chrisjlucas.com>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
def is_py3():
    """Return True when running under Python 3."""
    return sys.version_info.major == 3
# Expose a version-agnostic ``xmlrpclib`` name: the module was renamed to
# ``xmlrpc.client`` in Python 3.
if is_py3():
    import xmlrpc.client as xmlrpclib
else:
    import xmlrpclib
| gpl-3.0 |
d-mittal/pystruct | examples/plot_exact_learning.py | 5 | 1333 | """
=========================================
Efficient exact learning of 1-slack SSVMs
=========================================
This example illustrates the role of approximate inference and caching
in exact learning of a 1-slack SSVM.
Please see plot_objective_curve.py for an interpretation of the curves.
We start learning by using an undergenerating inference method,
QPBO-based alpha expansion. One the algorithm can not find a violated
constraint any more, we switch to a less efficient but exact inference
procedure, branch-and-bound based on AD3.
The switch to AD3 can be seen in the graph after the (approximate)
primal objective and the cutting plane lower bound touch. (zoom in)
After the switch to exact inference, the red circles show the true
primal objective.
"""
from pystruct.models import DirectionalGridCRF
import pystruct.learners as ssvm
from pystruct.datasets import generate_blocks_multinomial
from pystruct.plot_learning import plot_learning
# Noisy multinomial grid data; fixed seed for reproducibility.
X, Y = generate_blocks_multinomial(noise=2, n_samples=20, seed=1)
crf = DirectionalGridCRF(inference_method="qpbo", neighborhood=4)
# Learn with undergenerating QPBO inference first; once no violated
# constraint can be found, switch_to hands inference over to exact
# branch-and-bound AD3.
clf = ssvm.OneSlackSSVM(model=crf, n_jobs=-1, inference_cache=100,
                        show_loss_every=10,
                        switch_to=("ad3", {'branch_and_bound': True}))
clf.fit(X, Y)
plot_learning(clf, time=False)
| bsd-2-clause |
hoytak/lazyrunner | lazyrunner/manager.py | 1 | 4915 | """
A class that manages a batch of sessions.
"""
import time, logging, sys
from os import makedirs, remove
from os.path import join, expanduser, exists, split, abspath, normpath
from treedict import TreeDict
from pnstructures import PNodeCommon, PNode
import parameters as parameter_module
import pmodule
import loading
import configuration
################################################################################
def __initLoggingSystem(custom_opttree):
    """
    Configure the root logger from the option tree (filled in with
    defaults from the custom options).
    """
    # get one filled in with the defaults
    opttree = configuration.setupOptionTree(custom_opttree, None, False)
    # Set up the logging stuff
    logging.basicConfig(
        format = opttree.logging.format,
        datefmt = opttree.logging.datefmt,
        level = logging.DEBUG if opttree.verbose else logging.INFO
        )
    if hasattr(logging, "captureWarnings"):
        # captureWarnings() only exists on Python >= 2.7 / 3.2.
        logging.captureWarnings(True)
def clean(custom_opttree = None, **kwargs):
    """
    Remove all cached/compiled artifacts of the project.

    Options may be given either as a TreeDict via ``custom_opttree`` or as
    keyword arguments.

    NOTE(review): when ``custom_opttree`` is supplied, it is mutated in
    place with ``kwargs`` -- confirm callers do not rely on it being
    unchanged.
    """
    if custom_opttree is None:
        custom_opttree = TreeDict()
    custom_opttree.update(TreeDict.fromdict(kwargs))
    __initLoggingSystem(custom_opttree)
    log = logging.getLogger("Configuration")
    opttree = configuration.setupOptionTree(custom_opttree, log, False)
    loading.cleanAll(opttree)
################################################################################
# Module-level singleton holding the active run manager; see initialize().
__manager = None

def initialize(custom_opttree = None, **kwargs):
    """Create the global _RunManager singleton.

    Raises RuntimeError if called twice without an intervening reset().
    Extra keyword arguments are merged into ``custom_opttree``.
    """
    global __manager
    if __manager is not None:
        raise RuntimeError("Initialize has already been called! Call reset first to reinitialize.")
    # Merge keyword options into the (possibly freshly created) option tree.
    if custom_opttree is None:
        custom_opttree = TreeDict()
    custom_opttree.update(TreeDict.fromdict(kwargs))
    __initLoggingSystem(custom_opttree)
    __manager = _RunManager(custom_opttree)
def manager():
    """Return the global _RunManager created by initialize().

    Raises RuntimeError when initialize() has not been called yet.
    """
    global __manager
    active = __manager
    if active is None:
        raise RuntimeError("Initialize must be called before manager is available.")
    return active
def reset():
    """Discard the current manager so initialize() may be called again."""
    global __manager
    __manager = None
class _RunManager(object):
    """
    A class providing an API for interfacing directly with a
    lazyrunner project.
    """

    def __init__(self, custom_opttree):
        """
        Initializes a lazyrunner environment. The environment options
        are identical to those on the command line.

        project_directory = '.',
        debug_mode = False,
        verbose = False,
        no_cache = False,
        force = False,
        cache_read_only = False,
        cache_directory = None,
        no_compile = False,
        config_module = 'conf'
        """
        self.log = logging.getLogger("Manager")

        ################################################################################
        # Init all the module lookup stuff

        # First pass (final_flag False) so module loading can be set up...
        opttree = configuration.setupOptionTree(custom_opttree, self.log, False)
        loading.resetAndInitModuleLoading(opttree)

        # ...then resolve the final option tree with modules available.
        opttree = configuration.setupOptionTree(custom_opttree, self.log, True)
        self.opttree = opttree

        pmodule.resetAndInitialize()
        parameter_module.resetAndInitialize()
        loading.resetAndInitModules(self.opttree)
        parameter_module.finalize()
        pmodule.finalize()

    ########################################################################################
    # General Control Functions

    def getResults(self, modules = None, presets = [], parameters = None):
        """Run the given module name(s) (or the current run queue when
        ``modules`` is None) and return a dict mapping name -> result."""
        common = PNodeCommon(self.opttree)
        ptree = parameter_module.getParameterTree(presets, parameters = parameters)

        if modules is None:
            modules = pmodule.getCurrentRunQueue()

        # Allow a single module name to be passed as a bare string.
        if type(modules) is str:
            modules = [modules]

        results = common.getResults(ptree, modules)

        return dict(zip(modules, results))

    def getPresetHelp(self, width = None):
        """Return a newline-joined help listing of the available presets."""
        # BUG FIX: was `parameters_module`, which is undefined; the module is
        # imported at the top of this file as `parameter_module`.
        return '\n'.join(parameter_module.getPresetHelpList(width = width))

    def updatePresetCompletionCache(self, preset_name_cache_file):
        """Write the preset-name cache file used for shell completion."""
        parameter_module.presets.updatePresetCompletionCache(preset_name_cache_file)
def run(modules, presets = [], project_directory = '.', options = None):
    """
    Convenience function for running things directly. `options`, if given,
    should be a TreeDict of configuration options.
    """
    if options is None:
        options = TreeDict()
    else:
        if type(options) is not TreeDict:
            raise TypeError("options parameter needs to be a TreeDict.")

    options.project_directory = project_directory

    # BUG FIX: was `RunManager(options)`; no such name exists -- the class
    # defined in this module is `_RunManager`.
    m = _RunManager(options)
    return m.getResults(modules, presets)
| bsd-3-clause |
otherness-space/myProject003 | my_project_003/lib/python2.7/site-packages/pip/_vendor/requests/status_codes.py | 926 | 3200 | # -*- coding: utf-8 -*-
from .structures import LookupDict
# Maps each HTTP status code to a tuple of attribute aliases; every alias
# (and, for identifier-safe names, its upper-case twin) is attached to the
# `codes` LookupDict below.  NOTE: 'precondition' appears under both 412 and
# 428 -- the later assignment wins, so codes.precondition resolves to 428.
_codes = {

    # Informational.
    100: ('continue',),
    101: ('switching_protocols',),
    102: ('processing',),
    103: ('checkpoint',),
    122: ('uri_too_long', 'request_uri_too_long'),
    200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
    201: ('created',),
    202: ('accepted',),
    203: ('non_authoritative_info', 'non_authoritative_information'),
    204: ('no_content',),
    205: ('reset_content', 'reset'),
    206: ('partial_content', 'partial'),
    207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
    208: ('already_reported',),
    226: ('im_used',),

    # Redirection.
    300: ('multiple_choices',),
    301: ('moved_permanently', 'moved', '\\o-'),
    302: ('found',),
    303: ('see_other', 'other'),
    304: ('not_modified',),
    305: ('use_proxy',),
    306: ('switch_proxy',),
    307: ('temporary_redirect', 'temporary_moved', 'temporary'),
    308: ('permanent_redirect',
          'resume_incomplete', 'resume',),  # These 2 to be removed in 3.0

    # Client Error.
    400: ('bad_request', 'bad'),
    401: ('unauthorized',),
    402: ('payment_required', 'payment'),
    403: ('forbidden',),
    404: ('not_found', '-o-'),
    405: ('method_not_allowed', 'not_allowed'),
    406: ('not_acceptable',),
    407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
    408: ('request_timeout', 'timeout'),
    409: ('conflict',),
    410: ('gone',),
    411: ('length_required',),
    412: ('precondition_failed', 'precondition'),
    413: ('request_entity_too_large',),
    414: ('request_uri_too_large',),
    415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
    416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
    417: ('expectation_failed',),
    418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
    422: ('unprocessable_entity', 'unprocessable'),
    423: ('locked',),
    424: ('failed_dependency', 'dependency'),
    425: ('unordered_collection', 'unordered'),
    426: ('upgrade_required', 'upgrade'),
    428: ('precondition_required', 'precondition'),
    429: ('too_many_requests', 'too_many'),
    431: ('header_fields_too_large', 'fields_too_large'),
    444: ('no_response', 'none'),
    449: ('retry_with', 'retry'),
    450: ('blocked_by_windows_parental_controls', 'parental_controls'),
    451: ('unavailable_for_legal_reasons', 'legal_reasons'),
    499: ('client_closed_request',),

    # Server Error.
    500: ('internal_server_error', 'server_error', '/o\\', '✗'),
    501: ('not_implemented',),
    502: ('bad_gateway',),
    503: ('service_unavailable', 'unavailable'),
    504: ('gateway_timeout',),
    505: ('http_version_not_supported', 'http_version'),
    506: ('variant_also_negotiates',),
    507: ('insufficient_storage',),
    509: ('bandwidth_limit_exceeded', 'bandwidth'),
    510: ('not_extended',),
}
# Build the public lookup object: every alias in _codes becomes an attribute
# on `codes` mapping back to its numeric status code.
codes = LookupDict(name='status_codes')

for status, aliases in _codes.items():
    for alias in aliases:
        setattr(codes, alias, status)
        # Aliases that begin with a backslash ('\\o/', '\\o-') get no
        # upper-case variant.
        if not alias.startswith('\\'):
            setattr(codes, alias.upper(), status)
| mit |
popazerty/SDG-e2 | lib/python/Screens/Satconfig.py | 5 | 36689 | from enigma import eDVBDB
from Screen import Screen
from Components.SystemInfo import SystemInfo
from Components.ActionMap import ActionMap
from Components.ConfigList import ConfigListScreen
from Components.MenuList import MenuList
from Components.NimManager import nimmanager
from Components.Button import Button
from Components.Label import Label
from Components.SelectionList import SelectionList, SelectionEntryComponent
from Components.config import getConfigListEntry, config, ConfigNothing, ConfigSelection, updateConfigElement, ConfigSatlist, ConfigYesNo
from Components.Sources.List import List
from Screens.MessageBox import MessageBox
from Screens.ChoiceBox import ChoiceBox
from Screens.ServiceStopScreen import ServiceStopScreen
from Screens.AutoDiseqc import AutoDiseqc
from Tools.BoundFunction import boundFunction
from time import mktime, localtime
from datetime import datetime
class NimSetup(Screen, ConfigListScreen, ServiceStopScreen):
def createSimpleSetup(self, list, mode):
nim = self.nimConfig
if mode == "single":
self.singleSatEntry = getConfigListEntry(_("Satellite"), nim.diseqcA)
list.append(self.singleSatEntry)
if nim.diseqcA.value in ("360", "560"):
list.append(getConfigListEntry(_("Use circular LNB"), nim.simpleDiSEqCSetCircularLNB))
list.append(getConfigListEntry(_("Send DiSEqC"), nim.simpleSingleSendDiSEqC))
else:
list.append(getConfigListEntry(_("Port A"), nim.diseqcA))
if mode in ("toneburst_a_b", "diseqc_a_b", "diseqc_a_b_c_d"):
list.append(getConfigListEntry(_("Port B"), nim.diseqcB))
if mode == "diseqc_a_b_c_d":
list.append(getConfigListEntry(_("Port C"), nim.diseqcC))
list.append(getConfigListEntry(_("Port D"), nim.diseqcD))
if mode != "toneburst_a_b":
list.append(getConfigListEntry(_("Set voltage and 22KHz"), nim.simpleDiSEqCSetVoltageTone))
list.append(getConfigListEntry(_("Send DiSEqC only on satellite change"), nim.simpleDiSEqCOnlyOnSatChange))
def createPositionerSetup(self, list):
nim = self.nimConfig
if nim.diseqcMode.value == "positioner_select":
self.selectSatsEntry = getConfigListEntry(_("Press OK to select satellites"), self.nimConfig.pressOKtoList)
list.append(self.selectSatsEntry)
list.append(getConfigListEntry(_("Longitude"), nim.longitude))
list.append(getConfigListEntry(" ", nim.longitudeOrientation))
list.append(getConfigListEntry(_("Latitude"), nim.latitude))
list.append(getConfigListEntry(" ", nim.latitudeOrientation))
if SystemInfo["CanMeasureFrontendInputPower"]:
self.advancedPowerMeasurement = getConfigListEntry(_("Use power measurement"), nim.powerMeasurement)
list.append(self.advancedPowerMeasurement)
if nim.powerMeasurement.value:
list.append(getConfigListEntry(_("Power threshold in mA"), nim.powerThreshold))
self.turningSpeed = getConfigListEntry(_("Rotor turning speed"), nim.turningSpeed)
list.append(self.turningSpeed)
if nim.turningSpeed.value == "fast epoch":
self.turnFastEpochBegin = getConfigListEntry(_("Begin time"), nim.fastTurningBegin)
self.turnFastEpochEnd = getConfigListEntry(_("End time"), nim.fastTurningEnd)
list.append(self.turnFastEpochBegin)
list.append(self.turnFastEpochEnd)
else:
if nim.powerMeasurement.value:
nim.powerMeasurement.value = False
nim.powerMeasurement.save()
if not hasattr(self, 'additionalMotorOptions'):
self.additionalMotorOptions = ConfigYesNo(False)
self.showAdditionalMotorOptions = getConfigListEntry(_("Extra motor options"), self.additionalMotorOptions)
self.list.append(self.showAdditionalMotorOptions)
if self.additionalMotorOptions.value:
self.list.append(getConfigListEntry(" " + _("Horizontal turning speed") + " [" + chr(176) + "/sec]", nim.turningspeedH))
self.list.append(getConfigListEntry(" " + _("Vertical turning speed") + " [" + chr(176) + "/sec]", nim.turningspeedV))
self.list.append(getConfigListEntry(" " + _("Turning step size") + " [" + chr(176) + "]", nim.tuningstepsize))
self.list.append(getConfigListEntry(" " + _("Max memory positions"), nim.rotorPositions))
def createConfigMode(self):
if self.nim.isCompatible("DVB-S"):
choices = {"nothing": _("Not configured"),
"simple": _("Simple"),
"advanced": _("Advanced")}
if len(nimmanager.canEqualTo(self.slotid)) > 0:
choices["equal"] = _("Equal to")
if len(nimmanager.canDependOn(self.slotid)) > 0:
choices["satposdepends"] = _("Second cable of motorized LNB")
if len(nimmanager.canConnectTo(self.slotid)) > 0:
choices["loopthrough"] = _("Loop through to")
self.nimConfig.configMode.setChoices(choices, default = "simple")
def createSetup(self):
print "Creating setup"
self.list = [ ]
self.multiType = None
self.configMode = None
self.diseqcModeEntry = None
self.advancedSatsEntry = None
self.advancedLnbsEntry = None
self.advancedDiseqcMode = None
self.advancedUsalsEntry = None
self.advancedLof = None
self.advancedPowerMeasurement = None
self.turningSpeed = None
self.turnFastEpochBegin = None
self.turnFastEpochEnd = None
self.toneburst = None
self.committedDiseqcCommand = None
self.uncommittedDiseqcCommand = None
self.commandOrder = None
self.cableScanType = None
self.have_advanced = False
self.advancedUnicable = None
self.advancedType = None
self.advancedManufacturer = None
self.advancedSCR = None
self.advancedConnected = None
self.showAdditionalMotorOptions = None
self.selectSatsEntry = None
self.advancedSelectSatsEntry = None
self.singleSatEntry = None
if self.nim.isMultiType():
multiType = self.nimConfig.multiType
self.multiType = getConfigListEntry(_("Tuner type"), multiType)
self.list.append(self.multiType)
if self.nim.isCompatible("DVB-S"):
self.configMode = getConfigListEntry(_("Configuration mode"), self.nimConfig.configMode)
self.list.append(self.configMode)
if self.nimConfig.configMode.value == "simple": #simple setup
self.diseqcModeEntry = getConfigListEntry(pgettext("Satellite configuration mode", "Mode"), self.nimConfig.diseqcMode)
self.list.append(self.diseqcModeEntry)
if self.nimConfig.diseqcMode.value in ("single", "toneburst_a_b", "diseqc_a_b", "diseqc_a_b_c_d"):
self.createSimpleSetup(self.list, self.nimConfig.diseqcMode.value)
if self.nimConfig.diseqcMode.value in ("positioner", "positioner_select"):
self.createPositionerSetup(self.list)
elif self.nimConfig.configMode.value == "equal":
choices = []
nimlist = nimmanager.canEqualTo(self.nim.slot)
for id in nimlist:
choices.append((str(id), nimmanager.getNimDescription(id)))
self.nimConfig.connectedTo.setChoices(choices)
self.list.append(getConfigListEntry(_("Tuner"), self.nimConfig.connectedTo))
elif self.nimConfig.configMode.value == "satposdepends":
choices = []
nimlist = nimmanager.canDependOn(self.nim.slot)
for id in nimlist:
choices.append((str(id), nimmanager.getNimDescription(id)))
self.nimConfig.connectedTo.setChoices(choices)
self.list.append(getConfigListEntry(_("Tuner"), self.nimConfig.connectedTo))
elif self.nimConfig.configMode.value == "loopthrough":
choices = []
print "connectable to:", nimmanager.canConnectTo(self.slotid)
connectable = nimmanager.canConnectTo(self.slotid)
for id in connectable:
choices.append((str(id), nimmanager.getNimDescription(id)))
self.nimConfig.connectedTo.setChoices(choices)
self.list.append(getConfigListEntry(_("Connected to"), self.nimConfig.connectedTo))
elif self.nimConfig.configMode.value == "nothing":
pass
elif self.nimConfig.configMode.value == "advanced": # advanced
# SATs
self.advancedSatsEntry = getConfigListEntry(_("Satellite"), self.nimConfig.advanced.sats)
self.list.append(self.advancedSatsEntry)
current_config_sats = self.nimConfig.advanced.sats.value
if current_config_sats in ("3605", "3606"):
self.advancedSelectSatsEntry = getConfigListEntry(_("Press OK to select satellites"), self.nimConfig.pressOKtoList)
self.list.append(self.advancedSelectSatsEntry)
self.fillListWithAdvancedSatEntrys(self.nimConfig.advanced.sat[int(current_config_sats)])
else:
cur_orb_pos = self.nimConfig.advanced.sats.orbital_position
satlist = self.nimConfig.advanced.sat.keys()
if cur_orb_pos is not None:
if cur_orb_pos not in satlist:
cur_orb_pos = satlist[0]
self.fillListWithAdvancedSatEntrys(self.nimConfig.advanced.sat[cur_orb_pos])
self.have_advanced = True
if self.nim.description == "Alps BSBE2" and config.usage.setup_level.index >= 2: # expert
self.list.append(getConfigListEntry(_("Tone amplitude"), self.nimConfig.toneAmplitude))
elif self.nim.isCompatible("DVB-C"):
self.configMode = getConfigListEntry(_("Configuration mode"), self.nimConfig.configMode)
self.list.append(self.configMode)
if self.nimConfig.configMode.value == "enabled":
self.list.append(getConfigListEntry(_("Network ID"), self.nimConfig.cable.scan_networkid))
self.cableScanType=getConfigListEntry(_("Used service scan type"), self.nimConfig.cable.scan_type)
self.list.append(self.cableScanType)
if self.nimConfig.cable.scan_type.value == "provider":
self.list.append(getConfigListEntry(_("Provider to scan"), self.nimConfig.cable.scan_provider))
else:
if self.nimConfig.cable.scan_type.value == "bands":
# TRANSLATORS: option name, indicating which type of (DVB-C) band should be scanned. The name of the band is printed in '%s'. E.g.: 'Scan EU MID band'
self.list.append(getConfigListEntry(_("Scan %s band") % ("EU VHF I"), self.nimConfig.cable.scan_band_EU_VHF_I))
self.list.append(getConfigListEntry(_("Scan %s band") % ("EU MID"), self.nimConfig.cable.scan_band_EU_MID))
self.list.append(getConfigListEntry(_("Scan %s band") % ("EU VHF III"), self.nimConfig.cable.scan_band_EU_VHF_III))
self.list.append(getConfigListEntry(_("Scan %s band") % ("EU UHF IV"), self.nimConfig.cable.scan_band_EU_UHF_IV))
self.list.append(getConfigListEntry(_("Scan %s band") % ("EU UHF V"), self.nimConfig.cable.scan_band_EU_UHF_V))
self.list.append(getConfigListEntry(_("Scan %s band") % ("EU SUPER"), self.nimConfig.cable.scan_band_EU_SUPER))
self.list.append(getConfigListEntry(_("Scan %s band") % ("EU HYPER"), self.nimConfig.cable.scan_band_EU_HYPER))
self.list.append(getConfigListEntry(_("Scan %s band") % ("US LOW"), self.nimConfig.cable.scan_band_US_LOW))
self.list.append(getConfigListEntry(_("Scan %s band") % ("US MID"), self.nimConfig.cable.scan_band_US_MID))
self.list.append(getConfigListEntry(_("Scan %s band") % ("US HIGH"), self.nimConfig.cable.scan_band_US_HIGH))
self.list.append(getConfigListEntry(_("Scan %s band") % ("US SUPER"), self.nimConfig.cable.scan_band_US_SUPER))
self.list.append(getConfigListEntry(_("Scan %s band") % ("US HYPER"), self.nimConfig.cable.scan_band_US_HYPER))
elif self.nimConfig.cable.scan_type.value == "steps":
self.list.append(getConfigListEntry(_("Frequency scan step size(khz)"), self.nimConfig.cable.scan_frequency_steps))
# TRANSLATORS: option name, indicating which type of (DVB-C) modulation should be scanned. The modulation type is printed in '%s'. E.g.: 'Scan QAM16'
self.list.append(getConfigListEntry(_("Scan %s") % ("QAM16"), self.nimConfig.cable.scan_mod_qam16))
self.list.append(getConfigListEntry(_("Scan %s") % ("QAM32"), self.nimConfig.cable.scan_mod_qam32))
self.list.append(getConfigListEntry(_("Scan %s") % ("QAM64"), self.nimConfig.cable.scan_mod_qam64))
self.list.append(getConfigListEntry(_("Scan %s") % ("QAM128"), self.nimConfig.cable.scan_mod_qam128))
self.list.append(getConfigListEntry(_("Scan %s") % ("QAM256"), self.nimConfig.cable.scan_mod_qam256))
self.list.append(getConfigListEntry(_("Scan %s") % ("SR6900"), self.nimConfig.cable.scan_sr_6900))
self.list.append(getConfigListEntry(_("Scan %s") % ("SR6875"), self.nimConfig.cable.scan_sr_6875))
self.list.append(getConfigListEntry(_("Scan additional SR"), self.nimConfig.cable.scan_sr_ext1))
self.list.append(getConfigListEntry(_("Scan additional SR"), self.nimConfig.cable.scan_sr_ext2))
self.have_advanced = False
elif self.nim.isCompatible("DVB-T"):
self.configMode = getConfigListEntry(_("Configuration mode"), self.nimConfig.configMode)
self.list.append(self.configMode)
self.have_advanced = False
if self.nimConfig.configMode.value == "enabled":
self.list.append(getConfigListEntry(_("Terrestrial provider"), self.nimConfig.terrestrial))
self.list.append(getConfigListEntry(_("Enable 5V for active antenna"), self.nimConfig.terrestrial_5V))
else:
self.have_advanced = False
self["config"].list = self.list
self["config"].l.setList(self.list)
def newConfig(self):
self.setTextKeyBlue()
checkList = (self.configMode, self.diseqcModeEntry, self.advancedSatsEntry, \
self.advancedLnbsEntry, self.advancedDiseqcMode, self.advancedUsalsEntry, \
self.advancedLof, self.advancedPowerMeasurement, self.turningSpeed, \
self.advancedType, self.advancedSCR, self.advancedManufacturer, self.advancedUnicable, self.advancedConnected, \
self.toneburst, self.committedDiseqcCommand, self.uncommittedDiseqcCommand, self.singleSatEntry, \
self.commandOrder, self.showAdditionalMotorOptions, self.cableScanType, self.multiType)
if self["config"].getCurrent() == self.multiType:
from Components.NimManager import InitNimManager
InitNimManager(nimmanager)
self.nim = nimmanager.nim_slots[self.slotid]
self.nimConfig = self.nim.config
for x in checkList:
if self["config"].getCurrent() == x:
self.createSetup()
break
def run(self):
if self.nimConfig.configMode.value == "simple":
autodiseqc_ports = 0
if self.nimConfig.diseqcMode.value == "single":
if self.nimConfig.diseqcA.orbital_position == 3600:
autodiseqc_ports = 1
elif self.nimConfig.diseqcMode.value == "diseqc_a_b":
if self.nimConfig.diseqcA.orbital_position == 3600 or self.nimConfig.diseqcB.orbital_position == 3600:
autodiseqc_ports = 2
elif self.nimConfig.diseqcMode.value == "diseqc_a_b_c_d":
if self.nimConfig.diseqcA.orbital_position == 3600 or self.nimConfig.diseqcB.orbital_position == 3600 or self.nimConfig.diseqcC.orbital_position == 3600 or self.nimConfig.diseqcD.orbital_position == 3600:
autodiseqc_ports = 4
if autodiseqc_ports:
self.autoDiseqcRun(autodiseqc_ports)
return False
if self.have_advanced and self.nim.config_mode == "advanced":
self.fillAdvancedList()
for x in self.list:
if x in (self.turnFastEpochBegin, self.turnFastEpochEnd):
# workaround for storing only hour*3600+min*60 value in configfile
# not really needed.. just for cosmetics..
tm = localtime(x[1].value)
dt = datetime(1970, 1, 1, tm.tm_hour, tm.tm_min)
x[1].value = int(mktime(dt.timetuple()))
x[1].save()
nimmanager.sec.update()
self.saveAll()
return True
def autoDiseqcRun(self, ports):
self.session.openWithCallback(self.autoDiseqcCallback, AutoDiseqc, self.slotid, ports, self.nimConfig.simpleDiSEqCSetVoltageTone, self.nimConfig.simpleDiSEqCOnlyOnSatChange)
def autoDiseqcCallback(self, result):
from Screens.Wizard import Wizard
if Wizard.instance is not None:
Wizard.instance.back()
else:
self.createSetup()
def fillListWithAdvancedSatEntrys(self, Sat):
lnbnum = int(Sat.lnb.value)
currLnb = self.nimConfig.advanced.lnb[lnbnum]
if isinstance(currLnb, ConfigNothing):
currLnb = None
# LNBs
self.advancedLnbsEntry = getConfigListEntry(_("LNB"), Sat.lnb)
self.list.append(self.advancedLnbsEntry)
if currLnb:
self.list.append(getConfigListEntry(_("Priority"), currLnb.prio))
self.advancedLof = getConfigListEntry("LOF", currLnb.lof)
self.list.append(self.advancedLof)
if currLnb.lof.value == "user_defined":
self.list.append(getConfigListEntry("LOF/L", currLnb.lofl))
self.list.append(getConfigListEntry("LOF/H", currLnb.lofh))
self.list.append(getConfigListEntry(_("Threshold"), currLnb.threshold))
if currLnb.lof.value == "unicable":
self.advancedUnicable = getConfigListEntry("Unicable "+_("Configuration mode"), currLnb.unicable)
self.list.append(self.advancedUnicable)
if currLnb.unicable.value == "unicable_user":
self.advancedSCR = getConfigListEntry(_("Channel"), currLnb.satcruser)
self.list.append(self.advancedSCR)
self.list.append(getConfigListEntry(_("Frequency"), currLnb.satcrvcouser[currLnb.satcruser.index]))
self.list.append(getConfigListEntry("LOF/L", currLnb.lofl))
self.list.append(getConfigListEntry("LOF/H", currLnb.lofh))
self.list.append(getConfigListEntry(_("Threshold"), currLnb.threshold))
elif currLnb.unicable.value == "unicable_matrix":
manufacturer_name = currLnb.unicableMatrixManufacturer.value
manufacturer = currLnb.unicableMatrix[manufacturer_name]
product_name = manufacturer.product.value
self.advancedManufacturer = getConfigListEntry(_("Manufacturer"), currLnb.unicableMatrixManufacturer)
self.advancedType = getConfigListEntry(_("Type"), manufacturer.product)
self.advancedSCR = getConfigListEntry(_("Channel"), manufacturer.scr[product_name])
self.list.append(self.advancedManufacturer)
self.list.append(self.advancedType)
self.list.append(self.advancedSCR)
self.list.append(getConfigListEntry(_("Frequency"), manufacturer.vco[product_name][manufacturer.scr[product_name].index]))
elif currLnb.unicable.value == "unicable_lnb":
manufacturer_name = currLnb.unicableLnbManufacturer.value
manufacturer = currLnb.unicableLnb[manufacturer_name]
product_name = manufacturer.product.value
self.advancedManufacturer = getConfigListEntry(_("Manufacturer"), currLnb.unicableLnbManufacturer)
self.advancedType = getConfigListEntry(_("Type"), manufacturer.product)
self.advancedSCR = getConfigListEntry(_("Channel"), manufacturer.scr[product_name])
self.list.append(self.advancedManufacturer)
self.list.append(self.advancedType)
self.list.append(self.advancedSCR)
self.list.append(getConfigListEntry(_("Frequency"), manufacturer.vco[product_name][manufacturer.scr[product_name].index]))
choices = []
connectable = nimmanager.canConnectTo(self.slotid)
for id in connectable:
choices.append((str(id), nimmanager.getNimDescription(id)))
if len(choices):
self.advancedConnected = getConfigListEntry(_("connected"), self.nimConfig.advanced.unicableconnected)
self.list.append(self.advancedConnected)
if self.nimConfig.advanced.unicableconnected.value == True:
self.nimConfig.advanced.unicableconnectedTo.setChoices(choices)
self.list.append(getConfigListEntry(_("Connected to"),self.nimConfig.advanced.unicableconnectedTo))
else: #kein Unicable
self.list.append(getConfigListEntry(_("Voltage mode"), Sat.voltage))
self.list.append(getConfigListEntry(_("Increased voltage"), currLnb.increased_voltage))
self.list.append(getConfigListEntry(_("Tone mode"), Sat.tonemode))
if lnbnum < 65:
self.advancedDiseqcMode = getConfigListEntry(_("DiSEqC mode"), currLnb.diseqcMode)
self.list.append(self.advancedDiseqcMode)
if currLnb.diseqcMode.value != "none":
self.list.append(getConfigListEntry(_("Fast DiSEqC"), currLnb.fastDiseqc))
self.toneburst = getConfigListEntry(_("Toneburst"), currLnb.toneburst)
self.list.append(self.toneburst)
self.committedDiseqcCommand = getConfigListEntry(_("DiSEqC 1.0 command"), currLnb.commitedDiseqcCommand)
self.list.append(self.committedDiseqcCommand)
if currLnb.diseqcMode.value == "1_0":
if currLnb.toneburst.index and currLnb.commitedDiseqcCommand.index:
self.list.append(getConfigListEntry(_("Command order"), currLnb.commandOrder1_0))
else:
self.uncommittedDiseqcCommand = getConfigListEntry(_("DiSEqC 1.1 command"), currLnb.uncommittedDiseqcCommand)
self.list.append(self.uncommittedDiseqcCommand)
if currLnb.uncommittedDiseqcCommand.index:
if currLnb.commandOrder.value == "ct":
currLnb.commandOrder.value = "cut"
elif currLnb.commandOrder.value == "tc":
currLnb.commandOrder.value = "tcu"
else:
if currLnb.commandOrder.index & 1:
currLnb.commandOrder.value = "tc"
else:
currLnb.commandOrder.value = "ct"
self.commandOrder = getConfigListEntry(_("Command order"), currLnb.commandOrder)
if 1 < ((1 if currLnb.uncommittedDiseqcCommand.index else 0) + (1 if currLnb.commitedDiseqcCommand.index else 0) + (1 if currLnb.toneburst.index else 0)):
self.list.append(self.commandOrder)
if currLnb.uncommittedDiseqcCommand.index:
self.list.append(getConfigListEntry(_("DiSEqC 1.1 repeats"), currLnb.diseqcRepeats))
self.list.append(getConfigListEntry(_("Sequence repeat"), currLnb.sequenceRepeat))
if currLnb.diseqcMode.value == "1_2":
if SystemInfo["CanMeasureFrontendInputPower"]:
self.advancedPowerMeasurement = getConfigListEntry(_("Use power measurement"), currLnb.powerMeasurement)
self.list.append(self.advancedPowerMeasurement)
if currLnb.powerMeasurement.value:
self.list.append(getConfigListEntry(_("Power threshold in mA"), currLnb.powerThreshold))
self.turningSpeed = getConfigListEntry(_("Rotor turning speed"), currLnb.turningSpeed)
self.list.append(self.turningSpeed)
if currLnb.turningSpeed.value == "fast epoch":
self.turnFastEpochBegin = getConfigListEntry(_("Begin time"), currLnb.fastTurningBegin)
self.turnFastEpochEnd = getConfigListEntry(_("End time"), currLnb.fastTurningEnd)
self.list.append(self.turnFastEpochBegin)
self.list.append(self.turnFastEpochEnd)
else:
if currLnb.powerMeasurement.value:
currLnb.powerMeasurement.value = False
currLnb.powerMeasurement.save()
self.advancedUsalsEntry = getConfigListEntry(_("Use USALS for this sat"), Sat.usals)
if lnbnum < 65:
self.list.append(self.advancedUsalsEntry)
if Sat.usals.value:
self.list.append(getConfigListEntry(_("Longitude"), currLnb.longitude))
self.list.append(getConfigListEntry(" ", currLnb.longitudeOrientation))
self.list.append(getConfigListEntry(_("Latitude"), currLnb.latitude))
self.list.append(getConfigListEntry(" ", currLnb.latitudeOrientation))
else:
self.list.append(getConfigListEntry(_("Stored position"), Sat.rotorposition))
if not hasattr(self, 'additionalMotorOptions'):
self.additionalMotorOptions = ConfigYesNo(False)
self.showAdditionalMotorOptions = getConfigListEntry(_("Extra motor options"), self.additionalMotorOptions)
self.list.append(self.showAdditionalMotorOptions)
if self.additionalMotorOptions.value:
self.list.append(getConfigListEntry(" " + _("Horizontal turning speed") + " [" + chr(176) + "/sec]", currLnb.turningspeedH))
self.list.append(getConfigListEntry(" " + _("Vertical turning speed") + " [" + chr(176) + "/sec]", currLnb.turningspeedV))
self.list.append(getConfigListEntry(" " + _("Turning step size") + " [" + chr(176) + "]", currLnb.tuningstepsize))
self.list.append(getConfigListEntry(" " + _("Max memory positions"), currLnb.rotorPositions))
def fillAdvancedList(self):
self.list = [ ]
self.configMode = getConfigListEntry(_("Configuration mode"), self.nimConfig.configMode)
self.list.append(self.configMode)
self.advancedSatsEntry = getConfigListEntry(_("Satellite"), self.nimConfig.advanced.sats)
self.list.append(self.advancedSatsEntry)
for x in self.nimConfig.advanced.sat.keys():
Sat = self.nimConfig.advanced.sat[x]
self.fillListWithAdvancedSatEntrys(Sat)
self["config"].list = self.list
def keyOk(self):
if self["config"].getCurrent() == self.advancedSelectSatsEntry:
conf = self.nimConfig.advanced.sat[int(self.nimConfig.advanced.sats.value)].userSatellitesList
self.session.openWithCallback(boundFunction(self.updateConfUserSatellitesList, conf), SelectSatsEntryScreen, userSatlist=conf.value)
elif self["config"].getCurrent() == self.selectSatsEntry:
conf = self.nimConfig.userSatellitesList
self.session.openWithCallback(boundFunction(self.updateConfUserSatellitesList, conf), SelectSatsEntryScreen, userSatlist=conf.value)
else:
self.keySave()
def updateConfUserSatellitesList(self, conf, val=None):
if val is not None:
conf.value = val
conf.save()
def keySave(self):
old_configured_sats = nimmanager.getConfiguredSats()
if not self.run():
return
new_configured_sats = nimmanager.getConfiguredSats()
self.unconfed_sats = old_configured_sats - new_configured_sats
self.satpos_to_remove = None
self.deleteConfirmed((None, "no"))
def deleteConfirmed(self, confirmed):
if confirmed is None:
confirmed = (None, "no")
if confirmed[1] == "yes" or confirmed[1] == "yestoall":
eDVBDB.getInstance().removeServices(-1, -1, -1, self.satpos_to_remove)
if self.satpos_to_remove is not None:
self.unconfed_sats.remove(self.satpos_to_remove)
self.satpos_to_remove = None
for orbpos in self.unconfed_sats:
self.satpos_to_remove = orbpos
orbpos = self.satpos_to_remove
try:
# why we need this cast?
sat_name = str(nimmanager.getSatDescription(orbpos))
except:
if orbpos > 1800: # west
orbpos = 3600 - orbpos
h = _("W")
else:
h = _("E")
sat_name = ("%d.%d" + h) % (orbpos / 10, orbpos % 10)
if confirmed[1] == "yes" or confirmed[1] == "no":
# TRANSLATORS: The satellite with name '%s' is no longer used after a configuration change. The user is asked whether or not the satellite should be deleted.
self.session.openWithCallback(self.deleteConfirmed, ChoiceBox, _("%s is no longer used. Should it be deleted?") % sat_name, [(_("Yes"), "yes"), (_("No"), "no"), (_("Yes to all"), "yestoall"), (_("No to all"), "notoall")], None, 1)
if confirmed[1] == "yestoall" or confirmed[1] == "notoall":
self.deleteConfirmed(confirmed)
break
else:
self.restoreService(_("Zap back to service before tuner setup?"))
def __init__(self, session, slotid):
Screen.__init__(self, session)
self.list = [ ]
ServiceStopScreen.__init__(self)
self.stopService()
ConfigListScreen.__init__(self, self.list)
self["key_red"] = Label(_("Cancel"))
self["key_green"] = Label(_("Save"))
self["key_yellow"] = Label(_("Configuration mode"))
self["key_blue"] = Label()
self["actions"] = ActionMap(["SetupActions", "SatlistShortcutAction"],
{
"ok": self.keyOk,
"save": self.keySave,
"cancel": self.keyCancel,
"changetype": self.changeConfigurationMode,
"nothingconnected": self.nothingConnectedShortcut
}, -2)
self.slotid = slotid
self.nim = nimmanager.nim_slots[slotid]
self.nimConfig = self.nim.config
self.createConfigMode()
self.createSetup()
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(_("Reception Settings"))
def keyLeft(self):
ConfigListScreen.keyLeft(self)
if self["config"].getCurrent() in (self.advancedSelectSatsEntry, self.selectSatsEntry):
self.keyOk()
else:
self.newConfig()
def setTextKeyBlue(self):
self["key_blue"].setText("")
if self["config"].isChanged():
self["key_blue"].setText(_("Set default"))
def keyRight(self):
ConfigListScreen.keyRight(self)
if self["config"].getCurrent() in (self.advancedSelectSatsEntry, self.selectSatsEntry):
self.keyOk()
else:
self.newConfig()
def handleKeyFileCallback(self, answer):
ConfigListScreen.handleKeyFileCallback(self, answer)
self.newConfig()
def keyCancel(self):
if self["config"].isChanged():
self.session.openWithCallback(self.cancelConfirm, MessageBox, _("Really close without saving settings?"))
else:
self.restoreService(_("Zap back to service before tuner setup?"))
	def saveAll(self):
		# Persist every config entry of this screen.  For DVB-S tuners the
		# connectedTo choice list must be restored to the full tuner list
		# first, otherwise the default value would be stored incorrectly.
		if self.nim.isCompatible("DVB-S"):
			# reset connectedTo to all choices to properly store the default value
			choices = []
			nimlist = nimmanager.getNimListOfType("DVB-S", self.slotid)
			for id in nimlist:
				choices.append((str(id), nimmanager.getNimDescription(id)))
			self.nimConfig.connectedTo.setChoices(choices)
			# sanity check for empty sat list
			if self.nimConfig.configMode.value != "satposdepends" and len(nimmanager.getSatListForNim(self.slotid)) < 1:
				self.nimConfig.configMode.value = "nothing"
		for x in self["config"].list:
			x[1].save()
	def cancelConfirm(self, result):
		# MessageBox callback: on "yes", revert all entries, then save once
		# to write back the restored connectedTo choices, and leave.
		if not result:
			return
		for x in self["config"].list:
			x[1].cancel()
		# we need to call saveAll to reset the connectedTo choices
		self.saveAll()
		self.restoreService(_("Zap back to service before tuner setup?"))
	def changeConfigurationMode(self):
		# Yellow key: cycle the tuner's configuration mode and rebuild the
		# config list to match the newly selected mode.
		if self.configMode:
			self.nimConfig.configMode.selectNext()
			self["config"].invalidate(self.configMode)
			self.setTextKeyBlue()
			self.createSetup()
	def nothingConnectedShortcut(self):
		# Shortcut key: throw away all pending changes (revert every entry)
		# and rebuild the list.  Only acts when something was changed.
		if self["config"].isChanged():
			for x in self["config"].list:
				x[1].cancel()
			self.setTextKeyBlue()
			self.createSetup()
class NimSelection(Screen):
	# Overview screen listing all tuner (NIM) slots with a short textual
	# summary of each slot's current configuration.  OK opens the per-tuner
	# setup screen (self.resultclass, NimSetup by default).
	def __init__(self, session):
		Screen.__init__(self, session)
		self.list = [None] * nimmanager.getSlotCount()
		self["nimlist"] = List(self.list)
		self.updateList()
		self.setResultClass()
		self["actions"] = ActionMap(["OkCancelActions"],
		{
			"ok": self.okbuttonClick ,
			"cancel": self.close
		}, -2)
		self.setTitle(_("Choose Tuner"))
	def setResultClass(self):
		# Hook for subclasses to open a different setup screen on OK.
		self.resultclass = NimSetup
	def okbuttonClick(self):
		# List rows are (slotid, description, text, nim); index 3 is the slot.
		nim = self["nimlist"].getCurrent()
		nim = nim and nim[3]
		if nim is not None and not nim.empty and nim.isSupported():
			self.session.openWithCallback(self.updateList, self.resultclass, nim.slot)
	def showNim(self, nim):
		# Filter hook for subclasses; the base screen shows every slot.
		return True
	def updateList(self):
		# Rebuild the textual summary for every visible slot from its
		# current configuration values.
		self.list = [ ]
		for x in nimmanager.nim_slots:
			slotid = x.slot
			nimConfig = nimmanager.getNimConfig(x.slot)
			text = nimConfig.configMode.value
			if self.showNim(x):
				if x.isCompatible("DVB-S"):
					if nimConfig.configMode.value in ("loopthrough", "equal", "satposdepends"):
						text = { "loopthrough": _("Loop through to"),
							"equal": _("Equal to"),
							"satposdepends": _("Second cable of motorized LNB") } [nimConfig.configMode.value]
						text += " " + _("Tuner") + " " + ["A", "B", "C", "D"][int(nimConfig.connectedTo.value)]
					elif nimConfig.configMode.value == "nothing":
						text = _("not configured")
					elif nimConfig.configMode.value == "simple":
						if nimConfig.diseqcMode.value in ("single", "toneburst_a_b", "diseqc_a_b", "diseqc_a_b_c_d"):
							text = {"single": _("Single"), "toneburst_a_b": _("Toneburst A/B"), "diseqc_a_b": _("DiSEqC A/B"), "diseqc_a_b_c_d": _("DiSEqC A/B/C/D")}[nimConfig.diseqcMode.value] + "\n"
							text += _("Sats") + ": "
							satnames = []
							# orbital_position >= 3600 marks "no satellite selected".
							if nimConfig.diseqcA.orbital_position < 3600:
								satnames.append(nimmanager.getSatName(int(nimConfig.diseqcA.value)))
							if nimConfig.diseqcMode.value in ("toneburst_a_b", "diseqc_a_b", "diseqc_a_b_c_d"):
								if nimConfig.diseqcB.orbital_position < 3600:
									satnames.append(nimmanager.getSatName(int(nimConfig.diseqcB.value)))
							if nimConfig.diseqcMode.value == "diseqc_a_b_c_d":
								if nimConfig.diseqcC.orbital_position < 3600:
									satnames.append(nimmanager.getSatName(int(nimConfig.diseqcC.value)))
								if nimConfig.diseqcD.orbital_position < 3600:
									satnames.append(nimmanager.getSatName(int(nimConfig.diseqcD.value)))
							if len(satnames) <= 2:
								text += ", ".join(satnames)
							elif len(satnames) > 2:
								# we need a newline here, since multi content lists don't support automatic line wrapping
								text += ", ".join(satnames[:2]) + ",\n"
								text += " " + ", ".join(satnames[2:])
						elif nimConfig.diseqcMode.value in ("positioner", "positioner_select"):
							text = {"positioner": _("Positioner"), "positioner_select": _("Positioner (selecting satellites)")}[nimConfig.diseqcMode.value]
							text += ":"
							if nimConfig.positionerMode.value == "usals":
								text += "USALS"
							elif nimConfig.positionerMode.value == "manual":
								text += _("Manual")
						else:
							text = _("Simple")
					elif nimConfig.configMode.value == "advanced":
						text = _("Advanced")
				elif x.isCompatible("DVB-T") or x.isCompatible("DVB-C"):
					if nimConfig.configMode.value == "nothing":
						text = _("Nothing connected")
					elif nimConfig.configMode.value == "enabled":
						text = _("Enabled")
				if x.isMultiType():
					text = _("Switchable tuner types:") + "(" + ','.join(x.getMultiTypeList().values()) + ")" + "\n" + text
				if not x.isSupported():
					text = _("Tuner is not supported")
				self.list.append((slotid, x.friendly_full_description, text, x))
		self["nimlist"].setList(self.list)
		self["nimlist"].updateList(self.list)
class SelectSatsEntryScreen(Screen):
	# Multi-selection list of all satellites known to nimmanager.
	# Closes with str(list-of-orbital-positions) on save, or None on cancel.
	skin = """
	<screen name="SelectSatsEntryScreen" position="center,center" size="560,410" title="Select Sats Entry" >
	<ePixmap name="red" position="0,0" zPosition="2" size="140,40" pixmap="skin_default/buttons/red.png" transparent="1" alphatest="on" />
	<ePixmap name="green" position="140,0" zPosition="2" size="140,40" pixmap="skin_default/buttons/green.png" transparent="1" alphatest="on" />
	<ePixmap name="yellow" position="280,0" zPosition="2" size="140,40" pixmap="skin_default/buttons/yellow.png" transparent="1" alphatest="on" />
	<ePixmap name="blue" position="420,0" zPosition="2" size="140,40" pixmap="skin_default/buttons/blue.png" transparent="1" alphatest="on" />
	<widget name="key_red" position="0,0" size="140,40" valign="center" halign="center" zPosition="4" foregroundColor="white" font="Regular;17" transparent="1" shadowColor="background" shadowOffset="-2,-2" />
	<widget name="key_green" position="140,0" size="140,40" valign="center" halign="center" zPosition="4" foregroundColor="white" font="Regular;17" transparent="1" shadowColor="background" shadowOffset="-2,-2" />
	<widget name="key_yellow" position="280,0" size="140,40" valign="center" halign="center" zPosition="4" foregroundColor="white" font="Regular;17" transparent="1" shadowColor="background" shadowOffset="-2,-2" />
	<widget name="key_blue" position="420,0" size="140,40" valign="center" halign="center" zPosition="4" foregroundColor="white" font="Regular;17" transparent="1" shadowColor="background" shadowOffset="-2,-2" />
	<widget name="list" position="10,40" size="540,330" scrollbarMode="showNever" />
	<ePixmap pixmap="skin_default/div-h.png" position="0,375" zPosition="1" size="540,2" transparent="1" alphatest="on" />
	<widget name="hint" position="10,380" size="540,25" font="Regular;19" halign="center" transparent="1" />
	</screen>"""
	def __init__(self, session, userSatlist=""):
		# 'userSatlist' is the stringified list of pre-selected orbital
		# positions (as produced by save()).  The default used to be a
		# mutable list ([]); an empty string is behaviourally identical
		# (the isinstance(str) check fails selection either way) and
		# avoids the shared-mutable-default-argument pitfall.
		Screen.__init__(self, session)
		self["key_red"] = Button(_("Cancel"))
		self["key_green"] = Button(_("Save"))
		self["key_yellow"] = Button(_("Sort by"))
		self["key_blue"] = Button(_("Select all"))
		self["hint"] = Label(_("Press OK to toggle the selection"))
		SatList = []
		for sat in nimmanager.getSatList():
			# Pre-select a satellite when its orbital position occurs in
			# the passed-in string representation.
			selected = isinstance(userSatlist, str) and str(sat[0]) in userSatlist
			SatList.append((sat[0], sat[1], sat[2], selected))
		sat_list = [SelectionEntryComponent(x[1], x[0], x[2], x[3]) for x in SatList]
		self["list"] = SelectionList(sat_list, enableWrapAround=True)
		self["setupActions"] = ActionMap(["SetupActions", "ColorActions"],
		{
			"red": self.cancel,
			"green": self.save,
			"yellow": self.sortBy,
			"blue": self["list"].toggleAllSelection,
			"save": self.save,
			"cancel": self.cancel,
			"ok": self["list"].toggleSelection,
		}, -2)
		self.setTitle(_("Select satellites"))
	def save(self):
		# Close with the str() of the selected orbital positions, e.g. "[192, 130]".
		val = [x[0][1] for x in self["list"].list if x[0][3]]
		self.close(str(val))
	def cancel(self):
		# Close with None to signal "no change" to the caller.
		self.close(None)
	def sortBy(self):
		# Offer the available sort orders in a ChoiceBox; "Connected
		# satellites" is only offered when at least one entry is selected.
		lst = self["list"].list
		if len(lst) > 1:
			# BUGFIX: the label used to read "Standart list" (typo).
			menu = [(_("Reverse list"), "2"), (_("Standard list"), "1")]
			connected_sat = [x[0][1] for x in lst if x[0][3]]
			if len(connected_sat) > 0:
				menu.insert(0,(_("Connected satellites"), "3"))
			def sortAction(choice):
				# ChoiceBox callback: choice is None on cancel, else (label, id).
				if choice:
					reverse_flag = False
					sort_type = int(choice[1])
					if choice[1] == "2":
						# "Reverse": standard order (type 1) with the flag set.
						sort_type = reverse_flag = 1
					elif choice[1] == "3":
						reverse_flag = not reverse_flag
					self["list"].sort(sortType=sort_type, flag=reverse_flag)
					self["list"].moveToIndex(0)
			self.session.openWithCallback(sortAction, ChoiceBox, title= _("Select sort method:"), list=menu)
| gpl-2.0 |
allanlei/django-backup | example/settings.py | 1 | 5087 | # Django settings for example project.
# SECURITY NOTE(review): DEBUG must be False in any production deployment.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'djbackup',                      # Or path to database file if using sqlite3.
        'USER': 'djbackup',                      # Not used with sqlite3.
        'PASSWORD': 'djbackup',                  # Not used with sqlite3.
        'HOST': '',                      # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',                      # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
# NOTE(review): ADMIN_MEDIA_PREFIX was removed in Django 1.4; harmless on
# the Django 1.3-era layout this settings file targets.
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
#    'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# SECURITY NOTE(review): load the secret key from the environment in real
# deployments; a key committed to the repository is not secret.
SECRET_KEY = '_4uay=x9gf78pd9lt^4nz!1*us2xkcma3zu@=*zruh1ti0_u64'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'example.urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'backup',
    'example',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# SMTP settings used by the mail_admins handler above; intentionally blank
# in this example project.
DEFAULT_FROM_EMAIL = ''
EMAIL_HOST = ''
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_PORT = 587
EMAIL_SUBJECT_PREFIX = ''
EMAIL_USE_TLS = True
| bsd-3-clause |
Versatilus/dragonfly | dragonfly/actions/action_waitwindow.py | 2 | 4043 | #
# This file is part of Dragonfly.
# (c) Copyright 2007, 2008 by Christo Butcher
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly. If not, see
# <http://www.gnu.org/licenses/>.
#
"""
WaitWindow action
============================================================================
"""
import time
from dragonfly.actions.action_base import ActionBase, ActionError
from ..windows import Window
#---------------------------------------------------------------------------
class WaitWindow(ActionBase):
    """
        Wait for a specific window context action.

        Constructor arguments:
         - *title* (*str*) --
           part of the window title: not case sensitive
         - *executable* (*str*) --
           part of the file name of the executable; not case sensitive
         - *timeout* (*int* or *float*) --
           the maximum number of seconds to wait for the correct
           context, after which an :class:`ActionError` will
           be raised.

        When this action is executed, it waits until the correct window
        context is present.  This window context is specified by the
        desired window title of the foreground window and/or the
        executable name of the foreground application.  These are
        specified using the constructor arguments listed above.  The
        substring search used is *not* case sensitive.

        If the correct window context is not found within *timeout*
        seconds, then this action will raise an :class:`ActionError` to
        indicate the timeout.

    """

    # Seconds to sleep between foreground-window polls (see _execute).
    _poll_interval = 0.05

    def __init__(self, title=None, executable=None,
                 timeout=15):
        # Build the list of match-function names to apply per poll; only
        # the criteria actually supplied are checked.
        self._match_functions = []
        string = []
        if title is not None:
            self._title = title.lower()
            self._match_functions.append("_match_title")
            string.append("title=%r" % self._title)
        else:
            self._title = None
        if executable is not None:
            self._executable = executable.lower()
            self._match_functions.append("_match_executable")
            string.append("executable=%r" % self._executable)
        else:
            self._executable = None
        self._timeout = timeout
        ActionBase.__init__(self)
        self._str = ", ".join(string)

    def _execute(self, data=None):
        """Poll the foreground window until it matches or *timeout* expires."""
        self._log.debug("Waiting for window context: %s", self)
        start_time = time.time()
        while True:
            foreground = Window.get_foreground()
            # All supplied criteria must match the current foreground window.
            if all(getattr(self, name)(foreground)
                   for name in self._match_functions):
                return
            if time.time() - start_time > self._timeout:
                raise ActionError("Timeout while waiting for window context: %s" % self)
            # BUGFIX: previously this loop polled continuously with no
            # delay, busy-spinning a CPU core for up to *timeout* seconds.
            time.sleep(self._poll_interval)

    def _match_title(self, foreground):
        """Return True if *foreground*'s title contains the wanted substring
           (always True when no title criterion was given)."""
        if self._title is None:
            return True
        return self._title in foreground.title.lower()

    def _match_executable(self, foreground):
        """Return True if *foreground*'s executable path contains the wanted
           substring (always True when no executable criterion was given)."""
        if self._executable is None:
            return True
        return self._executable in foreground.executable.lower()
| lgpl-3.0 |
k8s-bot/kubernetes | cluster/juju/charms/trusty/kubernetes-master/hooks/hooks.py | 202 | 8337 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The main hook file is called by Juju.
"""
import contextlib
import os
import socket
import subprocess
import sys
from charmhelpers.core import hookenv, host
from kubernetes_installer import KubernetesInstaller
from path import path
hooks = hookenv.Hooks()
@contextlib.contextmanager
def check_sentinel(filepath):
    """
    Context manager guarding a block with a sentinel file.

    Yields whether the sentinel already existed when the block started.
    If the block raises, the sentinel is (re)created before the exception
    propagates; if the block completes normally, any sentinel is removed.
    """
    try:
        yield filepath.exists()
    except:
        # Leave a sentinel behind so the next run knows this block failed.
        filepath.touch()
        raise
    else:
        if filepath.exists():
            filepath.remove()
@hooks.hook('config-changed')
def config_changed():
    """
    On the execution of the juju event 'config-changed' this function
    determines the appropriate architecture and the configured version to
    create kubernetes binary files.
    """
    hookenv.log('Starting config-changed')
    charm_dir = path(hookenv.charm_dir())
    config = hookenv.config()
    # Get the version of kubernetes to install.
    version = config['version']
    if version == 'master':
        # The 'master' branch of kubernetes is used when master is configured.
        branch = 'master'
    elif version == 'local':
        # Check for kubernetes binaries in the local files/output directory.
        branch = None
    else:
        # Create a branch to a tag to get the release version.
        branch = 'tags/{0}'.format(version)
    # Get the package architecture, rather than arch from the kernel (uname -m).
    arch = subprocess.check_output(['dpkg', '--print-architecture']).strip()
    if not branch:
        # 'local' version: use pre-built binaries shipped with the charm.
        output_path = charm_dir / 'files/output'
        installer = KubernetesInstaller(arch, version, output_path)
    else:
        # Build the kubernetes binaries from source on the units.
        kubernetes_dir = path('/opt/kubernetes')
        # Construct the path to the binaries using the arch.
        output_path = kubernetes_dir / '_output/local/bin/linux' / arch
        installer = KubernetesInstaller(arch, version, output_path)
        if not kubernetes_dir.exists():
            print('The source directory {0} does not exist'.format(kubernetes_dir))
            print('Was the kubernetes code cloned during install?')
            exit(1)
        # Change to the kubernetes directory (git repository).
        with kubernetes_dir:
            # Create a command to get the current branch.
            git_branch = 'git branch | grep "\*" | cut -d" " -f2'
            current_branch = subprocess.check_output(git_branch, shell=True).strip()
            print('Current branch: ', current_branch)
            # Create the path to a file to indicate if the build was broken.
            broken_build = charm_dir / '.broken_build'
            # write out the .broken_build file while this block is executing.
            with check_sentinel(broken_build) as last_build_failed:
                print('Last build failed: ', last_build_failed)
                # Rebuild if current version is different or last build failed.
                if current_branch != version or last_build_failed:
                    installer.build(branch)
            # A missing output directory means the build did not produce
            # binaries; keep the sentinel so the next run rebuilds.
            if not output_path.isdir():
                broken_build.touch()
    # Create the symbolic links to the right directories.
    installer.install()
    relation_changed()
    hookenv.log('The config-changed hook completed successfully.')
@hooks.hook('etcd-relation-changed', 'minions-api-relation-changed')
def relation_changed():
    """Render service configs and restart/reload services as needed.

    Runs for both the etcd and minions-api relation events; bails out
    early until the etcd relation has provided its server list.
    """
    template_data = get_template_data()
    # Check required keys
    for k in ('etcd_servers',):
        if not template_data.get(k):
            print "Missing data for", k, template_data
            return
    print "Running with\n", template_data
    # Render and restart as needed
    for n in ('apiserver', 'controller-manager', 'scheduler'):
        if render_file(n, template_data) or not host.service_running(n):
            host.service_restart(n)
    # Render the file that makes the kubernetes binaries available to minions.
    if render_file(
            'distribution', template_data,
            'conf.tmpl', '/etc/nginx/sites-enabled/distribution') or \
            not host.service_running('nginx'):
        host.service_reload('nginx')
    # Render the default nginx template.
    if render_file(
            'nginx', template_data,
            'conf.tmpl', '/etc/nginx/sites-enabled/default') or \
            not host.service_running('nginx'):
        host.service_reload('nginx')
    # Send api endpoint to minions
    notify_minions()
@hooks.hook('network-relation-changed')
def network_relation_changed():
    """Acknowledge the network relation without sending any settings."""
    relation_id = hookenv.relation_id()
    hookenv.relation_set(relation_id, ignore_errors=True)
def notify_minions():
    """Publish the API endpoint (host, port, version) on every
    minions-api relation so minions can connect to this master."""
    print("Notify minions.")
    config = hookenv.config()
    for r in hookenv.relation_ids('minions-api'):
        hookenv.relation_set(
            r,
            hostname=hookenv.unit_private_ip(),
            port=8080,
            version=config['version'])
    print("Notified minions of version " + config['version'])
def get_template_data():
    """Collect the values used to render the service and nginx templates.

    Returns a dict with etcd server URLs, minion addresses, bind/API
    addresses and the web path/alias from which minions download the
    kubernetes binaries.
    """
    rels = hookenv.relations()
    config = hookenv.config()
    version = config['version']
    template_data = {}
    # etcd hosts are published as (hostname, port) pairs on the relation.
    template_data['etcd_servers'] = ",".join([
        "http://%s:%s" % (s[0], s[1]) for s in sorted(
            get_rel_hosts('etcd', rels, ('hostname', 'port')))])
    template_data['minions'] = ",".join(get_rel_hosts('minions-api', rels))
    template_data['api_bind_address'] = _bind_addr(hookenv.unit_private_ip())
    template_data['bind_address'] = "127.0.0.1"
    template_data['api_server_address'] = "http://%s:%s" % (
        hookenv.unit_private_ip(), 8080)
    arch = subprocess.check_output(['dpkg', '--print-architecture']).strip()
    template_data['web_uri'] = "/kubernetes/%s/local/bin/linux/%s/" % (version,
                                                                      arch)
    # 'alias' is the on-disk directory nginx serves the binaries from.
    if version == 'local':
        template_data['alias'] = hookenv.charm_dir() + '/files/output/'
    else:
        directory = '/opt/kubernetes/_output/local/bin/linux/%s/' % arch
        template_data['alias'] = directory
    # Python 2: make sure all values are byte strings for templating.
    _encode(template_data)
    return template_data
def _bind_addr(addr):
if addr.replace('.', '').isdigit():
return addr
try:
return socket.gethostbyname(addr)
except socket.error:
raise ValueError("Could not resolve private address")
def _encode(d):
    """UTF-8 encode every unicode value of *d* in place (Python 2 only)."""
    for key, value in d.items():
        if isinstance(value, unicode):
            d[key] = value.encode('utf8')
def get_rel_hosts(rel_name, rels, keys=('private-address',)):
    """Collect the requested *keys* from every remote unit on *rel_name*.

    Skips the local unit and any unit missing one of the keys.  Returns a
    list of single values when one key was asked for, else lists of values.
    """
    hosts = []
    for relation_id, unit_map in rels.get(rel_name, {}).items():
        for unit_id, unit_data in unit_map.items():
            if unit_id == hookenv.local_unit():
                continue
            values = [unit_data.get(key) for key in keys]
            if all(values):
                hosts.append(values[0] if len(values) == 1 else values)
    return hosts
def render_file(name, data, src_suffix="upstart.tmpl", tgt_path=None):
    """Render the charm template 'files/<name>.<src_suffix>' with classic
    %-interpolation of *data* and write it to *tgt_path*.

    Defaults to writing an upstart job at /etc/init/<name>.conf.  Returns
    True when the target file was (re)written, False when it already held
    exactly the rendered content (so callers can skip service restarts).
    """
    template_file = os.path.join(
        os.environ.get('CHARM_DIR'), 'files', '%s.%s' % (name, src_suffix))
    with open(template_file) as source:
        rendered = source.read() % data
    if tgt_path is None:
        tgt_path = '/etc/init/%s.conf' % name
    # Skip the write (and the caller's restart) when nothing changed.
    if os.path.exists(tgt_path):
        with open(tgt_path) as existing:
            if existing.read() == rendered:
                return False
    with open(tgt_path, 'w') as target:
        target.write(rendered)
    return True
if __name__ == '__main__':
    # Dispatch to the handler registered for the hook name Juju invoked
    # (sys.argv[0] is the hook's file name).
    hooks.execute(sys.argv)
| apache-2.0 |
axbaretto/beam | sdks/python/.tox/lint/lib/python2.7/site-packages/yaml/representer.py | 64 | 17711 |
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
'RepresenterError']
from error import *
from nodes import *
import datetime
import sys, copy_reg, types
class RepresenterError(YAMLError):
    # Raised when an object cannot be converted to a YAML node.
    pass
class BaseRepresenter(object):
    """Core machinery that converts Python objects into YAML node trees.

    Representer functions are looked up per type in two class-level
    registries: yaml_representers (exact type match) and
    yaml_multi_representers (matched against the type's MRO).
    """
    yaml_representers = {}
    yaml_multi_representers = {}
    def __init__(self, default_style=None, default_flow_style=None):
        self.default_style = default_style
        self.default_flow_style = default_flow_style
        # id(obj) -> node map used to emit anchors/aliases for shared objects.
        self.represented_objects = {}
        # Keeps represented objects alive so their id()s stay unique.
        self.object_keeper = []
        self.alias_key = None
    def represent(self, data):
        # Convert data to a node tree, serialize it, then reset per-document
        # aliasing state.
        node = self.represent_data(data)
        self.serialize(node)
        self.represented_objects = {}
        self.object_keeper = []
        self.alias_key = None
    def get_classobj_bases(self, cls):
        # Flatten the base-class graph of an old-style (classic) class.
        bases = [cls]
        for base in cls.__bases__:
            bases.extend(self.get_classobj_bases(base))
        return bases
    def represent_data(self, data):
        """Return the node for *data*, reusing an existing node for aliases."""
        if self.ignore_aliases(data):
            self.alias_key = None
        else:
            self.alias_key = id(data)
        if self.alias_key is not None:
            if self.alias_key in self.represented_objects:
                node = self.represented_objects[self.alias_key]
                #if node is None:
                #    raise RepresenterError("recursive objects are not allowed: %r" % data)
                return node
            #self.represented_objects[alias_key] = None
            self.object_keeper.append(data)
        data_types = type(data).__mro__
        # Old-style instances have no useful __mro__; prepend the classic
        # class hierarchy instead.
        if type(data) is types.InstanceType:
            data_types = self.get_classobj_bases(data.__class__)+list(data_types)
        if data_types[0] in self.yaml_representers:
            node = self.yaml_representers[data_types[0]](self, data)
        else:
            # Fall back to multi-representers along the MRO, then to the
            # catch-all (None) entries, then to a plain string scalar.
            for data_type in data_types:
                if data_type in self.yaml_multi_representers:
                    node = self.yaml_multi_representers[data_type](self, data)
                    break
            else:
                if None in self.yaml_multi_representers:
                    node = self.yaml_multi_representers[None](self, data)
                elif None in self.yaml_representers:
                    node = self.yaml_representers[None](self, data)
                else:
                    node = ScalarNode(None, unicode(data))
        #if alias_key is not None:
        #    self.represented_objects[alias_key] = node
        return node
    def add_representer(cls, data_type, representer):
        # Copy-on-write so subclasses do not mutate the parent's registry.
        if not 'yaml_representers' in cls.__dict__:
            cls.yaml_representers = cls.yaml_representers.copy()
        cls.yaml_representers[data_type] = representer
    add_representer = classmethod(add_representer)
    def add_multi_representer(cls, data_type, representer):
        # Copy-on-write so subclasses do not mutate the parent's registry.
        if not 'yaml_multi_representers' in cls.__dict__:
            cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
        cls.yaml_multi_representers[data_type] = representer
    add_multi_representer = classmethod(add_multi_representer)
    def represent_scalar(self, tag, value, style=None):
        if style is None:
            style = self.default_style
        node = ScalarNode(tag, value, style=style)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        return node
    def represent_sequence(self, tag, sequence, flow_style=None):
        value = []
        node = SequenceNode(tag, value, flow_style=flow_style)
        # Register the (still empty) node first so recursive references
        # inside the sequence resolve to it.
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        best_style = True
        for item in sequence:
            node_item = self.represent_data(item)
            if not (isinstance(node_item, ScalarNode) and not node_item.style):
                best_style = False
            value.append(node_item)
        if flow_style is None:
            if self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        return node
    def represent_mapping(self, tag, mapping, flow_style=None):
        value = []
        node = MappingNode(tag, value, flow_style=flow_style)
        # Register the (still empty) node first so recursive references
        # inside the mapping resolve to it.
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        best_style = True
        if hasattr(mapping, 'items'):
            # Sort items for deterministic output (py2 list sort).
            mapping = mapping.items()
            mapping.sort()
        for item_key, item_value in mapping:
            node_key = self.represent_data(item_key)
            node_value = self.represent_data(item_value)
            if not (isinstance(node_key, ScalarNode) and not node_key.style):
                best_style = False
            if not (isinstance(node_value, ScalarNode) and not node_value.style):
                best_style = False
            value.append((node_key, node_value))
        if flow_style is None:
            if self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        return node
    def ignore_aliases(self, data):
        # Subclasses decide which objects never get anchors/aliases.
        return False
class SafeRepresenter(BaseRepresenter):
    """Representers for the safe subset of types (plain YAML tags only)."""
    def ignore_aliases(self, data):
        # Immutable primitives never need anchors/aliases.
        # (Implicitly returns None, i.e. falsy, for everything else.)
        if data is None:
            return True
        if isinstance(data, tuple) and data == ():
            return True
        if isinstance(data, (str, unicode, bool, int, float)):
            return True
    def represent_none(self, data):
        return self.represent_scalar(u'tag:yaml.org,2002:null',
                u'null')
    def represent_str(self, data):
        # Try ASCII, then UTF-8; anything else is emitted as !!binary
        # in base64 literal-block style.
        tag = None
        style = None
        try:
            data = unicode(data, 'ascii')
            tag = u'tag:yaml.org,2002:str'
        except UnicodeDecodeError:
            try:
                data = unicode(data, 'utf-8')
                tag = u'tag:yaml.org,2002:str'
            except UnicodeDecodeError:
                data = data.encode('base64')
                tag = u'tag:yaml.org,2002:binary'
                style = '|'
        return self.represent_scalar(tag, data, style=style)
    def represent_unicode(self, data):
        return self.represent_scalar(u'tag:yaml.org,2002:str', data)
    def represent_bool(self, data):
        if data:
            value = u'true'
        else:
            value = u'false'
        return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
    def represent_int(self, data):
        return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
    def represent_long(self, data):
        return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
    # Compute the float infinity threshold by repeated squaring; used to
    # recognize values that must be emitted as .inf / -.inf.
    inf_value = 1e300
    while repr(inf_value) != repr(inf_value*inf_value):
        inf_value *= inf_value
    def represent_float(self, data):
        # data != data detects NaN without needing math.isnan.
        if data != data or (data == 0.0 and data == 1.0):
            value = u'.nan'
        elif data == self.inf_value:
            value = u'.inf'
        elif data == -self.inf_value:
            value = u'-.inf'
        else:
            value = unicode(repr(data)).lower()
            # Note that in some cases `repr(data)` represents a float number
            # without the decimal parts.  For instance:
            #   >>> repr(1e17)
            #   '1e17'
            # Unfortunately, this is not a valid float representation according
            # to the definition of the `!!float` tag.  We fix this by adding
            # '.0' before the 'e' symbol.
            if u'.' not in value and u'e' in value:
                value = value.replace(u'e', u'.0e', 1)
        return self.represent_scalar(u'tag:yaml.org,2002:float', value)
    def represent_list(self, data):
        #pairs = (len(data) > 0 and isinstance(data, list))
        #if pairs:
        #    for item in data:
        #        if not isinstance(item, tuple) or len(item) != 2:
        #            pairs = False
        #            break
        #if not pairs:
        return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
        #value = []
        #for item_key, item_value in data:
        #    value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
        #        [(item_key, item_value)]))
        #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
    def represent_dict(self, data):
        return self.represent_mapping(u'tag:yaml.org,2002:map', data)
    def represent_set(self, data):
        # YAML sets are mappings with null values.
        value = {}
        for key in data:
            value[key] = None
        return self.represent_mapping(u'tag:yaml.org,2002:set', value)
    def represent_date(self, data):
        value = unicode(data.isoformat())
        return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
    def represent_datetime(self, data):
        value = unicode(data.isoformat(' '))
        return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
    def represent_yaml_object(self, tag, data, cls, flow_style=None):
        # Serialize an object's state (via __getstate__ or __dict__) as a
        # mapping under the given application tag.
        if hasattr(data, '__getstate__'):
            state = data.__getstate__()
        else:
            state = data.__dict__.copy()
        return self.represent_mapping(tag, state, flow_style=flow_style)
    def represent_undefined(self, data):
        # Catch-all: safe dumping refuses types it does not know.
        raise RepresenterError("cannot represent an object: %s" % data)
# Register the safe representers with their Python types; the None entry
# is the catch-all that rejects unknown types.
SafeRepresenter.add_representer(type(None),
        SafeRepresenter.represent_none)
SafeRepresenter.add_representer(str,
        SafeRepresenter.represent_str)
SafeRepresenter.add_representer(unicode,
        SafeRepresenter.represent_unicode)
SafeRepresenter.add_representer(bool,
        SafeRepresenter.represent_bool)
SafeRepresenter.add_representer(int,
        SafeRepresenter.represent_int)
SafeRepresenter.add_representer(long,
        SafeRepresenter.represent_long)
SafeRepresenter.add_representer(float,
        SafeRepresenter.represent_float)
SafeRepresenter.add_representer(list,
        SafeRepresenter.represent_list)
SafeRepresenter.add_representer(tuple,
        SafeRepresenter.represent_list)
SafeRepresenter.add_representer(dict,
        SafeRepresenter.represent_dict)
SafeRepresenter.add_representer(set,
        SafeRepresenter.represent_set)
SafeRepresenter.add_representer(datetime.date,
        SafeRepresenter.represent_date)
SafeRepresenter.add_representer(datetime.datetime,
        SafeRepresenter.represent_datetime)
SafeRepresenter.add_representer(None,
        SafeRepresenter.represent_undefined)
class Representer(SafeRepresenter):
    def represent_str(self, data):
        # Like SafeRepresenter.represent_str, but non-ASCII UTF-8 strings
        # get the !!python/str tag so round-tripping restores a byte str.
        tag = None
        style = None
        try:
            data = unicode(data, 'ascii')
            tag = u'tag:yaml.org,2002:str'
        except UnicodeDecodeError:
            try:
                data = unicode(data, 'utf-8')
                tag = u'tag:yaml.org,2002:python/str'
            except UnicodeDecodeError:
                data = data.encode('base64')
                tag = u'tag:yaml.org,2002:binary'
                style = '|'
        return self.represent_scalar(tag, data, style=style)
def represent_unicode(self, data):
tag = None
try:
data.encode('ascii')
tag = u'tag:yaml.org,2002:python/unicode'
except UnicodeEncodeError:
tag = u'tag:yaml.org,2002:str'
return self.represent_scalar(tag, data)
def represent_long(self, data):
tag = u'tag:yaml.org,2002:int'
if int(data) is not data:
tag = u'tag:yaml.org,2002:python/long'
return self.represent_scalar(tag, unicode(data))
def represent_complex(self, data):
if data.imag == 0.0:
data = u'%r' % data.real
elif data.real == 0.0:
data = u'%rj' % data.imag
elif data.imag > 0:
data = u'%r+%rj' % (data.real, data.imag)
else:
data = u'%r%rj' % (data.real, data.imag)
return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
def represent_tuple(self, data):
return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
def represent_name(self, data):
name = u'%s.%s' % (data.__module__, data.__name__)
return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
def represent_module(self, data):
return self.represent_scalar(
u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
def represent_instance(self, data):
# For instances of classic classes, we use __getinitargs__ and
# __getstate__ to serialize the data.
# If data.__getinitargs__ exists, the object must be reconstructed by
# calling cls(**args), where args is a tuple returned by
# __getinitargs__. Otherwise, the cls.__init__ method should never be
# called and the class instance is created by instantiating a trivial
# class and assigning to the instance's __class__ variable.
# If data.__getstate__ exists, it returns the state of the object.
# Otherwise, the state of the object is data.__dict__.
# We produce either a !!python/object or !!python/object/new node.
# If data.__getinitargs__ does not exist and state is a dictionary, we
# produce a !!python/object node . Otherwise we produce a
# !!python/object/new node.
cls = data.__class__
class_name = u'%s.%s' % (cls.__module__, cls.__name__)
args = None
state = None
if hasattr(data, '__getinitargs__'):
args = list(data.__getinitargs__())
if hasattr(data, '__getstate__'):
state = data.__getstate__()
else:
state = data.__dict__
if args is None and isinstance(state, dict):
return self.represent_mapping(
u'tag:yaml.org,2002:python/object:'+class_name, state)
if isinstance(state, dict) and not state:
return self.represent_sequence(
u'tag:yaml.org,2002:python/object/new:'+class_name, args)
value = {}
if args:
value['args'] = args
value['state'] = state
return self.represent_mapping(
u'tag:yaml.org,2002:python/object/new:'+class_name, value)
def represent_object(self, data):
# We use __reduce__ API to save the data. data.__reduce__ returns
# a tuple of length 2-5:
# (function, args, state, listitems, dictitems)
# For reconstructing, we calls function(*args), then set its state,
# listitems, and dictitems if they are not None.
# A special case is when function.__name__ == '__newobj__'. In this
# case we create the object with args[0].__new__(*args).
# Another special case is when __reduce__ returns a string - we don't
# support it.
# We produce a !!python/object, !!python/object/new or
# !!python/object/apply node.
cls = type(data)
if cls in copy_reg.dispatch_table:
reduce = copy_reg.dispatch_table[cls](data)
elif hasattr(data, '__reduce_ex__'):
reduce = data.__reduce_ex__(2)
elif hasattr(data, '__reduce__'):
reduce = data.__reduce__()
else:
raise RepresenterError("cannot represent object: %r" % data)
reduce = (list(reduce)+[None]*5)[:5]
function, args, state, listitems, dictitems = reduce
args = list(args)
if state is None:
state = {}
if listitems is not None:
listitems = list(listitems)
if dictitems is not None:
dictitems = dict(dictitems)
if function.__name__ == '__newobj__':
function = args[0]
args = args[1:]
tag = u'tag:yaml.org,2002:python/object/new:'
newobj = True
else:
tag = u'tag:yaml.org,2002:python/object/apply:'
newobj = False
function_name = u'%s.%s' % (function.__module__, function.__name__)
if not args and not listitems and not dictitems \
and isinstance(state, dict) and newobj:
return self.represent_mapping(
u'tag:yaml.org,2002:python/object:'+function_name, state)
if not listitems and not dictitems \
and isinstance(state, dict) and not state:
return self.represent_sequence(tag+function_name, args)
value = {}
if args:
value['args'] = args
if state or not isinstance(state, dict):
value['state'] = state
if listitems:
value['listitems'] = listitems
if dictitems:
value['dictitems'] = dictitems
return self.represent_mapping(tag+function_name, value)
# Register the Python-specific representers. The multi-representers at the
# end act as catch-alls: any old-style instance goes through
# represent_instance, and any other object through represent_object.
Representer.add_representer(str,
        Representer.represent_str)
Representer.add_representer(unicode,
        Representer.represent_unicode)
Representer.add_representer(long,
        Representer.represent_long)
Representer.add_representer(complex,
        Representer.represent_complex)
Representer.add_representer(tuple,
        Representer.represent_tuple)
Representer.add_representer(type,
        Representer.represent_name)
Representer.add_representer(types.ClassType,
        Representer.represent_name)
Representer.add_representer(types.FunctionType,
        Representer.represent_name)
Representer.add_representer(types.BuiltinFunctionType,
        Representer.represent_name)
Representer.add_representer(types.ModuleType,
        Representer.represent_module)
Representer.add_multi_representer(types.InstanceType,
        Representer.represent_instance)
Representer.add_multi_representer(object,
        Representer.represent_object)
| apache-2.0 |
knehez/edx-platform | openedx/core/djangoapps/credit/tests/test_tasks.py | 19 | 4845 | """
Tests for credit course tasks.
"""
import mock
from datetime import datetime
from openedx.core.djangoapps.credit.api import get_credit_requirements
from openedx.core.djangoapps.credit.exceptions import InvalidCreditRequirements
from openedx.core.djangoapps.credit.models import CreditCourse
from openedx.core.djangoapps.credit.signals import listen_for_course_publish
from xmodule.modulestore.django import SignalHandler
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls
class TestTaskExecution(ModuleStoreTestCase):
    """Set of tests to ensure that the task code will do the right thing when
    executed directly.
    The test course gets created without the listeners being present, which
    allows us to ensure that when the listener is executed, it is done as
    expected.
    """
    def mocked_set_credit_requirements(course_key, requirements):  # pylint: disable=no-self-argument, unused-argument
        """Used as a side effect when mocking method credit api method
        'set_credit_requirements'.
        """
        raise InvalidCreditRequirements
    def add_icrv_xblock(self):
        """ Create the 'edx-reverification-block' in course tree """
        # Build a minimal chapter -> sequential -> vertical chain so the
        # reverification block sits at the expected depth in the course tree.
        section = ItemFactory.create(parent=self.course, category='chapter', display_name='Test Section')
        subsection = ItemFactory.create(parent=section, category='sequential', display_name='Test Subsection')
        vertical = ItemFactory.create(parent=subsection, category='vertical', display_name='Test Unit')
        ItemFactory.create(
            parent=vertical,
            category='edx-reverification-block',
            display_name='Test Verification Block'
        )
    def setUp(self):
        """Create the test course with the publish listener disconnected,
        so tests control exactly when requirements get (re)computed.
        """
        super(TestTaskExecution, self).setUp()
        SignalHandler.course_published.disconnect(listen_for_course_publish)
        self.course = CourseFactory.create(start=datetime(2015, 3, 1))
    def test_task_adding_requirements_invalid_course(self):
        """
        Test that credit requirements cannot be added for non credit course.
        """
        requirements = get_credit_requirements(self.course.id)
        self.assertEqual(len(requirements), 0)
        listen_for_course_publish(self, self.course.id)
        # Still zero: the course was never marked as a credit course.
        requirements = get_credit_requirements(self.course.id)
        self.assertEqual(len(requirements), 0)
    def test_task_adding_requirements(self):
        """Test that credit requirements are added properly for credit course.
        Make sure that the receiver correctly fires off the task when
        invoked by signal.
        """
        self.add_credit_course(self.course.id)
        requirements = get_credit_requirements(self.course.id)
        self.assertEqual(len(requirements), 0)
        listen_for_course_publish(self, self.course.id)
        requirements = get_credit_requirements(self.course.id)
        self.assertEqual(len(requirements), 1)
    def test_task_adding_icrv_requirements(self):
        """Make sure that the receiver correctly fires off the task when
        invoked by signal.
        """
        # With an ICRV block present the course gets two requirements:
        # the default one plus the reverification requirement.
        self.add_credit_course(self.course.id)
        self.add_icrv_xblock()
        requirements = get_credit_requirements(self.course.id)
        self.assertEqual(len(requirements), 0)
        listen_for_course_publish(self, self.course.id)
        requirements = get_credit_requirements(self.course.id)
        self.assertEqual(len(requirements), 2)
    def test_query_counts(self):
        """Publishing a credit course with one ICRV block should hit mongo a
        fixed, small number of times (guards against query regressions).
        """
        self.add_credit_course(self.course.id)
        self.add_icrv_xblock()
        with check_mongo_calls(3):
            listen_for_course_publish(self, self.course.id)
    @mock.patch(
        'openedx.core.djangoapps.credit.tasks.set_credit_requirements',
        mock.Mock(
            side_effect=mocked_set_credit_requirements
        )
    )
    def test_retry(self):
        """Test that adding credit requirements is retried when
        'InvalidCreditRequirements' exception is raised.
        Make sure that the receiver correctly fires off the task when
        invoked by signal
        """
        self.add_credit_course(self.course.id)
        requirements = get_credit_requirements(self.course.id)
        self.assertEqual(len(requirements), 0)
        listen_for_course_publish(self, self.course.id)
        # The patched API always fails, so no requirements get persisted.
        requirements = get_credit_requirements(self.course.id)
        self.assertEqual(len(requirements), 0)
    def add_credit_course(self, course_key):
        """Add the course as a credit.
        Args:
            course_key(CourseKey): Identifier for the course
        Returns:
            CreditCourse object added
        """
        credit_course = CreditCourse(course_key=course_key, enabled=True)
        credit_course.save()
        return credit_course
| agpl-3.0 |
jotes/ansible-modules-core | cloud/openstack/quantum_router.py | 99 | 6996 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# Prefer the renamed python-neutronclient; fall back to the legacy
# python-quantumclient for older OpenStack installs. A missing client is
# recorded in HAVE_DEPS and reported via fail_json at runtime instead of
# crashing at import time.
try:
    try:
        from neutronclient.neutron import client
    except ImportError:
        from quantumclient.quantum import client
    from keystoneclient.v2_0 import client as ksclient
    HAVE_DEPS = True
except ImportError:
    HAVE_DEPS = False
# DOCUMENTATION is consumed by ansible-doc; typos here are user-facing.
DOCUMENTATION = '''
---
module: quantum_router
version_added: "1.2"
author: "Benno Joy (@bennojoy)"
short_description: Create or Remove router from openstack
description:
   - Create or Delete routers from OpenStack
options:
   login_username:
     description:
        - login username to authenticate to keystone
     required: true
     default: admin
   login_password:
     description:
        - Password of login user
     required: true
     default: 'yes'
   login_tenant_name:
     description:
        - The tenant name of the login user
     required: true
     default: 'yes'
   auth_url:
     description:
        - The keystone url for authentication
     required: false
     default: 'http://127.0.0.1:35357/v2.0/'
   region_name:
     description:
        - Name of the region
     required: false
     default: None
   state:
     description:
        - Indicate desired state of the resource
     choices: ['present', 'absent']
     default: present
   name:
     description:
        - Name to be given to the router
     required: true
     default: None
   tenant_name:
     description:
        - Name of the tenant for which the router has to be created, if none router would be created for the login tenant.
     required: false
     default: None
   admin_state_up:
     description:
        - Desired admin state of the created router.
     required: false
     default: true
requirements:
    - "python >= 2.6"
    - "python-neutronclient or python-quantumclient"
    - "python-keystoneclient"
'''
# EXAMPLES is rendered verbatim by ansible-doc; the original had a stray
# trailing double-quote after `router1` that would confuse copy-pasters.
EXAMPLES = '''
# Creates a router for tenant admin
- quantum_router: state=present
                  login_username=admin
                  login_password=admin
                  login_tenant_name=admin
                  name=router1
'''
# Module-level caches populated during authentication: the keystone client
# handle and the id of the tenant the router operations act on.
_os_keystone = None
_os_tenant_id = None
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception, e:
module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception, e:
module.fail_json(msg = "Error getting network endpoint: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception, e:
module.fail_json(msg = "Error in connecting to neutron: %s " % e.message)
return neutron
def _set_tenant_id(module):
    """Resolve and cache (in _os_tenant_id) the id of the target tenant.

    Uses ``tenant_name`` when supplied, otherwise the login tenant; fails
    the module if no matching tenant exists in keystone.
    """
    global _os_tenant_id
    # Fall back to the login tenant when no explicit tenant was requested.
    target_name = module.params['tenant_name'] or module.params['login_tenant_name']
    for tenant in _os_keystone.tenants.list():
        if tenant.name == target_name:
            _os_tenant_id = tenant.id
            break
    if not _os_tenant_id:
        module.fail_json(msg = "The tenant id cannot be found, please check the parameters")
def _get_router_id(module, neutron):
kwargs = {
'name': module.params['name'],
'tenant_id': _os_tenant_id,
}
try:
routers = neutron.list_routers(**kwargs)
except Exception, e:
module.fail_json(msg = "Error in getting the router list: %s " % e.message)
if not routers['routers']:
return None
return routers['routers'][0]['id']
def _create_router(module, neutron):
router = {
'name': module.params['name'],
'tenant_id': _os_tenant_id,
'admin_state_up': module.params['admin_state_up'],
}
try:
new_router = neutron.create_router(dict(router=router))
except Exception, e:
module.fail_json( msg = "Error in creating router: %s" % e.message)
return new_router['router']['id']
def _delete_router(module, neutron, router_id):
try:
neutron.delete_router(router_id)
except:
module.fail_json("Error in deleting the router")
return True
def main():
    """Module entry point: ensure the named router is present or absent."""
    # Start from the shared OpenStack auth options and add router-specific ones.
    argument_spec = openstack_argument_spec()
    argument_spec.update(dict(
        name = dict(required=True),
        tenant_name = dict(default=None),
        state = dict(default='present', choices=['absent', 'present']),
        admin_state_up = dict(type='bool', default=True),
    ))
    module = AnsibleModule(argument_spec=argument_spec)
    if not HAVE_DEPS:
        module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required')
    neutron = _get_neutron_client(module, module.params)
    _set_tenant_id(module)
    if module.params['state'] == 'present':
        # Idempotent create: only build the router if it doesn't exist yet.
        router_id = _get_router_id(module, neutron)
        if not router_id:
            router_id = _create_router(module, neutron)
            module.exit_json(changed=True, result="Created", id=router_id)
        else:
            module.exit_json(changed=False, result="success" , id=router_id)
    else:
        # Idempotent delete: absence of the router is already success.
        router_id = _get_router_id(module, neutron)
        if not router_id:
            module.exit_json(changed=False, result="success")
        else:
            _delete_router(module, neutron, router_id)
            module.exit_json(changed=True, result="deleted")
# This is magic, see lib/ansible/module_common.py: the wildcard imports are
# replaced by ansible with the module-utils source at execution time.
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
    main()
| gpl-3.0 |
grlee77/numpy | numpy/f2py/tests/test_parameter.py | 17 | 3910 | import os
import pytest
import numpy as np
from numpy.testing import assert_raises, assert_equal
from . import util
def _path(*a):
return os.path.join(*((os.path.dirname(__file__),) + a))
class TestParameters(util.F2PyTest):
    # Check that Fortran `parameter` constants (real, integer, compound
    # expressions) are honoured by the f2py-generated wrappers.
    sources = [_path('src', 'parameter', 'constant_real.f90'),
               _path('src', 'parameter', 'constant_integer.f90'),
               _path('src', 'parameter', 'constant_both.f90'),
               _path('src', 'parameter', 'constant_compound.f90'),
               _path('src', 'parameter', 'constant_non_compound.f90'),
    ]
    @pytest.mark.slow
    def test_constant_real_single(self):
        """Wrapped routine rejects non-contiguous input and applies the
        single-precision real parameter to the first element."""
        # non-contiguous should raise error
        x = np.arange(6, dtype=np.float32)[::2]
        assert_raises(ValueError, self.module.foo_single, x)
        # check values with contiguous array
        x = np.arange(3, dtype=np.float32)
        self.module.foo_single(x)
        assert_equal(x, [0 + 1 + 2*3, 1, 2])
    @pytest.mark.slow
    def test_constant_real_double(self):
        # non-contiguous should raise error
        x = np.arange(6, dtype=np.float64)[::2]
        assert_raises(ValueError, self.module.foo_double, x)
        # check values with contiguous array
        x = np.arange(3, dtype=np.float64)
        self.module.foo_double(x)
        assert_equal(x, [0 + 1 + 2*3, 1, 2])
    @pytest.mark.slow
    def test_constant_compound_int(self):
        # non-contiguous should raise error
        x = np.arange(6, dtype=np.int32)[::2]
        assert_raises(ValueError, self.module.foo_compound_int, x)
        # check values with contiguous array
        x = np.arange(3, dtype=np.int32)
        self.module.foo_compound_int(x)
        assert_equal(x, [0 + 1 + 2*6, 1, 2])
    @pytest.mark.slow
    def test_constant_non_compound_int(self):
        # check values
        x = np.arange(4, dtype=np.int32)
        self.module.foo_non_compound_int(x)
        assert_equal(x, [0 + 1 + 2 + 3*4, 1, 2, 3])
    @pytest.mark.slow
    def test_constant_integer_int(self):
        # non-contiguous should raise error
        x = np.arange(6, dtype=np.int32)[::2]
        assert_raises(ValueError, self.module.foo_int, x)
        # check values with contiguous array
        x = np.arange(3, dtype=np.int32)
        self.module.foo_int(x)
        assert_equal(x, [0 + 1 + 2*3, 1, 2])
    @pytest.mark.slow
    def test_constant_integer_long(self):
        # non-contiguous should raise error
        x = np.arange(6, dtype=np.int64)[::2]
        assert_raises(ValueError, self.module.foo_long, x)
        # check values with contiguous array
        x = np.arange(3, dtype=np.int64)
        self.module.foo_long(x)
        assert_equal(x, [0 + 1 + 2*3, 1, 2])
    @pytest.mark.slow
    def test_constant_both(self):
        # non-contiguous should raise error
        x = np.arange(6, dtype=np.float64)[::2]
        assert_raises(ValueError, self.module.foo, x)
        # check values with contiguous array
        x = np.arange(3, dtype=np.float64)
        self.module.foo(x)
        assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3])
    @pytest.mark.slow
    def test_constant_no(self):
        # non-contiguous should raise error
        x = np.arange(6, dtype=np.float64)[::2]
        assert_raises(ValueError, self.module.foo_no, x)
        # check values with contiguous array
        x = np.arange(3, dtype=np.float64)
        self.module.foo_no(x)
        assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3])
    @pytest.mark.slow
    def test_constant_sum(self):
        # non-contiguous should raise error
        x = np.arange(6, dtype=np.float64)[::2]
        assert_raises(ValueError, self.module.foo_sum, x)
        # check values with contiguous array
        x = np.arange(3, dtype=np.float64)
        self.module.foo_sum(x)
        assert_equal(x, [0 + 1*3*3 + 2*3*3, 1*3, 2*3])
| bsd-3-clause |
iot-factory/synapse | synapse/storage/transactions.py | 1 | 10722 | # -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import SQLBaseStore
from synapse.util.caches.descriptors import cached
from collections import namedtuple
from canonicaljson import encode_canonical_json
import logging
logger = logging.getLogger(__name__)
class TransactionStore(SQLBaseStore):
    """A collection of queries for handling PDUs.
    """
    def get_received_txn_response(self, transaction_id, origin):
        """For an incoming transaction from a given origin, check if we have
        already responded to it. If so, return the response code and response
        body (as a dict).
        Args:
            transaction_id (str)
            origin(str)
        Returns:
            tuple: None if we have not previously responded to
            this transaction or a 2-tuple of (int, dict)
        """
        return self.runInteraction(
            "get_received_txn_response",
            self._get_received_txn_response, transaction_id, origin
        )
    def _get_received_txn_response(self, txn, transaction_id, origin):
        # Runs inside a DB transaction: look up any recorded response for
        # this (transaction_id, origin) pair.
        result = self._simple_select_one_txn(
            txn,
            table=ReceivedTransactionsTable.table_name,
            keyvalues={
                "transaction_id": transaction_id,
                "origin": origin,
            },
            retcols=ReceivedTransactionsTable.fields,
            allow_none=True,
        )
        # A zero/absent response_code means we never finished responding.
        if result and result["response_code"]:
            return result["response_code"], result["response_json"]
        else:
            return None
    def set_received_txn_response(self, transaction_id, origin, code,
                                  response_dict):
        """Persist the response we returned for an incoming transaction, and
        should return for subsequent transactions with the same transaction_id
        and origin.
        Args:
            transaction_id (str)
            origin (str)
            code (int)
            response_dict (dict)
        """
        return self._simple_insert(
            table=ReceivedTransactionsTable.table_name,
            values={
                "transaction_id": transaction_id,
                "origin": origin,
                "response_code": code,
                # Store the canonical JSON encoding as a BLOB (py2 buffer).
                "response_json": buffer(encode_canonical_json(response_dict)),
            },
            or_ignore=True,
            desc="set_received_txn_response",
        )
    def prep_send_transaction(self, transaction_id, destination,
                              origin_server_ts):
        """Persists an outgoing transaction and calculates the values for the
        previous transaction id list.
        This should be called before sending the transaction so that it has the
        correct value for the `prev_ids` key.
        Args:
            transaction_id (str)
            destination (str)
            origin_server_ts (int)
        Returns:
            list: A list of previous transaction ids.
        """
        return self.runInteraction(
            "prep_send_transaction",
            self._prep_send_transaction,
            transaction_id, destination, origin_server_ts
        )
    def _prep_send_transaction(self, txn, transaction_id, destination,
                               origin_server_ts):
        next_id = self._transaction_id_gen.get_next_txn(txn)
        # First we find out what the prev_txns should be.
        # Since we know that we are only sending one transaction at a time,
        # we can simply take the last one.
        query = (
            "SELECT * FROM sent_transactions"
            " WHERE destination = ?"
            " ORDER BY id DESC LIMIT 1"
        )
        txn.execute(query, (destination,))
        results = self.cursor_to_dict(txn)
        prev_txns = [r["transaction_id"] for r in results]
        # Actually add the new transaction to the sent_transactions table.
        self._simple_insert_txn(
            txn,
            table=SentTransactions.table_name,
            values={
                "id": next_id,
                "transaction_id": transaction_id,
                "destination": destination,
                "ts": origin_server_ts,
                "response_code": 0,
                "response_json": None,
            }
        )
        # TODO Update the tx id -> pdu id mapping
        return prev_txns
    def delivered_txn(self, transaction_id, destination, code, response_dict):
        """Persists the response for an outgoing transaction.
        Args:
            transaction_id (str)
            destination (str)
            code (int)
            response_dict (dict)
        """
        return self.runInteraction(
            "delivered_txn",
            self._delivered_txn,
            transaction_id, destination, code,
            buffer(encode_canonical_json(response_dict)),
        )
    def _delivered_txn(self, txn, transaction_id, destination,
                       code, response_json):
        self._simple_update_one_txn(
            txn,
            table=SentTransactions.table_name,
            keyvalues={
                "transaction_id": transaction_id,
                "destination": destination,
            },
            updatevalues={
                "response_code": code,
                "response_json": None, # For now, don't persist response_json
            }
        )
    def get_transactions_after(self, transaction_id, destination):
        """Get all transactions after a given local transaction_id.
        Args:
            transaction_id (str)
            destination (str)
        Returns:
            list: A list of dicts
        """
        return self.runInteraction(
            "get_transactions_after",
            self._get_transactions_after, transaction_id, destination
        )
    def _get_transactions_after(self, txn, transaction_id, destination):
        # The subquery resolves the local autoincrement id of the named
        # transaction; the outer query returns everything sent after it.
        query = (
            "SELECT * FROM sent_transactions"
            " WHERE destination = ? AND id >"
            " ("
            " SELECT id FROM sent_transactions"
            " WHERE transaction_id = ? AND destination = ?"
            " )"
        )
        txn.execute(query, (destination, transaction_id, destination))
        return self.cursor_to_dict(txn)
    @cached()
    def get_destination_retry_timings(self, destination):
        """Gets the current retry timings (if any) for a given destination.
        Args:
            destination (str)
        Returns:
            None if not retrying
            Otherwise a dict for the retry scheme
        """
        return self.runInteraction(
            "get_destination_retry_timings",
            self._get_destination_retry_timings, destination)
    def _get_destination_retry_timings(self, txn, destination):
        result = self._simple_select_one_txn(
            txn,
            table=DestinationsTable.table_name,
            keyvalues={
                "destination": destination,
            },
            retcols=DestinationsTable.fields,
            allow_none=True,
        )
        # retry_last_ts == 0 is the "not currently retrying" sentinel.
        if result and result["retry_last_ts"] > 0:
            return result
        else:
            return None
    def set_destination_retry_timings(self, destination,
                                      retry_last_ts, retry_interval):
        """Sets the current retry timings for a given destination.
        Both timings should be zero if retrying is no longer occurring.
        Args:
            destination (str)
            retry_last_ts (int) - time of last retry attempt in unix epoch ms
            retry_interval (int) - how long until next retry in ms
        """
        # XXX: we could chose to not bother persisting this if our cache thinks
        # this is a NOOP
        return self.runInteraction(
            "set_destination_retry_timings",
            self._set_destination_retry_timings,
            destination,
            retry_last_ts,
            retry_interval,
        )
    def _set_destination_retry_timings(self, txn, destination,
                                       retry_last_ts, retry_interval):
        # Invalidate the cached read once this DB transaction commits.
        txn.call_after(self.get_destination_retry_timings.invalidate, (destination,))
        self._simple_upsert_txn(
            txn,
            "destinations",
            keyvalues={
                "destination": destination,
            },
            values={
                "retry_last_ts": retry_last_ts,
                "retry_interval": retry_interval,
            },
            insertion_values={
                "destination": destination,
                "retry_last_ts": retry_last_ts,
                "retry_interval": retry_interval,
            }
        )
    def get_destinations_needing_retry(self):
        """Get all destinations which are due a retry for sending a transaction.
        Returns:
            list: A list of dicts
        """
        return self.runInteraction(
            "get_destinations_needing_retry",
            self._get_destinations_needing_retry
        )
    def _get_destinations_needing_retry(self, txn):
        query = (
            "SELECT * FROM destinations"
            " WHERE retry_last_ts > 0 and retry_next_ts < ?"
        )
        txn.execute(query, (self._clock.time_msec(),))
        return self.cursor_to_dict(txn)
class ReceivedTransactionsTable(object):
    # Schema descriptor for incoming federation transactions and the
    # response we recorded for each.
    table_name = "received_transactions"
    fields = [
        "transaction_id",
        "origin",
        "ts",
        "response_code",
        "response_json",
        "has_been_referenced",
    ]
class SentTransactions(object):
    # Schema descriptor for transactions we have sent to remote servers,
    # keyed by a local autoincrement id used for "after" queries.
    table_name = "sent_transactions"
    fields = [
        "id",
        "transaction_id",
        "destination",
        "ts",
        "response_code",
        "response_json",
    ]
    EntryType = namedtuple("SentTransactionsEntry", fields)
class TransactionsToPduTable(object):
    # Schema descriptor mapping outgoing transactions to the PDUs they carry.
    table_name = "transaction_id_to_pdu"
    fields = [
        "transaction_id",
        "destination",
        "pdu_id",
        "pdu_origin",
    ]
class DestinationsTable(object):
    # Schema descriptor for per-destination retry/backoff bookkeeping.
    table_name = "destinations"
    fields = [
        "destination",
        "retry_last_ts",
        "retry_interval",
    ]
| apache-2.0 |
aselle/tensorflow | tensorflow/contrib/estimator/python/estimator/hooks_test.py | 4 | 11763 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for hooks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import json
import os
from tensorflow.contrib.estimator.python.estimator import hooks as hooks_lib
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.estimator import run_config as run_config_lib
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.summary import summary_iterator
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import training
def summary_step_keyword_to_value_mapping(dir_):
  """Read the last events file under `dir_` and return a nested mapping
  of {step: {summary_tag: simple_value}} for every summary found."""
  # Flush pending writers so the events file on disk is complete.
  writer_cache.FileWriterCache.clear()
  # Get last Event written.
  event_paths = glob.glob(os.path.join(dir_, 'events*'))
  mapping = {}
  for event in summary_iterator.summary_iterator(event_paths[-1]):
    per_step = mapping.setdefault(event.step, {})
    if event.summary is not None:
      for value in event.summary.value:
        per_step[value.tag] = value.simple_value
  return mapping
def get_summary_value(dir_, step, keyword):
  """Get summary value for given step and keyword.

  Args:
    dir_: directory containing TF events files.
    step: global step whose event to inspect.
    keyword: substring matched against each summary value's tag.

  Returns:
    The first matching `simple_value`, or None if nothing matches.
  """
  # Flush pending writers so the events file on disk is complete.
  writer_cache.FileWriterCache.clear()
  # Get last Event written.
  event_paths = glob.glob(os.path.join(dir_, 'events*'))
  # (Removed a leftover "XXX" debug print of event_paths.)
  for last_event in summary_iterator.summary_iterator(event_paths[-1]):
    if last_event.step == step and last_event.summary is not None:
      for value in last_event.summary.value:
        if keyword in value.tag:
          return value.simple_value
  return None
class InMemoryEvaluatorHookTest(test.TestCase):
  def test_runs_eval_metrics(self):
    """In-memory evaluation runs before training, every 4 steps, and at end,
    and each run records the expected eval metric."""
    def model_fn(features, labels, mode):
      # Minimal model: constant losses; eval reports the mean of the input.
      _ = labels
      if estimator_lib.ModeKeys.TRAIN == mode:
        with ops.control_dependencies([features]):
          train_op = state_ops.assign_add(training.get_global_step(), 1)
        return estimator_lib.EstimatorSpec(
            mode, loss=constant_op.constant(3.), train_op=train_op)
      if estimator_lib.ModeKeys.EVAL == mode:
        return estimator_lib.EstimatorSpec(
            mode,
            loss=constant_op.constant(5.),
            eval_metric_ops={'mean_of_features': metrics_lib.mean(features)})
    estimator = estimator_lib.Estimator(model_fn=model_fn)
    def input_fn():
      return dataset_ops.Dataset.range(10)
    evaluator = hooks_lib.InMemoryEvaluatorHook(
        estimator, input_fn, every_n_iter=4)
    estimator.train(input_fn, hooks=[evaluator])
    self.assertTrue(os.path.isdir(estimator.eval_dir()))
    step_keyword_to_value = summary_step_keyword_to_value_mapping(
        estimator.eval_dir())
    # 4.5 = sum(range(10))/10
    # before training
    self.assertEqual(4.5, step_keyword_to_value[0]['mean_of_features'])
    # intervals (every_n_iter=4)
    self.assertEqual(4.5, step_keyword_to_value[4]['mean_of_features'])
    self.assertEqual(4.5, step_keyword_to_value[8]['mean_of_features'])
    # end
    self.assertEqual(4.5, step_keyword_to_value[10]['mean_of_features'])
    self.assertEqual(set([0, 4, 8, 10]), set(step_keyword_to_value.keys()))
  def test_uses_latest_variable_value(self):
    """Each in-memory evaluation must see the variable values as of that
    training step, not stale values from a checkpoint."""
    def model_fn(features, labels, mode):
      _ = labels
      step = training.get_global_step()
      # w tracks the global step so evaluations reveal which step they saw.
      w = variable_scope.get_variable(
          'w',
          shape=[],
          initializer=init_ops.zeros_initializer(),
          dtype=dtypes.int64)
      if estimator_lib.ModeKeys.TRAIN == mode:
        # to consume features, we have control dependency
        with ops.control_dependencies([features]):
          step_inc = state_ops.assign_add(training.get_global_step(), 1)
        with ops.control_dependencies([step_inc]):
          assign_w_to_step_plus_2 = w.assign(step + 2)
        return estimator_lib.EstimatorSpec(
            mode,
            loss=constant_op.constant(3.),
            train_op=assign_w_to_step_plus_2)
      if estimator_lib.ModeKeys.EVAL == mode:
        # to consume features, we have control dependency
        with ops.control_dependencies([features]):
          loss = constant_op.constant(5.)
        return estimator_lib.EstimatorSpec(
            mode,
            loss=loss,
            # w is constant in each step, so the mean.
            # w = 0 if step==0 else step+2
            eval_metric_ops={'mean_of_const': metrics_lib.mean(w)})
    estimator = estimator_lib.Estimator(model_fn=model_fn)
    def input_fn():
      return dataset_ops.Dataset.range(10)
    evaluator = hooks_lib.InMemoryEvaluatorHook(
        estimator, input_fn, every_n_iter=4)
    estimator.train(input_fn, hooks=[evaluator])
    self.assertTrue(os.path.isdir(estimator.eval_dir()))
    step_keyword_to_value = summary_step_keyword_to_value_mapping(
        estimator.eval_dir())
    # w = 0 if step==0 else step+2
    self.assertEqual(0, step_keyword_to_value[0]['mean_of_const'])
    self.assertEqual(6, step_keyword_to_value[4]['mean_of_const'])
    self.assertEqual(12, step_keyword_to_value[10]['mean_of_const'])
def test_dnn_classifier(self):
embedding = feature_column_lib.embedding_column(
feature_column_lib.categorical_column_with_vocabulary_list(
'wire_cast', ['kima', 'omar', 'stringer']), 8)
dnn = estimator_lib.DNNClassifier(
feature_columns=[embedding], hidden_units=[3, 1])
def train_input_fn():
return dataset_ops.Dataset.from_tensors(({
'wire_cast': [['omar'], ['kima']]
}, [[0], [1]])).repeat(3)
def eval_input_fn():
return dataset_ops.Dataset.from_tensors(({
'wire_cast': [['stringer'], ['kima']]
}, [[0], [1]])).repeat(2)
evaluator = hooks_lib.InMemoryEvaluatorHook(
dnn, eval_input_fn, name='in-memory')
dnn.train(train_input_fn, hooks=[evaluator])
self.assertTrue(os.path.isdir(dnn.eval_dir('in-memory')))
step_keyword_to_value = summary_step_keyword_to_value_mapping(
dnn.eval_dir('in-memory'))
final_metrics = dnn.evaluate(eval_input_fn)
step = final_metrics[ops.GraphKeys.GLOBAL_STEP]
for summary_tag in final_metrics:
if summary_tag == ops.GraphKeys.GLOBAL_STEP:
continue
self.assertEqual(final_metrics[summary_tag],
step_keyword_to_value[step][summary_tag])
def test_raise_error_with_multi_worker(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4', 'host5:5']
},
'task': {
'type': run_config_lib.TaskType.CHIEF,
'index': 0
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
dnn = estimator_lib.DNNClassifier(
feature_columns=[feature_column_lib.numeric_column('x')],
hidden_units=[3, 1])
def eval_input_fn():
pass
with self.assertRaisesRegexp(ValueError, 'supports only single machine'):
hooks_lib.InMemoryEvaluatorHook(dnn, eval_input_fn)
def test_raise_error_with_ps(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1'],
},
'task': {
'type': run_config_lib.TaskType.CHIEF,
'index': 0
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
dnn = estimator_lib.DNNClassifier(
feature_columns=[feature_column_lib.numeric_column('x')],
hidden_units=[3, 1])
def eval_input_fn():
pass
with self.assertRaisesRegexp(ValueError, 'supports only single machine'):
hooks_lib.InMemoryEvaluatorHook(dnn, eval_input_fn)
def test_raise_error_with_custom_saver_in_eval(self):
def model_fn(features, labels, mode):
_, _ = features, labels
return estimator_lib.EstimatorSpec(
mode,
loss=constant_op.constant(3.),
scaffold=training.Scaffold(saver=training.Saver()),
train_op=constant_op.constant(5.),
eval_metric_ops={
'mean_of_features': metrics_lib.mean(constant_op.constant(2.))
})
estimator = estimator_lib.Estimator(model_fn=model_fn)
def input_fn():
return dataset_ops.Dataset.range(10)
evaluator = hooks_lib.InMemoryEvaluatorHook(estimator, input_fn)
with self.assertRaisesRegexp(ValueError, 'does not support custom saver'):
evaluator.begin()
def test_raise_error_with_custom_init_fn_in_eval(self):
def model_fn(features, labels, mode):
_, _ = features, labels
def init_fn(scaffold, session):
_, _ = scaffold, session
return estimator_lib.EstimatorSpec(
mode,
loss=constant_op.constant(3.),
scaffold=training.Scaffold(init_fn=init_fn),
train_op=constant_op.constant(5.),
eval_metric_ops={
'mean_of_features': metrics_lib.mean(constant_op.constant(2.))
})
estimator = estimator_lib.Estimator(model_fn=model_fn)
def input_fn():
return dataset_ops.Dataset.range(10)
evaluator = hooks_lib.InMemoryEvaluatorHook(estimator, input_fn)
with self.assertRaisesRegexp(ValueError, 'does not support custom init_fn'):
evaluator.begin()
def test_raise_error_with_saveables_other_than_global_variables(self):
def model_fn(features, labels, mode):
_, _ = features, labels
w = variables.Variable(
initial_value=[0.],
trainable=False,
collections=[ops.GraphKeys.SAVEABLE_OBJECTS])
init_op = control_flow_ops.group(
[w.initializer, training.get_global_step().initializer])
return estimator_lib.EstimatorSpec(
mode,
loss=constant_op.constant(3.),
scaffold=training.Scaffold(init_op=init_op),
train_op=constant_op.constant(5.),
eval_metric_ops={
'mean_of_features': metrics_lib.mean(constant_op.constant(2.))
})
estimator = estimator_lib.Estimator(model_fn=model_fn)
def input_fn():
return dataset_ops.Dataset.range(10)
evaluator = hooks_lib.InMemoryEvaluatorHook(estimator, input_fn)
with self.assertRaisesRegexp(ValueError, 'does not support saveables'):
estimator.train(input_fn, hooks=[evaluator])
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
kanagasabapathi/python-for-android | python3-alpha/python3-src/Lib/multiprocessing/__init__.py | 45 | 7743 | #
# Package analogous to 'threading.py' but using processes
#
# multiprocessing/__init__.py
#
# This package is intended to duplicate the functionality (and much of
# the API) of threading.py but uses processes instead of threads. A
# subpackage 'multiprocessing.dummy' has the same API but is a simple
# wrapper for 'threading'.
#
# Try calling `multiprocessing.doc.main()` to read the html
# documentation in in a webbrowser.
#
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__version__ = '0.70a1'
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger',
'allow_connection_pickling', 'BufferTooShort', 'TimeoutError',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Queue', 'JoinableQueue', 'Pool', 'Value', 'Array',
'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING',
]
__author__ = 'R. Oudkerk (r.m.oudkerk@gmail.com)'
#
# Imports
#
import os
import sys
from multiprocessing.process import Process, current_process, active_children
from multiprocessing.util import SUBDEBUG, SUBWARNING
#
# Exceptions
#
class ProcessError(Exception):
    """Base class for all exceptions defined by this package."""
    pass

class BufferTooShort(ProcessError):
    # NOTE(review): presumably raised when a receive buffer is too small
    # for an incoming message — confirm against multiprocessing.connection.
    pass

class TimeoutError(ProcessError):
    # NOTE(review): presumably raised when a blocking call times out —
    # confirm against the synchronize/pool implementations.
    pass

class AuthenticationError(ProcessError):
    # NOTE(review): presumably raised on connection authentication failure —
    # confirm against multiprocessing.connection.
    pass
import _multiprocessing
#
# Definitions not depending on native semaphores
#
def Manager():
    '''
    Returns a manager associated with a running server process

    The managers methods such as `Lock()`, `Condition()` and `Queue()`
    can be used to create shared objects.
    '''
    from multiprocessing.managers import SyncManager
    manager = SyncManager()
    # Spawn the server process before handing the manager to the caller.
    manager.start()
    return manager
def Pipe(duplex=True):
    '''
    Returns two connection object connected by a pipe
    '''
    from multiprocessing.connection import Pipe as _Pipe
    return _Pipe(duplex)
def cpu_count():
    '''
    Returns the number of CPUs in the system
    '''
    if sys.platform == 'win32':
        # Windows publishes the count through an environment variable.
        try:
            num = int(os.environ['NUMBER_OF_PROCESSORS'])
        except (ValueError, KeyError):
            num = 0
    elif 'bsd' in sys.platform or sys.platform == 'darwin':
        # BSD-style systems report it via sysctl; macOS keeps the binary
        # under /usr/sbin instead of /sbin.
        comm = '/sbin/sysctl -n hw.ncpu'
        if sys.platform == 'darwin':
            comm = '/usr' + comm
        try:
            with os.popen(comm) as p:
                num = int(p.read())
        except ValueError:
            num = 0
    else:
        # POSIX systems expose the count through sysconf.
        try:
            num = os.sysconf('SC_NPROCESSORS_ONLN')
        except (ValueError, OSError, AttributeError):
            num = 0
    if num >= 1:
        return num
    raise NotImplementedError('cannot determine number of cpus')
def freeze_support():
    '''
    Check whether this is a fake forked process in a frozen executable.
    If so then run code specified by commandline and exit.
    '''
    is_frozen = getattr(sys, 'frozen', False)
    if is_frozen and sys.platform == 'win32':
        from multiprocessing.forking import freeze_support
        freeze_support()
def get_logger():
    '''
    Return package logger -- if it does not already exist then it is created
    '''
    from multiprocessing.util import get_logger as _get_logger
    return _get_logger()
def log_to_stderr(level=None):
    '''
    Turn on logging and add a handler which prints to stderr
    '''
    from multiprocessing.util import log_to_stderr as _log_to_stderr
    return _log_to_stderr(level)
def allow_connection_pickling():
    '''
    Install support for sending connections and sockets between processes
    '''
    # Importing the module registers the necessary reducers as a side effect.
    from multiprocessing import reduction  # noqa: F401
#
# Definitions depending on native semaphores
#
def Lock():
    '''
    Returns a non-recursive lock object
    '''
    from multiprocessing.synchronize import Lock as _Lock
    return _Lock()
def RLock():
    '''
    Returns a recursive lock object
    '''
    from multiprocessing.synchronize import RLock as _RLock
    return _RLock()
def Condition(lock=None):
    '''
    Returns a condition object
    '''
    from multiprocessing.synchronize import Condition as _Condition
    return _Condition(lock)
def Semaphore(value=1):
    '''
    Returns a semaphore object
    '''
    from multiprocessing.synchronize import Semaphore as _Semaphore
    return _Semaphore(value)
def BoundedSemaphore(value=1):
    '''
    Returns a bounded semaphore object
    '''
    from multiprocessing.synchronize import BoundedSemaphore as _BoundedSemaphore
    return _BoundedSemaphore(value)
def Event():
    '''
    Returns an event object
    '''
    from multiprocessing.synchronize import Event as _Event
    return _Event()
def Queue(maxsize=0):
    '''
    Returns a queue object
    '''
    from multiprocessing.queues import Queue as _Queue
    return _Queue(maxsize)
def JoinableQueue(maxsize=0):
    '''
    Returns a queue object
    '''
    from multiprocessing.queues import JoinableQueue as _JoinableQueue
    return _JoinableQueue(maxsize)
def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None):
    '''
    Returns a process pool object
    '''
    from multiprocessing.pool import Pool as _Pool
    return _Pool(processes, initializer, initargs, maxtasksperchild)
def RawValue(typecode_or_type, *args):
    '''
    Returns a shared object
    '''
    from multiprocessing.sharedctypes import RawValue as _RawValue
    return _RawValue(typecode_or_type, *args)
def RawArray(typecode_or_type, size_or_initializer):
    '''
    Returns a shared array
    '''
    from multiprocessing.sharedctypes import RawArray as _RawArray
    return _RawArray(typecode_or_type, size_or_initializer)
def Value(typecode_or_type, *args, **kwds):
    '''
    Returns a synchronized shared object
    '''
    from multiprocessing.sharedctypes import Value as _Value
    return _Value(typecode_or_type, *args, **kwds)
def Array(typecode_or_type, size_or_initializer, **kwds):
    '''
    Returns a synchronized shared array
    '''
    from multiprocessing.sharedctypes import Array as _Array
    return _Array(typecode_or_type, size_or_initializer, **kwds)
#
#
#
# Windows-only API: child processes there are started by launching a fresh
# interpreter, so embedders may need to point at an explicit executable.
if sys.platform == 'win32':

    def set_executable(executable):
        '''
        Sets the path to a python.exe or pythonw.exe binary used to run
        child processes on Windows instead of sys.executable.
        Useful for people embedding Python.
        '''
        from multiprocessing.forking import set_executable
        set_executable(executable)

    __all__ += ['set_executable']
| apache-2.0 |
drexly/openhgsenti | lib/django/views/defaults.py | 339 | 3567 | from django import http
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.utils import six
from django.utils.encoding import force_text
from django.views.decorators.csrf import requires_csrf_token
# This can be called when CsrfViewMiddleware.process_view has not run,
# therefore need @requires_csrf_token in case the template needs
# {% csrf_token %}.
@requires_csrf_token
def page_not_found(request, exception, template_name='404.html'):
    """
    Default 404 handler.

    Templates: :template:`404.html`
    Context:
        request_path
            The path of the requested URL (e.g., '/app/pages/bad_page/')
        exception
            The message from the exception which triggered the 404 (if one was
            supplied), or the exception class name
    """
    # Prefer the exception's own message over its class name, but only when
    # it is plain text (and not e.g. the ugly Resolver404 dictionary).
    detail = exception.__class__.__name__
    try:
        first_arg = exception.args[0]
    except (AttributeError, IndexError):
        pass
    else:
        if isinstance(first_arg, six.text_type):
            detail = first_arg
    context = {
        'request_path': request.path,
        'exception': detail,
    }
    try:
        template = loader.get_template(template_name)
    except TemplateDoesNotExist:
        # No user template: fall back to a minimal built-in page.
        fallback = Engine().from_string(
            '<h1>Not Found</h1>'
            '<p>The requested URL {{ request_path }} was not found on this server.</p>')
        return http.HttpResponseNotFound(
            fallback.render(Context(context)), content_type='text/html')
    body = template.render(context, request)
    # content_type None lets Django apply DEFAULT_CONTENT_TYPE.
    return http.HttpResponseNotFound(body, content_type=None)
@requires_csrf_token
def server_error(request, template_name='500.html'):
    """
    500 error handler.

    Templates: :template:`500.html`
    Context: None
    """
    try:
        template = loader.get_template(template_name)
    except TemplateDoesNotExist:
        # Avoid a second failure while rendering the error page itself.
        return http.HttpResponseServerError(
            '<h1>Server Error (500)</h1>', content_type='text/html')
    return http.HttpResponseServerError(template.render())
@requires_csrf_token
def bad_request(request, exception, template_name='400.html'):
    """
    400 error handler.

    Templates: :template:`400.html`
    Context: None
    """
    try:
        template = loader.get_template(template_name)
    except TemplateDoesNotExist:
        return http.HttpResponseBadRequest(
            '<h1>Bad Request (400)</h1>', content_type='text/html')
    # No exception content is passed to the template, to not disclose any sensitive information.
    return http.HttpResponseBadRequest(template.render())
# This can be called when CsrfViewMiddleware.process_view has not run,
# therefore need @requires_csrf_token in case the template needs
# {% csrf_token %}.
@requires_csrf_token
def permission_denied(request, exception, template_name='403.html'):
    """
    Permission denied (403) handler.

    Templates: :template:`403.html`
    Context: None

    If the template does not exist, an Http403 response containing the text
    "403 Forbidden" (as per RFC 2616) will be returned.
    """
    try:
        template = loader.get_template(template_name)
    except TemplateDoesNotExist:
        return http.HttpResponseForbidden(
            '<h1>403 Forbidden</h1>', content_type='text/html')
    body = template.render(
        request=request, context={'exception': force_text(exception)})
    return http.HttpResponseForbidden(body)
| apache-2.0 |
candy7393/VTK | ThirdParty/Twisted/twisted/persisted/crefutil.py | 45 | 4496 | # -*- test-case-name: twisted.test.test_persisted -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Utility classes for dealing with circular references.
"""
import types
from twisted.python import log, reflect
class NotKnown:
    """
    Placeholder for an object that has not been fully deserialized yet.

    Mutable containers that should eventually hold the real object register
    themselves via C{addDependant}; once the object becomes available,
    C{resolveDependants} patches every registered (container, key) slot.
    """

    # Set once resolveDependants() has run.
    resolvedObject = None

    def __init__(self):
        self.dependants = []
        self.resolved = 0

    def addDependant(self, mutableObject, key):
        """Register C{mutableObject[key]} as a slot awaiting the real value."""
        assert not self.resolved
        self.dependants.append((mutableObject, key))

    def resolveDependants(self, newObject):
        """Store C{newObject} into every registered slot."""
        self.resolved = 1
        self.resolvedObject = newObject
        for container, key in self.dependants:
            container[key] = newObject
            # The replacement may itself still be unresolved; chain it.
            if isinstance(newObject, NotKnown):
                newObject.addDependant(container, key)

    def __hash__(self):
        assert 0, "I am not to be used as a dictionary key."
class _Container(NotKnown):
    """
    Helper class to resolve circular references on container objects.
    """

    def __init__(self, l, containerType):
        """
        @param l: The list of object which may contain some not yet referenced
            objects.

        @param containerType: A type of container objects (e.g., C{tuple} or
            C{set}).
        """
        NotKnown.__init__(self)
        self.containerType = containerType
        self.l = l
        # Indices of elements that are still unresolved.  Use an explicit
        # list: the original relied on Python 2's ``range`` returning a list
        # (``.remove`` below would fail on a py3 range object), and ``xrange``
        # does not exist on Python 3.  Behavior on Python 2 is unchanged.
        self.locs = list(range(len(l)))
        for idx in range(len(l)):
            if not isinstance(l[idx], NotKnown):
                self.locs.remove(idx)
            else:
                # Ask the placeholder to call our __setitem__ when resolved.
                l[idx].addDependant(self, idx)
        if not self.locs:
            self.resolveDependants(self.containerType(self.l))

    def __setitem__(self, n, obj):
        """
        Change the value of one contained objects, and resolve references if
        all objects have been referenced.
        """
        self.l[n] = obj
        if not isinstance(obj, NotKnown):
            self.locs.remove(n)
            if not self.locs:
                # Every slot is concrete: build the real container.
                self.resolveDependants(self.containerType(self.l))
class _Tuple(_Container):
    """
    Manage tuple containing circular references. Deprecated: use C{_Container}
    instead.
    """

    def __init__(self, l):
        """
        @param l: The list of object which may contain some not yet referenced
            objects.
        """
        # Specialization of _Container with the container type fixed to tuple.
        _Container.__init__(self, l, tuple)
class _InstanceMethod(NotKnown):
    """
    Placeholder for a bound method whose instance (im_self) has not been
    deserialized yet.
    """

    def __init__(self, im_name, im_self, im_class):
        NotKnown.__init__(self)
        self.my_class = im_class
        self.name = im_name
        # im_self _must_ be a NotKnown placeholder here: we register
        # ourselves so that __setitem__ fires once the instance resolves.
        im_self.addDependant(self, 0)

    def __call__(self, *args, **kw):
        # Should never run: being called means the instance was never
        # resolved.  Log diagnostics and fail loudly.
        import traceback
        log.msg('instance method %s.%s' % (reflect.qual(self.my_class), self.name))
        log.msg('being called with %r %r' % (args, kw))
        traceback.print_stack(file=log.logfile)
        assert 0

    def __setitem__(self, n, obj):
        # Called by the placeholder instance when it resolves (slot 0 only).
        assert n == 0, "only zero index allowed"
        if not isinstance(obj, NotKnown):
            # Build the bound method (Python 2 three-argument MethodType)
            # and propagate it to our own dependants.
            method = types.MethodType(self.my_class.__dict__[self.name],
                                      obj, self.my_class)
            self.resolveDependants(method)
class _DictKeyAndValue:
def __init__(self, dict):
self.dict = dict
def __setitem__(self, n, obj):
if n not in (1, 0):
raise RuntimeError("DictKeyAndValue should only ever be called with 0 or 1")
if n: # value
self.value = obj
else:
self.key = obj
if hasattr(self, "key") and hasattr(self, "value"):
self.dict[self.key] = self.value
class _Dereference(NotKnown):
    """
    Placeholder for an object referenced by identifier before the object
    itself has been seen.
    """

    def __init__(self, id):
        NotKnown.__init__(self)
        # Identifier of the not-yet-available object; its exact semantics
        # are defined by the caller that performs the lookup.
        self.id = id
from twisted.internet.defer import Deferred
class _Catcher:
    """Trivial sink that records a single value as C{self.value}."""

    def catch(self, value):
        # Remember the value for later inspection (suitable as a Deferred
        # callback).
        self.value = value
class _Defer(Deferred, NotKnown):
    """
    Placeholder whose value arrives through the Deferred machinery.

    The Deferred starts out paused so that its callback chain does not run
    until a dependant has been attached (see C{addDependant}).
    """

    def __init__(self):
        Deferred.__init__(self)
        NotKnown.__init__(self)
        # Hold back callbacks until addDependant() unpauses us.
        self.pause()

    # Class-level default; overwritten per-instance on first __setitem__.
    wasset = 0

    def __setitem__(self, n, obj):
        # Receiving the value: allowed exactly once, then fire the Deferred.
        if self.wasset:
            raise RuntimeError('setitem should only be called once, setting %r to %r' % (n, obj))
        else:
            self.wasset = 1
            self.callback(obj)

    def addDependant(self, dep, key):
        # by the time I'm adding a dependant, I'm *not* adding any more
        # callbacks
        NotKnown.addDependant(self, dep, key)
        # Unpausing lets the callback chain run so self.result is populated.
        self.unpause()
        # NOTE(review): local name is misspelled ("resovd"); harmless, left
        # unchanged here.
        resovd = self.result
        self.resolveDependants(resovd)
| bsd-3-clause |
esa/SpaceAMPL | interplanetary/impulsive/single_phase/include/writeequations.py | 1 | 13720 | import sys;
file = open("equations.inc","w")
file2 = open("writeinitialguess.inc","w")
file3 = open("writesolution.inc","w")
file4 = open("guesstangential.inc","w")
n=int(sys.argv[1]);
file.write("#------------------------------------------------------------------------\n")
file.write("#Optimisation Variables\n\n")
file.write("#Impulsive DVs\n")
file.write("var ux{i in 2..n-1};\n")
file.write("var uy{i in 2..n-1};\n")
file.write("var uz{i in 2..n-1};\n")
file.write("var uT{i in 2..n-1} = sqrt(ux[i]**2+uy[i]**2+uz[i]**2);\n\n")
file.write("#Starting VINF\n")
file.write("var VINFx:=0.0001;\n")
file.write("var VINFy:=0.0001;\n")
file.write("var VINFz:=0.0001;\n")
file.write("var VINF = sqrt(VINFx^2+VINFy^2+VINFz^2);\n\n")
file.write("#Ending VINF\n")
file.write("var VINFxf:=0.0001;\n")
file.write("var VINFyf:=0.0001;\n")
file.write("var VINFzf:=0.0001;\n")
file.write("var VINFf = sqrt(VINFxf^2+VINFyf^2+VINFzf^2);\n\n")
file.write("#Eccentric Anomaly Differences between nodes\n")
file.write("var DE{i in J};\n\n")
file.write("#Initial time\n")
file.write("var timod := tI * d2u * f, <= (tI+tbnd)*d2u*f, >= (tI-tbnd)*d2u*f; \n")
file.write("#Time of flight \n")
file.write("var tfmod := tT * d2u * f, <= (tT+tbnd)*d2u*f, >= (tT-tbnd)*d2u*f; \n")
file.write("#--------------------------------------------------------------------------\n\n")
file.write("#--------------------------------------------------------------------------\n")
file.write("#We here introduce some time variables that simplifies the formulas \n")
file.write("var ti = timod /f; #Initial time non dimensional\n")
file.write("var tf = tfmod /f; #Time of flight non dimensional\n")
file.write("var tF = ti/d2u + tf/d2u; #Arrival time (MJD2000)\n")
file.write("var dt = tf/(n-1); #Inter-node temporal separation\n")
file.write("#--------------------------------------------------------------------------\n\n")
file.write("#--------------------------------------------------------------------------\n")
file.write("#Planet ephemerides are set and evaluated in tI, tI+tT\n")
file.write("include include/ephcalc.inc;\n")
file.write("fix timod;\n")
file.write("fix tfmod;\n")
file.write("solve;\n")
file.write("unfix timod;\n")
file.write("unfix tfmod;\n")
file.write("#--------------------------------------------------------------------------\n\n\n\n")
file.write("#--------------------------------------------------------------------------\n")
file.write("# Node 1: Departure Node\n")
file.write("var x1 = x0;\n")
file.write("var y1 = y0;\n")
file.write("var z1 = z0;\n")
file.write("var dx1 = dx0 + VINFx;\n")
file.write("var dy1 = dy0 + VINFy;\n")
file.write("var dz1 = dz0 + VINFz;\n\n")
file.write("#Basic definitions\n")
file.write("var r1 = sqrt(x1^2+y1^2+z1^2);\n")
file.write("var v1 = sqrt(dx1^2+dy1^2+dz1^2);\n")
file.write("var a1 = 1 / (2/r1 - v1^2);\n")
file.write("var sigma1 = x1*dx1+y1*dy1+z1*dz1;\n")
file.write("var meanmotion1 = sqrt(1/a1^3);\n")
file.write("var DM1 = meanmotion1 * dt/2;\n\n")
file.write("#Lagrange Coefficients\n")
file.write("var rvar1 = a1 + (r1-a1)*cos(DE[1]) + sigma1*sqrt(a1)*sin(DE[1]);\n")
file.write("var F1 = 1 - a1/r1 * (1-cos(DE[1]));\n")
file.write("var G1 = a1*sigma1*(1-cos(DE[1])) + r1*sqrt(a1)*sin(DE[1]);\n")
file.write("var Ft1 = -sqrt(a1)/(r1*rvar1) * sin(DE[1]);\n")
file.write("var Gt1 = 1 - a1/rvar1*(1-cos(DE[1]));\n\n")
file.write("subject to KeplerEquations1: \n")
file.write(" DM1 - DE[1] - sigma1/sqrt(a1) * (1 - cos(DE[1])) + (1 - r1/a1)*sin(DE[1]) = 0;\n")
file.write("#--------------------------------------------------------------------------\n\n")
for i in range(2,n-1):
file.write("#--------------------------------------------------------------------------\n")
file.write("# Node " +str(i)+"\n")
file.write("var x" +str(i)+" = F" +str(i-1)+"*x" +str(i-1)+" + G" +str(i-1)+"*dx" +str(i-1)+";\n")
file.write("var y" +str(i)+" = F" +str(i-1)+"*y" +str(i-1)+" + G" +str(i-1)+"*dy" +str(i-1)+";\n")
file.write("var z" +str(i)+" = F" +str(i-1)+"*z" +str(i-1)+" + G" +str(i-1)+"*dz" +str(i-1)+";\n")
file.write("var dx" +str(i)+" = Ft" +str(i-1)+"*x" +str(i-1)+" + Gt" +str(i-1)+"*dx" +str(i-1)+" + ux[" +str(i)+"];\n")
file.write("var dy" +str(i)+" = Ft" +str(i-1)+"*y" +str(i-1)+" + Gt" +str(i-1)+"*dy" +str(i-1)+" + uy[" +str(i)+"];\n")
file.write("var dz" +str(i)+" = Ft" +str(i-1)+"*z" +str(i-1)+" + Gt" +str(i-1)+"*dz" +str(i-1)+" + uz[" +str(i)+"];\n\n")
file.write("#Basic definitions\n")
file.write("var r" +str(i)+" = sqrt(x" +str(i)+"^2+y" +str(i)+"^2+z" +str(i)+"^2);\n")
file.write("var v" +str(i)+" = sqrt(dx" +str(i)+"^2+dy" +str(i)+"^2+dz" +str(i)+"^2);\n")
file.write("var a" +str(i)+" = 1 / (2/r" +str(i)+" - v" +str(i)+"^2);\n")
file.write("var sigma" +str(i)+" = x" +str(i)+"*dx" +str(i)+"+y" +str(i)+"*dy" +str(i)+"+z" +str(i)+"*dz" +str(i)+";\n")
file.write("var meanmotion" +str(i)+" = sqrt(1/a" +str(i)+"^3);\n")
file.write("var DM" +str(i)+" = meanmotion" +str(i)+" * dt;\n\n")
file.write("#Lagrange Coefficients\n")
file.write("var rvar" +str(i)+" = a" +str(i)+" + (r" +str(i)+"-a" +str(i)+")*cos(DE[" +str(i)+"]) + sigma" +str(i)+"*sqrt(a" +str(i)+")*sin(DE[" +str(i)+"]);\n")
file.write("var F" +str(i)+" = 1 - a" +str(i)+"/r" +str(i)+" * (1-cos(DE[" +str(i)+"]));\n")
file.write("var G" +str(i)+" = a" +str(i)+"*sigma" +str(i)+"*(1-cos(DE[" +str(i)+"])) + r" +str(i)+"*sqrt(a" +str(i)+")*sin(DE[" +str(i)+"]);\n")
file.write("var Ft" +str(i)+" = -sqrt(a" +str(i)+")/(r" +str(i)+"*rvar" +str(i)+") * sin(DE[" +str(i)+"]);\n")
file.write("var Gt" +str(i)+" = 1 - a" +str(i)+"/rvar" +str(i)+"*(1-cos(DE[" +str(i)+"]));\n\n")
file.write("subject to KeplerEquations" +str(i)+": \n")
file.write(" DM" +str(i)+" - DE[" +str(i)+"] - sigma" +str(i)+"/sqrt(a" +str(i)+") * (1 - cos(DE[" +str(i)+"])) + (1 - r" +str(i)+"/a" +str(i)+")*sin(DE[" +str(i)+"]) = 0;\n")
file.write("#--------------------------------------------------------------------------\n\n")
i=n-1
file.write("#--------------------------------------------------------------------------\n")
file.write("# Node " +str(i)+"\n")
file.write("var x" +str(i)+" = F" +str(i-1)+"*x" +str(i-1)+" + G" +str(i-1)+"*dx" +str(i-1)+";\n")
file.write("var y" +str(i)+" = F" +str(i-1)+"*y" +str(i-1)+" + G" +str(i-1)+"*dy" +str(i-1)+";\n")
file.write("var z" +str(i)+" = F" +str(i-1)+"*z" +str(i-1)+" + G" +str(i-1)+"*dz" +str(i-1)+";\n")
file.write("var dx" +str(i)+" = Ft" +str(i-1)+"*x" +str(i-1)+" + Gt" +str(i-1)+"*dx" +str(i-1)+" + ux[" +str(i)+"];\n")
file.write("var dy" +str(i)+" = Ft" +str(i-1)+"*y" +str(i-1)+" + Gt" +str(i-1)+"*dy" +str(i-1)+" + uy[" +str(i)+"];\n")
file.write("var dz" +str(i)+" = Ft" +str(i-1)+"*z" +str(i-1)+" + Gt" +str(i-1)+"*dz" +str(i-1)+" + uz[" +str(i)+"];\n\n")
file.write("#Basic definitions\n")
file.write("var r" +str(i)+" = sqrt(x" +str(i)+"^2+y" +str(i)+"^2+z" +str(i)+"^2);\n")
file.write("var v" +str(i)+" = sqrt(dx" +str(i)+"^2+dy" +str(i)+"^2+dz" +str(i)+"^2);\n")
file.write("var a" +str(i)+" = 1 / (2/r" +str(i)+" - v" +str(i)+"^2);\n")
file.write("var sigma" +str(i)+" = x" +str(i)+"*dx" +str(i)+"+y" +str(i)+"*dy" +str(i)+"+z" +str(i)+"*dz" +str(i)+";\n")
file.write("var meanmotion" +str(i)+" = sqrt(1/a" +str(i)+"^3);\n")
file.write("var DM" +str(i)+" = meanmotion" +str(i)+" * dt/2;\n\n")
file.write("#Lagrange Coefficients\n")
file.write("var rvar" +str(i)+" = a" +str(i)+" + (r" +str(i)+"-a" +str(i)+")*cos(DE[" +str(i)+"]) + sigma" +str(i)+"*sqrt(a" +str(i)+")*sin(DE[" +str(i)+"]);\n")
file.write("var F" +str(i)+" = 1 - a" +str(i)+"/r" +str(i)+" * (1-cos(DE[" +str(i)+"]));\n")
file.write("var G" +str(i)+" = a" +str(i)+"*sigma" +str(i)+"*(1-cos(DE[" +str(i)+"])) + r" +str(i)+"*sqrt(a" +str(i)+")*sin(DE[" +str(i)+"]);\n")
file.write("var Ft" +str(i)+" = -sqrt(a" +str(i)+")/(r" +str(i)+"*rvar" +str(i)+") * sin(DE[" +str(i)+"]);\n")
file.write("var Gt" +str(i)+" = 1 - a" +str(i)+"/rvar" +str(i)+"*(1-cos(DE[" +str(i)+"]));\n\n")
file.write("subject to KeplerEquations" +str(i)+": \n")
file.write(" DM" +str(i)+" - DE[" +str(i)+"] - sigma" +str(i)+"/sqrt(a" +str(i)+") * (1 - cos(DE[" +str(i)+"])) + (1 - r" +str(i)+"/a" +str(i)+")*sin(DE[" +str(i)+"]) = 0;\n")
file.write("#--------------------------------------------------------------------------\n\n")
file.write("#--------------------------------------------------------------------------\n")
file.write("# Node n: Arrival node\n")
file.write("var xn = F" +str(n-1)+"*x" +str(n-1)+" + G" +str(n-1)+"*dx" +str(n-1)+";\n")
file.write("var yn = F" +str(n-1)+"*y" +str(n-1)+" + G" +str(n-1)+"*dy" +str(n-1)+";\n")
file.write("var zn = F" +str(n-1)+"*z" +str(n-1)+" + G" +str(n-1)+"*dz" +str(n-1)+";\n")
file.write("var dxn = Ft" +str(n-1)+"*x" +str(n-1)+" + Gt" +str(n-1)+"*dx" +str(n-1)+"+ VINFxf;\n")
file.write("var dyn = Ft" +str(n-1)+"*y" +str(n-1)+" + Gt" +str(n-1)+"*dy" +str(n-1)+"+ VINFyf;\n")
file.write("var dzn = Ft" +str(n-1)+"*z" +str(n-1)+" + Gt" +str(n-1)+"*dz" +str(n-1)+"+ VINFzf;\n\n")
file.write("#Basic definitions\n")
file.write("var rn = sqrt(xn^2+yn^2+zn^2);\n")
file.write("var vn = sqrt(dxn^2+dyn^2+dzn^2);\n")
file.write("var an = 1 / (2/rn - vn^2);\n")
file.write("#--------------------------------------------------------------------------\n\n")
file.write("#--------------------------------------------------------------------------\n")
file.write("#Match Constraint\n")
file.write("subject to \n")
file.write(" FinalPositionx : xn = xf;\n")
file.write(" FinalPositiony : yn = yf;\n")
file.write(" FinalPositionz : zn = zf;\n")
file.write(" FinalVelocityx : dxn = dxf;\n")
file.write(" FinalVelocityy : dyn = dyf;\n")
file.write(" FinalVelocityz : dzn = dzf;\n")
file.write("#--------------------------------------------------------------------------\n")
#file2.write("printf \"%17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e\\n\",x1,y1,z1,dx1,dy1,dz1,1,VINFx,VINFy,VINFz>out/InitialGuess.out;\n")
for i in range(2,n):
file2.write("printf \"%17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e\\n\\n\",x"+str(i)+",y"+str(i)+",z"+str(i)+",dx"+str(i)+",dy"+str(i)+",dz"+str(i)+",m["+str(i)+"],ux["+str(i)+"],uy["+str(i)+"],uz["+str(i)+"]>out/InitialGuess.out;\n")
#file2.write("printf \"%17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e\\n\",xn,yn,zn,dxn,dyn,dzn,m[n-1],VINFxf,VINFyf,VINFzf>out/InitialGuess.out;\n")
file2.write("close out/InitialGuess.out;")
#file3.write("printf \"%17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e\\n\",x1,y1,z1,dx1,dy1,dz1,1,VINFx,VINFy,VINFz>out/solution.out;\n")
for i in range(2,n):
file3.write("printf \"%17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e\\n\\n\",x"+str(i)+",y"+str(i)+",z"+str(i)+",dx"+str(i)+",dy"+str(i)+",dz"+str(i)+",m["+str(i)+"],ux["+str(i)+"],uy["+str(i)+"],uz["+str(i)+"]>out/solution.out;\n")
#file3.write("printf \"%17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e, %17.16e\\n\",xn,yn,zn,dxn,dyn,dzn,m[n-1],VINFxf,VINFyf,VINFzf>out/solution.out;\n")
file3.write("close out/solution.out;")
file4.write("let {i in 2..n-1} ux[i]:=Tmax*0.0000001;\n")
file4.write("let {i in 2..n-1} uy[i]:=Tmax*0.0000001;\n")
file4.write("let {i in 2..n-1} uz[i]:=Tmax*0.0000001;\n\n")
#Tangentialguess
file4.write("#--------------------------------------------------------------------------\n")
file4.write("#Initial Guess for the DE variables\n")
file4.write("let {i in J} DE[i] := DM1;\n")
file4.write("#-----------------------------------------------------------------------\n\n")
for i in range(2,n-1):
file4.write("let ux["+str(i)+"]:=dx"+str(i)+"/v"+str(i)+"*Tmax/2* tf/(n-1);\n")
file4.write("let uy["+str(i)+"]:=dy"+str(i)+"/v"+str(i)+"*Tmax/2* tf/(n-1);\n")
file4.write("let uz["+str(i)+"]:=dz"+str(i)+"/v"+str(i)+"*Tmax/2* tf/(n-1);\n")
file4.write("subject to\n")
file4.write(" thrustON{i in 2..n-1}: uT[i] <= Tmax*tf/(n-1);\n\n")
file4.write("minimize\n")
file4.write(" position: (xf-xn)^2+(yf-yn)^2+(zf-zn)^2+(dxf-dxn)^2+(dyf-dyn)^2+(dzf-dzn)^2;\n\n")
file4.write("drop FinalPositionx;\n")
file4.write("drop FinalPositiony;\n")
file4.write("drop FinalPositionz;\n")
file4.write("drop FinalVelocityx;\n")
file4.write("drop FinalVelocityy;\n")
file4.write("drop FinalVelocityz;\n")
file4.write("#--------------------------------------------------------------------------\n")
file4.write("solve;\n")
file4.write("#-----------------------------------------------------------------------\n")
file4.write("#--------------------------------------------------------------------------\n")
file4.write("#Print The Initial Guess x,y,z,dx,dy,dz,m,ux,uy,uz variables\n\n")
file4.write("param m{i in I} := 1;\n")
file4.write("include include/writeinitialguess.inc;\n")
file4.write("purge m;\n\n")
file4.write("#Print the initial and final times\n")
file4.write("printf \"%17.16e, %17.16e \\n\", ti/d2u , tF-ti/d2u > out/TimesGuess.out;\n")
file4.write("close out/TimesGuess.out;\n")
file4.write("#------------------------------------------------------------------------\n\n")
file4.write("#--------------------------------------------------------------------------\n")
file4.write("#Clean up\n")
file4.write("unfix timod;\n")
file4.write("unfix tfmod;\n")
file4.write("restore FinalPositionx;\n")
file4.write("restore FinalPositiony;\n")
file4.write("restore FinalPositionz;\n")
file4.write("restore FinalVelocityx;\n")
file4.write("restore FinalVelocityy;\n")
file4.write("restore FinalVelocityz;\n")
file4.write("drop thrustON;\n")
file4.write("drop position;\n")
| gpl-2.0 |
prakxys/flask | Work/TriviaMVA/TriviaMVA/env/Lib/site-packages/flask/json.py | 428 | 8113 | # -*- coding: utf-8 -*-
"""
flask.jsonimpl
~~~~~~~~~~~~~~
Implementation helpers for the JSON support in Flask.
:copyright: (c) 2012 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import io
import uuid
from datetime import datetime
from .globals import current_app, request
from ._compat import text_type, PY2
from werkzeug.http import http_date
from jinja2 import Markup
# Use the same json implementation as itsdangerous on which we
# depend anyways.
try:
from itsdangerous import simplejson as _json
except ImportError:
from itsdangerous import json as _json
# figure out if simplejson escapes slashes. This behavior was changed
# from one version to another without reason.
_slash_escape = '\\/' not in _json.dumps('/')
__all__ = ['dump', 'dumps', 'load', 'loads', 'htmlsafe_dump',
'htmlsafe_dumps', 'JSONDecoder', 'JSONEncoder',
'jsonify']
def _wrap_reader_for_text(fp, encoding):
if isinstance(fp.read(0), bytes):
fp = io.TextIOWrapper(io.BufferedReader(fp), encoding)
return fp
def _wrap_writer_for_text(fp, encoding):
try:
fp.write('')
except TypeError:
fp = io.TextIOWrapper(fp, encoding)
return fp
class JSONEncoder(_json.JSONEncoder):
    """The default Flask JSON encoder.

    Extends the simplejson encoder with support for ``uuid.UUID`` and
    ``datetime`` objects, plus anything exposing ``__html__`` (such as
    ``Markup``).  Datetimes are rendered as RFC 822 strings, the same
    format used for HTTP dates.  Override :meth:`default` to serialize
    additional data types.
    """
    def default(self, o):
        """Return a JSON-serializable representation of *o*, or defer to
        the base implementation (which raises ``TypeError``).

        To support e.g. arbitrary iterators a subclass could try
        ``iter(o)`` and return ``list(o)`` before delegating to
        ``JSONEncoder.default``.
        """
        if isinstance(o, uuid.UUID):
            return str(o)
        if isinstance(o, datetime):
            return http_date(o)
        if hasattr(o, '__html__'):
            return text_type(o.__html__())
        return _json.JSONEncoder.default(self, o)
class JSONDecoder(_json.JSONDecoder):
    """The default JSON decoder. This one does not change the behavior from
    the default simplejson decoder. Consult the :mod:`json` documentation
    for more information. This decoder is not only used for the load
    functions of this module but also :attr:`~flask.Request`.
    """
def _dump_arg_defaults(kwargs):
    """Fill *kwargs* with application-aware defaults for dump functions."""
    if not current_app:
        # Outside an application context fall back to module defaults.
        kwargs.setdefault('cls', JSONEncoder)
        kwargs.setdefault('sort_keys', True)
        return
    kwargs.setdefault('cls', current_app.json_encoder)
    if not current_app.config['JSON_AS_ASCII']:
        kwargs.setdefault('ensure_ascii', False)
    kwargs.setdefault('sort_keys', current_app.config['JSON_SORT_KEYS'])
def _load_arg_defaults(kwargs):
    """Fill *kwargs* with application-aware defaults for load functions."""
    decoder = current_app.json_decoder if current_app else JSONDecoder
    kwargs.setdefault('cls', decoder)
def dumps(obj, **kwargs):
    """Serialize *obj* to a JSON ``str`` using the application's configured
    encoder (:attr:`~flask.Flask.json_encoder`) when an application context
    is available.

    Returns unicode by default, or an encoded bytestring when an
    ``encoding`` keyword is given.  Whether the output is ascii-only is
    controlled by the ``JSON_AS_ASCII`` configuration variable and can be
    overridden with the simplejson ``ensure_ascii`` parameter.
    """
    _dump_arg_defaults(kwargs)
    encoding = kwargs.pop('encoding', None)
    serialized = _json.dumps(obj, **kwargs)
    if encoding is not None and isinstance(serialized, text_type):
        serialized = serialized.encode(encoding)
    return serialized
def dump(obj, fp, **kwargs):
    """Serialize *obj* as JSON into the file object *fp* (see :func:`dumps`)."""
    _dump_arg_defaults(kwargs)
    encoding = kwargs.pop('encoding', None)
    target = fp if encoding is None else _wrap_writer_for_text(fp, encoding)
    _json.dump(obj, target, **kwargs)
def loads(s, **kwargs):
    """Deserialize a JSON string *s* using the application's configured
    decoder (:attr:`~flask.Flask.json_decoder`) when an application context
    is available.
    """
    _load_arg_defaults(kwargs)
    if isinstance(s, bytes):
        # Bytes input: decode first, defaulting to UTF-8.
        encoding = kwargs.pop('encoding', None) or 'utf-8'
        s = s.decode(encoding)
    return _json.loads(s, **kwargs)
def load(fp, **kwargs):
    """Like :func:`loads` but reads from a file object."""
    _load_arg_defaults(kwargs)
    if not PY2:
        # On Python 3 ensure the stream yields text, defaulting to UTF-8.
        encoding = kwargs.pop('encoding', None) or 'utf-8'
        fp = _wrap_reader_for_text(fp, encoding)
    return _json.load(fp, **kwargs)
def htmlsafe_dumps(obj, **kwargs):
    """Serialize to JSON safely embeddable in ``<script>`` tags.

    Accepts the same arguments as :func:`dumps` and returns a JSON string
    in which ``<``, ``>``, ``&`` and ``'`` are replaced by unicode escape
    sequences.  This makes the result safe anywhere in HTML — including
    outside of ``<script>`` tags and in XHTML — with the notable exception
    of double quoted attributes; single quote your attributes or HTML
    escape additionally there.  Available in templates as the ``|tojson``
    filter, which also marks the result as safe.

    .. versionchanged:: 0.10
       The return value is now always safe for HTML usage except inside
       double quoted attributes.  Use ``|tojson|forceescape`` as an
       alternative when that matters.
    """
    escaped = dumps(obj, **kwargs)
    for needle, replacement in ((u'<', u'\\u003c'), (u'>', u'\\u003e'),
                                (u'&', u'\\u0026'), (u"'", u'\\u0027')):
        escaped = escaped.replace(needle, replacement)
    if not _slash_escape:
        escaped = escaped.replace('\\/', '/')
    return escaped
def htmlsafe_dump(obj, fp, **kwargs):
    """Like :func:`htmlsafe_dumps` but writes into a file object.

    Bug fix: the previous implementation called the ``unicode`` builtin,
    which only exists on Python 2 and raises ``NameError`` on Python 3.
    Use the ``text_type`` compatibility alias (already imported from
    ``._compat``) so the coercion works on both major versions.
    """
    fp.write(text_type(htmlsafe_dumps(obj, **kwargs)))
def jsonify(*args, **kwargs):
    """Return a :class:`~flask.Response` with the JSON representation of
    the given arguments (same call signature as the ``dict`` constructor)
    and an ``application/json`` mimetype.

    Example usage::

        from flask import jsonify

        @app.route('/_get_current_user')
        def get_current_user():
            return jsonify(username=g.user.username,
                           email=g.user.email,
                           id=g.user.id)

    This sends a JSON response such as::

        {
            "username": "admin",
            "email": "admin@localhost",
            "id": 42
        }

    For security reasons only objects are supported toplevel; see
    :ref:`json-security`.  The payload is pretty printed for requests not
    made with ``X-Requested-With: XMLHttpRequest`` unless the
    ``JSONIFY_PRETTYPRINT_REGULAR`` config parameter is set to false.

    .. versionadded:: 0.2
    """
    wants_pretty = (current_app.config['JSONIFY_PRETTYPRINT_REGULAR']
                    and not request.is_xhr)
    payload = dumps(dict(*args, **kwargs), indent=2 if wants_pretty else None)
    return current_app.response_class(payload,
                                      mimetype='application/json')
def tojson_filter(obj, **kwargs):
    """Jinja ``|tojson`` filter: HTML-safe JSON marked as safe markup."""
    safe_json = htmlsafe_dumps(obj, **kwargs)
    return Markup(safe_json)
| apache-2.0 |
tuxfux-hlp-notes/python-batches | batch-67/12-modules/myenv/lib/python2.7/site-packages/setuptools/command/rotate.py | 389 | 2164 | from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsOptionError
import os
import shutil
from setuptools.extern import six
from setuptools import Command
class rotate(Command):
    """Delete older distributions, keeping the N newest matching files."""
    # setuptools command metadata shown by ``setup.py --help-commands``.
    description = "delete older distributions, keeping N newest files"
    user_options = [
        ('match=', 'm', "patterns to match (required)"),
        ('dist-dir=', 'd', "directory where the distributions are"),
        ('keep=', 'k', "number of matching distributions to keep"),
    ]
    boolean_options = []
    def initialize_options(self):
        # All options start unset; finalize_options() validates and
        # normalizes them.
        self.match = None
        self.dist_dir = None
        self.keep = None
    def finalize_options(self):
        # --match is mandatory: without patterns there is nothing to rotate.
        if self.match is None:
            raise DistutilsOptionError(
                "Must specify one or more (comma-separated) match patterns "
                "(e.g. '.zip' or '.egg')"
            )
        if self.keep is None:
            raise DistutilsOptionError("Must specify number of files to keep")
        try:
            self.keep = int(self.keep)
        except ValueError:
            raise DistutilsOptionError("--keep must be an integer")
        # A comma-separated option string becomes a list of converted paths.
        if isinstance(self.match, six.string_types):
            self.match = [
                convert_path(p.strip()) for p in self.match.split(',')
            ]
        # Inherit dist_dir from the bdist command when not given explicitly.
        self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
    def run(self):
        self.run_command("egg_info")
        from glob import glob
        for pattern in self.match:
            # Restrict matches to this project's own distributions.
            pattern = self.distribution.get_name() + '*' + pattern
            files = glob(os.path.join(self.dist_dir, pattern))
            files = [(os.path.getmtime(f), f) for f in files]
            # Sort newest first; everything beyond the first `keep`
            # entries is deleted (honoring --dry-run).
            files.sort()
            files.reverse()
            log.info("%d file(s) matching %s", len(files), pattern)
            files = files[self.keep:]
            for (t, f) in files:
                log.info("Deleting %s", f)
                if not self.dry_run:
                    if os.path.isdir(f):
                        shutil.rmtree(f)
                    else:
                        os.unlink(f)
| gpl-3.0 |
KarolBedkowski/webmon | webmon/main.py | 1 | 16243 | #!/usr/bin/python3
"""
Main functions.
Copyright (c) Karol Będkowski, 2016-2018
This file is part of webmon.
Licence: GPLv2+
"""
import argparse
from concurrent import futures
import datetime
import imp
import locale
import logging
import os.path
import pprint
import time
import typing as ty
# import typecheck as tc
from . import (cache, common, comparators, config, filters, inputs,
logging_setup, outputs, metrics)
__author__ = "Karol Będkowski"
__copyright__ = "Copyright (c) Karol Będkowski, 2016-2018"
# Application identity used in the CLI description and report footer.
VERSION = "0.2"
APP_NAME = "webmon"
# Comparator used when an input does not configure `diff_mode` explicitly.
DEFAULT_DIFF_MODE = "ndiff"
_LOG = logging.getLogger("main")
# @tc.typecheck
def compare_contents(prev_content: str, content: str, ctx: common.Context,
                     result: common.Result) \
        -> ty.Tuple[bool, ty.Optional[str], ty.Optional[dict]]:
    """ Compare contents according to configuration.

    Returns (changed?, diff text, metadata dict holding the comparator's
    updated options) as produced by the configured comparator.
    """
    # pylint: disable=invalid-sequence-index
    opts = ctx.input_conf.get("diff_options")
    comparator = comparators.get_comparator(
        ctx.input_conf["diff_mode"] or DEFAULT_DIFF_MODE, opts)
    # Previous result may carry no update date; fall back to "now".
    update_date = result.meta.get('update_date') or time.time()
    compared, diff, new_meta = comparator.compare(
        prev_content, str(datetime.datetime.fromtimestamp(update_date)),
        content, str(datetime.datetime.now()), ctx, result.meta)
    # ctx.log_debug("compare: diff: %s", diff)
    return compared, diff, {'comparator_opts': new_meta}
# @tc.typecheck
def compare_content_new(content: str, ctx: common.Context,
                        result: common.Result) -> ty.Tuple[str, dict]:
    """Format brand-new content (no previous version exists to diff against).

    Returns the comparator's rendering of *content* and the metadata dict
    to persist alongside it.
    """
    # pylint: disable=invalid-sequence-index
    opts = ctx.input_conf.get("diff_options")
    comparator = comparators.get_comparator(
        ctx.input_conf["diff_mode"] or DEFAULT_DIFF_MODE, opts)
    diff, new_meta = comparator.new(
        content, str(datetime.datetime.now()), ctx, result.meta)
    return diff, {'comparator_opts': new_meta}
# @tc.typecheck
def check_last_error_time(ctx: common.Context) -> bool:
    """Tell whether loading should be skipped for now.

    True when the previous attempt failed and the configured
    `on_error_wait` interval has not elapsed yet.
    """
    last_error = ctx.metadata.get('last_error')
    wait_conf = ctx.input_conf.get('on_error_wait')
    if not last_error or not wait_conf:
        return False
    wait_secs = common.parse_interval(wait_conf)
    return time.time() < last_error + wait_secs
# @tc.typecheck
def load_content(loader, ctx: common.Context) -> common.Result:
    """ Load & filter content.

    Runs *loader*, pipes the result through every configured filter and
    annotates the result with timing/debug metadata.
    """
    start = time.time()
    # load list of parts
    result = loader.load()
    if ctx.debug:
        ctx.log_debug("loaded: %s", result)
    result.debug['loaded_duration'] = time.time() - start
    fltr_start = time.time()
    result.debug['items_loaded'] = len(result.items)
    result.debug['filters_status'] = {}
    # apply filters
    for fltcfg in ctx.input_conf.get('filters') or []:
        flt = filters.get_filter(fltcfg, ctx)
        if not flt:
            # Unknown filter: report it and keep processing the rest.
            ctx.log_error("missing filter: %s", fltcfg)
            continue
        result = flt.filter(result)
        if ctx.debug:
            ctx.log_debug("filtered by %s: %s", flt, pprint.saferepr(result))
        result.debug['filters_status'][flt.name] = len(result.items)
    if ctx.args.debug:
        result.meta['filter_duration'] = time.time() - fltr_start
    result.debug['items_filterd'] = len(result.items)
    result.meta['update_duration'] = time.time() - start
    result.meta['update_date'] = time.time()
    if not result.title:
        result.title = ctx.name
    if ctx.debug:
        ctx.log_debug("result: %s", result)
    return result
# @tc.typecheck
def process_content(ctx: common.Context, result: common.Result) \
        -> ty.Tuple[str, str, ty.Optional[dict], str]:
    """Detect content status (changes). Returns content formatted to
    write into cache.

    Returns (status, diff_result, new metadata, content after processing).
    On error the last two elements are None.
    """
    # pylint: disable=invalid-sequence-index
    status = result.status
    if status == common.STATUS_ERROR:
        err = result.meta['error']
        return common.STATUS_ERROR, err, None, None
    prev_content = ctx.cache.get(ctx.oid)
    content = result.format()
    if status == common.STATUS_UNCHANGED:
        # Loader already knows nothing changed; keep previous content/meta.
        ctx.log_debug("loading - unchanged content")
        new_meta = {'comparator_opts': ctx.metadata.get('comparator_opts')}
        return (common.STATUS_UNCHANGED, prev_content, new_meta, prev_content)
    if prev_content is None:
        # First successful load for this input.
        ctx.log_debug("loading - new content")
        content, new_meta = compare_content_new(content, ctx, result)
        return common.STATUS_NEW, content, new_meta, content
    new_meta = None
    if prev_content != content:
        # Raw text differs; the comparator decides if the change is real.
        ctx.log_debug("loading - changed content, making diff")
        diff_result, diff, new_meta = compare_contents(
            prev_content, content, ctx, result)
        if diff_result:
            return common.STATUS_CHANGED, diff, new_meta, content
    ctx.log_debug("loading - unchanged content. %r", new_meta)
    if new_meta is None:
        new_meta = {'comparator_opts': ctx.metadata.get('comparator_opts')}
    return (common.STATUS_UNCHANGED, prev_content, new_meta, content)
# @tc.typecheck
def create_error_result(ctx: common.Context, error_msg: str) \
        -> common.Result:
    """Build a Result for this input marked as failed with *error_msg*."""
    err_result = common.Result(ctx.oid, ctx.input_idx)
    err_result.set_error(error_msg)
    return err_result
# @tc.typecheck
def load(ctx: common.Context) -> bool:
    """ Load one input defined & configured by context.

    Returns True when the input was actually processed, False when the
    update was skipped (not due yet, or still within the error back-off).
    """
    ctx.log_debug("start loading")
    ctx.metadata = ctx.cache.get_meta(ctx.oid) or {}
    # find loader
    loader = inputs.get_input(ctx)
    # check, is update required
    if not ctx.args.force and not loader.need_update():
        ctx.log_info("no update required")
        return False
    if not ctx.args.force and check_last_error_time(ctx):
        ctx.log_info("waiting after error")
        return False
    # load
    ctx.log_info("loading...")
    try:
        result = load_content(loader, ctx)
    except common.InputError as err:
        ctx.log_exception("input error on %s: %r", err.input, err)
        ctx.log_debug("input error params: %s", err.input.dump_debug())
        result = create_error_result(ctx, str(err))
    except common.FilterError as err:
        ctx.log_exception("filter error on %s: %r", err.filter, err)
        ctx.log_debug("filter error params: %s", err.filter.dump_debug())
        result = create_error_result(ctx, str(err))
    if ctx.args.debug:
        result.debug['items_final'] = len(result.items)
        result.debug['last_updated'] = ctx.last_updated
    try:
        result.status, pres, new_meta, content = process_content(ctx, result)
    except Exception as err:  # pylint: disable=broad-except
        # Processing failed: retry once with an error result so the
        # failure itself is still reported and cached.
        ctx.log_exception("processing error: %r", err)
        result = create_error_result(ctx, str(err))
        result.status, pres, new_meta, content = process_content(ctx, result)
    if new_meta:
        result.meta.update(new_meta)
    # Unchanged results are reported only when explicitly requested.
    if result.status != common.STATUS_UNCHANGED or \
            ctx.input_conf.get("report_unchanged"):
        ctx.output.put(result, pres, ctx.input_conf)
    if content is not None:
        ctx.cache.put(ctx.oid, content)
    ctx.cache.put_meta(ctx.oid, result.meta)
    metrics.COLLECTOR.put_input(ctx, result)
    ctx.log_info("loading done")
    del loader
    return True
def _parse_options():
    """Build the command line parser and return the parsed arguments."""
    parser = argparse.ArgumentParser(description=APP_NAME + " " + VERSION)
    parser.add_argument('-i', '--inputs',
                        help='yaml file containing inputs definition'
                        ' (default inputs.yaml)')
    parser.add_argument('-c', '--config',
                        help='configuration filename (default config.yaml)')
    parser.add_argument("-s", "--silent", action="store_true",
                        help="show only errors and warnings")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="show additional informations")
    parser.add_argument("-d", "--debug", action="store_true",
                        help="print debug informations")
    parser.add_argument('--log',
                        help='log file name')
    parser.add_argument('--cache-dir',
                        default="~/.cache/" + APP_NAME,
                        help='path to cache directory')
    parser.add_argument("--force", action="store_true",
                        help="force update all sources")
    parser.add_argument("--diff-mode", choices=['ndiff', 'unified', 'context'],
                        help="default diff mode")
    parser.add_argument("--abilities", action="store_true",
                        help="show available filters/inputs/outputs/"
                        "comparators")
    parser.add_argument("--list-inputs", action="store_true",
                        help="show configured inputs")
    parser.add_argument("--sel", help="select (by idx, separated by comma) "
                        "inputs to update")
    parser.add_argument("--tasks", help="background task to launch",
                        type=int, default=2)
    return parser.parse_args()
def _show_abilities_cls(title, base_cls):
    """Print *title* followed by every named subclass of *base_cls* with
    its description and declared parameters."""
    print(title)
    row_fmt = " - {:<15s}\t{:<20s}\tdef={!r:<10}\treq={!r}"
    for name, cls in common.get_subclasses_with_name(base_cls):
        print(" -", name)
        if hasattr(cls, "description"):
            print(" " + cls.description)
        if hasattr(cls, "params") and cls.params:
            print(" Parameters:")
            for param in cls.params:
                print(row_fmt.format(*param))
    print()
def show_abilities():
    """Print every registered input, output, filter and comparator."""
    sections = (
        ("Inputs:", inputs.AbstractInput),
        ("Outputs:", outputs.AbstractOutput),
        ("Filters:", filters.AbstractFilter),
        ("Comparators:", comparators.AbstractComparator),
    )
    for title, base_cls in sections:
        _show_abilities_cls(title, base_cls)
def _load_user_classes():
    """Import user plugin modules from ``~/.local/share/webmon``.

    Every regular ``*.py`` file not starting with an underscore is loaded
    so user-defined inputs/filters/outputs can register themselves.

    Robustness fix: previously only ``ImportError`` was caught, so any
    other failure in a user script (e.g. ``SyntaxError`` or an exception
    raised at module level) crashed the whole application.  Plugin loading
    is best-effort — log the error and continue with the next file.
    """
    users_scripts_dir = os.path.expanduser("~/.local/share/webmon")
    if not os.path.isdir(users_scripts_dir):
        return
    for fname in os.listdir(users_scripts_dir):
        fpath = os.path.join(users_scripts_dir, fname)
        if not (os.path.isfile(fpath) and fname.endswith(".py")
                and not fname.startswith("_")):
            continue
        _LOG.debug("loading %r", fpath)
        try:
            imp.load_source(fname[:-3], fpath)
        except Exception as err:  # pylint: disable=broad-except
            _LOG.error("Importing '%s' error %s", fpath, err)
# @tc.typecheck
def _list_inputs(inps, conf, args):
    """Print a numbered list of configured inputs with enable state."""
    print("Inputs:")
    defaults = _build_defaults(args, conf)
    for idx, inp_conf in enumerate(inps, 1):
        # Apply defaults so the displayed name matches what load() would use.
        params = common.apply_defaults(defaults, inp_conf)
        name = config.get_input_name(params, idx)
        act = "" if params.get("enable", True) else "DISABLE"
        print(" {:2d} {:<40s} {}".format(idx, name, act))
# @tc.typecheck
def _list_inputs_dbg(inps, conf, args):
    """Print configured inputs with cache state, last/next update times
    and oid (verbose variant of ``_list_inputs``)."""
    try:
        gcache = cache.Cache(os.path.join(
            os.path.expanduser(args.cache_dir), "cache"))
    except IOError:
        _LOG.error("Init cache error")
        return
    print("Inputs:")
    defaults = _build_defaults(args, conf)
    for idx, inp_conf in enumerate(inps, 1):
        params = common.apply_defaults(defaults, inp_conf)
        ctx = common.Context(params, gcache, idx, None, args)
        ctx.metadata = ctx.cache.get_meta(ctx.oid) or {}
        if ctx.last_updated:
            last_update = time.strftime("%x %X",
                                        time.localtime(ctx.last_updated))
        else:
            last_update = 'never loaded'
        # The loader knows the input's update interval / schedule.
        loader = inputs.get_input(ctx)
        next_update_ts = loader.next_update()
        if next_update_ts:
            next_update = time.strftime(
                "%x %X", time.localtime(next_update_ts))
        else:
            next_update = 'now'
        print(" {:2d} {:<40s} {} last: {} next: {} {} {}".format(
            idx,
            config.get_input_name(params, idx),
            "ENB" if params.get("enable", True) else "DIS",
            last_update, next_update,
            ctx.metadata.get('status'),
            config.gen_input_oid(params)
        ))
def _build_defaults(args, conf):
    """Merge global defaults, configuration-file defaults and the CLI
    diff-mode option into a single defaults dict for inputs."""
    merged = dict(config.DEFAULTS)
    merged.update(conf.get("defaults") or {})
    merged["diff_mode"] = args.diff_mode
    return merged
def load_all(args, inps, conf, selection=None):
    """ Load all (or selected) inputs.

    Runs each input in a small thread pool, writes the combined report and
    metrics, and cleans stale cache entries after a full (non-selective)
    run.
    """
    metrics.configure(conf)
    start = time.time()
    try:
        gcache = cache.Cache(os.path.join(
            os.path.expanduser(args.cache_dir), "cache"))
    except IOError:
        _LOG.error("Init cache error")
        return
    partial_reports_dir = os.path.join(
        os.path.expanduser(args.cache_dir), "partials")
    try:
        output = outputs.OutputManager(conf, partial_reports_dir)
    except RuntimeError as err:
        _LOG.error("Init parts dir error: %s", err)
        return
    # defaults for inputs
    defaults = _build_defaults(args, conf)
    def task(idx, iconf):
        # Worker body: one input per task; errors are reported, not raised.
        params = common.apply_defaults(defaults, iconf)
        ctx = common.Context(params, gcache, idx, output, args)
        try:
            load(ctx)
        except Exception as err:  # pylint: disable=broad-except
            ctx.log_exception("loading error: %s", err)
            ctx.output.put_error(ctx, str(err))
        del ctx
    with futures.ThreadPoolExecutor(max_workers=args.tasks or 2) as ex:
        wait_for = [
            ex.submit(task, idx, iconf)
            for idx, iconf in enumerate(inps, 1)
            if not selection or idx in selection
        ]
        futures.wait(wait_for)
    _LOG.info("Loading: all done")
    metrics.COLLECTOR.put_loading_summary(time.time() - start)
    footer = " ".join((APP_NAME, VERSION, time.asctime()))
    output.write(footer=footer, debug=args.debug)
    # if processing all files - clean unused / old cache files
    if not selection:
        gcache.clean_cache()
    metrics.COLLECTOR.put_total(time.time() - start)
    metrics.COLLECTOR.write()
def check_libraries():
    """Warn about missing optional third-party libraries.

    Each library enables particular inputs/filters; webmon still runs
    without them, so a missing module is only reported, never fatal.

    Improvements over the previous version: the eight copy-pasted
    ``try/import`` blocks are collapsed into one data-driven loop, and the
    deprecated ``Logger.warn`` alias is replaced by ``Logger.warning``.
    """
    import importlib  # local import: only needed for this startup check
    # (module to import, library name shown in the warning)
    optional_libraries = (
        ("lxml.etree", "lxml"),
        ("cssselect", "cssselect"),
        ("html2text", "html2text"),
        ("docutils.core", "docutils"),
        ("yaml", "yaml"),
        ("requests", "requests"),
        ("feedparser", "feedparser"),
        ("github3", "github3"),
    )
    for module_name, library in optional_libraries:
        try:
            importlib.import_module(module_name)
        except ImportError:
            _LOG.warning("missing %s library", library)
def main():
    """Main function: parse CLI options and dispatch to the chosen mode
    (show abilities, list inputs, or load all/selected inputs)."""
    locale.setlocale(locale.LC_ALL, locale.getdefaultlocale())
    args = _parse_options()
    logging_setup.setup(args.log, args.debug, args.silent)
    check_libraries()
    # if not args.debug:
    # tc.disable()
    _load_user_classes()
    if args.abilities:
        show_abilities()
        return
    inps = config.load_inputs(args.inputs)
    if not inps:
        return
    conf = config.load_configuration(args.config)
    if not conf:
        return
    if args.list_inputs:
        # Lock so a concurrent run does not mutate the cache while listing.
        with config.lock():
            if args.verbose:
                _list_inputs_dbg(inps, conf, args)
            else:
                _list_inputs(inps, conf, args)
        return
    selection = None
    if args.sel:
        try:
            selection = set(int(idx.strip()) for idx in args.sel.split(","))
        except ValueError:
            _LOG.error("Invalid --sel parameter - expected numbers separated"
                       "by comma")
            return
    try:
        with config.lock():
            load_all(args, inps, conf, selection)
    except RuntimeError:
        pass
if __name__ == "__main__":
    main()
| gpl-2.0 |
Gillu13/scipy | scipy/optimize/_lsq/least_squares.py | 3 | 36471 | """Generic interface for least-square minimization."""
from warnings import warn
import numpy as np
from numpy.linalg import norm
from scipy.sparse import issparse, csr_matrix
from scipy.sparse.linalg import LinearOperator
from scipy.optimize import _minpack, OptimizeResult
from scipy.optimize._numdiff import approx_derivative, group_columns
from scipy._lib.six import string_types
from .trf import trf
from .dogbox import dogbox
from .common import EPS, in_bounds, make_strictly_feasible
# Human-readable explanation for each common termination status code.
TERMINATION_MESSAGES = {
    -1: "Improper input parameters status returned from `leastsq`",
    0: "The maximum number of function evaluations is exceeded.",
    1: "`gtol` termination condition is satisfied.",
    2: "`ftol` termination condition is satisfied.",
    3: "`xtol` termination condition is satisfied.",
    4: "Both `ftol` and `xtol` termination conditions are satisfied."
}
# Translation of MINPACK `info` codes into the common status codes above.
FROM_MINPACK_TO_COMMON = {
    0: -1,  # Improper input parameters from MINPACK.
    1: 2,
    2: 3,
    3: 4,
    4: 1,
    5: 0
    # There are 6, 7, 8 for too small tolerance parameters,
    # but we guard against it by checking ftol, xtol, gtol beforehand.
}
def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step):
    """Run MINPACK's Levenberg-Marquardt (lmdif/lmder) and repackage the
    answer as an OptimizeResult with the fields the common interface uses.
    """
    n = x0.size
    # MINPACK's epsfcn approximates the squared relative step size.
    if diff_step is None:
        epsfcn = EPS
    else:
        epsfcn = diff_step**2
    # Compute MINPACK's `diag`, which is inverse of our `x_scale` and
    # ``x_scale='jac'`` corresponds to ``diag=None``.
    if isinstance(x_scale, string_types) and x_scale == 'jac':
        diag = None
    else:
        diag = 1 / x_scale
    full_output = True
    col_deriv = False
    factor = 100.0
    if jac is None:
        # No analytic Jacobian: use lmdif (forward differences inside MINPACK).
        if max_nfev is None:
            # n squared to account for Jacobian evaluations.
            max_nfev = 100 * n * (n + 1)
        x, info, status = _minpack._lmdif(
            fun, x0, (), full_output, ftol, xtol, gtol,
            max_nfev, epsfcn, factor, diag)
    else:
        if max_nfev is None:
            max_nfev = 100 * n
        x, info, status = _minpack._lmder(
            fun, jac, x0, (), full_output, col_deriv,
            ftol, xtol, gtol, max_nfev, factor, diag)
    f = info['fvec']
    # Recompute J at the solution (numerically when jac was not callable)
    # to provide gradient and optimality measures in the result.
    if callable(jac):
        J = jac(x)
    else:
        J = np.atleast_2d(approx_derivative(fun, x))
    cost = 0.5 * np.dot(f, f)
    g = J.T.dot(f)
    g_norm = norm(g, ord=np.inf)
    nfev = info['nfev']
    njev = info.get('njev', None)
    status = FROM_MINPACK_TO_COMMON[status]
    # MINPACK is unconstrained, hence no active bounds.
    active_mask = np.zeros_like(x0, dtype=int)
    return OptimizeResult(
        x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm,
        active_mask=active_mask, nfev=nfev, njev=njev, status=status)
def prepare_bounds(bounds, n):
    """Broadcast a (lb, ub) bounds pair to float arrays of length *n*."""
    lb, ub = (np.asarray(b, dtype=float) for b in bounds)
    if lb.ndim == 0:
        lb = np.resize(lb, n)
    if ub.ndim == 0:
        ub = np.resize(ub, n)
    return lb, ub
def check_tolerance(ftol, xtol, gtol):
    """Clip each tolerance to machine epsilon, warning when one is raised."""
    def _clip(value, name):
        if value >= EPS:
            return value
        warn("{} is too low, setting to machine epsilon {}.".format(name, EPS))
        return EPS
    return _clip(ftol, "`ftol`"), _clip(xtol, "`xtol`"), _clip(gtol, "`gtol`")
def check_x_scale(x_scale, x0):
    """Validate `x_scale`: the string 'jac' or positive finite numbers
    broadcastable to the shape of `x0`."""
    if isinstance(x_scale, string_types) and x_scale == 'jac':
        return x_scale
    try:
        scale = np.asarray(x_scale, dtype=float)
        valid = np.all(np.isfinite(scale)) and np.all(scale > 0)
    except (ValueError, TypeError):
        valid = False
    if not valid:
        raise ValueError("`x_scale` must be 'jac' or array_like with "
                         "positive numbers.")
    if scale.ndim == 0:
        # Scalar: broadcast to the shape of the variables vector.
        scale = np.resize(scale, x0.shape)
    if scale.shape != x0.shape:
        raise ValueError("Inconsistent shapes between `x_scale` and `x0`.")
    return scale
def check_jac_sparsity(jac_sparsity, m, n):
    """Validate a Jacobian sparsity structure against shape (m, n) and
    precompute its column groups for finite differencing."""
    if jac_sparsity is None:
        return None
    if not issparse(jac_sparsity):
        jac_sparsity = np.atleast_2d(jac_sparsity)
    if jac_sparsity.shape != (m, n):
        raise ValueError("`jac_sparsity` has wrong shape.")
    groups = group_columns(jac_sparsity)
    return jac_sparsity, groups
# Loss functions.
def huber(z, rho, cost_only):
    """Huber loss; value and first two derivatives written into *rho*."""
    small = z <= 1
    large = ~small
    rho[0, small] = z[small]
    rho[0, large] = 2 * z[large]**0.5 - 1
    if cost_only:
        return
    rho[1, small] = 1
    rho[1, large] = z[large]**-0.5
    rho[2, small] = 0
    rho[2, large] = -0.5 * z[large]**-1.5
def soft_l1(z, rho, cost_only):
    """Smooth l1 approximation; derivatives written into *rho*."""
    shifted = z + 1
    rho[0] = 2 * (shifted**0.5 - 1)
    if cost_only:
        return
    rho[1] = shifted**-0.5
    rho[2] = -0.5 * shifted**-1.5
def cauchy(z, rho, cost_only):
    """Cauchy (Lorentzian) loss; derivatives written into *rho*."""
    rho[0] = np.log1p(z)
    if cost_only:
        return
    denom = 1 + z
    rho[1] = 1 / denom
    rho[2] = -1 / denom**2
def arctan(z, rho, cost_only):
    """Arctan loss capping residual influence; derivatives into *rho*."""
    rho[0] = np.arctan(z)
    if cost_only:
        return
    denom = 1 + z**2
    rho[1] = 1 / denom
    rho[2] = -2 * z / denom**2
# Registry mapping the `loss` string argument to its implementation;
# 'linear' maps to None, meaning no robust loss is applied.
IMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1,
                          cauchy=cauchy, arctan=arctan)
def construct_loss_function(m, loss, f_scale):
    """Return a callable evaluating rho(f) and its derivatives, scaled by
    `f_scale`, or None for the plain 'linear' loss.

    The returned function maps residuals f -> rho array of shape (3, m)
    (value, first and second derivative), or a scalar cost when called
    with ``cost_only=True``.
    """
    if loss == 'linear':
        return None
    if not callable(loss):
        loss = IMPLEMENTED_LOSSES[loss]
        # Built-in losses write into a preallocated (3, m) buffer.
        rho = np.empty((3, m))
        def loss_function(f, cost_only=False):
            z = (f / f_scale) ** 2
            loss(z, rho, cost_only=cost_only)
            if cost_only:
                return 0.5 * f_scale ** 2 * np.sum(rho[0])
            # Undo the f_scale normalization on value and 2nd derivative.
            rho[0] *= f_scale ** 2
            rho[2] /= f_scale ** 2
            return rho
    else:
        def loss_function(f, cost_only=False):
            z = (f / f_scale) ** 2
            # User-supplied losses return their own (3, m) array.
            rho = loss(z)
            if cost_only:
                return 0.5 * f_scale ** 2 * np.sum(rho[0])
            rho[0] *= f_scale ** 2
            rho[2] /= f_scale ** 2
            return rho
    return loss_function
def least_squares(
        fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf',
        ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear',
        f_scale=1.0, diff_step=None, tr_solver=None, tr_options={},
        jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={}):
    """Solve a nonlinear least-squares problem with bounds on the variables.
    Given the residuals f(x) (an m-dimensional function of n variables) and
    the loss function rho(s) (a scalar function), `least_squares` finds a
    local minimum of the cost function F(x)::
        minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1)
        subject to lb <= x <= ub
    The purpose of the loss function rho(s) is to reduce the influence of
    outliers on the solution.
    Parameters
    ----------
    fun : callable
        Function which computes the vector of residuals, with the signature
        ``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with
        respect to its first argument. The argument ``x`` passed to this
        function is an ndarray of shape (n,) (never a scalar, even for n=1).
        It must return a 1-d array_like of shape (m,) or a scalar.
    x0 : array_like with shape (n,) or float
        Initial guess on independent variables. If float, it will be treated
        as a 1-d array with one element.
    jac : {'2-point', '3-point', 'cs', callable}, optional
        Method of computing the Jacobian matrix (an m-by-n matrix, where
        element (i, j) is the partial derivative of f[i] with respect to
        x[j]). The keywords select a finite difference scheme for numerical
        estimation. The scheme '3-point' is more accurate, but requires
        twice as much operations compared to '2-point' (default). The
        scheme 'cs' uses complex steps, and while potentially the most
        accurate, it is applicable only when `fun` correctly handles
        complex inputs and can be analytically continued to the complex
        plane. Method 'lm' always uses the '2-point' scheme. If callable,
        it is used as ``jac(x, *args, **kwargs)`` and should return a
        good approximation (or the exact value) for the Jacobian as an
        array_like (np.atleast_2d is applied), a sparse matrix or a
        `scipy.sparse.linalg.LinearOperator`.
    bounds : 2-tuple of array_like, optional
        Lower and upper bounds on independent variables. Defaults to no bounds.
        Each array must match the size of `x0` or be a scalar, in the latter
        case a bound will be the same for all variables. Use ``np.inf`` with
        an appropriate sign to disable bounds on all or some variables.
    method : {'trf', 'dogbox', 'lm'}, optional
        Algorithm to perform minimization.
            * 'trf' : Trust Region Reflective algorithm, particularly suitable
              for large sparse problems with bounds. Generally robust method.
            * 'dogbox' : dogleg algorithm with rectangular trust regions,
              typical use case is small problems with bounds. Not recommended
              for problems with rank-deficient Jacobian.
            * 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK.
              Doesn't handle bounds and sparse Jacobians. Usually the most
              efficient method for small unconstrained problems.
        Default is 'trf'. See Notes for more information.
    ftol : float, optional
        Tolerance for termination by the change of the cost function. Default
        is 1e-8. The optimization process is stopped when ``dF < ftol * F``,
        and there was an adequate agreement between a local quadratic model and
        the true model in the last step.
    xtol : float, optional
        Tolerance for termination by the change of the independent variables.
        Default is 1e-8. The exact condition depends on the `method` used:
            * For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``
            * For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is
              a trust-region radius and ``xs`` is the value of ``x``
              scaled according to `x_scale` parameter (see below).
    gtol : float, optional
        Tolerance for termination by the norm of the gradient. Default is 1e-8.
        The exact condition depends on a `method` used:
            * For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where
              ``g_scaled`` is the value of the gradient scaled to account for
              the presence of the bounds [STIR]_.
            * For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where
              ``g_free`` is the gradient with respect to the variables which
              are not in the optimal state on the boundary.
            * For 'lm' : the maximum absolute value of the cosine of angles
              between columns of the Jacobian and the residual vector is less
              than `gtol`, or the residual vector is zero.
    x_scale : array_like or 'jac', optional
        Characteristic scale of each variable. Setting `x_scale` is equivalent
        to reformulating the problem in scaled variables ``xs = x / x_scale``.
        An alternative view is that the size of a trust region along j-th
        dimension is proportional to ``x_scale[j]``. Improved convergence may
        be achieved by setting `x_scale` such that a step of a given size
        along any of the scaled variables has a similar effect on the cost
        function. If set to 'jac', the scale is iteratively updated using the
        inverse norms of the columns of the Jacobian matrix (as described in
        [JJMore]_).
    loss : str or callable, optional
        Determines the loss function. The following keyword values are allowed:
            * 'linear' (default) : ``rho(z) = z``. Gives a standard
              least-squares problem.
            * 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth
              approximation of l1 (absolute value) loss. Usually a good
              choice for robust least squares.
            * 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works
              similarly to 'soft_l1'.
            * 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens outliers
              influence, but may cause difficulties in optimization process.
            * 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on
              a single residual, has properties similar to 'cauchy'.
        If callable, it must take a 1-d ndarray ``z=f**2`` and return an
        array_like with shape (3, m) where row 0 contains function values,
        row 1 contains first derivatives and row 2 contains second
        derivatives. Method 'lm' supports only 'linear' loss.
    f_scale : float, optional
        Value of soft margin between inlier and outlier residuals, default
        is 1.0. The loss function is evaluated as follows
        ``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`,
        and ``rho`` is determined by `loss` parameter. This parameter has
        no effect with ``loss='linear'``, but for other `loss` values it is
        of crucial importance.
    max_nfev : None or int, optional
        Maximum number of function evaluations before the termination.
        If None (default), the value is chosen automatically:
            * For 'trf' and 'dogbox' : 100 * n.
            * For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1)
              otherwise (because 'lm' counts function calls in Jacobian
              estimation).
    diff_step : None or array_like, optional
        Determines the relative step size for the finite difference
        approximation of the Jacobian. The actual step is computed as
        ``x * diff_step``. If None (default), then `diff_step` is taken to be
        a conventional "optimal" power of machine epsilon for the finite
        difference scheme used [NR]_.
    tr_solver : {None, 'exact', 'lsmr'}, optional
        Method for solving trust-region subproblems, relevant only for 'trf'
        and 'dogbox' methods.
            * 'exact' is suitable for not very large problems with dense
              Jacobian matrices. The computational complexity per iteration is
              comparable to a singular value decomposition of the Jacobian
              matrix.
            * 'lsmr' is suitable for problems with sparse and large Jacobian
              matrices. It uses the iterative procedure
              `scipy.sparse.linalg.lsmr` for finding a solution of a linear
              least-squares problem and only requires matrix-vector product
              evaluations.
        If None (default) the solver is chosen based on the type of Jacobian
        returned on the first iteration.
    tr_options : dict, optional
        Keyword options passed to trust-region solver.
            * ``tr_solver='exact'``: `tr_options` are ignored.
            * ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.
              Additionally  ``method='trf'`` supports  'regularize' option
              (bool, default is True) which adds a regularization term to the
              normal equation, which improves convergence if the Jacobian is
              rank-deficient [Byrd]_ (eq. 3.4).
    jac_sparsity : {None, array_like, sparse matrix}, optional
        Defines the sparsity structure of the Jacobian matrix for finite
        difference estimation, its shape must be (m, n). If the Jacobian has
        only few non-zero elements in *each* row, providing the sparsity
        structure will greatly speed up the computations [Curtis]_. A zero
        entry means that a corresponding element in the Jacobian is identically
        zero. If provided, forces the use of 'lsmr' trust-region solver.
        If None (default) then dense differencing will be used. Has no effect
        for 'lm' method.
    verbose : {0, 1, 2}, optional
        Level of algorithm's verbosity:
            * 0 (default) : work silently.
            * 1 : display a termination report.
            * 2 : display progress during iterations (not supported by 'lm'
              method).
    args, kwargs : tuple and dict, optional
        Additional arguments passed to `fun` and `jac`. Both empty by default.
        The calling signature is ``fun(x, *args, **kwargs)`` and the same for
        `jac`.
    Returns
    -------
    `OptimizeResult` with the following fields defined:
    x : ndarray, shape (n,)
        Solution found.
    cost : float
        Value of the cost function at the solution.
    fun : ndarray, shape (m,)
        Vector of residuals at the solution.
    jac : ndarray, sparse matrix or LinearOperator, shape (m, n)
        Modified Jacobian matrix at the solution, in the sense that J^T J
        is a Gauss-Newton approximation of the Hessian of the cost function.
        The type is the same as the one used by the algorithm.
    grad : ndarray, shape (n,)
        Gradient of the cost function at the solution.
    optimality : float
        First-order optimality measure. In unconstrained problems, it is always
        the uniform norm of the gradient. In constrained problems, it is the
        quantity which was compared with `gtol` during iterations.
    active_mask : ndarray of int, shape (n,)
        Each component shows whether a corresponding constraint is active
        (that is, whether a variable is at the bound):
            *  0 : a constraint is not active.
            * -1 : a lower bound is active.
            *  1 : an upper bound is active.
        Might be somewhat arbitrary for 'trf' method as it generates a sequence
        of strictly feasible iterates and `active_mask` is determined within a
        tolerance threshold.
    nfev : int
        Number of function evaluations done. Methods 'trf' and 'dogbox' do not
        count function calls for numerical Jacobian approximation, as opposed
        to 'lm' method.
    njev : int or None
        Number of Jacobian evaluations done. If numerical Jacobian
        approximation is used in 'lm' method, it is set to None.
    status : int
        The reason for algorithm termination:
            * -1 : improper input parameters status returned from MINPACK.
            *  0 : the maximum number of function evaluations is exceeded.
            *  1 : `gtol` termination condition is satisfied.
            *  2 : `ftol` termination condition is satisfied.
            *  3 : `xtol` termination condition is satisfied.
            *  4 : Both `ftol` and `xtol` termination conditions are satisfied.
    message : str
        Verbal description of the termination reason.
    success : bool
        True if one of the convergence criteria is satisfied (`status` > 0).
    See Also
    --------
    leastsq : A legacy wrapper for the MINPACK implementation of the
              Levenberg-Marquadt algorithm.
    curve_fit : Least-squares minimization applied to a curve fitting problem.
    Notes
    -----
    Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares
    algorithms implemented in MINPACK (lmder, lmdif). It runs the
    Levenberg-Marquardt algorithm formulated as a trust-region type algorithm.
    The implementation is based on paper [JJMore]_, it is very robust and
    efficient with a lot of smart tricks. It should be your first choice
    for unconstrained problems. Note that it doesn't support bounds. Also
    it doesn't work when m < n.
    Method 'trf' (Trust Region Reflective) is motivated by the process of
    solving a system of equations, which constitute the first-order optimality
    condition for a bound-constrained minimization problem as formulated in
    [STIR]_. The algorithm iteratively solves trust-region subproblems
    augmented by a special diagonal quadratic term and with trust-region shape
    determined by the distance from the bounds and the direction of the
    gradient. This enhancements help to avoid making steps directly into bounds
    and efficiently explore the whole space of variables. To further improve
    convergence, the algorithm considers search directions reflected from the
    bounds. To obey theoretical requirements, the algorithm keeps iterates
    strictly feasible. With dense Jacobians trust-region subproblems are
    solved by an exact method very similar to the one described in [JJMore]_
    (and implemented in MINPACK). The difference from the MINPACK
    implementation is that a singular value decomposition of a Jacobian
    matrix is done once per iteration, instead of a QR decomposition and series
    of Givens rotation eliminations. For large sparse Jacobians a 2-d subspace
    approach of solving trust-region subproblems is used [STIR]_, [Byrd]_.
    The subspace is spanned by a scaled gradient and an approximate
    Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no
    constraints are imposed the algorithm is very similar to MINPACK and has
    generally comparable performance. The algorithm works quite robust in
    unbounded and bounded problems, thus it is chosen as a default algorithm.
    Method 'dogbox' operates in a trust-region framework, but considers
    rectangular trust regions as opposed to conventional ellipsoids [Voglis]_.
    The intersection of a current trust region and initial bounds is again
    rectangular, so on each iteration a quadratic minimization problem subject
    to bound constraints is solved approximately by Powell's dogleg method
    [NumOpt]_. The required Gauss-Newton step can be computed exactly for
    dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large
    sparse Jacobians. The algorithm is likely to exhibit slow convergence when
    the rank of Jacobian is less than the number of variables. The algorithm
    often outperforms 'trf' in bounded problems with a small number of
    variables.
    Robust loss functions are implemented as described in [BA]_. The idea
    is to modify a residual vector and a Jacobian matrix on each iteration
    such that computed gradient and Gauss-Newton Hessian approximation match
    the true gradient and Hessian approximation of the cost function. Then
    the algorithm proceeds in a normal way, i.e. robust loss functions are
    implemented as a simple wrapper over standard least-squares algorithms.
    .. versionadded:: 0.17.0
    References
    ----------
    .. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
              and Conjugate Gradient Method for Large-Scale Bound-Constrained
              Minimization Problems," SIAM Journal on Scientific Computing,
              Vol. 21, Number 1, pp 1-23, 1999.
    .. [NR] William H. Press et. al., "Numerical Recipes. The Art of Scientific
            Computing. 3rd edition", Sec. 5.7.
    .. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, "Approximate
              solution of the trust region problem by minimization over
              two-dimensional subspaces", Math. Programming, 40, pp. 247-263,
              1988.
    .. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
                sparse Jacobian matrices", Journal of the Institute of
                Mathematics and its Applications, 13, pp. 117-120, 1974.
    .. [JJMore] J. J. More, "The Levenberg-Marquardt Algorithm: Implementation
                and Theory," Numerical Analysis, ed. G. A. Watson, Lecture
                Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
    .. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region
                Dogleg Approach for Unconstrained and Bound Constrained
                Nonlinear Optimization", WSEAS International Conference on
                Applied Mathematics, Corfu, Greece, 2004.
    .. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization,
                2nd edition", Chapter 4.
    .. [BA] B. Triggs et. al., "Bundle Adjustment - A Modern Synthesis",
            Proceedings of the International Workshop on Vision Algorithms:
            Theory and Practice, pp. 298-372, 1999.
    Examples
    --------
    In this example we find a minimum of the Rosenbrock function without bounds
    on independed variables.
    >>> def fun_rosenbrock(x):
    ...     return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])
    Notice that we only provide the vector of the residuals. The algorithm
    constructs the cost function as a sum of squares of the residuals, which
    gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``.
    >>> from scipy.optimize import least_squares
    >>> x0_rosenbrock = np.array([2, 2])
    >>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock)
    >>> res_1.x
    array([ 1.,  1.])
    >>> res_1.cost
    9.8669242910846867e-30
    >>> res_1.optimality
    8.8928864934219529e-14
    We now constrain the variables, in such a way that the previous solution
    becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, and
    ``x[0]`` left unconstrained. To this end, we specify the `bounds` parameter
    to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``.
    We also provide the analytic Jacobian:
    >>> def jac_rosenbrock(x):
    ...     return np.array([
    ...         [-20 * x[0], 10],
    ...         [-1, 0]])
    Putting this all together, we see that the new solution lies on the bound:
    >>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock,
    ...                       bounds=([-np.inf, 1.5], np.inf))
    >>> res_2.x
    array([ 1.22437075,  1.5       ])
    >>> res_2.cost
    0.025213093946805685
    >>> res_2.optimality
    1.5885401433157753e-07
    Now we solve a system of equations (i.e., the cost function should be zero
    at a minimum) for a Broyden tridiagonal vector-valued function of 100000
    variables:
    >>> def fun_broyden(x):
    ...     f = (3 - x) * x + 1
    ...     f[1:] -= x[:-1]
    ...     f[:-1] -= 2 * x[1:]
    ...     return f
    The corresponding Jacobian matrix is sparse. We tell the algorithm to
    estimate it by finite differences and provide the sparsity structure of
    Jacobian to significantly speed up this process.
    >>> from scipy.sparse import lil_matrix
    >>> def sparsity_broyden(n):
    ...     sparsity = lil_matrix((n, n), dtype=int)
    ...     i = np.arange(n)
    ...     sparsity[i, i] = 1
    ...     i = np.arange(1, n)
    ...     sparsity[i, i - 1] = 1
    ...     i = np.arange(n - 1)
    ...     sparsity[i, i + 1] = 1
    ...     return sparsity
    ...
    >>> n = 100000
    >>> x0_broyden = -np.ones(n)
    ...
    >>> res_3 = least_squares(fun_broyden, x0_broyden,
    ...                       jac_sparsity=sparsity_broyden(n))
    >>> res_3.cost
    4.5687069299604613e-23
    >>> res_3.optimality
    1.1650454296851518e-11
    Let's also solve a curve fitting problem using robust loss function to
    take care of outliers in the data. Define the model function as
    ``y = a + b * exp(c * t)``, where t is a predictor variable, y is an
    observation and a, b, c are parameters to estimate.
    First, define the function which generates the data with noise and
    outliers, define the model parameters, and generate data:
    >>> def gen_data(t, a, b, c, noise=0, n_outliers=0, random_state=0):
    ...     y = a + b * np.exp(t * c)
    ...
    ...     rnd = np.random.RandomState(random_state)
    ...     error = noise * rnd.randn(t.size)
    ...     outliers = rnd.randint(0, t.size, n_outliers)
    ...     error[outliers] *= 10
    ...
    ...     return y + error
    ...
    >>> a = 0.5
    >>> b = 2.0
    >>> c = -1
    >>> t_min = 0
    >>> t_max = 10
    >>> n_points = 15
    ...
    >>> t_train = np.linspace(t_min, t_max, n_points)
    >>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3)
    Define function for computing residuals and initial estimate of
    parameters.
    >>> def fun(x, t, y):
    ...     return x[0] + x[1] * np.exp(x[2] * t) - y
    ...
    >>> x0 = np.array([1.0, 1.0, 0.0])
    Compute a standard least-squares solution:
    >>> res_lsq = least_squares(fun, x0, args=(t_train, y_train))
    Now compute two solutions with two different robust loss functions. The
    parameter `f_scale` is set to 0.1, meaning that inlier residuals should
    not significantly exceed 0.1 (the noise level used).
    >>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1,
    ...                             args=(t_train, y_train))
    >>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1,
    ...                         args=(t_train, y_train))
    And finally plot all the curves. We see that by selecting an appropriate
    `loss`  we can get estimates close to optimal even in the presence of
    strong outliers. But keep in mind that generally it is recommended to try
    'soft_l1' or 'huber' losses first (if at all necessary) as the other two
    options may cause difficulties in optimization process.
    >>> t_test = np.linspace(t_min, t_max, n_points * 10)
    >>> y_true = gen_data(t_test, a, b, c)
    >>> y_lsq = gen_data(t_test, *res_lsq.x)
    >>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x)
    >>> y_log = gen_data(t_test, *res_log.x)
    ...
    >>> import matplotlib.pyplot as plt
    >>> plt.plot(t_train, y_train, 'o')
    >>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true')
    >>> plt.plot(t_test, y_lsq, label='linear loss')
    >>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss')
    >>> plt.plot(t_test, y_log, label='cauchy loss')
    >>> plt.xlabel("t")
    >>> plt.ylabel("y")
    >>> plt.legend()
    >>> plt.show()
    """
    # Validate all options up front so that bad arguments fail fast, before
    # any function evaluations.
    if method not in ['trf', 'dogbox', 'lm']:
        raise ValueError("`method` must be 'trf', 'dogbox' or 'lm'.")
    if jac not in ['2-point', '3-point', 'cs'] and not callable(jac):
        raise ValueError("`jac` must be '2-point', '3-point', 'cs' or "
                         "callable.")
    if tr_solver not in [None, 'exact', 'lsmr']:
        raise ValueError("`tr_solver` must be None, 'exact' or 'lsmr'.")
    if loss not in IMPLEMENTED_LOSSES and not callable(loss):
        raise ValueError("`loss` must be one of {0} or a callable."
                         .format(IMPLEMENTED_LOSSES.keys()))
    if method == 'lm' and loss != 'linear':
        raise ValueError("method='lm' supports only 'linear' loss function.")
    if verbose not in [0, 1, 2]:
        raise ValueError("`verbose` must be in [0, 1, 2].")
    if len(bounds) != 2:
        raise ValueError("`bounds` must contain 2 elements.")
    if max_nfev is not None and max_nfev <= 0:
        raise ValueError("`max_nfev` must be None or positive integer.")
    # x0 may be a scalar; promote it to a float 1-d array.
    x0 = np.atleast_1d(x0).astype(float)
    if x0.ndim > 1:
        raise ValueError("`x0` must have at most 1 dimension.")
    lb, ub = prepare_bounds(bounds, x0.shape[0])
    if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)):
        raise ValueError("Method 'lm' doesn't support bounds.")
    if lb.shape != x0.shape or ub.shape != x0.shape:
        raise ValueError("Inconsistent shapes between bounds and `x0`.")
    if np.any(lb >= ub):
        raise ValueError("Each lower bound must be strictly less than each "
                         "upper bound.")
    if not in_bounds(x0, lb, ub):
        raise ValueError("`x0` is infeasible.")
    x_scale = check_x_scale(x_scale, x0)
    ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol)
    # Normalize `fun` so it always yields a 1-d residual array, even when
    # the user's function returns a scalar.
    def fun_wrapped(x):
        return np.atleast_1d(fun(x, *args, **kwargs))
    if method == 'trf':
        # 'trf' keeps iterates strictly feasible, so nudge x0 off the bounds.
        x0 = make_strictly_feasible(x0, lb, ub)
    f0 = fun_wrapped(x0)
    if f0.ndim != 1:
        raise ValueError("`fun` must return at most 1-d array_like.")
    if not np.all(np.isfinite(f0)):
        raise ValueError("Residuals are not finite in the initial point.")
    n = x0.size
    m = f0.size
    if method == 'lm' and m < n:
        raise ValueError("Method 'lm' doesn't work when the number of "
                         "residuals is less than the number of variables.")
    # Build the robust-loss wrapper (None for 'linear') and compute the
    # initial cost from the residuals at x0.
    loss_function = construct_loss_function(m, loss, f_scale)
    if callable(loss):
        rho = loss_function(f0)
        if rho.shape != (3, m):
            raise ValueError("The return value of `loss` callable has wrong "
                             "shape.")
        initial_cost = 0.5 * np.sum(rho[0])
    elif loss_function is not None:
        initial_cost = loss_function(f0, cost_only=True)
    else:
        initial_cost = 0.5 * np.dot(f0, f0)
    # Wrap the Jacobian: a user callable may return a dense array, a sparse
    # matrix or a LinearOperator; otherwise fall back to finite differences.
    if callable(jac):
        J0 = jac(x0, *args, **kwargs)
        if issparse(J0):
            J0 = csr_matrix(J0)
            def jac_wrapped(x, _=None):
                return csr_matrix(jac(x, *args, **kwargs))
        elif isinstance(J0, LinearOperator):
            def jac_wrapped(x, _=None):
                return jac(x, *args, **kwargs)
        else:
            J0 = np.atleast_2d(J0)
            def jac_wrapped(x, _=None):
                return np.atleast_2d(jac(x, *args, **kwargs))
    else:  # Estimate Jacobian by finite differences.
        if method == 'lm':
            if jac_sparsity is not None:
                raise ValueError("method='lm' does not support "
                                 "`jac_sparsity`.")
            if jac != '2-point':
                warn("jac='{0}' works equivalently to '2-point' "
                     "for method='lm'.".format(jac))
            # MINPACK does its own differencing internally.
            J0 = jac_wrapped = None
        else:
            if jac_sparsity is not None and tr_solver == 'exact':
                raise ValueError("tr_solver='exact' is incompatible "
                                 "with `jac_sparsity`.")
            jac_sparsity = check_jac_sparsity(jac_sparsity, m, n)
            def jac_wrapped(x, f):
                J = approx_derivative(fun, x, rel_step=diff_step, method=jac,
                                      f0=f, bounds=bounds, args=args,
                                      kwargs=kwargs, sparsity=jac_sparsity)
                if J.ndim != 2:  # J is guaranteed not sparse.
                    J = np.atleast_2d(J)
                return J
            J0 = jac_wrapped(x0, f0)
    if J0 is not None:
        if J0.shape != (m, n):
            raise ValueError(
                "The return value of `jac` has wrong shape: expected {0}, "
                "actual {1}.".format((m, n), J0.shape))
        if not isinstance(J0, np.ndarray):
            if method == 'lm':
                raise ValueError("method='lm' works only with dense "
                                 "Jacobian matrices.")
            if tr_solver == 'exact':
                raise ValueError(
                    "tr_solver='exact' works only with dense "
                    "Jacobian matrices.")
        jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac'
        if isinstance(J0, LinearOperator) and jac_scale:
            raise ValueError("x_scale='jac' can't be used when `jac` "
                             "returns LinearOperator.")
        # Default trust-region solver: 'exact' for dense Jacobians, 'lsmr'
        # for sparse matrices and LinearOperators.
        if tr_solver is None:
            if isinstance(J0, np.ndarray):
                tr_solver = 'exact'
            else:
                tr_solver = 'lsmr'
    # Dispatch to the selected backend.
    if method == 'lm':
        result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol,
                              max_nfev, x_scale, diff_step)
    elif method == 'trf':
        result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol,
                     gtol, max_nfev, x_scale, loss_function, tr_solver,
                     tr_options.copy(), verbose)
    elif method == 'dogbox':
        if tr_solver == 'lsmr' and 'regularize' in tr_options:
            warn("The keyword 'regularize' in `tr_options` is not relevant "
                 "for 'dogbox' method.")
            # Copy before deleting so the caller's dict is never mutated.
            tr_options = tr_options.copy()
            del tr_options['regularize']
        result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol,
                        xtol, gtol, max_nfev, x_scale, loss_function,
                        tr_solver, tr_options, verbose)
    result.message = TERMINATION_MESSAGES[result.status]
    result.success = result.status > 0
    if verbose >= 1:
        print(result.message)
        print("Function evaluations {0}, initial cost {1:.4e}, final cost "
              "{2:.4e}, first-order optimality {3:.2e}."
              .format(result.nfev, initial_cost, result.cost,
                      result.optimality))
    return result
| bsd-3-clause |
fangxingli/hue | desktop/core/ext-py/pysaml2-2.4.0/src/saml2/extension/mdrpi.py | 36 | 10117 | #!/usr/bin/env python
#
# Generated Mon Jun 27 09:54:22 2011 by parse_xsd.py version 0.4.
#
import saml2
from saml2 import SamlBase
from saml2 import md
NAMESPACE = 'urn:oasis:names:tc:SAML:metadata:rpi'
class RegistrationPolicy(md.LocalizedURIType_):
    """The urn:oasis:names:tc:SAML:metadata:rpi:RegistrationPolicy element """
    c_tag = 'RegistrationPolicy'
    c_namespace = NAMESPACE
    # Copy the base class schema tables so this class can be extended
    # without mutating md.LocalizedURIType_.
    c_children = md.LocalizedURIType_.c_children.copy()
    c_attributes = md.LocalizedURIType_.c_attributes.copy()
    c_child_order = md.LocalizedURIType_.c_child_order[:]
    c_cardinality = md.LocalizedURIType_.c_cardinality.copy()
def registration_policy_from_string(xml_string):
    # Deserialize an XML string into a RegistrationPolicy instance.
    return saml2.create_class_from_xml_string(RegistrationPolicy, xml_string)
class UsagePolicy(md.LocalizedURIType_):
    """The urn:oasis:names:tc:SAML:metadata:rpi:UsagePolicy element """
    c_tag = 'UsagePolicy'
    c_namespace = NAMESPACE
    # Copy the base class schema tables so this class can be extended
    # without mutating md.LocalizedURIType_.
    c_children = md.LocalizedURIType_.c_children.copy()
    c_attributes = md.LocalizedURIType_.c_attributes.copy()
    c_child_order = md.LocalizedURIType_.c_child_order[:]
    c_cardinality = md.LocalizedURIType_.c_cardinality.copy()
def usage_policy_from_string(xml_string):
    # Deserialize an XML string into a UsagePolicy instance.
    return saml2.create_class_from_xml_string(UsagePolicy, xml_string)
class PublicationType_(SamlBase):
    """The urn:oasis:names:tc:SAML:metadata:rpi:PublicationType element """
    c_tag = 'PublicationType'
    c_namespace = NAMESPACE
    # Copy base schema tables so the attribute additions below stay local
    # to this class instead of mutating SamlBase.
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # XML attribute name -> (python attribute, type, required) triples.
    c_attributes['publisher'] = ('publisher', 'string', True)
    c_attributes['creationInstant'] = ('creation_instant', 'dateTime', False)
    c_attributes['publicationId'] = ('publication_id', 'string', False)
    def __init__(self,
                 publisher=None,
                 creation_instant=None,
                 publication_id=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None):
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
        self.publisher = publisher
        self.creation_instant = creation_instant
        self.publication_id = publication_id
def publication_type__from_string(xml_string):
    # Deserialize an XML string into a PublicationType_ instance.
    return saml2.create_class_from_xml_string(PublicationType_, xml_string)
class RegistrationInfoType_(SamlBase):
    """The urn:oasis:names:tc:SAML:metadata:rpi:RegistrationInfoType element """
    c_tag = 'RegistrationInfoType'
    c_namespace = NAMESPACE
    # Copy base schema tables so the additions below stay local to this class.
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # Child element: zero or more RegistrationPolicy elements.
    c_children['{urn:oasis:names:tc:SAML:metadata:rpi}RegistrationPolicy'] = (
        'registration_policy', [RegistrationPolicy])
    c_cardinality['registration_policy'] = {"min": 0}
    # XML attribute name -> (python attribute, type, required) triples.
    c_attributes['registrationAuthority'] = (
        'registration_authority', 'string', True)
    c_attributes['registrationInstant'] = (
        'registration_instant', 'dateTime', False)
    c_child_order.extend(['registration_policy'])
    def __init__(self,
                 registration_policy=None,
                 registration_authority=None,
                 registration_instant=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None):
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
        # Repeatable child defaults to a fresh list per instance.
        self.registration_policy = registration_policy or []
        self.registration_authority = registration_authority
        self.registration_instant = registration_instant
def registration_info_type__from_string(xml_string):
    # Deserialize an XML string into a RegistrationInfoType_ instance.
    return saml2.create_class_from_xml_string(RegistrationInfoType_, xml_string)
class PublicationInfoType_(SamlBase):
    """The urn:oasis:names:tc:SAML:metadata:rpi:PublicationInfoType element """
    c_tag = 'PublicationInfoType'
    c_namespace = NAMESPACE
    # Copy base schema tables so the additions below stay local to this class.
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # Child element: zero or more UsagePolicy elements.
    c_children['{urn:oasis:names:tc:SAML:metadata:rpi}UsagePolicy'] = (
        'usage_policy', [UsagePolicy])
    c_cardinality['usage_policy'] = {"min": 0}
    # XML attribute name -> (python attribute, type, required) triples.
    c_attributes['publisher'] = ('publisher', 'string', True)
    c_attributes['creationInstant'] = ('creation_instant', 'dateTime', False)
    c_attributes['publicationId'] = ('publication_id', 'string', False)
    c_child_order.extend(['usage_policy'])
    def __init__(self,
                 usage_policy=None,
                 publisher=None,
                 creation_instant=None,
                 publication_id=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None):
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
        # Repeatable child defaults to a fresh list per instance.
        self.usage_policy = usage_policy or []
        self.publisher = publisher
        self.creation_instant = creation_instant
        self.publication_id = publication_id
def publication_info_type__from_string(xml_string):
    # Deserialize an XML string into a PublicationInfoType_ instance.
    return saml2.create_class_from_xml_string(PublicationInfoType_, xml_string)
class Publication(PublicationType_):
    """The urn:oasis:names:tc:SAML:metadata:rpi:Publication element """
    c_tag = 'Publication'
    c_namespace = NAMESPACE
    # Re-copy the parent's schema tables under the concrete element name.
    c_children = PublicationType_.c_children.copy()
    c_attributes = PublicationType_.c_attributes.copy()
    c_child_order = PublicationType_.c_child_order[:]
    c_cardinality = PublicationType_.c_cardinality.copy()
def publication_from_string(xml_string):
    # Deserialize an XML string into a Publication instance.
    return saml2.create_class_from_xml_string(Publication, xml_string)
class RegistrationInfo(RegistrationInfoType_):
    """The urn:oasis:names:tc:SAML:metadata:rpi:RegistrationInfo element """
    c_tag = 'RegistrationInfo'
    c_namespace = NAMESPACE
    # Re-copy the parent's schema tables under the concrete element name.
    c_children = RegistrationInfoType_.c_children.copy()
    c_attributes = RegistrationInfoType_.c_attributes.copy()
    c_child_order = RegistrationInfoType_.c_child_order[:]
    c_cardinality = RegistrationInfoType_.c_cardinality.copy()
def registration_info_from_string(xml_string):
    # Deserialize an XML string into a RegistrationInfo instance.
    return saml2.create_class_from_xml_string(RegistrationInfo, xml_string)
class PublicationInfo(PublicationInfoType_):
    """The urn:oasis:names:tc:SAML:metadata:rpi:PublicationInfo element """
    c_tag = 'PublicationInfo'
    c_namespace = NAMESPACE
    # Re-copy the parent's schema tables under the concrete element name.
    c_children = PublicationInfoType_.c_children.copy()
    c_attributes = PublicationInfoType_.c_attributes.copy()
    c_child_order = PublicationInfoType_.c_child_order[:]
    c_cardinality = PublicationInfoType_.c_cardinality.copy()
def publication_info_from_string(xml_string):
    # Deserialize an XML string into a PublicationInfo instance.
    return saml2.create_class_from_xml_string(PublicationInfo, xml_string)
class PublicationPathType_(SamlBase):
    """The urn:oasis:names:tc:SAML:metadata:rpi:PublicationPathType element """
    c_tag = 'PublicationPathType'
    c_namespace = NAMESPACE
    # Copy base schema tables so the additions below stay local to this class.
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # Child element: zero or more Publication elements.
    c_children['{urn:oasis:names:tc:SAML:metadata:rpi}Publication'] = (
        'publication', [Publication])
    c_cardinality['publication'] = {"min": 0}
    c_child_order.extend(['publication'])
    def __init__(self,
                 publication=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None):
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
        # Repeatable child defaults to a fresh list per instance.
        self.publication = publication or []
def publication_path_type__from_string(xml_string):
    # Deserialize an XML string into a PublicationPathType_ instance.
    return saml2.create_class_from_xml_string(PublicationPathType_, xml_string)
class PublicationPath(PublicationPathType_):
    """The urn:oasis:names:tc:SAML:metadata:rpi:PublicationPath element """
    c_tag = 'PublicationPath'
    c_namespace = NAMESPACE
    # Independent copies of the parent's schema tables so later mutation of
    # this class never leaks back into PublicationPathType_.
    c_children = dict(PublicationPathType_.c_children)
    c_attributes = dict(PublicationPathType_.c_attributes)
    c_child_order = list(PublicationPathType_.c_child_order)
    c_cardinality = dict(PublicationPathType_.c_cardinality)
def publication_path_from_string(xml_string):
    """Deserialize a PublicationPath element from its XML string form."""
    element = saml2.create_class_from_xml_string(PublicationPath, xml_string)
    return element
# Dispatch table: maps each element class's c_tag to the helper that parses
# that element from an XML string.
ELEMENT_FROM_STRING = {
    RegistrationInfo.c_tag: registration_info_from_string,
    RegistrationInfoType_.c_tag: registration_info_type__from_string,
    RegistrationPolicy.c_tag: registration_policy_from_string,
    PublicationInfo.c_tag: publication_info_from_string,
    PublicationInfoType_.c_tag: publication_info_type__from_string,
    UsagePolicy.c_tag: usage_policy_from_string,
    PublicationPath.c_tag: publication_path_from_string,
    PublicationPathType_.c_tag: publication_path_type__from_string,
    Publication.c_tag: publication_from_string,
    PublicationType_.c_tag: publication_type__from_string,
}
# Registry of element classes keyed by tag name; consumed by factory() below
# and by pysaml2's generic element machinery.
ELEMENT_BY_TAG = {
    'RegistrationInfo': RegistrationInfo,
    'RegistrationInfoType': RegistrationInfoType_,
    'RegistrationPolicy': RegistrationPolicy,
    'PublicationInfo': PublicationInfo,
    'PublicationInfoType': PublicationInfoType_,
    'UsagePolicy': UsagePolicy,
    'PublicationPath': PublicationPath,
    'PublicationPathType': PublicationPathType_,
    'Publication': Publication,
    'PublicationType': PublicationType_,
}
def factory(tag, **kwargs):
    """Instantiate the element class registered under *tag*.

    Keyword arguments are forwarded to the class constructor. Raises
    KeyError for an unknown tag.
    """
    element_class = ELEMENT_BY_TAG[tag]
    return element_class(**kwargs)
| apache-2.0 |
plxaye/chromium | src/third_party/protobuf/python/ez_setup.py | 454 | 10334 | #!python
# This file was obtained from:
# http://peak.telecommunity.com/dist/ez_setup.py
# on 2011/1/21.
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
DEFAULT_VERSION = "0.6c11"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
# Known-good MD5 digests of the setuptools eggs this script may download,
# keyed by egg file name. Used by _validate_md5() to detect corrupt or
# tampered downloads; regenerated in place by update_md5().
md5_data = {
    'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
    'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
    'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
    'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
    'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
    'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
    'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
    'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
    'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
    'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
    'setuptools-0.6c10-py2.3.egg': 'ce1e2ab5d3a0256456d9fc13800a7090',
    'setuptools-0.6c10-py2.4.egg': '57d6d9d6e9b80772c59a53a8433a5dd4',
    'setuptools-0.6c10-py2.5.egg': 'de46ac8b1c97c895572e5e8596aeb8c7',
    'setuptools-0.6c10-py2.6.egg': '58ea40aef06da02ce641495523a0b7f5',
    'setuptools-0.6c11-py2.3.egg': '2baeac6e13d414a9d28e7ba5b5a596de',
    'setuptools-0.6c11-py2.4.egg': 'bd639f9b0eac4c42497034dec2ec0c2b',
    'setuptools-0.6c11-py2.5.egg': '64c94f3bf7a72a13ec83e0b24f2749b2',
    'setuptools-0.6c11-py2.6.egg': 'bfa92100bd772d5a213eedd356d64086',
    'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
    'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
    'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
    'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
    'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
    'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
    'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
    'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
    'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
    'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
    'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
    'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
    'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
    'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
    'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
    'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
    'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
    'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
    'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
    'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
    'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
    'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
    'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
    'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
    """Check downloaded egg bytes against the embedded md5_data registry.

    Unknown egg names pass through unchecked. On a digest mismatch the
    whole script aborts with exit code 2 (Python 2 `print >>` syntax).
    Returns `data` unchanged on success.
    """
    if egg_name in md5_data:
        digest = md5(data).hexdigest()
        if digest != md5_data[egg_name]:
            print >>sys.stderr, (
                "md5 validation of %s failed! (Possible download problem?)"
                % egg_name
            )
            sys.exit(2)
    return data
def use_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    download_delay=15
):
    """Automatically find/download setuptools and make it available on sys.path
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end with
    a '/'). `to_dir` is the directory where setuptools will be downloaded, if
    it is not already available.  If `download_delay` is specified, it should
    be the number of seconds that will be paused before initiating a download,
    should one be required.  If an older version of setuptools is installed,
    this routine will print a message to ``sys.stderr`` and raise SystemExit in
    an attempt to abort the calling script.
    """
    # Remember whether setuptools/pkg_resources were imported *before* we
    # ran: if so, we cannot safely upgrade in-process and must bail out.
    was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
    def do_download():
        # Fetch the egg, put it first on sys.path, and tell setuptools to
        # bootstrap-install itself from that egg later.
        egg = download_setuptools(version, download_base, to_dir, download_delay)
        sys.path.insert(0, egg)
        import setuptools; setuptools.bootstrap_install_from = egg
    try:
        import pkg_resources
    except ImportError:
        # No setuptools at all: download and use the egg directly.
        return do_download()
    try:
        pkg_resources.require("setuptools>="+version); return
    except pkg_resources.VersionConflict, e:  # Python 2 except syntax
        if was_imported:
            # Too late to swap implementations under the caller's feet.
            print >>sys.stderr, (
            "The required version of setuptools (>=%s) is not available, and\n"
            "can't be installed while this script is running. Please install\n"
            " a more recent version first, using 'easy_install -U setuptools'."
            "\n\n(Currently using %r)"
            ) % (version, e.args[0])
            sys.exit(2)
    except pkg_resources.DistributionNotFound:
        pass
    # Drop the stale pkg_resources so the freshly downloaded egg's copy can
    # be imported instead.
    del pkg_resources, sys.modules['pkg_resources'] # reload ok
    return do_download()
def download_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    delay = 15
):
    """Download setuptools from a specified location and return its filename
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download attempt.

    Uses Python 2's urllib2; the downloaded bytes are md5-validated before
    anything is written to disk.
    """
    import urllib2, shutil
    egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
    url = download_base + egg_name
    saveto = os.path.join(to_dir, egg_name)
    src = dst = None
    if not os.path.exists(saveto): # Avoid repeated downloads
        try:
            from distutils import log
            if delay:
                log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
                    version, download_base, delay, url
                ); from time import sleep; sleep(delay)
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = _validate_md5(egg_name, src.read())
            dst = open(saveto,"wb"); dst.write(data)
        finally:
            # Close both handles even if validation or writing failed.
            if src: src.close()
            if dst: dst.close()
    return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall

    Entry point for running this file as a script (Python 2). `argv` is
    passed through to easy_install; exits via sys.exit on failure paths.
    """
    try:
        import setuptools
    except ImportError:
        # No setuptools present: download the egg and run easy_install from
        # inside it, then delete the egg again.
        egg = None
        try:
            egg = download_setuptools(version, delay=0)
            sys.path.insert(0,egg)
            from setuptools.command.easy_install import main
            return main(list(argv)+[egg]) # we're done here
        finally:
            if egg and os.path.exists(egg):
                os.unlink(egg)
    else:
        if setuptools.__version__ == '0.0.1':
            # Placeholder version shipped by some broken distributions.
            print >>sys.stderr, (
            "You have an obsolete version of setuptools installed. Please\n"
            "remove it from your system entirely before rerunning this script."
            )
            sys.exit(2)
    req = "setuptools>="+version
    import pkg_resources
    try:
        pkg_resources.require(req)
    except pkg_resources.VersionConflict:
        # Installed setuptools is too old: upgrade it via easy_install,
        # bootstrapping from a freshly downloaded egg.
        try:
            from setuptools.command.easy_install import main
        except ImportError:
            from easy_install import main
        main(list(argv)+[download_setuptools(delay=0)])
        sys.exit(0) # try to force an exit
    else:
        if argv:
            from setuptools.command.easy_install import main
            main(argv)
        else:
            print "Setuptools version",version,"or greater has been installed."
            print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
    """Update our built-in md5 registry

    Computes the digest of each named egg file and rewrites the md5_data
    literal *inside this script's own source file* via a regex substitution.
    """
    import re
    for name in filenames:
        base = os.path.basename(name)
        f = open(name,'rb')
        md5_data[base] = md5(f.read()).hexdigest()
        f.close()
    # Render the updated table as sorted source lines.
    data = [" %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)
    # Locate our own source file and splice the new table into it.
    import inspect
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb'); src = f.read(); f.close()
    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print >>sys.stderr, "Internal error!"
        sys.exit(2)
    src = src[:match.start(1)] + repl + src[match.end(1):]
    f = open(srcfile,'w')
    f.write(src)
    f.close()
# Command-line entry point: `ez_setup.py --md5update EGG...` refreshes the
# embedded digest table; any other invocation installs/upgrades setuptools.
if __name__=='__main__':
    if len(sys.argv)>2 and sys.argv[1]=='--md5update':
        update_md5(sys.argv[2:])
    else:
        main(sys.argv[1:])
| apache-2.0 |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/SQLAlchemy-1.0.6/examples/association/dict_of_sets_with_default.py | 30 | 2794 | """dict_of_sets_with_default.py
an advanced association proxy example which
illustrates nesting of association proxies to produce multi-level Python
collections, in this case a dictionary with string keys and sets of integers
as values, which conceal the underlying mapped classes.
This is a three table model which represents a parent table referencing a
dictionary of string keys and sets as values, where each set stores a
collection of integers. The association proxy extension is used to hide the
details of this persistence. The dictionary also generates new collections
upon access of a non-existent key, in the same manner as Python's
"collections.defaultdict" object.
"""
from sqlalchemy import String, Integer, Column, create_engine, ForeignKey
from sqlalchemy.orm import relationship, Session
from sqlalchemy.orm.collections import MappedCollection
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.associationproxy import association_proxy
import operator
class Base(object):
    # Mixin supplying the surrogate integer primary key shared by all
    # three mapped tables below.
    id = Column(Integer, primary_key=True)
# Rebind Base to the SQLAlchemy declarative base, folding the mixin above
# into every mapped class.
Base = declarative_base(cls=Base)
class GenDefaultCollection(MappedCollection):
    """MappedCollection that creates a new B on first access of a missing
    key, mimicking collections.defaultdict."""

    def __missing__(self, key):
        value = B(key)
        self[key] = value
        return value
class A(Base):
    __tablename__ = "a"
    # Dict-like collection of B rows keyed by B.key; GenDefaultCollection
    # generates missing entries on access, defaultdict-style.
    associations = relationship("B",
        collection_class=lambda: GenDefaultCollection(operator.attrgetter("key"))
    )
    collections = association_proxy("associations", "values")
    """Bridge the association from 'associations' over to the 'values'
    association proxy of B.
    """
class B(Base):
    __tablename__ = "b"
    a_id = Column(Integer, ForeignKey("a.id"), nullable=False)
    # Set of C rows; exposed through the 'values' proxy as a set of ints.
    elements = relationship("C", collection_class=set)
    key = Column(String)
    values = association_proxy("elements", "value")
    """Bridge the association from 'elements' over to the
    'value' element of C."""
    def __init__(self, key, values=None):
        # `values`, when given, is assigned through the association proxy,
        # which creates one C row per integer.
        self.key = key
        if values:
            self.values = values
class C(Base):
    __tablename__ = "c"
    b_id = Column(Integer, ForeignKey("b.id"), nullable=False)
    # The raw integer payload stored in the innermost set.
    value = Column(Integer)
    def __init__(self, value):
        self.value = value
# Demo: exercises the nested association proxies against an in-memory
# SQLite database (side effects: creates tables, commits rows, echoes SQL).
if __name__ == '__main__':
    engine = create_engine('sqlite://', echo=True)
    Base.metadata.create_all(engine)
    session = Session(engine)
    # only "A" is referenced explicitly. Using "collections",
    # we deal with a dict of key/sets of integers directly.
    session.add_all([
        A(collections={
            "1": set([1, 2, 3]),
        })
    ])
    session.commit()
    a1 = session.query(A).first()
    print(a1.collections["1"])
    # defaultdict-style: accessing key "2" creates it on the fly.
    a1.collections["1"].add(4)
    session.commit()
    a1.collections["2"].update([7, 8, 9])
    session.commit()
    print(a1.collections["2"])
| mit |
kenshay/ImageScripter | Script_Runner/PYTHON/Lib/site-packages/pyasn1/type/univ.py | 5 | 100158 | #
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2018, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
import math
import sys
from pyasn1 import error
from pyasn1.codec.ber import eoo
from pyasn1.compat import binary
from pyasn1.compat import integer
from pyasn1.compat import octets
from pyasn1.type import base
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import tag
from pyasn1.type import tagmap
NoValue = base.NoValue
noValue = NoValue()
__all__ = ['Integer', 'Boolean', 'BitString', 'OctetString', 'Null',
'ObjectIdentifier', 'Real', 'Enumerated',
'SequenceOfAndSetOfBase', 'SequenceOf', 'SetOf',
'SequenceAndSetBase', 'Sequence', 'Set', 'Choice', 'Any',
'NoValue', 'noValue']
# "Simple" ASN.1 types (yet incomplete)
class Integer(base.AbstractSimpleAsn1Item):
    """Create |ASN.1| type or object.

    |ASN.1| objects are immutable and duck-type Python :class:`int` objects.

    Keyword Args
    ------------
    value: :class:`int`, :class:`str` or |ASN.1| object
        Python integer or string literal or |ASN.1| class instance.
    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)
    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s)
    namedValues: :py:class:`~pyasn1.type.namedval.NamedValues`
        Object representing non-default symbolic aliases for numbers

    Raises
    ------
    :py:class:`~pyasn1.error.PyAsn1Error`
        On constraint violation or bad initializer.

    Examples
    --------
    .. code-block:: python

        class ErrorCode(Integer):
            '''
            ASN.1 specification:

            ErrorCode ::=
                INTEGER { disk-full(1), no-disk(-1),
                          disk-not-formatted(2) }

            error ErrorCode ::= disk-full
            '''
            namedValues = NamedValues(
                ('disk-full', 1), ('no-disk', -1),
                ('disk-not-formatted', 2)
            )

        error = ErrorCode('disk-full')
    """
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x02)
    )
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()
    #: Default :py:class:`~pyasn1.type.namedval.NamedValues` object
    #: representing symbolic aliases for numbers
    namedValues = namedval.NamedValues()
    # Optimization for faster codec lookup
    typeId = base.AbstractSimpleAsn1Item.getTypeId()

    def __init__(self, value=noValue, **kwargs):
        # Inject the class-level namedValues so subclasses' aliases are
        # available to prettyIn/prettyOut unless the caller overrides them.
        if 'namedValues' not in kwargs:
            kwargs['namedValues'] = self.namedValues
        base.AbstractSimpleAsn1Item.__init__(self, value, **kwargs)

    # Arithmetic/bitwise protocol: each operation delegates to the wrapped
    # int and re-wraps the result via clone(), so constraints are re-applied.
    def __and__(self, value):
        return self.clone(self._value & value)
    def __rand__(self, value):
        return self.clone(value & self._value)
    def __or__(self, value):
        return self.clone(self._value | value)
    def __ror__(self, value):
        return self.clone(value | self._value)
    def __xor__(self, value):
        return self.clone(self._value ^ value)
    def __rxor__(self, value):
        return self.clone(value ^ self._value)
    def __lshift__(self, value):
        return self.clone(self._value << value)
    def __rshift__(self, value):
        return self.clone(self._value >> value)
    def __add__(self, value):
        return self.clone(self._value + value)
    def __radd__(self, value):
        return self.clone(value + self._value)
    def __sub__(self, value):
        return self.clone(self._value - value)
    def __rsub__(self, value):
        return self.clone(value - self._value)
    def __mul__(self, value):
        return self.clone(self._value * value)
    def __rmul__(self, value):
        return self.clone(value * self._value)
    def __mod__(self, value):
        return self.clone(self._value % value)
    def __rmod__(self, value):
        return self.clone(value % self._value)
    def __pow__(self, value, modulo=None):
        return self.clone(pow(self._value, value, modulo))
    def __rpow__(self, value):
        return self.clone(pow(value, self._value))
    def __floordiv__(self, value):
        return self.clone(self._value // value)
    def __rfloordiv__(self, value):
        return self.clone(value // self._value)
    if sys.version_info[0] <= 2:
        # Python 2: classic division keeps Integer for int operands but
        # promotes to Real when a float is involved.
        def __div__(self, value):
            if isinstance(value, float):
                return Real(self._value / value)
            else:
                return self.clone(self._value / value)
        def __rdiv__(self, value):
            if isinstance(value, float):
                return Real(value / self._value)
            else:
                return self.clone(value / self._value)
    else:
        # Python 3: true division always yields a Real, mirroring int/int
        # producing float.
        def __truediv__(self, value):
            return Real(self._value / value)
        def __rtruediv__(self, value):
            return Real(value / self._value)
    # NOTE(review): divmod() returns a tuple, which clone()/prettyIn cannot
    # coerce back into an Integer — calling these likely raises PyAsn1Error;
    # confirm against upstream before relying on them.
    def __divmod__(self, value):
        return self.clone(divmod(self._value, value))
    def __rdivmod__(self, value):
        return self.clone(divmod(value, self._value))
    __hash__ = base.AbstractSimpleAsn1Item.__hash__

    def __int__(self):
        return int(self._value)
    if sys.version_info[0] <= 2:
        def __long__(self):
            return long(self._value)
    def __float__(self):
        return float(self._value)
    def __abs__(self):
        return self.clone(abs(self._value))
    def __index__(self):
        return int(self._value)
    def __pos__(self):
        return self.clone(+self._value)
    def __neg__(self):
        return self.clone(-self._value)
    def __invert__(self):
        return self.clone(~self._value)
    def __round__(self, n=0):
        # round() with ndigits keeps the ASN.1 wrapper; bare round()
        # returns a plain Python int, matching builtin int behaviour.
        r = round(self._value, n)
        if n:
            return self.clone(r)
        else:
            return r
    def __floor__(self):
        return math.floor(self._value)
    def __ceil__(self):
        return math.ceil(self._value)
    if sys.version_info[0:2] > (2, 5):
        def __trunc__(self):
            return self.clone(math.trunc(self._value))

    # Rich comparisons compare the raw int against the other operand.
    def __lt__(self, value):
        return self._value < value
    def __le__(self, value):
        return self._value <= value
    def __eq__(self, value):
        return self._value == value
    def __ne__(self, value):
        return self._value != value
    def __gt__(self, value):
        return self._value > value
    def __ge__(self, value):
        return self._value >= value

    def prettyIn(self, value):
        # Accept anything int() accepts; otherwise fall back to resolving
        # a symbolic name through namedValues ('disk-full' -> 1).
        try:
            return int(value)
        except ValueError:
            try:
                return self.namedValues[value]
            except KeyError:
                raise error.PyAsn1Error(
                    'Can\'t coerce %r into integer: %s' % (value, sys.exc_info()[1])
                )

    def prettyOut(self, value):
        # Prefer the symbolic alias when one is registered for this number.
        try:
            return str(self.namedValues[value])
        except KeyError:
            return str(value)

    # backward compatibility
    def getNamedValues(self):
        return self.namedValues
class Boolean(Integer):
    """Create |ASN.1| type or object.

    |ASN.1| objects are immutable and duck-type Python :class:`int` objects.

    Keyword Args
    ------------
    value: :class:`int`, :class:`str` or |ASN.1| object
        Python integer or boolean or string literal or |ASN.1| class instance.
    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)
    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s)
    namedValues: :py:class:`~pyasn1.type.namedval.NamedValues`
        Object representing non-default symbolic aliases for numbers

    Raises
    ------
    :py:class:`~pyasn1.error.PyAsn1Error`
        On constraint violation or bad initializer.

    Examples
    --------
    .. code-block:: python

        class RoundResult(Boolean):
            '''
            ASN.1 specification:

            RoundResult ::= BOOLEAN

            ok RoundResult ::= TRUE
            ko RoundResult ::= FALSE
            '''
        ok = RoundResult(True)
        ko = RoundResult(False)
    """
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x01),
    )
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    # Only 0 and 1 are legal values for BOOLEAN.
    subtypeSpec = Integer.subtypeSpec + constraint.SingleValueConstraint(0, 1)
    #: Default :py:class:`~pyasn1.type.namedval.NamedValues` object
    #: representing symbolic aliases for numbers
    namedValues = namedval.NamedValues(('False', 0), ('True', 1))
    # Optimization for faster codec lookup
    typeId = Integer.getTypeId()
# Widest arbitrary-precision integer type available: Python 2 needs `long`,
# Python 3's plain `int` is unbounded. SizedInteger below derives from this.
if sys.version_info[0] < 3:
    SizedIntegerBase = long
else:
    SizedIntegerBase = int
class SizedInteger(SizedIntegerBase):
    """Integer that carries an explicit bit length, so leading zero bits of
    a BIT STRING value survive arithmetic on its numeric form."""

    bitLength = leadingZeroBits = None

    def setBitLength(self, bitLength):
        # Record the declared length and how many of its leading bits are
        # zero padding beyond the value's natural width.
        shortfall = bitLength - integer.bitLength(self)
        self.bitLength = bitLength
        self.leadingZeroBits = shortfall if shortfall > 0 else 0
        return self

    def __len__(self):
        if self.bitLength is None:
            # Lazily derive the length from the numeric value itself.
            return self.setBitLength(integer.bitLength(self)).bitLength
        return self.bitLength
class BitString(base.AbstractSimpleAsn1Item):
    """Create |ASN.1| schema or value object.

    |ASN.1| objects are immutable and duck-type both Python :class:`tuple` (as a tuple
    of bits) and :class:`int` objects.

    Keyword Args
    ------------
    value: :class:`int`, :class:`str` or |ASN.1| object
        Python integer or string literal representing binary or hexadecimal
        number or sequence of integer bits or |ASN.1| object.
    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)
    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s)
    namedValues: :py:class:`~pyasn1.type.namedval.NamedValues`
        Object representing non-default symbolic aliases for numbers
    binValue: :py:class:`str`
        Binary string initializer to use instead of the *value*.
        Example: '10110011'.
    hexValue: :py:class:`str`
        Hexadecimal string initializer to use instead of the *value*.
        Example: 'DEADBEEF'.

    Raises
    ------
    :py:class:`~pyasn1.error.PyAsn1Error`
        On constraint violation or bad initializer.

    Examples
    --------
    .. code-block:: python

        class Rights(BitString):
            '''
            ASN.1 specification:

            Rights ::= BIT STRING { user-read(0), user-write(1),
                                    group-read(2), group-write(3),
                                    other-read(4), other-write(5) }

            group1 Rights ::= { group-read, group-write }
            group2 Rights ::= '0011'B
            group3 Rights ::= '3'H
            '''
            namedValues = NamedValues(
                ('user-read', 0), ('user-write', 1),
                ('group-read', 2), ('group-write', 3),
                ('other-read', 4), ('other-write', 5)
            )

        group1 = Rights(('group-read', 'group-write'))
        group2 = Rights('0011')
        group3 = Rights(0x3)
    """
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x03)
    )
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()
    #: Default :py:class:`~pyasn1.type.namedval.NamedValues` object
    #: representing symbolic aliases for numbers
    namedValues = namedval.NamedValues()
    # Optimization for faster codec lookup
    typeId = base.AbstractSimpleAsn1Item.getTypeId()

    defaultBinValue = defaultHexValue = noValue

    def __init__(self, value=noValue, **kwargs):
        # `binValue`/`hexValue` keyword initializers take precedence over
        # `value`; class-level defaults apply when nothing is given.
        if value is noValue:
            if kwargs:
                try:
                    value = self.fromBinaryString(kwargs.pop('binValue'), internalFormat=True)
                except KeyError:
                    pass
                try:
                    value = self.fromHexString(kwargs.pop('hexValue'), internalFormat=True)
                except KeyError:
                    pass
        if value is noValue:
            if self.defaultBinValue is not noValue:
                value = self.fromBinaryString(self.defaultBinValue, internalFormat=True)
            elif self.defaultHexValue is not noValue:
                value = self.fromHexString(self.defaultHexValue, internalFormat=True)
        if 'namedValues' not in kwargs:
            kwargs['namedValues'] = self.namedValues
        base.AbstractSimpleAsn1Item.__init__(self, value, **kwargs)

    def __str__(self):
        return self.asBinary()

    # Comparisons order first by bit length, then by numeric value, so that
    # e.g. '0010' != '10' even though both equal 2 numerically.
    def __eq__(self, other):
        other = self.prettyIn(other)
        return self is other or self._value == other and len(self._value) == len(other)
    def __ne__(self, other):
        other = self.prettyIn(other)
        return self._value != other or len(self._value) != len(other)
    def __lt__(self, other):
        other = self.prettyIn(other)
        return len(self._value) < len(other) or len(self._value) == len(other) and self._value < other
    def __le__(self, other):
        other = self.prettyIn(other)
        return len(self._value) <= len(other) or len(self._value) == len(other) and self._value <= other
    def __gt__(self, other):
        other = self.prettyIn(other)
        return len(self._value) > len(other) or len(self._value) == len(other) and self._value > other
    def __ge__(self, other):
        other = self.prettyIn(other)
        return len(self._value) >= len(other) or len(self._value) == len(other) and self._value >= other

    # Immutable sequence object protocol
    def __len__(self):
        return len(self._value)
    def __getitem__(self, i):
        if i.__class__ is slice:
            return self.clone([self[x] for x in range(*i.indices(len(self)))])
        else:
            length = len(self._value) - 1
            if i > length or i < 0:
                raise IndexError('bit index out of range')
            return (self._value >> (length - i)) & 1
    def __iter__(self):
        # Yield bits most-significant first.
        length = len(self._value)
        while length:
            length -= 1
            yield (self._value >> length) & 1
    def __reversed__(self):
        return reversed(tuple(self))

    # arithmetic operators
    def __add__(self, value):
        # Concatenation: self's bits followed by value's bits.
        value = self.prettyIn(value)
        return self.clone(SizedInteger(self._value << len(value) | value).setBitLength(len(self._value) + len(value)))
    def __radd__(self, value):
        value = self.prettyIn(value)
        return self.clone(SizedInteger(value << len(self._value) | self._value).setBitLength(len(self._value) + len(value)))
    def __mul__(self, value):
        # Repetition: self's bit pattern repeated `value` times.
        bitString = self._value
        while value > 1:
            bitString <<= len(self._value)
            bitString |= self._value
            value -= 1
        return self.clone(bitString)
    def __rmul__(self, value):
        return self * value
    def __lshift__(self, count):
        return self.clone(SizedInteger(self._value << count).setBitLength(len(self._value) + count))
    def __rshift__(self, count):
        return self.clone(SizedInteger(self._value >> count).setBitLength(max(0, len(self._value) - count)))
    def __int__(self):
        return self._value
    def __float__(self):
        return float(self._value)
    if sys.version_info[0] < 3:
        def __long__(self):
            return self._value

    def asNumbers(self):
        """Get |ASN.1| value as a sequence of 8-bit integers.

        If |ASN.1| object length is not a multiple of 8, result
        will be left-padded with zeros.
        """
        return tuple(octets.octs2ints(self.asOctets()))

    def asOctets(self):
        """Get |ASN.1| value as a sequence of octets.

        If |ASN.1| object length is not a multiple of 8, result
        will be left-padded with zeros.
        """
        return integer.to_bytes(self._value, length=len(self))

    def asInteger(self):
        """Get |ASN.1| value as a single integer value.
        """
        return self._value

    def asBinary(self):
        """Get |ASN.1| value as a text string of bits.
        """
        binString = binary.bin(self._value)[2:]
        # Re-apply the leading zero bits dropped by the numeric rendering.
        return '0' * (len(self._value) - len(binString)) + binString

    @classmethod
    def fromHexString(cls, value, internalFormat=False, prepend=None):
        """Create a |ASN.1| object initialized from the hex string.

        Parameters
        ----------
        value: :class:`str`
            Text string like 'DEADBEEF'
        """
        try:
            value = SizedInteger(value, 16).setBitLength(len(value) * 4)
        except ValueError:
            raise error.PyAsn1Error('%s.fromHexString() error: %s' % (cls.__name__, sys.exc_info()[1]))
        if prepend is not None:
            value = SizedInteger(
                (SizedInteger(prepend) << len(value)) | value
            ).setBitLength(len(prepend) + len(value))
        if not internalFormat:
            value = cls(value)
        return value

    @classmethod
    def fromBinaryString(cls, value, internalFormat=False, prepend=None):
        """Create a |ASN.1| object initialized from a string of '0' and '1'.

        Parameters
        ----------
        value: :class:`str`
            Text string like '1010111'
        """
        try:
            value = SizedInteger(value or '0', 2).setBitLength(len(value))
        except ValueError:
            raise error.PyAsn1Error('%s.fromBinaryString() error: %s' % (cls.__name__, sys.exc_info()[1]))
        if prepend is not None:
            value = SizedInteger(
                (SizedInteger(prepend) << len(value)) | value
            ).setBitLength(len(prepend) + len(value))
        if not internalFormat:
            value = cls(value)
        return value

    @classmethod
    def fromOctetString(cls, value, internalFormat=False, prepend=None, padding=0):
        """Create a |ASN.1| object initialized from a string.

        Parameters
        ----------
        value: :class:`str` (Py2) or :class:`bytes` (Py3)
            Text string like '\\\\x01\\\\xff' (Py2) or b'\\\\x01\\\\xff' (Py3)
        """
        value = SizedInteger(integer.from_bytes(value) >> padding).setBitLength(len(value) * 8 - padding)
        if prepend is not None:
            value = SizedInteger(
                (SizedInteger(prepend) << len(value)) | value
            ).setBitLength(len(prepend) + len(value))
        if not internalFormat:
            value = cls(value)
        return value

    def prettyIn(self, value):
        # Normalize any accepted initializer into a SizedInteger.
        if isinstance(value, SizedInteger):
            return value
        elif octets.isStringType(value):
            if not value:
                return SizedInteger(0).setBitLength(0)
            elif value[0] == '\'':  # "'1011'B" -- ASN.1 schema representation (deprecated)
                if value[-2:] == '\'B':
                    return self.fromBinaryString(value[1:-2], internalFormat=True)
                elif value[-2:] == '\'H':
                    return self.fromHexString(value[1:-2], internalFormat=True)
                else:
                    raise error.PyAsn1Error(
                        'Bad BIT STRING value notation %s' % (value,)
                    )
            elif self.namedValues and not value.isdigit():  # named bits like 'Urgent, Active'
                names = [x.strip() for x in value.split(',')]
                try:
                    bitPositions = [self.namedValues[name] for name in names]
                except KeyError:
                    raise error.PyAsn1Error('unknown bit name(s) in %r' % (names,))
                rightmostPosition = max(bitPositions)
                number = 0
                for bitPosition in bitPositions:
                    number |= 1 << (rightmostPosition - bitPosition)
                return SizedInteger(number).setBitLength(rightmostPosition + 1)
            elif value.startswith('0x'):
                return self.fromHexString(value[2:], internalFormat=True)
            elif value.startswith('0b'):
                return self.fromBinaryString(value[2:], internalFormat=True)
            else:  # assume plain binary string like '1011'
                return self.fromBinaryString(value, internalFormat=True)
        elif isinstance(value, (tuple, list)):
            return self.fromBinaryString(''.join([b and '1' or '0' for b in value]), internalFormat=True)
        elif isinstance(value, BitString):
            return SizedInteger(value).setBitLength(len(value))
        # BUGFIX: was `isinstance(value, intTypes)` -- `intTypes` is not
        # defined anywhere in this module, so initializing a BitString from
        # a plain integer raised NameError. SizedIntegerBase (int, or long
        # on Py2) is the type this branch is meant to accept; this matches
        # the upstream pyasn1 fix.
        elif isinstance(value, SizedIntegerBase):
            return SizedInteger(value)
        else:
            raise error.PyAsn1Error(
                'Bad BitString initializer type \'%s\'' % (value,)
            )
# Compatibility shim: Python 2.4 lacked the builtin all(); define a pure
# Python fallback only when the name is missing.
try:
    # noinspection PyStatementEffect
    all
except NameError:  # Python 2.4
    # noinspection PyShadowingBuiltins
    def all(iterable):
        # True only if every element of `iterable` is truthy.
        for element in iterable:
            if not element:
                return False
        return True
class OctetString(base.AbstractSimpleAsn1Item):
    """Create |ASN.1| schema or value object.
    |ASN.1| objects are immutable and duck-type Python 2 :class:`str` or Python 3 :class:`bytes`.
    When used in Unicode context, |ASN.1| type assumes "|encoding|" serialisation.
    Keyword Args
    ------------
    value: :class:`str`, :class:`bytes` or |ASN.1| object
        string (Python 2) or bytes (Python 3), alternatively unicode object
        (Python 2) or string (Python 3) representing character string to be
        serialised into octets (note `encoding` parameter) or |ASN.1| object.
    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)
    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s)
    encoding: :py:class:`str`
        Unicode codec ID to encode/decode :class:`unicode` (Python 2) or
        :class:`str` (Python 3) the payload when |ASN.1| object is used
        in text string context.
    binValue: :py:class:`str`
        Binary string initializer to use instead of the *value*.
        Example: '10110011'.
    hexValue: :py:class:`str`
        Hexadecimal string initializer to use instead of the *value*.
        Example: 'DEADBEEF'.
    Raises
    ------
    :py:class:`~pyasn1.error.PyAsn1Error`
        On constraint violation or bad initializer.
    Examples
    --------
    .. code-block:: python
        class Icon(OctetString):
            '''
            ASN.1 specification:
            Icon ::= OCTET STRING
            icon1 Icon ::= '001100010011001000110011'B
            icon2 Icon ::= '313233'H
            '''
        icon1 = Icon.fromBinaryString('001100010011001000110011')
        icon2 = Icon.fromHexString('313233')
    """
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x04)
    )
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()
    # Optimization for faster codec lookup
    typeId = base.AbstractSimpleAsn1Item.getTypeId()
    # Class-level default initializers; subclasses may override either one
    # to give schema objects an implicit value.
    defaultBinValue = defaultHexValue = noValue
    encoding = 'iso-8859-1'
    def __init__(self, value=noValue, **kwargs):
        if kwargs:
            if value is noValue:
                # Alternative initializers: binValue/hexValue keyword args
                # replace *value*.  If both are supplied, hexValue is
                # processed last and wins.
                try:
                    value = self.fromBinaryString(kwargs.pop('binValue'))
                except KeyError:
                    pass
                try:
                    value = self.fromHexString(kwargs.pop('hexValue'))
                except KeyError:
                    pass
        if value is noValue:
            # Fall back to class-level defaults (possibly set by subclasses).
            if self.defaultBinValue is not noValue:
                value = self.fromBinaryString(self.defaultBinValue)
            elif self.defaultHexValue is not noValue:
                value = self.fromHexString(self.defaultHexValue)
        if 'encoding' not in kwargs:
            kwargs['encoding'] = self.encoding
        base.AbstractSimpleAsn1Item.__init__(self, value, **kwargs)
    # The octet payload is kept as the native binary type of the running
    # interpreter: str on Python 2, bytes on Python 3.  Both branches expose
    # the same API.
    if sys.version_info[0] <= 2:
        def prettyIn(self, value):
            """Normalize *value* into a Python 2 str of octets."""
            if isinstance(value, str):
                return value
            elif isinstance(value, unicode):
                try:
                    return value.encode(self.encoding)
                except (LookupError, UnicodeEncodeError):
                    raise error.PyAsn1Error(
                        "Can't encode string '%s' with codec %s" % (value, self.encoding)
                    )
            elif isinstance(value, (tuple, list)):
                # sequence of integer octet values
                try:
                    return ''.join([chr(x) for x in value])
                except ValueError:
                    raise error.PyAsn1Error(
                        "Bad %s initializer '%s'" % (self.__class__.__name__, value)
                    )
            else:
                return str(value)
        def __str__(self):
            return str(self._value)
        def __unicode__(self):
            try:
                return self._value.decode(self.encoding)
            except UnicodeDecodeError:
                raise error.PyAsn1Error(
                    "Can't decode string '%s' with codec %s" % (self._value, self.encoding)
                )
        def asOctets(self):
            """Return payload as a Python 2 octet-string (str)."""
            return str(self._value)
        def asNumbers(self):
            """Return payload as a tuple of integer octet values."""
            return tuple([ord(x) for x in self._value])
    else:
        def prettyIn(self, value):
            """Normalize *value* into a Python 3 bytes object."""
            if isinstance(value, bytes):
                return value
            elif isinstance(value, str):
                try:
                    return value.encode(self.encoding)
                except UnicodeEncodeError:
                    raise error.PyAsn1Error(
                        "Can't encode string '%s' with '%s' codec" % (value, self.encoding)
                    )
            elif isinstance(value, OctetString):  # a shortcut, bytes() would work the same way
                return value.asOctets()
            elif isinstance(value, base.AbstractSimpleAsn1Item):  # this mostly targets Integer objects
                return self.prettyIn(str(value))
            elif isinstance(value, (tuple, list)):
                return self.prettyIn(bytes(value))
            else:
                return bytes(value)
        def __str__(self):
            try:
                return self._value.decode(self.encoding)
            except UnicodeDecodeError:
                raise error.PyAsn1Error(
                    "Can't decode string '%s' with '%s' codec at '%s'" % (self._value, self.encoding, self.__class__.__name__)
                )
        def __bytes__(self):
            return bytes(self._value)
        def asOctets(self):
            """Return payload as a bytes object."""
            return bytes(self._value)
        def asNumbers(self):
            """Return payload as a tuple of integer octet values."""
            return tuple(self._value)
    #
    # Normally, `.prettyPrint()` is called from `__str__()`. Historically,
    # OctetString.prettyPrint() used to return hexified payload
    # representation in cases when non-printable content is present. At the
    # same time `str()` used to produce either octet-stream (Py2) or
    # text (Py3) representations.
    #
    # Therefore `OctetString.__str__()` -> `.prettyPrint()` call chain is
    # reversed to preserve the original behaviour.
    #
    # Eventually we should deprecate `.prettyPrint()` / `.prettyOut()` harness
    # and end up with just `__str__()` producing hexified representation while
    # both text and octet-stream representation should only be requested via
    # the `.asOctets()` method.
    #
    # Note: ASN.1 OCTET STRING is never mean to contain text!
    #
    def prettyOut(self, value):
        # Identity by default; subclasses may override to customize display.
        return value
    def prettyPrint(self, scope=0):
        # first see if subclass has its own .prettyOut()
        value = self.prettyOut(self._value)
        if value is not self._value:
            return value
        numbers = self.asNumbers()
        for x in numbers:
            # hexify if needed
            if x < 32 or x > 126:
                return '0x' + ''.join(('%.2x' % x for x in numbers))
        else:
            # for-else: reached only when every octet was printable ASCII.
            # Calling OctetString.__str__ directly (not str(self))
            # prevents infinite recursion through a subclass __str__.
            return OctetString.__str__(self)
    @staticmethod
    def fromBinaryString(value):
        """Create a |ASN.1| object initialized from a string of '0' and '1'.
        Parameters
        ----------
        value: :class:`str`
            Text string like '1010111'
        """
        # Pack bits MSB-first, eight per octet; a trailing partial byte is
        # zero-padded on the right.
        # NOTE: the final byte is appended unconditionally, so an empty
        # input string yields a single zero byte rather than no octets.
        bitNo = 8
        byte = 0
        r = []
        for v in value:
            if bitNo:
                bitNo -= 1
            else:
                # byte boundary: flush the accumulated octet
                bitNo = 7
                r.append(byte)
                byte = 0
            if v in ('0', '1'):
                v = int(v)
            else:
                raise error.PyAsn1Error(
                    'Non-binary OCTET STRING initializer %s' % (v,)
                )
            byte |= v << bitNo
        r.append(byte)
        return octets.ints2octs(r)
    @staticmethod
    def fromHexString(value):
        """Create a |ASN.1| object initialized from the hex string.
        Parameters
        ----------
        value: :class:`str`
            Text string like 'DEADBEEF'
        """
        # Consume hex digits in pairs; an odd trailing digit is padded
        # with a zero low nibble (e.g. 'ABC' -> 0xAB, 0xC0).
        r = []
        p = []
        for v in value:
            if p:
                r.append(int(p + v, 16))
                p = None
            else:
                p = v
        if p:
            r.append(int(p + '0', 16))
        return octets.ints2octs(r)
    # Immutable sequence object protocol
    def __len__(self):
        return len(self._value)
    def __getitem__(self, i):
        # slicing returns a cloned OctetString; indexing returns raw item
        if i.__class__ is slice:
            return self.clone(self._value[i])
        else:
            return self._value[i]
    def __iter__(self):
        return iter(self._value)
    def __contains__(self, value):
        return value in self._value
    def __add__(self, value):
        return self.clone(self._value + self.prettyIn(value))
    def __radd__(self, value):
        return self.clone(self.prettyIn(value) + self._value)
    def __mul__(self, value):
        return self.clone(self._value * value)
    def __rmul__(self, value):
        return self * value
    def __int__(self):
        return int(self._value)
    def __float__(self):
        return float(self._value)
    def __reversed__(self):
        return reversed(self._value)
class Null(OctetString):
    """Create |ASN.1| schema or value object.
    |ASN.1| objects are immutable and duck-type Python :class:`str` objects (always empty).
    Keyword Args
    ------------
    value: :class:`str` or :py:class:`~pyasn1.type.univ.Null` object
        Python empty string literal or any object that evaluates to `False`
    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)
    Raises
    ------
    :py:class:`~pyasn1.error.PyAsn1Error`
        On constraint violation or bad initializer.
    Examples
    --------
    .. code-block:: python
        class Ack(Null):
            '''
            ASN.1 specification:
            Ack ::= NULL
            '''
        ack = Ack('')
    """
    #: ASN.1 tag(s) for the NULL type (UNIVERSAL 5); override on the class,
    #: not on instances.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x05)
    )
    # NULL carries no payload: constrain the inherited OctetString value
    # space to the empty octet-stream only.
    subtypeSpec = OctetString.subtypeSpec + constraint.SingleValueConstraint(octets.str2octs(''))
    # Optimization for faster codec lookup
    typeId = OctetString.getTypeId()
    def prettyIn(self, value):
        """Pass truthy initializers through; map any falsy one to ''."""
        return value or octets.str2octs('')
# Integral types differ across Python major versions: Python 2 has both
# int and long, Python 3 unified them into int.
if sys.version_info[0] <= 2:
    intTypes = (int, long)
else:
    intTypes = (int,)
# Types accepted wherever a numeric value is expected (e.g. Real mantissa).
numericTypes = intTypes + (float,)
class ObjectIdentifier(base.AbstractSimpleAsn1Item):
    """Create |ASN.1| schema or value object.
    |ASN.1| objects are immutable and duck-type Python :class:`tuple` objects (tuple of non-negative integers).
    Keyword Args
    ------------
    value: :class:`tuple`, :class:`str` or |ASN.1| object
        Python sequence of :class:`int` or string literal or |ASN.1| object.
    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)
    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s)
    Raises
    ------
    :py:class:`~pyasn1.error.PyAsn1Error`
        On constraint violation or bad initializer.
    Examples
    --------
    .. code-block:: python
        class ID(ObjectIdentifier):
            '''
            ASN.1 specification:
            ID ::= OBJECT IDENTIFIER
            id-edims ID ::= { joint-iso-itu-t mhs-motif(6) edims(7) }
            id-bp ID ::= { id-edims 11 }
            '''
        id_edims = ID('2.6.7')
        id_bp = id_edims + (11,)
    """
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x06)
    )
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()
    # Optimization for faster codec lookup
    typeId = base.AbstractSimpleAsn1Item.getTypeId()
    def __add__(self, other):
        # OID concatenation: oid + (1, 3) -> new, longer OID
        return self.clone(self._value + other)
    def __radd__(self, other):
        return self.clone(other + self._value)
    def asTuple(self):
        """Return the sub-identifiers as a plain :class:`tuple`."""
        return self._value
    # Sequence object protocol
    def __len__(self):
        return len(self._value)
    def __getitem__(self, i):
        # slicing yields a cloned ObjectIdentifier, indexing a plain int
        if i.__class__ is slice:
            return self.clone(self._value[i])
        else:
            return self._value[i]
    def __iter__(self):
        return iter(self._value)
    def __contains__(self, value):
        return value in self._value
    def index(self, suboid):
        """Return the position of *suboid* (raises ValueError if absent)."""
        return self._value.index(suboid)
    def isPrefixOf(self, other):
        """Indicate if this |ASN.1| object is a prefix of other |ASN.1| object.
        Parameters
        ----------
        other: |ASN.1| object
            |ASN.1| object
        Returns
        -------
        : :class:`bool`
            :class:`True` if this |ASN.1| object is a parent (e.g. prefix) of the other |ASN.1| object
            or :class:`False` otherwise.
        """
        prefixLen = len(self)
        if prefixLen <= len(other):
            if self._value[:prefixLen] == other[:prefixLen]:
                return True
        return False
    def prettyIn(self, value):
        """Normalize *value* into a tuple of non-negative integers.

        Accepts another ObjectIdentifier, a dotted string like '1.3.6.1',
        or any sequence of non-negative integers.

        Raises
        ------
        :py:class:`~pyasn1.error.PyAsn1Error`
            On malformed input.
        """
        if isinstance(value, ObjectIdentifier):
            return tuple(value)
        elif octets.isStringType(value):
            if '-' in value:
                # Negative sub-identifiers are illegal.  No exception is in
                # flight on this path, so don't format sys.exc_info() into
                # the message -- it would always render as 'None'.
                raise error.PyAsn1Error(
                    'Malformed Object ID %s at %s' % (value, self.__class__.__name__)
                )
            try:
                # empty components tolerate leading/trailing/double dots
                return tuple([int(subOid) for subOid in value.split('.') if subOid])
            except ValueError:
                raise error.PyAsn1Error(
                    'Malformed Object ID %s at %s: %s' % (value, self.__class__.__name__, sys.exc_info()[1])
                )
        try:
            tupleOfInts = tuple([int(subOid) for subOid in value if subOid >= 0])
        except (ValueError, TypeError):
            raise error.PyAsn1Error(
                'Malformed Object ID %s at %s: %s' % (value, self.__class__.__name__, sys.exc_info()[1])
            )
        if len(tupleOfInts) == len(value):
            return tupleOfInts
        # A length mismatch means some sub-identifier was negative and got
        # filtered out above.
        raise error.PyAsn1Error('Malformed Object ID %s at %s' % (value, self.__class__.__name__))
    def prettyOut(self, value):
        # dotted-decimal display form, e.g. (1, 3, 6) -> '1.3.6'
        return '.'.join([str(x) for x in value])
class Real(base.AbstractSimpleAsn1Item):
    """Create |ASN.1| schema or value object.
    |ASN.1| objects are immutable and duck-type Python :class:`float` objects.
    Additionally, |ASN.1| objects behave like a :class:`tuple` in which case its
    elements are mantissa, base and exponent.
    Keyword Args
    ------------
    value: :class:`tuple`, :class:`float` or |ASN.1| object
        Python sequence of :class:`int` (representing mantissa, base and
        exponent) or float instance or *Real* class instance.
    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)
    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s)
    Raises
    ------
    :py:class:`~pyasn1.error.PyAsn1Error`
        On constraint violation or bad initializer.
    Examples
    --------
    .. code-block:: python
        class Pi(Real):
            '''
            ASN.1 specification:
            Pi ::= REAL
            pi Pi ::= { mantissa 314159, base 10, exponent -5 }
            '''
        pi = Pi((314159, 10, -5))
    """
    binEncBase = None  # binEncBase = 16 is recommended for large numbers
    try:
        _plusInf = float('inf')
        _minusInf = float('-inf')
        _inf = _plusInf, _minusInf
    except ValueError:
        # Infinity support is platform and Python dependent
        _plusInf = _minusInf = None
        _inf = ()
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x09)
    )
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()
    # Optimization for faster codec lookup
    typeId = base.AbstractSimpleAsn1Item.getTypeId()
    @staticmethod
    def __normalizeBase10(value):
        # Strip trailing decimal zeros off the mantissa, bumping the
        # exponent accordingly: (3140, 10, -3) -> (314, 10, -2).
        m, b, e = value
        while m and m % 10 == 0:
            # BUGFIX: use floor division -- plain `/=` is true division on
            # Python 3 and silently turns an integer mantissa into a float
            m //= 10
            e += 1
        return m, b, e
    def prettyIn(self, value):
        """Normalize *value* into (mantissa, base, exponent) or an inf float.

        Accepts an (m, b, e) triple with base 2 or 10, a plain int, a
        float, a numeric string, or another Real instance.

        Raises
        ------
        :py:class:`~pyasn1.error.PyAsn1Error`
            On malformed input or prohibited base.
        """
        if isinstance(value, tuple) and len(value) == 3:
            if (not isinstance(value[0], numericTypes) or
                    not isinstance(value[1], intTypes) or
                    not isinstance(value[2], intTypes)):
                raise error.PyAsn1Error('Lame Real value syntax: %s' % (value,))
            if (isinstance(value[0], float) and
                    self._inf and value[0] in self._inf):
                # infinity is stored as the bare float, not a triple
                return value[0]
            if value[1] not in (2, 10):
                raise error.PyAsn1Error(
                    'Prohibited base for Real value: %s' % (value[1],)
                )
            if value[1] == 10:
                value = self.__normalizeBase10(value)
            return value
        elif isinstance(value, intTypes):
            return self.__normalizeBase10((value, 10, 0))
        elif isinstance(value, float) or octets.isStringType(value):
            if octets.isStringType(value):
                try:
                    value = float(value)
                except ValueError:
                    raise error.PyAsn1Error(
                        'Bad real value syntax: %s' % (value,)
                    )
            if self._inf and value in self._inf:
                return value
            else:
                # scale the float up until it is integral, tracking the
                # base-10 exponent
                e = 0
                while int(value) != value:
                    value *= 10
                    e -= 1
                return self.__normalizeBase10((int(value), 10, e))
        elif isinstance(value, Real):
            return tuple(value)
        raise error.PyAsn1Error(
            'Bad real value syntax: %s' % (value,)
        )
    def prettyPrint(self, scope=0):
        try:
            return self.prettyOut(float(self))
        except OverflowError:
            return '<overflow>'
    @property
    def isPlusInf(self):
        """Indicate PLUS-INFINITY object value
        Returns
        -------
        : :class:`bool`
            :class:`True` if calling object represents plus infinity
            or :class:`False` otherwise.
        """
        return self._value == self._plusInf
    @property
    def isMinusInf(self):
        """Indicate MINUS-INFINITY object value
        Returns
        -------
        : :class:`bool`
            :class:`True` if calling object represents minus infinity
            or :class:`False` otherwise.
        """
        return self._value == self._minusInf
    @property
    def isInf(self):
        return self._value in self._inf
    # Arithmetic: operate on the float projection, return cloned Real
    def __add__(self, value):
        return self.clone(float(self) + value)
    def __radd__(self, value):
        return self + value
    def __mul__(self, value):
        return self.clone(float(self) * value)
    def __rmul__(self, value):
        return self * value
    def __sub__(self, value):
        return self.clone(float(self) - value)
    def __rsub__(self, value):
        return self.clone(value - float(self))
    def __mod__(self, value):
        return self.clone(float(self) % value)
    def __rmod__(self, value):
        return self.clone(value % float(self))
    def __pow__(self, value, modulo=None):
        return self.clone(pow(float(self), value, modulo))
    def __rpow__(self, value):
        return self.clone(pow(value, float(self)))
    if sys.version_info[0] <= 2:
        def __div__(self, value):
            return self.clone(float(self) / value)
        def __rdiv__(self, value):
            return self.clone(value / float(self))
    else:
        def __truediv__(self, value):
            return self.clone(float(self) / value)
        def __rtruediv__(self, value):
            return self.clone(value / float(self))
        def __divmod__(self, value):
            # NOTE(review): this returns only the floor-division quotient,
            # not the (quotient, remainder) pair divmod() normally yields.
            # Kept as-is for backward compatibility with existing callers.
            return self.clone(float(self) // value)
        def __rdivmod__(self, value):
            # NOTE(review): see __divmod__ -- floor division only.
            return self.clone(value // float(self))
    def __int__(self):
        return int(float(self))
    if sys.version_info[0] <= 2:
        def __long__(self):
            return long(float(self))
    def __float__(self):
        if self._value in self._inf:
            # infinities are stored as bare floats
            return self._value
        else:
            # reconstruct m * b**e from the stored triple
            return float(
                self._value[0] * pow(self._value[1], self._value[2])
            )
    def __abs__(self):
        return self.clone(abs(float(self)))
    def __pos__(self):
        return self.clone(+float(self))
    def __neg__(self):
        return self.clone(-float(self))
    def __round__(self, n=0):
        r = round(float(self), n)
        if n:
            return self.clone(r)
        else:
            # NOTE(review): round(x) yields a bare number while
            # round(x, n) yields a Real -- mirrors Python's own round()
            # contract; verify before changing.
            return r
    def __floor__(self):
        return self.clone(math.floor(float(self)))
    def __ceil__(self):
        return self.clone(math.ceil(float(self)))
    if sys.version_info[0:2] > (2, 5):
        def __trunc__(self):
            return self.clone(math.trunc(float(self)))
    def __lt__(self, value):
        return float(self) < value
    def __le__(self, value):
        return float(self) <= value
    def __eq__(self, value):
        return float(self) == value
    def __ne__(self, value):
        return float(self) != value
    def __gt__(self, value):
        return float(self) > value
    def __ge__(self, value):
        return float(self) >= value
    if sys.version_info[0] <= 2:
        def __nonzero__(self):
            return bool(float(self))
    else:
        def __bool__(self):
            return bool(float(self))
        # defining __eq__ suppresses inherited hashing on Python 3;
        # restore it explicitly
        __hash__ = base.AbstractSimpleAsn1Item.__hash__
    def __getitem__(self, idx):
        # tuple-style access to (mantissa, base, exponent)
        if self._value in self._inf:
            raise error.PyAsn1Error('Invalid infinite value operation')
        else:
            return self._value[idx]
    # compatibility stubs
    def isPlusInfinity(self):
        return self.isPlusInf
    def isMinusInfinity(self):
        return self.isMinusInf
    def isInfinity(self):
        return self.isInf
class Enumerated(Integer):
    """Create |ASN.1| type or object.
    |ASN.1| objects are immutable and duck-type Python :class:`int` objects.
    Keyword Args
    ------------
    value: :class:`int`, :class:`str` or |ASN.1| object
        Python integer or string literal or |ASN.1| class instance.
    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)
    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s)
    namedValues: :py:class:`~pyasn1.type.namedval.NamedValues`
        Object representing non-default symbolic aliases for numbers
    Raises
    ------
    :py:class:`~pyasn1.error.PyAsn1Error`
        On constraint violation or bad initializer.
    Examples
    --------
    .. code-block:: python
        class RadioButton(Enumerated):
            '''
            ASN.1 specification:
            RadioButton ::= ENUMERATED { button1(0), button2(1),
                                         button3(2) }
            selected-by-default RadioButton ::= button1
            '''
            namedValues = NamedValues(
                ('button1', 0), ('button2', 1),
                ('button3', 2)
            )
        selected_by_default = RadioButton('button1')
    """
    # ENUMERATED shares Integer's behaviour; only the universal tag (10 vs 2)
    # and the (empty by default) named-values map differ.
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x0A)
    )
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()
    # Optimization for faster codec lookup
    typeId = Integer.getTypeId()
    #: Default :py:class:`~pyasn1.type.namedval.NamedValues` object
    #: representing symbolic aliases for numbers
    namedValues = namedval.NamedValues()
# "Structured" ASN.1 types
class SequenceOfAndSetOfBase(base.AbstractConstructedAsn1Item):
    """Create |ASN.1| type.
    |ASN.1| objects are mutable and duck-type Python :class:`list` objects.
    Keyword Args
    ------------
    componentType : :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
        A pyasn1 object representing ASN.1 type allowed within |ASN.1| type
    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)
    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s)
    sizeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing collection size constraint
    Examples
    --------
    .. code-block:: python
        class LotteryDraw(SequenceOf): # SetOf is similar
            '''
            ASN.1 specification:
            LotteryDraw ::= SEQUENCE OF INTEGER
            '''
            componentType = Integer()
        lotteryDraw = LotteryDraw()
        lotteryDraw.extend([123, 456, 789])
    """
    def __init__(self, *args, **kwargs):
        # support positional params for backward compatibility
        if args:
            for key, value in zip(('componentType', 'tagSet',
                                   'subtypeSpec', 'sizeSpec'), args):
                if key in kwargs:
                    raise error.PyAsn1Error('Conflicting positional and keyword params!')
                # BUGFIX: store each positional argument under its own
                # keyword -- previously every positional arg was written to
                # kwargs['componentType'], clobbering it and dropping
                # positional tagSet/subtypeSpec/sizeSpec entirely.
                kwargs[key] = value
        base.AbstractConstructedAsn1Item.__init__(self, **kwargs)
    # Python list protocol
    def __getitem__(self, idx):
        try:
            return self.getComponentByPosition(idx)
        except error.PyAsn1Error:
            # duck-typing list: surface pyasn1 errors as IndexError
            raise IndexError(sys.exc_info()[1])
    def __setitem__(self, idx, value):
        try:
            self.setComponentByPosition(idx, value)
        except error.PyAsn1Error:
            raise IndexError(sys.exc_info()[1])
    def clear(self):
        # drop all components (schema settings are untouched)
        self._componentValues = []
    def append(self, value):
        # assigning one past the end extends the collection
        self[len(self)] = value
    def count(self, value):
        return self._componentValues.count(value)
    def extend(self, values):
        for value in values:
            self.append(value)
    def index(self, value, start=0, stop=None):
        if stop is None:
            stop = len(self)
        try:
            return self._componentValues.index(value, start, stop)
        except error.PyAsn1Error:
            # component __eq__ may raise pyasn1 errors; map to ValueError
            # to keep the list-like contract
            raise ValueError(sys.exc_info()[1])
    def reverse(self):
        self._componentValues.reverse()
    def sort(self, key=None, reverse=False):
        self._componentValues.sort(key=key, reverse=reverse)
    def __iter__(self):
        return iter(self._componentValues)
    def _cloneComponentValues(self, myClone, cloneValueFlag):
        # deep-copy helper used by clone(); constructed components clone
        # recursively, scalar ones shallowly
        for idx, componentValue in enumerate(self._componentValues):
            if componentValue is not noValue:
                if isinstance(componentValue, base.AbstractConstructedAsn1Item):
                    myClone.setComponentByPosition(
                        idx, componentValue.clone(cloneValueFlag=cloneValueFlag)
                    )
                else:
                    myClone.setComponentByPosition(idx, componentValue.clone())
    def getComponentByPosition(self, idx, default=noValue, instantiate=True):
        """Return |ASN.1| type component value by position.
        Equivalent to Python sequence subscription operation (e.g. `[]`).
        Parameters
        ----------
        idx : :class:`int`
            Component index (zero-based). Must either refer to an existing
            component or to N+1 component (if *componentType* is set). In the latter
            case a new component type gets instantiated and appended to the |ASN.1|
            sequence.
        Keyword Args
        ------------
        default: :class:`object`
            If set and requested component is a schema object, return the `default`
            object instead of the requested component.
        instantiate: :class:`bool`
            If `True` (default), inner component will be automatically instantiated.
            If 'False' either existing component or the `noValue` object will be
            returned.
        Returns
        -------
        : :py:class:`~pyasn1.type.base.PyAsn1Item`
            Instantiate |ASN.1| component type or return existing component value
        Examples
        --------
        .. code-block:: python
            # can also be SetOf
            class MySequenceOf(SequenceOf):
                componentType = OctetString()
            s = MySequenceOf()
            # returns component #0 with `.isValue` property False
            s.getComponentByPosition(0)
            # returns None
            s.getComponentByPosition(0, default=None)
            s.clear()
            # returns noValue
            s.getComponentByPosition(0, instantiate=False)
            # sets component #0 to OctetString() ASN.1 schema
            # object and returns it
            s.getComponentByPosition(0, instantiate=True)
            # sets component #0 to ASN.1 value object
            s.setComponentByPosition(0, 'ABCD')
            # returns OctetString('ABCD') value object
            s.getComponentByPosition(0, instantiate=False)
            s.clear()
            # returns noValue
            s.getComponentByPosition(0, instantiate=False)
        """
        try:
            componentValue = self._componentValues[idx]
        except IndexError:
            if not instantiate:
                return default
            # auto-instantiate the missing component from the schema
            self.setComponentByPosition(idx)
            componentValue = self._componentValues[idx]
        if default is noValue or componentValue.isValue:
            return componentValue
        else:
            return default
    def setComponentByPosition(self, idx, value=noValue,
                               verifyConstraints=True,
                               matchTags=True,
                               matchConstraints=True):
        """Assign |ASN.1| type component by position.
        Equivalent to Python sequence item assignment operation (e.g. `[]`)
        or list.append() (when idx == len(self)).
        Parameters
        ----------
        idx: :class:`int`
            Component index (zero-based). Must either refer to existing
            component or to N+1 component. In the latter case a new component
            type gets instantiated (if *componentType* is set, or given ASN.1
            object is taken otherwise) and appended to the |ASN.1| sequence.
        Keyword Args
        ------------
        value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
            A Python value to initialize |ASN.1| component with (if *componentType* is set)
            or ASN.1 value object to assign to |ASN.1| component.
        verifyConstraints: :class:`bool`
            If `False`, skip constraints validation
        matchTags: :class:`bool`
            If `False`, skip component tags matching
        matchConstraints: :class:`bool`
            If `False`, skip component constraints matching
        Returns
        -------
        self
        Raises
        ------
        IndexError:
            When idx > len(self)
        """
        componentType = self.componentType
        try:
            currentValue = self._componentValues[idx]
        except IndexError:
            currentValue = noValue
            # only appending at exactly len(self) is allowed
            if len(self._componentValues) < idx:
                raise error.PyAsn1Error('Component index out of range')
        if value is noValue:
            # no value given: instantiate a fresh schema object
            if componentType is not None:
                value = componentType.clone()
            elif currentValue is noValue:
                raise error.PyAsn1Error('Component type not defined')
        elif not isinstance(value, base.Asn1Item):
            # raw Python value: coerce through the schema (or current) type
            if componentType is not None and isinstance(componentType, base.AbstractSimpleAsn1Item):
                value = componentType.clone(value=value)
            elif currentValue is not noValue and isinstance(currentValue, base.AbstractSimpleAsn1Item):
                value = currentValue.clone(value=value)
            else:
                raise error.PyAsn1Error('Non-ASN.1 value %r and undefined component type at %r' % (value, self))
        elif componentType is not None:
            # ASN.1 object given: verify type compatibility with the schema
            if self.strictConstraints:
                if not componentType.isSameTypeWith(value, matchTags, matchConstraints):
                    raise error.PyAsn1Error('Component value is tag-incompatible: %r vs %r' % (value, componentType))
            else:
                if not componentType.isSuperTypeOf(value, matchTags, matchConstraints):
                    raise error.PyAsn1Error('Component value is tag-incompatible: %r vs %r' % (value, componentType))
        if verifyConstraints and value.isValue:
            try:
                self.subtypeSpec(value, idx)
            except error.PyAsn1Error:
                # re-raise with this type's name attached for context
                exType, exValue, exTb = sys.exc_info()
                raise exType('%s at %s' % (exValue, self.__class__.__name__))
        if currentValue is noValue:
            self._componentValues.append(value)
        else:
            self._componentValues[idx] = value
        return self
    @property
    def componentTagMap(self):
        # None when no componentType schema is configured
        if self.componentType is not None:
            return self.componentType.tagMap
    def prettyPrint(self, scope=0):
        # indented, human-readable dump of all components
        scope += 1
        representation = self.__class__.__name__ + ':\n'
        for idx, componentValue in enumerate(self._componentValues):
            representation += ' ' * scope
            if (componentValue is noValue and
                    self.componentType is not None):
                representation += '<empty>'
            else:
                representation += componentValue.prettyPrint(scope)
        return representation
    def prettyPrintType(self, scope=0):
        # schema (not value) dump
        scope += 1
        representation = '%s -> %s {\n' % (self.tagSet, self.__class__.__name__)
        if self.componentType is not None:
            representation += ' ' * scope
            representation += self.componentType.prettyPrintType(scope)
        return representation + '\n' + ' ' * (scope - 1) + '}'
    @property
    def isValue(self):
        """Indicate that |ASN.1| object represents ASN.1 value.
        If *isValue* is `False` then this object represents just ASN.1 schema.
        If *isValue* is `True` then, in addition to its ASN.1 schema features,
        this object can also be used like a Python built-in object (e.g. `int`,
        `str`, `dict` etc.).
        Returns
        -------
        : :class:`bool`
            :class:`False` if object represents just ASN.1 schema.
            :class:`True` if object represents ASN.1 schema and can be used as a normal value.
        Note
        ----
        There is an important distinction between PyASN1 schema and value objects.
        The PyASN1 schema objects can only participate in ASN.1 schema-related
        operations (e.g. defining or testing the structure of the data). Most
        obvious uses of ASN.1 schema is to guide serialisation codecs whilst
        encoding/decoding serialised ASN.1 contents.
        The PyASN1 value objects can **additionally** participate in many operations
        involving regular Python objects (e.g. arithmetic, comprehension etc).
        """
        # a collection is a value only when every component is a value
        for componentValue in self._componentValues:
            if componentValue is noValue or not componentValue.isValue:
                return False
        return True
class SequenceOf(SequenceOfAndSetOfBase):
    # Concrete SEQUENCE OF type: inherits all behaviour from the shared
    # base; only the universal tag (16) and the type identity differ
    # from SetOf.
    __doc__ = SequenceOfAndSetOfBase.__doc__
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
    )
    #: Default :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
    #: object representing ASN.1 type allowed within |ASN.1| type
    componentType = None
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()
    #: Default :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
    #: object imposing size constraint on |ASN.1| objects
    sizeSpec = constraint.ConstraintsIntersection()
    # Disambiguation ASN.1 types identification
    typeId = SequenceOfAndSetOfBase.getTypeId()
class SetOf(SequenceOfAndSetOfBase):
    # Concrete SET OF type: inherits all behaviour from the shared base;
    # only the universal tag (17) and the type identity differ from
    # SequenceOf.
    __doc__ = SequenceOfAndSetOfBase.__doc__
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
    )
    #: Default :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
    #: object representing ASN.1 type allowed within |ASN.1| type
    componentType = None
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()
    #: Default :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
    #: object imposing size constraint on |ASN.1| objects
    sizeSpec = constraint.ConstraintsIntersection()
    # Disambiguation ASN.1 types identification
    typeId = SequenceOfAndSetOfBase.getTypeId()
class SequenceAndSetBase(base.AbstractConstructedAsn1Item):
"""Create |ASN.1| type.
|ASN.1| objects are mutable and duck-type Python :class:`dict` objects.
Keyword Args
------------
componentType: :py:class:`~pyasn1.type.namedtype.NamedType`
Object holding named ASN.1 types allowed within this collection
tagSet: :py:class:`~pyasn1.type.tag.TagSet`
Object representing non-default ASN.1 tag(s)
subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
Object representing non-default ASN.1 subtype constraint(s)
sizeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
Object representing collection size constraint
Examples
--------
.. code-block:: python
class Description(Sequence): # Set is similar
'''
ASN.1 specification:
Description ::= SEQUENCE {
surname IA5String,
first-name IA5String OPTIONAL,
age INTEGER DEFAULT 40
}
'''
componentType = NamedTypes(
NamedType('surname', IA5String()),
OptionalNamedType('first-name', IA5String()),
DefaultedNamedType('age', Integer(40))
)
descr = Description()
descr['surname'] = 'Smith'
descr['first-name'] = 'John'
"""
#: Default :py:class:`~pyasn1.type.namedtype.NamedTypes`
#: object representing named ASN.1 types allowed within |ASN.1| type
componentType = namedtype.NamedTypes()
    class DynamicNames(object):
        """Fields names/positions mapping for component-less objects.

        Used when the enclosing collection has no componentType schema:
        synthetic field names ('field-0', 'field-1', ...) are generated
        on demand and kept in two mirrored maps for bidirectional lookup.
        """
        def __init__(self):
            # name -> index and index -> name mirrors of the same mapping
            self._keyToIdxMap = {}
            self._idxToKeyMap = {}
        def __len__(self):
            return len(self._keyToIdxMap)
        def __contains__(self, item):
            # accept either a field name or a numeric position
            return item in self._keyToIdxMap or item in self._idxToKeyMap
        def __iter__(self):
            # iterate field names in position order
            return (self._idxToKeyMap[idx] for idx in range(len(self._idxToKeyMap)))
        def __getitem__(self, item):
            # try name lookup first, fall back to positional lookup
            try:
                return self._keyToIdxMap[item]
            except KeyError:
                return self._idxToKeyMap[item]
        def getNameByPosition(self, idx):
            try:
                return self._idxToKeyMap[idx]
            except KeyError:
                raise error.PyAsn1Error('Type position out of range')
        def getPositionByName(self, name):
            try:
                return self._keyToIdxMap[name]
            except KeyError:
                raise error.PyAsn1Error('Name %s not found' % (name,))
        def addField(self, idx):
            # register synthetic 'field-<idx>' name for position idx
            self._keyToIdxMap['field-%d' % idx] = idx
            self._idxToKeyMap[idx] = 'field-%d' % idx
def __init__(self, **kwargs):
base.AbstractConstructedAsn1Item.__init__(self, **kwargs)
self._componentTypeLen = len(self.componentType)
self._dynamicNames = self._componentTypeLen or self.DynamicNames()
def __getitem__(self, idx):
if octets.isStringType(idx):
try:
return self.getComponentByName(idx)
except error.PyAsn1Error:
# duck-typing dict
raise KeyError(sys.exc_info()[1])
else:
try:
return self.getComponentByPosition(idx)
except error.PyAsn1Error:
# duck-typing list
raise IndexError(sys.exc_info()[1])
def __setitem__(self, idx, value):
if octets.isStringType(idx):
try:
self.setComponentByName(idx, value)
except error.PyAsn1Error:
# duck-typing dict
raise KeyError(sys.exc_info()[1])
else:
try:
self.setComponentByPosition(idx, value)
except error.PyAsn1Error:
# duck-typing list
raise IndexError(sys.exc_info()[1])
def __contains__(self, key):
if self._componentTypeLen:
return key in self.componentType
else:
return key in self._dynamicNames
def __iter__(self):
return iter(self.componentType or self._dynamicNames)
# Python dict protocol
def values(self):
for idx in range(self._componentTypeLen or len(self._dynamicNames)):
yield self[idx]
def keys(self):
return iter(self)
def items(self):
for idx in range(self._componentTypeLen or len(self._dynamicNames)):
if self._componentTypeLen:
yield self.componentType[idx].name, self[idx]
else:
yield self._dynamicNames[idx], self[idx]
def update(self, *iterValue, **mappingValue):
for k, v in iterValue:
self[k] = v
for k in mappingValue:
self[k] = mappingValue[k]
def clear(self):
self._componentValues = []
self._dynamicNames = self.DynamicNames()
def _cloneComponentValues(self, myClone, cloneValueFlag):
for idx, componentValue in enumerate(self._componentValues):
if componentValue is not noValue:
if isinstance(componentValue, base.AbstractConstructedAsn1Item):
myClone.setComponentByPosition(
idx, componentValue.clone(cloneValueFlag=cloneValueFlag)
)
else:
myClone.setComponentByPosition(idx, componentValue.clone())
def getComponentByName(self, name, default=noValue, instantiate=True):
"""Returns |ASN.1| type component by name.
Equivalent to Python :class:`dict` subscription operation (e.g. `[]`).
Parameters
----------
name: :class:`str`
|ASN.1| type component name
Keyword Args
------------
default: :class:`object`
If set and requested component is a schema object, return the `default`
object instead of the requested component.
instantiate: :class:`bool`
If `True` (default), inner component will be automatically instantiated.
If 'False' either existing component or the `noValue` object will be
returned.
Returns
-------
: :py:class:`~pyasn1.type.base.PyAsn1Item`
Instantiate |ASN.1| component type or return existing component value
"""
if self._componentTypeLen:
idx = self.componentType.getPositionByName(name)
else:
try:
idx = self._dynamicNames.getPositionByName(name)
except KeyError:
raise error.PyAsn1Error('Name %s not found' % (name,))
return self.getComponentByPosition(idx, default=default, instantiate=instantiate)
def setComponentByName(self, name, value=noValue,
verifyConstraints=True,
matchTags=True,
matchConstraints=True):
"""Assign |ASN.1| type component by name.
Equivalent to Python :class:`dict` item assignment operation (e.g. `[]`).
Parameters
----------
name: :class:`str`
|ASN.1| type component name
Keyword Args
------------
value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
A Python value to initialize |ASN.1| component with (if *componentType* is set)
or ASN.1 value object to assign to |ASN.1| component.
verifyConstraints: :class:`bool`
If `False`, skip constraints validation
matchTags: :class:`bool`
If `False`, skip component tags matching
matchConstraints: :class:`bool`
If `False`, skip component constraints matching
Returns
-------
self
"""
if self._componentTypeLen:
idx = self.componentType.getPositionByName(name)
else:
try:
idx = self._dynamicNames.getPositionByName(name)
except KeyError:
raise error.PyAsn1Error('Name %s not found' % (name,))
return self.setComponentByPosition(
idx, value, verifyConstraints, matchTags, matchConstraints
)
def getComponentByPosition(self, idx, default=noValue, instantiate=True):
"""Returns |ASN.1| type component by index.
Equivalent to Python sequence subscription operation (e.g. `[]`).
Parameters
----------
idx: :class:`int`
Component index (zero-based). Must either refer to an existing
component or (if *componentType* is set) new ASN.1 schema object gets
instantiated.
Keyword Args
------------
default: :class:`object`
If set and requested component is a schema object, return the `default`
object instead of the requested component.
instantiate: :class:`bool`
If `True` (default), inner component will be automatically instantiated.
If 'False' either existing component or the `noValue` object will be
returned.
Returns
-------
: :py:class:`~pyasn1.type.base.PyAsn1Item`
a PyASN1 object
Examples
--------
.. code-block:: python
# can also be Set
class MySequence(Sequence):
componentType = NamedTypes(
NamedType('id', OctetString())
)
s = MySequence()
# returns component #0 with `.isValue` property False
s.getComponentByPosition(0)
# returns None
s.getComponentByPosition(0, default=None)
s.clear()
# returns noValue
s.getComponentByPosition(0, instantiate=False)
# sets component #0 to OctetString() ASN.1 schema
# object and returns it
s.getComponentByPosition(0, instantiate=True)
# sets component #0 to ASN.1 value object
s.setComponentByPosition(0, 'ABCD')
# returns OctetString('ABCD') value object
s.getComponentByPosition(0, instantiate=False)
s.clear()
# returns noValue
s.getComponentByPosition(0, instantiate=False)
"""
try:
componentValue = self._componentValues[idx]
except IndexError:
componentValue = noValue
if not instantiate:
if componentValue is noValue or not componentValue.isValue:
return default
else:
return componentValue
if componentValue is noValue:
self.setComponentByPosition(idx)
componentValue = self._componentValues[idx]
if default is noValue or componentValue.isValue:
return componentValue
else:
return default
def setComponentByPosition(self, idx, value=noValue,
verifyConstraints=True,
matchTags=True,
matchConstraints=True):
"""Assign |ASN.1| type component by position.
Equivalent to Python sequence item assignment operation (e.g. `[]`).
Parameters
----------
idx : :class:`int`
Component index (zero-based). Must either refer to existing
component (if *componentType* is set) or to N+1 component
otherwise. In the latter case a new component of given ASN.1
type gets instantiated and appended to |ASN.1| sequence.
Keyword Args
------------
value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
A Python value to initialize |ASN.1| component with (if *componentType* is set)
or ASN.1 value object to assign to |ASN.1| component.
verifyConstraints : :class:`bool`
If `False`, skip constraints validation
matchTags: :class:`bool`
If `False`, skip component tags matching
matchConstraints: :class:`bool`
If `False`, skip component constraints matching
Returns
-------
self
"""
componentType = self.componentType
componentTypeLen = self._componentTypeLen
try:
currentValue = self._componentValues[idx]
except IndexError:
currentValue = noValue
if componentTypeLen:
if componentTypeLen < idx:
raise error.PyAsn1Error('component index out of range')
self._componentValues = [noValue] * componentTypeLen
if value is noValue:
if componentTypeLen:
value = componentType.getTypeByPosition(idx).clone()
elif currentValue is noValue:
raise error.PyAsn1Error('Component type not defined')
elif not isinstance(value, base.Asn1Item):
if componentTypeLen:
subComponentType = componentType.getTypeByPosition(idx)
if isinstance(subComponentType, base.AbstractSimpleAsn1Item):
value = subComponentType.clone(value=value)
else:
raise error.PyAsn1Error('%s can cast only scalar values' % componentType.__class__.__name__)
elif currentValue is not noValue and isinstance(currentValue, base.AbstractSimpleAsn1Item):
value = currentValue.clone(value=value)
else:
raise error.PyAsn1Error('%s undefined component type' % componentType.__class__.__name__)
elif (matchTags or matchConstraints) and componentTypeLen:
subComponentType = componentType.getTypeByPosition(idx)
if subComponentType is not noValue:
subtypeChecker = (self.strictConstraints and
subComponentType.isSameTypeWith or
subComponentType.isSuperTypeOf)
if not subtypeChecker(value, matchTags, matchConstraints):
if not componentType[idx].openType:
raise error.PyAsn1Error('Component value is tag-incompatible: %r vs %r' % (value, componentType))
if verifyConstraints and value.isValue:
try:
self.subtypeSpec(value, idx)
except error.PyAsn1Error:
exType, exValue, exTb = sys.exc_info()
raise exType('%s at %s' % (exValue, self.__class__.__name__))
if componentTypeLen or idx in self._dynamicNames:
self._componentValues[idx] = value
elif len(self._componentValues) == idx:
self._componentValues.append(value)
self._dynamicNames.addField(idx)
else:
raise error.PyAsn1Error('Component index out of range')
return self
@property
def isValue(self):
"""Indicate that |ASN.1| object represents ASN.1 value.
If *isValue* is `False` then this object represents just ASN.1 schema.
If *isValue* is `True` then, in addition to its ASN.1 schema features,
this object can also be used like a Python built-in object (e.g. `int`,
`str`, `dict` etc.).
Returns
-------
: :class:`bool`
:class:`False` if object represents just ASN.1 schema.
:class:`True` if object represents ASN.1 schema and can be used as a normal value.
Note
----
There is an important distinction between PyASN1 schema and value objects.
The PyASN1 schema objects can only participate in ASN.1 schema-related
operations (e.g. defining or testing the structure of the data). Most
obvious uses of ASN.1 schema is to guide serialisation codecs whilst
encoding/decoding serialised ASN.1 contents.
The PyASN1 value objects can **additionally** participate in many operations
involving regular Python objects (e.g. arithmetic, comprehension etc).
"""
componentType = self.componentType
if componentType:
for idx, subComponentType in enumerate(componentType.namedTypes):
if subComponentType.isDefaulted or subComponentType.isOptional:
continue
if not self._componentValues:
return False
componentValue = self._componentValues[idx]
if componentValue is noValue or not componentValue.isValue:
return False
else:
for componentValue in self._componentValues:
if componentValue is noValue or not componentValue.isValue:
return False
return True
def prettyPrint(self, scope=0):
"""Return an object representation string.
Returns
-------
: :class:`str`
Human-friendly object representation.
"""
scope += 1
representation = self.__class__.__name__ + ':\n'
for idx, componentValue in enumerate(self._componentValues):
if componentValue is not noValue and componentValue.isValue:
representation += ' ' * scope
if self.componentType:
representation += self.componentType.getNameByPosition(idx)
else:
representation += self._dynamicNames.getNameByPosition(idx)
representation = '%s=%s\n' % (
representation, componentValue.prettyPrint(scope)
)
return representation
def prettyPrintType(self, scope=0):
scope += 1
representation = '%s -> %s {\n' % (self.tagSet, self.__class__.__name__)
for idx, componentType in enumerate(self.componentType.values() or self._componentValues):
representation += ' ' * scope
if self.componentType:
representation += '"%s"' % self.componentType.getNameByPosition(idx)
else:
representation += '"%s"' % self._dynamicNames.getNameByPosition(idx)
representation = '%s = %s\n' % (
representation, componentType.prettyPrintType(scope)
)
return representation + '\n' + ' ' * (scope - 1) + '}'
# backward compatibility
def setDefaultComponents(self):
return self
def getComponentType(self):
if self._componentTypeLen:
return self.componentType
def getNameByPosition(self, idx):
if self._componentTypeLen:
return self.componentType[idx].name
class Sequence(SequenceAndSetBase):
    __doc__ = SequenceAndSetBase.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
    )

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()

    #: Default :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
    #: object imposing size constraint on |ASN.1| objects
    sizeSpec = constraint.ConstraintsIntersection()

    #: Default collection of ASN.1 types of component (e.g. :py:class:`~pyasn1.type.namedtype.NamedType`)
    #: objects representing named ASN.1 types allowed within |ASN.1| type
    componentType = namedtype.NamedTypes()

    # Disambiguation ASN.1 types identification
    typeId = SequenceAndSetBase.getTypeId()

    # backward compatibility

    def getComponentTagMapNearPosition(self, idx):
        # Deprecated helper: delegate tag-map lookup to the schema, if any
        if self.componentType:
            return self.componentType.getTagMapNearPosition(idx)

    def getComponentPositionNearType(self, tagSet, idx):
        # Deprecated helper: resolve component position by tag near *idx*;
        # without a schema the position is taken verbatim
        if self.componentType:
            return self.componentType.getPositionNearType(tagSet, idx)
        else:
            return idx
class Set(SequenceAndSetBase):
    __doc__ = SequenceAndSetBase.__doc__

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
    )

    #: Default collection of ASN.1 types of component (e.g. :py:class:`~pyasn1.type.namedtype.NamedType`)
    #: object representing ASN.1 type allowed within |ASN.1| type
    componentType = namedtype.NamedTypes()

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()

    #: Default :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
    #: object imposing size constraint on |ASN.1| objects
    sizeSpec = constraint.ConstraintsIntersection()

    # Disambiguation ASN.1 types identification
    typeId = SequenceAndSetBase.getTypeId()

    def getComponent(self, innerFlag=False):
        # A SET is its own outermost component (overridden by Choice)
        return self

    def getComponentByType(self, tagSet, default=noValue,
                           instantiate=True, innerFlag=False):
        """Returns |ASN.1| type component by ASN.1 tag.

        Parameters
        ----------
        tagSet : :py:class:`~pyasn1.type.tag.TagSet`
            Object representing ASN.1 tags to identify one of
            |ASN.1| object component

        Keyword Args
        ------------
        default: :class:`object`
            If set and requested component is a schema object, return the `default`
            object instead of the requested component.

        instantiate: :class:`bool`
            If `True` (default), inner component will be automatically instantiated.
            If 'False' either existing component or the `noValue` object will be
            returned.

        Returns
        -------
        : :py:class:`~pyasn1.type.base.PyAsn1Item`
            a pyasn1 object
        """
        componentValue = self.getComponentByPosition(
            self.componentType.getPositionByType(tagSet),
            default=default, instantiate=instantiate
        )
        if innerFlag and isinstance(componentValue, Set):
            # get inner component by inner tagSet
            return componentValue.getComponent(innerFlag=True)
        else:
            # get outer component by inner tagSet
            return componentValue

    def setComponentByType(self, tagSet, value=noValue,
                           verifyConstraints=True,
                           matchTags=True,
                           matchConstraints=True,
                           innerFlag=False):
        """Assign |ASN.1| type component by ASN.1 tag.

        Parameters
        ----------
        tagSet : :py:class:`~pyasn1.type.tag.TagSet`
            Object representing ASN.1 tags to identify one of
            |ASN.1| object component

        Keyword Args
        ------------
        value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
            A Python value to initialize |ASN.1| component with (if *componentType* is set)
            or ASN.1 value object to assign to |ASN.1| component.

        verifyConstraints : :class:`bool`
            If `False`, skip constraints validation

        matchTags: :class:`bool`
            If `False`, skip component tags matching

        matchConstraints: :class:`bool`
            If `False`, skip component constraints matching

        innerFlag: :class:`bool`
            If `True`, search for matching *tagSet* recursively.

        Returns
        -------
        self
        """
        idx = self.componentType.getPositionByType(tagSet)

        if innerFlag:  # set inner component by inner tagSet
            componentType = self.componentType.getTypeByPosition(idx)

            if componentType.tagSet:
                # component is explicitly tagged -- assign directly here
                return self.setComponentByPosition(
                    idx, value, verifyConstraints, matchTags, matchConstraints
                )
            else:
                # untagged component -- recurse into it to find the tag
                componentType = self.getComponentByPosition(idx)
                return componentType.setComponentByType(
                    tagSet, value, verifyConstraints, matchTags, matchConstraints, innerFlag=innerFlag
                )
        else:  # set outer component by inner tagSet
            return self.setComponentByPosition(
                idx, value, verifyConstraints, matchTags, matchConstraints
            )

    @property
    def componentTagMap(self):
        # SET components are identified by tag, so the tag map must be unique
        if self.componentType:
            return self.componentType.tagMapUnique
class Choice(Set):
    """Create |ASN.1| type.

    |ASN.1| objects are mutable and duck-type Python :class:`dict` objects.

    Keyword Args
    ------------
    componentType: :py:class:`~pyasn1.type.namedtype.NamedType`
        Object holding named ASN.1 types allowed within this collection

    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)

    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s)

    sizeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing collection size constraint

    Examples
    --------

    .. code-block:: python

        class Afters(Choice):
            '''
            ASN.1 specification:

            Afters ::= CHOICE {
                cheese  [0] IA5String,
                dessert [1] IA5String
            }
            '''
            componentType = NamedTypes(
                NamedType('cheese', IA5String().subtype(
                    implicitTag=Tag(tagClassContext, tagFormatSimple, 0)
                ),
                NamedType('dessert', IA5String().subtype(
                    implicitTag=Tag(tagClassContext, tagFormatSimple, 1)
                )
            )

        afters = Afters()

        afters['cheese'] = 'Mascarpone'
    """
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.TagSet()  # untagged

    #: Default collection of ASN.1 types of component (e.g. :py:class:`~pyasn1.type.namedtype.NamedType`)
    #: object representing ASN.1 type allowed within |ASN.1| type
    componentType = namedtype.NamedTypes()

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()

    #: Default :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
    #: object imposing size constraint on |ASN.1| objects
    sizeSpec = constraint.ConstraintsIntersection(
        constraint.ValueSizeConstraint(1, 1)
    )

    # Disambiguation ASN.1 types identification
    typeId = Set.getTypeId()

    # Index of the currently chosen alternative, None until one is set
    _currentIdx = None

    # Comparisons delegate to the currently chosen component's value;
    # with no component chosen there is nothing to compare against.

    def __eq__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] == other
        return NotImplemented

    def __ne__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] != other
        return NotImplemented

    def __lt__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] < other
        return NotImplemented

    def __le__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] <= other
        return NotImplemented

    def __gt__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] > other
        return NotImplemented

    def __ge__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] >= other
        return NotImplemented

    if sys.version_info[0] <= 2:
        def __nonzero__(self):
            return self._componentValues and True or False

    else:
        def __bool__(self):
            return self._componentValues and True or False

    def __len__(self):
        # a CHOICE holds at most one component
        return self._currentIdx is not None and 1 or 0

    def __contains__(self, key):
        if self._currentIdx is None:
            return False
        return key == self.componentType[self._currentIdx].getName()

    def __iter__(self):
        if self._currentIdx is None:
            # BUGFIX (PEP 479): raising StopIteration inside a generator is
            # converted to RuntimeError on Python 3.7+; a bare return ends
            # the iteration cleanly instead.
            return
        yield self.componentType[self._currentIdx].getName()

    # Python dict protocol

    def values(self):
        if self._currentIdx is not None:
            yield self._componentValues[self._currentIdx]

    def keys(self):
        if self._currentIdx is not None:
            yield self.componentType[self._currentIdx].getName()

    def items(self):
        if self._currentIdx is not None:
            yield self.componentType[self._currentIdx].getName(), self[self._currentIdx]

    def verifySizeSpec(self):
        if self._currentIdx is None:
            raise error.PyAsn1Error('Component not chosen')

    def _cloneComponentValues(self, myClone, cloneValueFlag):
        try:
            component = self.getComponent()
        except error.PyAsn1Error:
            # no component chosen -- nothing to clone
            pass
        else:
            if isinstance(component, Choice):
                tagSet = component.effectiveTagSet
            else:
                tagSet = component.tagSet
            if isinstance(component, base.AbstractConstructedAsn1Item):
                myClone.setComponentByType(
                    tagSet, component.clone(cloneValueFlag=cloneValueFlag)
                )
            else:
                myClone.setComponentByType(tagSet, component.clone())

    def getComponentByPosition(self, idx, default=noValue, instantiate=True):
        __doc__ = Set.__doc__

        if self._currentIdx is None or self._currentIdx != idx:
            return Set.getComponentByPosition(self, idx, default=default,
                                              instantiate=instantiate)

        return self._componentValues[idx]

    def setComponentByPosition(self, idx, value=noValue,
                               verifyConstraints=True,
                               matchTags=True,
                               matchConstraints=True):
        """Assign |ASN.1| type component by position.

        Equivalent to Python sequence item assignment operation (e.g. `[]`).

        Parameters
        ----------
        idx: :class:`int`
            Component index (zero-based). Must either refer to existing
            component or to N+1 component. In the latter case a new component
            type gets instantiated (if *componentType* is set, or given ASN.1
            object is taken otherwise) and appended to the |ASN.1| sequence.

        Keyword Args
        ------------
        value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
            A Python value to initialize |ASN.1| component with (if *componentType* is set)
            or ASN.1 value object to assign to |ASN.1| component. Once a new value is
            set to *idx* component, previous value is dropped.

        verifyConstraints : :class:`bool`
            If `False`, skip constraints validation

        matchTags: :class:`bool`
            If `False`, skip component tags matching

        matchConstraints: :class:`bool`
            If `False`, skip component constraints matching

        Returns
        -------
        self
        """
        oldIdx = self._currentIdx
        Set.setComponentByPosition(self, idx, value, verifyConstraints, matchTags, matchConstraints)
        self._currentIdx = idx

        if oldIdx is not None and oldIdx != idx:
            # CHOICE semantics: choosing a new alternative drops the old one
            self._componentValues[oldIdx] = noValue

        return self

    @property
    def effectiveTagSet(self):
        """Return a :class:`~pyasn1.type.tag.TagSet` object of the currently initialized component or self (if |ASN.1| is tagged)."""
        if self.tagSet:
            return self.tagSet
        else:
            component = self.getComponent()
            return component.effectiveTagSet

    @property
    def tagMap(self):
        """"Return a :class:`~pyasn1.type.tagmap.TagMap` object mapping
            ASN.1 tags to ASN.1 objects contained within callee.
        """
        if self.tagSet:
            return Set.tagMap.fget(self)
        else:
            # untagged CHOICE is identified by its alternatives' tags
            return self.componentType.tagMapUnique

    def getComponent(self, innerFlag=False):
        """Return currently assigned component of the |ASN.1| object.

        Returns
        -------
        : :py:class:`~pyasn1.type.base.PyAsn1Item`
            a PyASN1 object
        """
        if self._currentIdx is None:
            raise error.PyAsn1Error('Component not chosen')
        else:
            c = self._componentValues[self._currentIdx]
            if innerFlag and isinstance(c, Choice):
                return c.getComponent(innerFlag)
            else:
                return c

    def getName(self, innerFlag=False):
        """Return the name of currently assigned component of the |ASN.1| object.

        Returns
        -------
        : :py:class:`str`
            |ASN.1| component name
        """
        if self._currentIdx is None:
            raise error.PyAsn1Error('Component not chosen')
        else:
            if innerFlag:
                c = self._componentValues[self._currentIdx]
                if isinstance(c, Choice):
                    return c.getName(innerFlag)
            return self.componentType.getNameByPosition(self._currentIdx)

    @property
    def isValue(self):
        """Indicate that |ASN.1| object represents ASN.1 value.

        If *isValue* is `False` then this object represents just ASN.1 schema.

        If *isValue* is `True` then, in addition to its ASN.1 schema features,
        this object can also be used like a Python built-in object (e.g. `int`,
        `str`, `dict` etc.).

        Returns
        -------
        : :class:`bool`
            :class:`False` if object represents just ASN.1 schema.
            :class:`True` if object represents ASN.1 schema and can be used as a normal value.

        Note
        ----
        There is an important distinction between PyASN1 schema and value objects.
        The PyASN1 schema objects can only participate in ASN.1 schema-related
        operations (e.g. defining or testing the structure of the data). Most
        obvious uses of ASN.1 schema is to guide serialisation codecs whilst
        encoding/decoding serialised ASN.1 contents.

        The PyASN1 value objects can **additionally** participate in many operations
        involving regular Python objects (e.g. arithmetic, comprehension etc).
        """
        if self._currentIdx is None:
            return False

        componentValue = self._componentValues[self._currentIdx]

        return componentValue is not noValue and componentValue.isValue

    def clear(self):
        self._currentIdx = None
        Set.clear(self)

    # compatibility stubs

    def getMinTagSet(self):
        return self.minTagSet
class Any(OctetString):
    """Create |ASN.1| schema or value object.

    |ASN.1| objects are immutable and duck-type Python 2 :class:`str` or Python 3
    :class:`bytes`. When used in Unicode context, |ASN.1| type assumes "|encoding|"
    serialisation.

    Keyword Args
    ------------
    value: :class:`str`, :class:`bytes` or |ASN.1| object
        string (Python 2) or bytes (Python 3), alternatively unicode object
        (Python 2) or string (Python 3) representing character string to be
        serialised into octets (note `encoding` parameter) or |ASN.1| object.

    tagSet: :py:class:`~pyasn1.type.tag.TagSet`
        Object representing non-default ASN.1 tag(s)

    subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
        Object representing non-default ASN.1 subtype constraint(s)

    encoding: :py:class:`str`
        Unicode codec ID to encode/decode :class:`unicode` (Python 2) or
        :class:`str` (Python 3) the payload when |ASN.1| object is used
        in text string context.

    binValue: :py:class:`str`
        Binary string initializer to use instead of the *value*.
        Example: '10110011'.

    hexValue: :py:class:`str`
        Hexadecimal string initializer to use instead of the *value*.
        Example: 'DEADBEEF'.

    Raises
    ------
    :py:class:`~pyasn1.error.PyAsn1Error`
        On constraint violation or bad initializer.

    Examples
    --------
    .. code-block:: python

        class Error(Sequence):
            '''
            ASN.1 specification:

            Error ::= SEQUENCE {
                code      INTEGER,
                parameter ANY DEFINED BY code  -- Either INTEGER or REAL
            }
            '''
            componentType=NamedTypes(
                NamedType('code', Integer()),
                NamedType('parameter', Any(),
                          openType=OpenType('code', {1: Integer(),
                                                     2: Real()}))
            )

        error = Error()
        error['code'] = 1
        error['parameter'] = Integer(1234)
    """
    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
    #: associated with |ASN.1| type.
    tagSet = tag.TagSet()  # untagged

    #: Set (on class, not on instance) or return a
    #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
    #: imposing constraints on |ASN.1| type initialization values.
    subtypeSpec = constraint.ConstraintsIntersection()

    # Disambiguation ASN.1 types identification
    typeId = OctetString.getTypeId()

    @property
    def tagMap(self):
        """"Return a :class:`~pyasn1.type.tagmap.TagMap` object mapping
            ASN.1 tags to ASN.1 objects contained within callee.
        """
        # Lazily build and cache the tag map on first access; ANY matches
        # every tag, hence itself as the default-type entry.
        try:
            return self._tagMap

        except AttributeError:
            self._tagMap = tagmap.TagMap(
                {self.tagSet: self},
                {eoo.endOfOctets.tagSet: eoo.endOfOctets},
                self
            )

            return self._tagMap

# XXX
# coercion rules?
| gpl-3.0 |
grepman/detective.io | app/detective/migrations/0047_auto__add_field_topicskeleton_order.py | 3 | 13355 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration 0047: add the ``order`` column to ``TopicSkeleton``."""

    def forwards(self, orm):
        """Apply the migration: add ``TopicSkeleton.order`` (default 0)."""
        # Adding field 'TopicSkeleton.order'
        db.add_column(u'detective_topicskeleton', 'order',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)

    def backwards(self, orm):
        """Revert the migration: drop ``TopicSkeleton.order``."""
        # Deleting field 'TopicSkeleton.order'
        db.delete_column(u'detective_topicskeleton', 'order')

    # Frozen ORM snapshot used by South while running this migration.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'detective.article': {
            'Meta': {'object_name': 'Article'},
            'content': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '250'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
            'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['detective.Topic']"})
        },
        u'detective.detectiveprofileuser': {
            'Meta': {'object_name': 'DetectiveProfileUser'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'organization': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'plan': ('django.db.models.fields.CharField', [], {'default': "'free'", 'max_length': '10'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
        },
        u'detective.quoterequest': {
            'Meta': {'object_name': 'QuoteRequest'},
            'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'domain': ('django.db.models.fields.TextField', [], {}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
            'employer': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'public': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'records': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'users': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        u'detective.searchterm': {
            'Meta': {'object_name': 'SearchTerm'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_literal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'label': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
            'subject': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['detective.Topic']"})
        },
        u'detective.subscription': {
            'Meta': {'object_name': 'Subscription'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'identification': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'plan': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
            'siret': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'vat': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
        },
        u'detective.topic': {
            'Meta': {'unique_together': "(('slug', 'author'),)", 'object_name': 'Topic'},
            'about': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            'administrator_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'admin_topic'", 'null': 'True', 'to': u"orm['auth.Group']"}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
            'background': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'contributor_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
            'dataset': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['detective.TopicDataSet']", 'null': 'True', 'blank': 'True'}),
            'description': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ontology_as_json': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'ontology_as_mod': ('django.db.models.fields.SlugField', [], {'max_length': '250', 'blank': 'True'}),
            'ontology_as_owl': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'skeleton_title': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '250'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '250'})
        },
        u'detective.topicdataset': {
            'Meta': {'object_name': 'TopicDataSet'},
            'description': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'target_plans': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'target_skeletons': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'datasets'", 'symmetrical': 'False', 'to': u"orm['detective.TopicSkeleton']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
            'zip_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
        },
        u'detective.topicskeleton': {
            'Meta': {'object_name': 'TopicSkeleton'},
            'description': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            'enable_teasing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ontology': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'picture_credits': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'schema_picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'target_plans': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
            'tutorial_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        },
        u'detective.topictoken': {
            'Meta': {'unique_together': "(('topic', 'email'),)", 'object_name': 'TopicToken'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'token': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['detective.Topic']"})
        }
    }

    complete_apps = ['detective']
#!/home/search/ttool/py27/bin/python
# func:
# Contact address of the script author.
__author__ = "unix1986@qq.com"
import sys
import platform
# check python version
# Bail out early on interpreters older than 2.7.  The '%' formatting is
# applied to the message *before* printing: the original wrote
# `print (...) % (...)`, which under a Python 3 parse applies '%' to the
# None returned by print() and raises TypeError.
if sys.version_info < (2, 7, 0):
    print("Current python version %s, "
          "but require version %s or above" %
          (platform.python_version(), "2.7.0"))
    sys.exit(1)
import os
import argparse
'''
import subprocess
import time
import datetime
import re
'''
# parse command line arguments
def ArgParse():
    """Build the command-line parser and return the parsed arguments."""
    parser = argparse.ArgumentParser(
        description="Compute qps by monitoring a access log file",
        epilog="Author: unix1986 Homepage: https://github.com/unix1986")
    # Argument definitions kept for reference while this template declares
    # no options of its own:
    #   logfile                e.g. /path/access.qfedd.log
    #   -i/--interval <int>    interval time (s) for statistics
    #   -e/--expr <regex>      keep only lines matching the expression
    #   -r/--reverse           invert the -e match (requires -e)
    #   -f/--flush             flush file buffer
    return parser.parse_args()
# main func
def Main(args):
    """Program entry point; currently a placeholder that does nothing."""
    return None
# entrance
if __name__ == "__main__":
    # Parse the command line first, then hand the result to Main().
    Main(ArgParse())
| bsd-2-clause |
#!/usr/bin/env python
#
# Copyright 2009-2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from qmf.console import Session, Console
from sys import exit, argv
import time
import threading
# Shared lock/condition pair used to hand response counts from the QMF
# callback thread back to the main thread waiting below.
lock = threading.Lock()
condition = threading.Condition(lock)
class EmptyConsole(Console):
    """QMF console that only counts method responses.

    Every callback increments ``count`` under the shared condition and
    wakes the main thread, which waits until all responses have arrived.
    """

    def __init__(self):
        self.count = 0

    def methodResponse(self, broker, seq, response):
        # 'with condition' acquires and releases the underlying lock
        # exactly like the original acquire()/release() pair.
        with condition:
            self.count += 1
            condition.notify()
        # print broker, seq, response
# Broker URL, iteration count and submission name come from argv with
# defaults.  NOTE(review): the `and/or` idiom falls back to the default
# when the supplied value is falsy (e.g. a count of "0") -- confirm that
# is acceptable here.
url = len(argv) > 1 and argv[1] or "amqp://localhost:5672"
count = len(argv) > 2 and int(argv[2]) or 10000
submission = len(argv) > 3 and argv[3] or "submission"
# QMF class/package identifying the Condor job server object.
klass = "JobServer"
package = "com.redhat.grid"
console = EmptyConsole()
session = Session(console);
broker = session.addBroker(url)
servers = session.getObjects(_class=klass, _package=package)
server = servers[0]
print "Found server:", server
print "Iteractions:", count
print "Submission:", submission
start = time.time()
print "Start:", start
# Fire all GetJobAds requests asynchronously; responses are tallied by
# EmptyConsole.methodResponse on the callback thread.
for i in xrange(count):
    r = session._sendMethodRequest(server.getBroker(),
                                   server.getClassKey(),
                                   server.getObjectId(),
                                   "GetJobAds",
                                   (submission,))
    # r = server.GetJobs(submission);
    # print r
    # r = echoer.echo(i, message, _async=True)
    # if r.status or r.sequence != i:
    #     print "text:", r.text, "sequence:", r.sequence, "(expected:", i, ")"
    #     count = i
    #     break
    # if r.body != message:
    #     print "message mismatch, received:", r.body
    #     count = i
    #     break
# Block until the callback thread has counted every response.
condition.acquire()
while console.count < count:
    condition.wait()
condition.release()
end = time.time()
print "End:", end
print "Total messages:", console.count, count
print "Total time:", end-start, "seconds"
print "Response rate:", count/(end-start), "Hz"
session.delBroker(broker)
| apache-2.0 |
#!/usr/bin/python
# coding:utf-8
import os
import re
import fnmatch
import zipfile
import shutil
import getpass
import glob
# Directory watched for downloaded archives (current user's Downloads).
PATH = "/Users/"+ getpass.getuser() + "/Downloads"
# Directory whose images get replaced; prompted for when it does not exist.
ProtectImagePath = "/Users/wzg/Downloads/testimage/help_images"
# Prefix prepended to every renamed help-guide image.
prefix = "FS_HelpGuide_"
# Resolve the directory whose images are to be replaced.
def realProtectImagePath():
    """Ensure the module-level ProtectImagePath points at a real directory.

    If the configured default does not exist, prompt the user and adopt
    the entered path only when it is an existing directory.
    """
    global ProtectImagePath
    if not os.path.isdir(ProtectImagePath):
        inputContent = raw_input("请输入待替换图片文件的路径:")
        # Bug fix: validate the *user-supplied* path; the original re-checked
        # the old default, so the prompt result was never used.
        if os.path.isdir(inputContent):
            ProtectImagePath = inputContent
# Delete previously extracted guide-image folders under PATH.
def deleteExistDirs():
    """Remove every directory below PATH whose name matches '*引导*'."""
    for path, dirs, files in os.walk(PATH):
        for secDir in dirs:
            if fnmatch.fnmatch(secDir, '*引导*'):
                # Bug fix: join with the current walk root, not PATH, so
                # matches in nested directories resolve to an existing path.
                turePath = os.path.join(path, secDir)
                shutil.rmtree(turePath)
# Extract a zip archive next to itself.
def unzip(file_name):
    """Extract *file_name* into a sibling directory named after the archive.

    macOS '__MACOSX/' metadata entries are skipped.
    """
    zip_file = zipfile.ZipFile(file_name)
    try:
        # Strip only a trailing '.zip'; the original used
        # replace('.zip', '', 1), which also clobbers a '.zip' occurring
        # earlier in the path.
        if file_name.endswith('.zip'):
            zipDirName = file_name[:-len('.zip')]
        else:
            zipDirName = file_name
        if not os.path.isdir(zipDirName):
            os.mkdir(zipDirName)
        for names in zip_file.namelist():
            # Skip macOS resource-fork entries.
            if names.startswith('__MACOSX/'):
                continue
            zip_file.extract(names, zipDirName)
    finally:
        # Close even on extraction errors (original leaked the handle).
        zip_file.close()
# Locate the downloaded guide archive.
def unzipImages():
    """Return the full path of the first entry in PATH matching '*引导*',
    or None when nothing matches."""
    matches = (entry for entry in os.listdir(PATH)
               if fnmatch.fnmatch(entry, '*引导*'))
    return next(matches, None)
# Locate the extracted guide directory.
def realPath():
    """Return the full path of the first directory below PATH whose name
    matches '*引导*', or None when nothing matches."""
    for path, dirs, files in os.walk(PATH):
        for secDir in dirs:
            if fnmatch.fnmatch(secDir, '*引导*'):
                # Bug fix: join with the current walk root, not PATH, so
                # matches below the first level resolve correctly.
                return os.path.join(path, secDir)
# Rename extracted image files: prepend 'FS_HelpGuide_<dir>_' and drop the
# first '0' from the original file name.
def rename_fils(turePath):
    # NOTE(review): subPath is always joined with turePath, so directory
    # matches below the first walk level point at paths that may not
    # exist -- confirm only one directory level is expected here.
    for path,secdirs,files in os.walk(turePath):
        for subDir in secdirs:
            subPath = os.path.join(turePath,subDir)
            for subfile in os.listdir(subPath):
                # debug aid: print the file currently being renamed
                subfilePath = os.path.join(subPath,subfile)
                if os.path.isfile(subfilePath):
                    if '.DS_Store' not in subfile:
                        # New name: prefix + subDir + '_' + name with its first
                        # '0' removed; presumably strips a leading zero from
                        # names like '01.png' -- TODO confirm.
                        newName = os.path.join(subPath,prefix+subDir+'_'+subfile.replace('0','',1))
                        os.rename(subfilePath,newName)
    pass
# Collect the full paths of all files below turePath.
def fileInDirPath(turePath):
    """Return full paths of every regular file (except .DS_Store) found in
    the sub-directories of *turePath*."""
    fileList = []
    for path, secdirs, files in os.walk(turePath):
        for subDir in secdirs:
            # Bug fix: join with the current walk root; the original joined
            # with turePath, which raised OSError on the os.listdir of any
            # directory below the first level.
            subPath = os.path.join(path, subDir)
            for subfile in os.listdir(subPath):
                subfilePath = os.path.join(subPath, subfile)
                if os.path.isfile(subfilePath):
                    if '.DS_Store' not in subfile:
                        fileList.append(subfilePath)
    return fileList
# Copy every replacement image into the protected directory.
def repalceImages(newImageDirPath, oldImageDirPath):
    """Copy each file found under *newImageDirPath* into *oldImageDirPath*,
    logging each successful copy."""
    if (os.path.isdir(newImageDirPath)) and (os.path.isdir(oldImageDirPath)):
        for newImageFilePath in fileInDirPath(newImageDirPath):
            if os.path.isfile(newImageFilePath):
                shutil.copy(newImageFilePath, oldImageDirPath)
                # print(...) with a single argument prints identically on
                # Python 2 and 3; the bare print statement was Py2-only.
                print("替换成功" + os.path.basename(newImageFilePath))
if __name__ == '__main__':
    # Remove stale extractions, unzip the newest guide archive, rename the
    # extracted files, then copy them over the protected directory.
    deleteExistDirs()
    unzipPath = unzipImages()
    # unzipImages() returns None when no archive matches; guard before the
    # isfile() call, which raises on None.
    if unzipPath and os.path.isfile(unzipPath):
        unzip(unzipPath)
        rename_fils(realPath())
        realProtectImagePath()
        repalceImages(realPath(), ProtectImagePath)
    else:
        print('无效解压地址')
| apache-2.0 |
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .utf8prober import UTF8Prober
from .sjisprober import SJISProber
from .eucjpprober import EUCJPProber
from .gb2312prober import GB2312Prober
from .euckrprober import EUCKRProber
from .cp949prober import CP949Prober
from .big5prober import Big5Prober
from .euctwprober import EUCTWProber
class MBCSGroupProber(CharSetGroupProber):
    """Group prober that tries every supported multi-byte charset prober."""

    def __init__(self):
        CharSetGroupProber.__init__(self)
        # One prober instance per supported multi-byte encoding; order
        # matches the original hand-written list.
        prober_classes = (
            UTF8Prober,
            SJISProber,
            EUCJPProber,
            GB2312Prober,
            EUCKRProber,
            CP949Prober,
            Big5Prober,
            EUCTWProber,
        )
        self._mProbers = [klass() for klass in prober_classes]
        self.reset()
| mit |
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains helper functions used to create protocol message classes from
Descriptor objects at runtime backed by the protocol buffer C++ API.
"""
__author__ = 'petar@google.com (Petar Petrov)'
import operator
from google.protobuf.internal import _net_proto2___python
from google.protobuf import message
# Short aliases for the C extension's descriptor enum values so the rest of
# this module does not repeat the long module name.
_LABEL_REPEATED = _net_proto2___python.LABEL_REPEATED
_LABEL_OPTIONAL = _net_proto2___python.LABEL_OPTIONAL
_CPPTYPE_MESSAGE = _net_proto2___python.CPPTYPE_MESSAGE
_TYPE_MESSAGE = _net_proto2___python.TYPE_MESSAGE
def GetDescriptorPool():
  """Creates a new DescriptorPool C++ object.

  The pool is used by the module-level lookup helpers below to resolve
  descriptors by fully-qualified name.
  """
  return _net_proto2___python.NewCDescriptorPool()
# Module-level descriptor pool shared by the lookup helpers below.
_pool = GetDescriptorPool()
def GetFieldDescriptor(full_field_name):
  """Searches for a field descriptor given a full field name.

  Args:
    full_field_name: Fully-qualified field name, e.g. 'pkg.Message.field'.
  """
  return _pool.FindFieldByName(full_field_name)
def BuildFile(content):
  """Registers a new proto file in the underlying C++ descriptor pool.

  Args:
    content: File definition handed to the C extension -- presumably a
      serialized FileDescriptorProto; confirm with the extension's API.
  """
  _net_proto2___python.BuildFile(content)
def GetExtensionDescriptor(full_extension_name):
  """Searches for extension descriptor given a full field name.

  Args:
    full_extension_name: Fully-qualified extension field name.
  """
  return _pool.FindExtensionByName(full_extension_name)
def NewCMessage(full_message_name):
  """Creates a new C++ protocol message by its fully-qualified name."""
  return _net_proto2___python.NewCMessage(full_message_name)
def ScalarProperty(cdescriptor):
  """Returns a Python property reading/writing the scalar field described
  by *cdescriptor* through the instance's wrapped C++ message."""

  def _Get(self):
    return self._cmsg.GetScalar(cdescriptor)

  def _Set(self, value):
    self._cmsg.SetScalar(cdescriptor, value)

  return property(_Get, _Set)
def CompositeProperty(cdescriptor, message_type):
  """Returns a read-only Python property for the given composite field.

  The wrapper for the sub-message is created lazily on first access and
  cached in the instance's _composite_fields dict.
  """

  def _Get(self):
    field_name = cdescriptor.name
    cached = self._composite_fields.get(field_name)
    if cached is None:
      new_cmessage = self._cmsg.NewSubMessage(cdescriptor)
      cached = message_type._concrete_class(__cmessage=new_cmessage)
      self._composite_fields[field_name] = cached
    return cached

  return property(_Get)
class RepeatedScalarContainer(object):

  """Container for repeated scalar fields.

  All reads and writes go through the owning message's wrapped C++
  message (self._cmsg); mutating operations that have no direct C API
  (insert/remove/__setitem__/sort) round-trip through a full Python list
  and write the whole field back with AssignRepeatedScalar.
  """

  __slots__ = ['_message', '_cfield_descriptor', '_cmsg']

  def __init__(self, msg, cfield_descriptor):
    self._message = msg
    self._cmsg = msg._cmsg
    self._cfield_descriptor = cfield_descriptor

  def append(self, value):
    self._cmsg.AddRepeatedScalar(
        self._cfield_descriptor, value)

  def extend(self, sequence):
    for element in sequence:
      self.append(element)

  def insert(self, key, value):
    # Full-slice read, mutate in Python, write everything back.
    values = self[slice(None, None, None)]
    values.insert(key, value)
    self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)

  def remove(self, value):
    values = self[slice(None, None, None)]
    values.remove(value)
    self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)

  def __setitem__(self, key, value):
    values = self[slice(None, None, None)]
    values[key] = value
    self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)

  def __getitem__(self, key):
    return self._cmsg.GetRepeatedScalar(self._cfield_descriptor, key)

  def __delitem__(self, key):
    self._cmsg.DeleteRepeatedField(self._cfield_descriptor, key)

  def __len__(self):
    return len(self[slice(None, None, None)])

  def __eq__(self, other):
    if self is other:
      return True
    # operator.isSequenceType is Python 2 only; this module targets Py2.
    if not operator.isSequenceType(other):
      raise TypeError(
          'Can only compare repeated scalar fields against sequences.')
    # We are presumably comparing against some other sequence type.
    return other == self[slice(None, None, None)]

  def __ne__(self, other):
    return not self == other

  def __hash__(self):
    # Mutable container: explicitly unhashable.
    raise TypeError('unhashable object')

  def sort(self, sort_function=cmp):
    # cmp is the Python 2 built-in comparison function.
    values = self[slice(None, None, None)]
    values.sort(sort_function)
    self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)
def RepeatedScalarProperty(cdescriptor):
  """Returns a Python property for the given repeated scalar field.

  The container wrapper is created lazily on first read and cached in the
  instance's _composite_fields dict; direct assignment is rejected.
  """

  def _Get(self):
    field_name = cdescriptor.name
    container = self._composite_fields.get(field_name)
    if container is None:
      container = RepeatedScalarContainer(self, cdescriptor)
      self._composite_fields[field_name] = container
    return container

  def _Set(self, unused_value):
    raise AttributeError('Assignment not allowed to repeated field '
                         '"%s" in protocol message object.' % cdescriptor.name)

  doc = 'Magic attribute generated for "%s" proto field.' % cdescriptor.name
  return property(_Get, _Set, doc=doc)
class RepeatedCompositeContainer(object):

  """Container for repeated composite (message-typed) fields.

  Elements live in the owning message's wrapped C++ message; every access
  wraps the underlying C message in a fresh instance of self._subclass.
  """

  __slots__ = ['_message', '_subclass', '_cfield_descriptor', '_cmsg']

  def __init__(self, msg, cfield_descriptor, subclass):
    self._message = msg
    self._cmsg = msg._cmsg
    self._subclass = subclass
    self._cfield_descriptor = cfield_descriptor

  def add(self, **kwargs):
    # NOTE(review): inside a class body Python name-mangles the
    # '__cmessage'/'__owner' keyword argument names -- confirm the concrete
    # class's constructor expects the mangled form.
    cmessage = self._cmsg.AddMessage(self._cfield_descriptor)
    return self._subclass(__cmessage=cmessage, __owner=self._message, **kwargs)

  def extend(self, elem_seq):
    """Extends by appending the given sequence of elements of the same type
    as this one, copying each individual message.
    """
    for message in elem_seq:
      self.add().MergeFrom(message)

  def MergeFrom(self, other):
    # Copy each of other's elements into a freshly added message.
    for message in other[:]:
      self.add().MergeFrom(message)

  def __getitem__(self, key):
    # A slice key yields a list of C messages; a scalar key yields one.
    cmessages = self._cmsg.GetRepeatedMessage(
        self._cfield_descriptor, key)
    subclass = self._subclass
    if not isinstance(cmessages, list):
      return subclass(__cmessage=cmessages, __owner=self._message)

    return [subclass(__cmessage=m, __owner=self._message) for m in cmessages]

  def __delitem__(self, key):
    self._cmsg.DeleteRepeatedField(
        self._cfield_descriptor, key)

  def __len__(self):
    return self._cmsg.FieldLength(self._cfield_descriptor)

  def __eq__(self, other):
    """Compares the current instance with another one."""
    if self is other:
      return True
    if not isinstance(other, self.__class__):
      raise TypeError('Can only compare repeated composite fields against '
                      'other repeated composite fields.')
    messages = self[slice(None, None, None)]
    other_messages = other[slice(None, None, None)]
    return messages == other_messages

  def __hash__(self):
    # Mutable container: explicitly unhashable.
    raise TypeError('unhashable object')

  def sort(self, sort_function=cmp):
    # Sort wrappers in Python, then replay the resulting permutation on the
    # C++ side as a sequence of element swaps.
    messages = []
    for index in range(len(self)):
      # messages[i][0] is where the i-th element of the new array has to come
      # from.
      # messages[i][1] is where the i-th element of the old array has to go.
      messages.append([index, 0, self[index]])
    messages.sort(lambda x,y: sort_function(x[2], y[2]))

    # Remember which position each elements has to move to.
    for i in range(len(messages)):
      messages[messages[i][0]][1] = i

    # Apply the transposition.
    for i in range(len(messages)):
      from_position = messages[i][0]
      if i == from_position:
        continue
      self._cmsg.SwapRepeatedFieldElements(
          self._cfield_descriptor, i, from_position)
      messages[messages[i][1]][0] = from_position
def RepeatedCompositeProperty(cdescriptor, message_type):
  """Returns a Python property for the given repeated composite field.

  The container wrapper is created lazily on first read and cached in the
  instance's _composite_fields dict; direct assignment is rejected.
  """

  def _Get(self):
    field_name = cdescriptor.name
    container = self._composite_fields.get(field_name)
    if container is None:
      container = RepeatedCompositeContainer(
          self, cdescriptor, message_type._concrete_class)
      self._composite_fields[field_name] = container
    return container

  def _Set(self, unused_value):
    raise AttributeError('Assignment not allowed to repeated field '
                         '"%s" in protocol message object.' % cdescriptor.name)

  doc = 'Magic attribute generated for "%s" proto field.' % cdescriptor.name
  return property(_Get, _Set, doc=doc)
class ExtensionDict(object):

  """Extension dictionary added to each protocol message.

  Scalar optional extensions are read/written straight through the C++
  message; repeated and composite extensions get cached Python wrappers
  stored in self._values, keyed by the extension descriptor.
  """

  def __init__(self, msg):
    self._message = msg
    self._cmsg = msg._cmsg
    self._values = {}

  def __setitem__(self, extension, value):
    # Imported here rather than at module scope -- presumably to avoid an
    # import cycle with the descriptor module; confirm before moving it.
    from google.protobuf import descriptor
    if not isinstance(extension, descriptor.FieldDescriptor):
      raise KeyError('Bad extension %r.' % (extension,))
    cdescriptor = extension._cdescriptor
    # Only optional scalar extensions may be assigned directly.
    if (cdescriptor.label != _LABEL_OPTIONAL or
        cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
      raise TypeError('Extension %r is repeated and/or a composite type.' % (
          extension.full_name,))
    self._cmsg.SetScalar(cdescriptor, value)
    self._values[extension] = value

  def __getitem__(self, extension):
    from google.protobuf import descriptor
    if not isinstance(extension, descriptor.FieldDescriptor):
      raise KeyError('Bad extension %r.' % (extension,))
    cdescriptor = extension._cdescriptor
    # Optional scalars are read straight from the C++ message.
    if (cdescriptor.label != _LABEL_REPEATED and
        cdescriptor.cpp_type != _CPPTYPE_MESSAGE):
      return self._cmsg.GetScalar(cdescriptor)
    # Repeated/composite extensions: return the cached wrapper, creating
    # it on first access.
    ext = self._values.get(extension, None)
    if ext is not None:
      return ext
    ext = self._CreateNewHandle(extension)
    self._values[extension] = ext
    return ext

  def ClearExtension(self, extension):
    from google.protobuf import descriptor
    if not isinstance(extension, descriptor.FieldDescriptor):
      raise KeyError('Bad extension %r.' % (extension,))
    self._cmsg.ClearFieldByDescriptor(extension._cdescriptor)
    # Drop any cached wrapper so the next read rebuilds it.
    if extension in self._values:
      del self._values[extension]

  def HasExtension(self, extension):
    from google.protobuf import descriptor
    if not isinstance(extension, descriptor.FieldDescriptor):
      raise KeyError('Bad extension %r.' % (extension,))
    return self._cmsg.HasFieldByDescriptor(extension._cdescriptor)

  def _FindExtensionByName(self, name):
    """Tries to find a known extension with the specified name.

    Args:
      name: Extension full name.

    Returns:
      Extension field descriptor.
    """
    return self._message._extensions_by_name.get(name, None)

  def _CreateNewHandle(self, extension):
    # Build the Python wrapper appropriate for this extension's shape.
    cdescriptor = extension._cdescriptor
    if (cdescriptor.label != _LABEL_REPEATED and
        cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
      cmessage = self._cmsg.NewSubMessage(cdescriptor)
      return extension.message_type._concrete_class(__cmessage=cmessage)

    if cdescriptor.label == _LABEL_REPEATED:
      if cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
        return RepeatedCompositeContainer(
            self._message, cdescriptor, extension.message_type._concrete_class)
      else:
        return RepeatedScalarContainer(self._message, cdescriptor)
    # This shouldn't happen!
    assert False
    return None
def NewMessage(message_descriptor, dictionary):
  """Creates a new protocol message *class*.

  Populates *dictionary* (the class dict under construction) with nested
  extension attributes, enum value constants, and the descriptor/slot
  bookkeeping used by the instance machinery.
  """
  _AddClassAttributesForNestedExtensions(message_descriptor, dictionary)
  _AddEnumValues(message_descriptor, dictionary)
  _AddDescriptors(message_descriptor, dictionary)
def InitMessage(message_descriptor, cls):
  """Constructs a new message instance (called before instance's __init__).

  Attaches the init method, the standard message methods, and extension
  properties to the already-created class *cls*.
  """
  cls._extensions_by_name = {}
  _AddInitMethod(message_descriptor, cls)
  _AddMessageMethods(message_descriptor, cls)
  _AddPropertiesForExtensions(message_descriptor, cls)
def _AddDescriptors(message_descriptor, dictionary):
  """Sets up a new protocol message class dictionary.

  Args:
    message_descriptor: A Descriptor instance describing this message type.
    dictionary: Class dictionary to which we'll add a '__slots__' entry.
  """
  dictionary['__descriptors'] = {}
  for field in message_descriptor.fields:
    dictionary['__descriptors'][field.name] = GetFieldDescriptor(
        field.full_name)
  # Restrict instances to the declared field names plus the internal
  # C-message plumbing attributes; this keeps per-instance memory low.
  dictionary['__slots__'] = list(dictionary['__descriptors'].iterkeys()) + [
      '_cmsg', '_owner', '_composite_fields', 'Extensions']
def _AddEnumValues(message_descriptor, dictionary):
  """Sets class-level attributes for all enum fields defined in this message.

  Every enum value name becomes a class attribute holding its number.

  Args:
    message_descriptor: Descriptor object for this message type.
    dictionary: Class dictionary that should be populated.
  """
  for enum_type in message_descriptor.enum_types:
    dictionary.update(
        (enum_value.name, enum_value.number)
        for enum_value in enum_type.values)
def _AddClassAttributesForNestedExtensions(message_descriptor, dictionary):
  """Adds class attributes for the nested extensions."""
  extension_dict = message_descriptor.extensions_by_name
  for extension_name, extension_field in extension_dict.iteritems():
    # Extension names must not collide with anything already generated
    # into the class dictionary.
    assert extension_name not in dictionary
    dictionary[extension_name] = extension_field
def _AddInitMethod(message_descriptor, cls):
  """Adds an __init__ method to cls."""
  # Create and attach message field properties to the message class.
  # This can be done just once per message class, since property setters and
  # getters are passed the message instance.
  # This makes message instantiation extremely fast, and at the same time it
  # doesn't require the creation of property objects for each message instance,
  # which saves a lot of memory.
  for field in message_descriptor.fields:
    field_cdescriptor = cls.__descriptors[field.name]
    if field.label == _LABEL_REPEATED:
      if field.cpp_type == _CPPTYPE_MESSAGE:
        value = RepeatedCompositeProperty(field_cdescriptor, field.message_type)
      else:
        value = RepeatedScalarProperty(field_cdescriptor)
    elif field.cpp_type == _CPPTYPE_MESSAGE:
      value = CompositeProperty(field_cdescriptor, field.message_type)
    else:
      value = ScalarProperty(field_cdescriptor)
    setattr(cls, field.name, value)
    # Attach a constant with the field number.
    constant_name = field.name.upper() + '_FIELD_NUMBER'
    setattr(cls, constant_name, field.number)

  # The generated constructor closes over message_descriptor; '__cmessage'
  # and '__owner' are internal keyword arguments used when wrapping an
  # existing C message rather than creating a new one.
  def Init(self, **kwargs):
    """Message constructor."""
    cmessage = kwargs.pop('__cmessage', None)
    if cmessage is None:
      self._cmsg = NewCMessage(message_descriptor.full_name)
    else:
      self._cmsg = cmessage
    # Keep a reference to the owner, as the owner keeps a reference to the
    # underlying protocol buffer message.
    owner = kwargs.pop('__owner', None)
    if owner is not None:
      self._owner = owner
    self.Extensions = ExtensionDict(self)
    self._composite_fields = {}
    for field_name, field_value in kwargs.iteritems():
      field_cdescriptor = self.__descriptors.get(field_name, None)
      if field_cdescriptor is None:
        raise ValueError('Protocol message has no "%s" field.' % field_name)
      if field_cdescriptor.label == _LABEL_REPEATED:
        if field_cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
          # Repeated message field: merge each provided value into a
          # freshly added element.
          for val in field_value:
            getattr(self, field_name).add().MergeFrom(val)
        else:
          getattr(self, field_name).extend(field_value)
      elif field_cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
        getattr(self, field_name).MergeFrom(field_value)
      else:
        setattr(self, field_name, field_value)

  # Hide module/doc metadata of the generated constructor.
  Init.__module__ = None
  Init.__doc__ = None
  cls.__init__ = Init
def _IsMessageSetExtension(field):
  """Checks if a field is a message set extension.

  A MessageSet extension is an optional, message-typed extension whose
  containing type carries the message_set_wire_format option and whose
  message type equals its extension scope.
  """
  if not field.is_extension:
    return False
  container = field.containing_type
  if not (container.has_options and
          container.GetOptions().message_set_wire_format):
    return False
  return (field.type == _TYPE_MESSAGE and
          field.message_type == field.extension_scope and
          field.label == _LABEL_OPTIONAL)
def _AddMessageMethods(message_descriptor, cls):
  """Adds the methods to a protocol message class."""
  # ClearExtension/HasExtension only exist on extendable message types.
  if message_descriptor.is_extendable:
    def ClearExtension(self, extension):
      self.Extensions.ClearExtension(extension)
    def HasExtension(self, extension):
      return self.Extensions.HasExtension(extension)
  def HasField(self, field_name):
    return self._cmsg.HasField(field_name)
  def ClearField(self, field_name):
    if field_name in self._composite_fields:
      del self._composite_fields[field_name]
    self._cmsg.ClearField(field_name)
  def Clear(self):
    return self._cmsg.Clear()
  def IsInitialized(self, errors=None):
    if self._cmsg.IsInitialized():
      return True
    if errors is not None:
      errors.extend(self.FindInitializationErrors());
    return False
  def SerializeToString(self):
    if not self.IsInitialized():
      raise message.EncodeError(
          'Message is missing required fields: ' +
          ','.join(self.FindInitializationErrors()))
    return self._cmsg.SerializeToString()
  def SerializePartialToString(self):
    return self._cmsg.SerializePartialToString()
  def ParseFromString(self, serialized):
    self.Clear()
    self.MergeFromString(serialized)
  def MergeFromString(self, serialized):
    # The C layer signals a parse failure with a negative byte count.
    byte_size = self._cmsg.MergeFromString(serialized)
    if byte_size < 0:
      raise message.DecodeError('Unable to merge from string.')
    return byte_size
  def MergeFrom(self, msg):
    if not isinstance(msg, cls):
      raise TypeError(
          "Parameter to MergeFrom() must be instance of same class.")
    self._cmsg.MergeFrom(msg._cmsg)
  def CopyFrom(self, msg):
    self._cmsg.CopyFrom(msg._cmsg)
  def ByteSize(self):
    return self._cmsg.ByteSize()
  def SetInParent(self):
    return self._cmsg.SetInParent()
  def ListFields(self):
    all_fields = []
    field_list = self._cmsg.ListFields()
    fields_by_name = cls.DESCRIPTOR.fields_by_name
    for is_extension, field_name in field_list:
      if is_extension:
        extension = cls._extensions_by_name[field_name]
        all_fields.append((extension, self.Extensions[extension]))
      else:
        field_descriptor = fields_by_name[field_name]
        all_fields.append(
            (field_descriptor, getattr(self, field_name)))
    all_fields.sort(key=lambda item: item[0].number)
    return all_fields
  def FindInitializationErrors(self):
    return self._cmsg.FindInitializationErrors()
  def __str__(self):
    return self._cmsg.DebugString()
  def __eq__(self, other):
    if self is other:
      return True
    if not isinstance(other, self.__class__):
      return False
    return self.ListFields() == other.ListFields()
  def __ne__(self, other):
    return not self == other
  def __hash__(self):
    raise TypeError('unhashable object')
  def __unicode__(self):
    return text_format.MessageToString(self, as_utf8=True).decode('utf-8')
  # Attach the local methods to the message class.
  # NOTE: every local def above is picked up by name here; the skip-list
  # below is why no extra local variables may be introduced in this scope.
  for key, value in locals().copy().iteritems():
    if key not in ('key', 'value', '__builtins__', '__name__', '__doc__'):
      setattr(cls, key, value)
  # Static methods:
  def RegisterExtension(extension_handle):
    extension_handle.containing_type = cls.DESCRIPTOR
    cls._extensions_by_name[extension_handle.full_name] = extension_handle
    if _IsMessageSetExtension(extension_handle):
      # MessageSet extension. Also register under type name.
      cls._extensions_by_name[
          extension_handle.message_type.full_name] = extension_handle
  cls.RegisterExtension = staticmethod(RegisterExtension)
  def FromString(string):
    msg = cls()
    msg.MergeFromString(string)
    return msg
  cls.FromString = staticmethod(FromString)
def _AddPropertiesForExtensions(message_descriptor, cls):
  """Adds properties for all fields in this protocol message type."""
  extension_dict = message_descriptor.extensions_by_name
  for extension_name, extension_field in extension_dict.iteritems():
    # Expose each extension's field number as a class-level constant,
    # mirroring what _AddInitMethod does for regular fields.
    constant_name = extension_name.upper() + '_FIELD_NUMBER'
    setattr(cls, constant_name, extension_field.number)
| bsd-3-clause |
wbc2010/django1.2.5 | django/contrib/gis/sitemaps/kml.py | 482 | 2481 | from django.core import urlresolvers
from django.contrib.sitemaps import Sitemap
from django.contrib.gis.db.models.fields import GeometryField
from django.db import models
class KMLSitemap(Sitemap):
    """
    A minimal hook to produce KML sitemaps.

    Each sitemap item is a (app_label, module_name, field_name) 3-tuple
    identifying one GeometryField of one model.
    """
    geo_format = 'kml'

    def __init__(self, locations=None):
        # If no locations specified, then we try to build for
        # every model in installed applications.
        self.locations = self._build_kml_sources(locations)

    def _build_kml_sources(self, sources):
        """
        Goes through the given sources and returns a 3-tuple of
        the application label, module name, and field name of every
        GeometryField encountered in the sources.

        If no sources are provided, then all models.
        """
        kml_sources = []
        if sources is None:
            sources = models.get_models()
        for source in sources:
            if isinstance(source, models.base.ModelBase):
                # A model class: emit one source per geometry field it defines.
                for field in source._meta.fields:
                    if isinstance(field, GeometryField):
                        kml_sources.append((source._meta.app_label,
                                            source._meta.module_name,
                                            field.name))
            elif isinstance(source, (list, tuple)):
                # An explicit 3-tuple is passed through after validation.
                if len(source) != 3:
                    raise ValueError('Must specify a 3-tuple of (app_label, module_name, field_name).')
                kml_sources.append(source)
            else:
                raise TypeError('KML Sources must be a model or a 3-tuple.')
        return kml_sources

    def get_urls(self, page=1, site=None):
        """
        This method is overridden so the appropriate `geo_format` attribute
        is placed on each URL element.
        """
        urls = Sitemap.get_urls(self, page=page, site=site)
        for url in urls: url['geo_format'] = self.geo_format
        return urls

    def items(self):
        return self.locations

    def location(self, obj):
        # Reverse the KML/KMZ serving view URL for the given source triple.
        return urlresolvers.reverse('django.contrib.gis.sitemaps.views.%s' % self.geo_format,
                                    kwargs={'label' : obj[0],
                                            'model' : obj[1],
                                            'field_name': obj[2],
                                            }
                                    )
class KMZSitemap(KMLSitemap):
    # Identical to KMLSitemap but reversed against the zipped (KMZ) view.
    geo_format = 'kmz'
| bsd-3-clause |
jmartinezchaine/OpenERP | openerp/addons/l10n_fr/wizard/fr_report_compute_resultant.py | 7 | 2284 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from osv import osv, fields
class account_cdr_report(osv.osv_memory):
    """Transient wizard that launches the French 'compute resultant' report."""
    _name = 'account.cdr.report'
    _description = 'Account CDR Report'

    def _get_defaults(self, cr, uid, context=None):
        # Default to the fiscal year that is currently open.
        fiscalyear_id = self.pool.get('account.fiscalyear').find(cr, uid)
        return fiscalyear_id

    _columns = {
        'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscal Year', required=True),
    }
    _defaults = {
        'fiscalyear_id': _get_defaults
    }

    def print_cdr_report(self, cr, uid, ids, context=None):
        """Returns the report action for the fiscal year chosen in the wizard."""
        # NOTE(review): assumes context is not None — verify callers always pass one.
        active_ids = context.get('active_ids', [])
        data = {}
        data['form'] = {}
        data['ids'] = active_ids
        data['form']['fiscalyear'] = self.browse(cr, uid, ids)[0].fiscalyear_id.id
        return { 'type': 'ir.actions.report.xml', 'report_name': 'l10n.fr.compute_resultant', 'datas': data}

# Register the wizard with the OpenERP ORM (pre-v7 registration style).
account_cdr_report()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
supercheetah/diceroller | pyinstaller/buildtests/libraries/test_wx_pubsub_kwargs.py | 2 | 1062 | #
# Copyright (C) 2012, Daniel Hyams
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
from wx.lib.pubsub import setupkwargs
from wx.lib.pubsub import pub as Publisher
# Build test: verifies that wx.lib.pubsub's kwargs protocol survives freezing.
def on_message(number):
    # The handler must be invoked with the keyword argument sent below;
    # any other value means the kwargs pubsub machinery was not bundled.
    print 'In the handler'
    if not number == 762:
        raise SystemExit('wx_pubsub_kwargs failed.')

# Subscribe then publish; a working kwargs pubsub delivers number=762.
Publisher.subscribe(on_message, 'topic.subtopic')
Publisher.sendMessage('topic.subtopic', number=762)
| artistic-2.0 |
vlegoff/tsunami | src/secondaires/peche/editeurs/__init__.py | 5 | 1621 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant les différents éditeurs"""
| bsd-3-clause |
mengxn/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 28 | 9485 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic word2vec example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'  # Base URL hosting the text8 corpus.
def maybe_download(filename, expected_bytes):
  """Download a file if not present, and make sure it's the right size.

  Args:
    filename: Local file name; also appended to the module-level `url`
      when the file has to be fetched.
    expected_bytes: Exact size in bytes the file must have.

  Returns:
    The local file name.

  Raises:
    Exception: If the file on disk does not match expected_bytes.
  """
  if not os.path.exists(filename):
    filename, _ = urllib.request.urlretrieve(url + filename, filename)
  actual_bytes = os.stat(filename).st_size
  if actual_bytes != expected_bytes:
    print(actual_bytes)
    raise Exception(
        'Failed to verify ' + filename + '. Can you get to it with a browser?')
  print('Found and verified', filename)
  return filename
filename = maybe_download('text8.zip', 31344016)


# Read the data into a list of strings.
def read_data(filename):
  """Extract the first file enclosed in a zip file as a list of words.

  text8 is one whitespace-separated token stream; tf.compat.as_str
  normalizes bytes to str across Python versions.
  """
  with zipfile.ZipFile(filename) as f:
    data = tf.compat.as_str(f.read(f.namelist()[0])).split()
  return data

vocabulary = read_data(filename)
print('Data size', len(vocabulary))

# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000  # Size of the kept vocabulary, including 'UNK'.
def build_dataset(words, n_words):
  """Process raw inputs into a dataset.

  Keeps the n_words - 1 most frequent words; every other word maps to the
  'UNK' bucket at index 0.

  Args:
    words: Sequence of token strings.
    n_words: Total vocabulary size, including the 'UNK' entry.

  Returns:
    data: List of word indices (0 for out-of-vocabulary words).
    count: List of [word, frequency] pairs; count[0] is ['UNK', unk_count].
    dictionary: Mapping word -> index.
    reversed_dictionary: Mapping index -> word.
  """
  count = [['UNK', -1]]
  count.extend(collections.Counter(words).most_common(n_words - 1))
  dictionary = {word: index for index, (word, _) in enumerate(count)}
  data = []
  unk_count = 0
  for word in words:
    index = dictionary.get(word)
    if index is None:
      index = 0  # dictionary['UNK']
      unk_count += 1
    data.append(index)
  count[0][1] = unk_count
  reversed_dictionary = {index: word for word, index in dictionary.items()}
  return data, count, dictionary, reversed_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,
                                                            vocabulary_size)
del vocabulary  # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])

# Global cursor into `data`, advanced by generate_batch across calls.
data_index = 0

# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
  """Generates one skip-gram training batch from the global `data` stream.

  Args:
    batch_size: Number of (center, context) examples to emit; must be a
      multiple of num_skips.
    num_skips: How many context words to sample per center word.
    skip_window: Words to consider on each side of the center word.

  Returns:
    batch: int32 array of shape (batch_size,) holding center word ids.
    labels: int32 array of shape (batch_size, 1) holding context word ids.
  """
  global data_index
  assert batch_size % num_skips == 0
  assert num_skips <= 2 * skip_window
  batch = np.ndarray(shape=(batch_size), dtype=np.int32)
  labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
  span = 2 * skip_window + 1  # [ skip_window target skip_window ]
  # Sliding window over the corpus; maxlen makes append() drop the oldest.
  buffer = collections.deque(maxlen=span)
  for _ in range(span):
    buffer.append(data[data_index])
    data_index = (data_index + 1) % len(data)
  for i in range(batch_size // num_skips):
    target = skip_window  # target label at the center of the buffer
    targets_to_avoid = [skip_window]
    for j in range(num_skips):
      # Rejection-sample a context position distinct from earlier picks.
      while target in targets_to_avoid:
        target = random.randint(0, span - 1)
      targets_to_avoid.append(target)
      batch[i * num_skips + j] = buffer[skip_window]
      labels[i * num_skips + j, 0] = buffer[target]
    buffer.append(data[data_index])
    data_index = (data_index + 1) % len(data)
  # Backtrack a little bit to avoid skipping words in the end of a batch
  data_index = (data_index + len(data) - span) % len(data)
  return batch, labels
# Smoke-test the batcher and print the sampled (center -> context) pairs.
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
  print(batch[i], reverse_dictionary[batch[i]],
        '->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.

batch_size = 128
embedding_size = 128  # Dimension of the embedding vector.
skip_window = 1       # How many words to consider left and right.
num_skips = 2         # How many times to reuse an input to generate a label.

# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16      # Random set of words to evaluate similarity on.
valid_window = 100   # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64     # Number of negative examples to sample.

graph = tf.Graph()

with graph.as_default():

  # Input data.
  train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
  train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
  valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

  # Ops and variables pinned to the CPU because of missing GPU implementation
  with tf.device('/cpu:0'):
    # Look up embeddings for inputs.
    embeddings = tf.Variable(
        tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
    embed = tf.nn.embedding_lookup(embeddings, train_inputs)

    # Construct the variables for the NCE loss
    nce_weights = tf.Variable(
        tf.truncated_normal([vocabulary_size, embedding_size],
                            stddev=1.0 / math.sqrt(embedding_size)))
    nce_biases = tf.Variable(tf.zeros([vocabulary_size]))

  # Compute the average NCE loss for the batch.
  # tf.nce_loss automatically draws a new sample of the negative labels each
  # time we evaluate the loss.
  loss = tf.reduce_mean(
      tf.nn.nce_loss(weights=nce_weights,
                     biases=nce_biases,
                     labels=train_labels,
                     inputs=embed,
                     num_sampled=num_sampled,
                     num_classes=vocabulary_size))

  # Construct the SGD optimizer using a learning rate of 1.0.
  optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)

  # Compute the cosine similarity between minibatch examples and all embeddings.
  norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
  normalized_embeddings = embeddings / norm
  valid_embeddings = tf.nn.embedding_lookup(
      normalized_embeddings, valid_dataset)
  # Rows: validation words; columns: cosine similarity to every vocab word.
  similarity = tf.matmul(
      valid_embeddings, normalized_embeddings, transpose_b=True)

  # Add variable initializer.
  init = tf.global_variables_initializer()
# Step 5: Begin training.
num_steps = 100001

with tf.Session(graph=graph) as session:
  # We must initialize all variables before we use them.
  init.run()
  print('Initialized')

  average_loss = 0
  for step in xrange(num_steps):
    batch_inputs, batch_labels = generate_batch(
        batch_size, num_skips, skip_window)
    feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}

    # We perform one update step by evaluating the optimizer op (including it
    # in the list of returned values for session.run()
    _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
    average_loss += loss_val

    if step % 2000 == 0:
      if step > 0:
        average_loss /= 2000
      # The average loss is an estimate of the loss over the last 2000 batches.
      print('Average loss at step ', step, ': ', average_loss)
      average_loss = 0

    # Note that this is expensive (~20% slowdown if computed every 500 steps)
    if step % 10000 == 0:
      sim = similarity.eval()
      for i in xrange(valid_size):
        valid_word = reverse_dictionary[valid_examples[i]]
        top_k = 8  # number of nearest neighbors
        # Skip index 0: the closest word to any word is itself.
        nearest = (-sim[i, :]).argsort()[1:top_k + 1]
        log_str = 'Nearest to %s:' % valid_word
        for k in xrange(top_k):
          close_word = reverse_dictionary[nearest[k]]
          log_str = '%s %s,' % (log_str, close_word)
        print(log_str)
  final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
  """Scatter-plots 2-D embeddings with one text label per point.

  Args:
    low_dim_embs: Array of shape (num_points, 2) with x/y coordinates.
    labels: Word labels, one per row of low_dim_embs.
    filename: Output image path.

  Note: relies on the module-level `plt` (matplotlib.pyplot), which is
  imported inside the try block further down before this is called.
  """
  assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
  plt.figure(figsize=(18, 18))  # in inches
  for i, label in enumerate(labels):
    x, y = low_dim_embs[i, :]
    plt.scatter(x, y)
    plt.annotate(label,
                 xy=(x, y),
                 xytext=(5, 2),
                 textcoords='offset points',
                 ha='right',
                 va='bottom')

  plt.savefig(filename)
# Project the 500 most frequent words to 2-D with t-SNE and plot them;
# skipped gracefully when the plotting dependencies are missing.
try:
  # pylint: disable=g-import-not-at-top
  from sklearn.manifold import TSNE
  import matplotlib.pyplot as plt

  tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
  plot_only = 500
  low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
  labels = [reverse_dictionary[i] for i in xrange(plot_only)]
  plot_with_labels(low_dim_embs, labels)

except ImportError:
  print('Please install sklearn, matplotlib, and scipy to show embeddings.')
| apache-2.0 |
a7xtony1/plugin.video.ELECTROMERIDAtv | resources/tools/resolvers.py | 26 | 52080 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# BassFox - Kodi Add-on
# Plugin multimedia para el add-on
#------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Gracias a las librerías de Jesús (www.mimediacenter.info)
import os
import sys
import urllib
import urllib2
import re
import string
import shutil
import zipfile
import time
import urlparse
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
import scrapertools, plugintools, unwise, unpackerjs, requests
addonName = xbmcaddon.Addon().getAddonInfo("name")
addonVersion = xbmcaddon.Addon().getAddonInfo("version")
addonId = xbmcaddon.Addon().getAddonInfo("id")
addonPath = xbmcaddon.Addon().getAddonInfo("path")
from __main__ import *
art = addonPath + "/art/"
def allmyvideos(params):
    """Resolves an allmyvideos.net page into a direct media URL and plays it.

    Args:
        params: Plugin params dict; reads 'url' (the video page URL).
    """
    plugintools.log('[%s %s] Allmyvideos %s' % (addonName, addonVersion, repr(params)))
    page_url = params.get("url")
    url_fixed = page_url.split("/")
    url_fixed = 'http://www.allmyvideos.net/' + 'embed-' + url_fixed[3] + '.html'
    plugintools.log("url_fixed= "+url_fixed)
    # Fetch the page HTML with a browser-like User-Agent.
    headers = {'user-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'}
    r = requests.get(page_url, headers=headers)
    data = r.text
    if "<b>File Not Found</b>" in data or "<b>Archivo no encontrado</b>" in data or '<b class="err">Deleted' in data or '<b class="err">Removed' in data or '<font class="err">No such' in data:
        xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('Bassfox', "Archivo borrado!", 3 , art+'icon.png'))
    else:
        # Normalize the URL to the fixed-size embed page.
        videoid = page_url.replace("http://allmyvideos.net/","").replace("https://allmyvideos.net/","").strip()
        page_url = "http://allmyvideos.net/embed-"+videoid+"-728x400.html"
        #data = scrapertools.cache_page(page_url)
        r = requests.get(page_url, headers=headers)
        data = r.text
        if "File was banned" in data:
            # Banned files need the download form to be re-submitted.
            #data = scrapertools.cache_page(page_url,post="op=download1&usr_login=&id="+videoid+"&fname=&referer=&method_free=1&x=147&y=25")
            payload = {'op': 'download1', 'usr_login': '', 'id': videoid, 'fname': '', 'referer': '', 'method_free': '1', 'x': '147', 'y': '25'}
            r = requests.get(page_url, params=payload)
            data = r.text
        # Extract the media URL, preferring entries that are not
        # thumbnails (.png) or subtitles (.srt).
        match = re.compile('"file" : "(.+?)",').findall(data)
        media_url = ""
        if len(match) > 0:
            for tempurl in match:
                if not tempurl.endswith(".png") and not tempurl.endswith(".srt"):
                    media_url = tempurl
                    print media_url
            if media_url == "":
                media_url = match[0]
                print media_url
        if media_url!="":
            media_url+= "&direct=false"
            plugintools.log("media_url= "+media_url)
            plugintools.play_resolved_url(media_url)
def streamcloud(params):
    """Resolves a streamcloud.eu link: waits out the countdown, then
    re-submits the human-check form if no direct link was served.

    Args:
        params: Plugin params dict; reads 'url' (the video page URL).
    """
    plugintools.log('[%s %s]Streamcloud %s' % (addonName, addonVersion, repr(params)))
    url = params.get("url")
    request_headers=[]
    request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"])
    body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
    plugintools.log("data= "+body)
    # Progress bar covering the site's mandatory ~13 second wait.
    progreso = xbmcgui.DialogProgress()
    progreso.create("Bassfox", "Abriendo Streamcloud..." , url )
    i = 13000
    j = 0
    percent = 0
    while j <= 13000 :
        percent = ((j + ( 13000 / 10.0 )) / i)*100
        xbmc.sleep(i/10)  # 10% = 1.3 seconds
        j = j + ( 13000 / 10.0 )
        msg = "Espera unos segundos, por favor... "
        percent = int(round(percent))
        print percent
        progreso.update(percent, "" , msg, "")
    progreso.close()
    media_url = plugintools.find_single_match(body , 'file\: "([^"]+)"')
    # NOTE(review): when the first match above succeeds, nothing is played
    # here — confirm whether that is intended.
    if media_url == "":
        # No direct link in the first response: collect the hidden form
        # fields and POST the human-check ("imhuman") form.
        op = plugintools.find_single_match(body,'<input type="hidden" name="op" value="([^"]+)"')
        usr_login = ""
        id = plugintools.find_single_match(body,'<input type="hidden" name="id" value="([^"]+)"')
        fname = plugintools.find_single_match(body,'<input type="hidden" name="fname" value="([^"]+)"')
        referer = plugintools.find_single_match(body,'<input type="hidden" name="referer" value="([^"]*)"')
        hashstring = plugintools.find_single_match(body,'<input type="hidden" name="hash" value="([^"]*)"')
        imhuman = plugintools.find_single_match(body,'<input type="submit" name="imhuman".*?value="([^"]+)">').replace(" ","+")
        post = "op="+op+"&usr_login="+usr_login+"&id="+id+"&fname="+fname+"&referer="+referer+"&hash="+hashstring+"&imhuman="+imhuman
        request_headers.append(["Referer",url])
        body,response_headers = plugintools.read_body_and_headers(url, post=post, headers=request_headers)
        plugintools.log("data= "+body)
        # Extract the URL
        media_url = plugintools.find_single_match( body , 'file\: "([^"]+)"' )
        plugintools.log("media_url= "+media_url)
        plugintools.play_resolved_url(media_url)
    if 'id="justanotice"' in body:
        # The adblock-detector page was served instead of the video.
        plugintools.log("[streamcloud.py] data="+body)
        plugintools.log("[streamcloud.py] Ha saltado el detector de adblock")
        return -1
def playedto(params):
    """Resolves a played.to link into its .flv stream and plays it.

    Args:
        params: Plugin params dict; reads and updates 'url'.
    """
    plugintools.log('[%s %s] Played.to %s' % (addonName, addonVersion, repr(params)))
    url = params.get("url")
    url = url.split("/")
    url_fixed = "http://played.to/embed-" + url[3] + "-640x360.html"
    plugintools.log("url_fixed= "+url_fixed)
    request_headers=[]
    request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"])
    body,response_headers = plugintools.read_body_and_headers(url_fixed, headers=request_headers)
    body = body.strip()
    if body == "<center>This video has been deleted. We apologize for the inconvenience.</center>":
        xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('Bassfox', "Enlace borrado...", 3 , art+'icon.png'))
    elif body.find("Removed for copyright infringement") >= 0:
        xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('Bassfox', "Removed for copyright infringement", 3 , art+'icon.png'))
    else:
        # Scan every "file ..." line, strip the JS punctuation and play
        # the entries that point at an .flv stream.
        r = re.findall('file(.+?)\n', body)
        for entry in r:
            entry = entry.replace('",', "")
            entry = entry.replace('"', "")
            entry = entry.replace(': ', "")
            entry = entry.strip()
            plugintools.log("vamos= "+entry)
            if entry.endswith("flv"):
                plugintools.play_resolved_url(entry)
                xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('Bassfox', "Resolviendo enlace...", 3 , art+'icon.png'))
                params["url"]=entry
                plugintools.log("URL= "+entry)
def vidspot(params):
    """Resolves a vidspot.net link into a direct .mp4 URL and plays it.

    Args:
        params: Plugin params dict; reads 'url' and overwrites it with the
            resolved media URL when found.
    """
    plugintools.log('[%s %s] Vidspot %s' % (addonName, addonVersion, repr(params)))
    url = params.get("url")
    url = url.split("/")
    url_fixed = 'http://www.vidspot.net/' + 'embed-' + url[3] + '.html'
    plugintools.log("url_fixed= "+url_fixed)
    # Fetch the embed page with a browser-like User-Agent.
    headers = {'user-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'}
    r = requests.get(url_fixed, headers=headers)
    body = r.text
    try:
        if body.find("File was deleted") >= 0:
            xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('Bassfox', "Archivo borrado", 3 , art+'icon.png'))
        else:
            # Pick the "file" entry that carries the direct mp4 stream.
            for entry in re.findall('"file" : "(.+?)"', body):
                plugintools.log("vamos= "+entry)
                if entry.endswith("mp4?v2"):
                    url = entry + '&direct=false'
                    params["url"] = url
                    plugintools.log("url= "+url)
                    plugintools.play_resolved_url(url)
                    xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('Bassfox', "Resolviendo enlace...", 3 , art+'icon.png'))
    except Exception as e:
        # Was a bare 'except: pass', which silently swallowed every error,
        # including SystemExit/KeyboardInterrupt. Keep the best-effort
        # behavior for real failures, but log them and let exit exceptions
        # propagate.
        plugintools.log("[vidspot] error resolving %s: %s" % (url_fixed, repr(e)))
def vk(params):
    """Resolve a VK.com video page into a direct stream URL and play it.

    params: dict with at least "url" (VK page or embed URL).
    Flow: fetch the page, extract the internal video id ("vkid") and the
    player variables (host/uid/vtag/no_flv/max_hd), build the final FLV
    or MP4 URL and hand it to plugintools.play_resolved_url().
    """
    plugintools.log('[%s %s] Vk %s' % (addonName, addonVersion, repr(params)))
    page_url = params.get("url")
    #url = url.replace('http://', 'https://')
    #data = data.replace("amp;", "")
    #data = scrapertools.cache_page(page_url)
    data = plugintools.read(page_url)
    plugintools.log("data= "+data)
    if "This video has been removed from public access" in data:
        xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('Bassfox', "Archivo borrado!", 3 , art+'icon.png'))
    else:
        # Re-fetch with HTML-entity-mangled ampersands ("amp;") stripped.
        #data = scrapertools.cache_page(page_url.replace("amp;",""))
        data = plugintools.read(page_url.replace("amp;",""))
        plugintools.log("data= "+data)
        videourl = ""
        match = plugintools.find_single_match(data, r'vkid=([^\&]+)\&')
        print match
        vkid = ""
        # Read the page and extract the video id
        data2 = data.replace("\\","")
        patron = '"vkid":"([^"]+)"'
        matches = re.compile(patron,re.DOTALL).findall(data2)
        if len(matches)>0:
            vkid = matches[0]
        else:
            plugintools.log("No encontró vkid")
        plugintools.log("vkid="+vkid)
        # Extract the player variables (host/uid/vtag/no_flv/max_hd)
        patron = "var video_host = '([^']+)'.*?"
        patron += "var video_uid = '([^']+)'.*?"
        patron += "var video_vtag = '([^']+)'.*?"
        patron += "var video_no_flv = ([^;]+);.*?"
        patron += "var video_max_hd = '([^']+)'"
        matches = re.compile(patron,re.DOTALL).findall(data)
        print matches
        if len(matches)>0:
            # e.g. video_host=http://cs509601.vk.com/, video_uid=149623387,
            #      video_vtag=1108941f4c, video_no_flv=1, video_max_hd=1
            video_host = matches[0][0]
            video_uid = matches[0][1]
            video_vtag = matches[0][2]
            video_no_flv = matches[0][3]
            video_max_hd = matches[0][4]
        else:
            # Fallback: embed pages carry the values JSON-escaped, e.g.
            # {"uid":"97482389","vid":"...","host":"507214","vtag":"99bca9d028",
            #  "vkid":"161509127","hd":1,"no_flv":1,...} — unescape and regex them out.
            data2 = data.replace("\\","")
            video_host = scrapertools.get_match(data2,'"host":"([^"]+)"')
            video_uid = scrapertools.get_match(data2,'"uid":"([^"]+)"')
            video_vtag = scrapertools.get_match(data2,'"vtag":"([^"]+)"')
            video_no_flv = scrapertools.get_match(data2,'"no_flv":([0-9]+)')
            video_max_hd = scrapertools.get_match(data2,'"hd":([0-9]+)')
            if not video_host.startswith("http://"):
                video_host = "http://cs"+video_host+".vk.com/"
        plugintools.log("video_host="+video_host+", video_uid="+video_uid+", video_vtag="+video_vtag+", video_no_flv="+video_no_flv+", video_max_hd="+video_max_hd)
        video_urls = []
        if video_no_flv.strip() == "0" and video_uid != "0":
            tipo = "flv"
            if "http://" in video_host:
                videourl = "%s/u%s/video/%s.%s" % (video_host,video_uid,video_vtag,tipo)
            else:
                videourl = "http://%s/u%s/video/%s.%s" % (video_host,video_uid,video_vtag,tipo)
            # Add it to the candidate list
            video_urls.append( ["FLV [vk]",videourl])
        elif video_uid== "0" and vkid != "": #http://447.gt3.vkadre.ru/assets/videos/2638f17ddd39-75081019.vk.flv
            tipo = "flv"
            if "http://" in video_host:
                videourl = "%s/assets/videos/%s%s.vk.%s" % (video_host,video_vtag,vkid,tipo)
            else:
                videourl = "http://%s/assets/videos/%s%s.vk.%s" % (video_host,video_vtag,vkid,tipo)
            # Add it to the candidate list
            video_urls.append( ["FLV [vk]",videourl])
        else: #http://cs12385.vkontakte.ru/u88260894/video/d09802a95b.360.mp4
            # Play the best-quality MP4 stream available.
            # NOTE(review): if video_max_hd is "0", videourl stays "" and an
            # empty URL is handed to the player below — confirm that case.
            if video_max_hd=="3":
                plugintools.log("Vamos a por el vídeo 720p")
                if video_host.endswith("/"):
                    videourl = "%su%s/videos/%s.%s" % (video_host,video_uid,video_vtag,"720.mp4")
                else:
                    videourl = "%s/u%s/videos/%s.%s" % (video_host,video_uid,video_vtag,"720.mp4")
                plugintools.log("videourl= "+videourl)
            elif video_max_hd=="2":
                plugintools.log("Vamos a por el vídeo 480p")
                if video_host.endswith("/"):
                    videourl = "%su%s/videos/%s.%s" % (video_host,video_uid,video_vtag,"480.mp4")
                else:
                    videourl = "%s/u%s/videos/%s.%s" % (video_host,video_uid,video_vtag,"480.mp4")
                plugintools.log("videourl= "+videourl)
            elif video_max_hd=="1":
                plugintools.log("Vamos a por el vídeo 360p")
                if video_host.endswith("/"):
                    videourl = "%su%s/videos/%s.%s" % (video_host,video_uid,video_vtag,"360.mp4")
                else:
                    videourl = "%s/u%s/videos/%s.%s" % (video_host,video_uid,video_vtag,"360.mp4")
                plugintools.log("videourl= "+videourl)
        plugintools.play_resolved_url(videourl)
        plugintools.log("videourl= "+videourl)
def nowvideo(params):
plugintools.log('[%s %s] Nowvideo %s' % (addonName, addonVersion, repr(params)))
data = plugintools.read(params.get("url"))
#data = data.replace("amp;", "")
if "The file is being converted" in data:
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('Bassfox', "El archivo está en proceso", 3 , art+'icon.png'))
elif "no longer exists" in data:
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('Bassfox', "El archivo ha sido borrado", 3 , art+'icon.png'))
else:
#plugintools.log("data= "+data)
domain = plugintools.find_single_match(data, 'flashvars.domain="([^"]+)')
video_id = plugintools.find_single_match(data, 'flashvars.file="([^"]+)')
filekey = plugintools.find_single_match(data, 'flashvars.filekey=([^;]+)')
# En la página nos da el token de esta forma (siendo fkzd el filekey): var fkzd="83.47.1.12-8d68210314d70fb6506817762b0d495e";
token_txt = 'var '+filekey
#plugintools.log("token_txt= "+token_txt)
token = plugintools.find_single_match(data, filekey+'=\"([^"]+)')
token = token.replace(".","%2E").replace("-","%2D")
#plugintools.log("domain= "+domain)
#plugintools.log("video_id= "+video_id)
#plugintools.log("filekey= "+filekey)
#plugintools.log("token= "+token)
if video_id == "":
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('Bassfox', "Error!", 3 , art+'icon.png'))
else:
#http://www.nowvideo.sx/api/player.api.php?user=undefined&pass=undefined&cid3=undefined&numOfErrors=0&cid2=undefined&key=83%2E47%2E1%2E12%2D8d68210314d70fb6506817762b0d495e&file=b5c8c44fc706f&cid=1
url = 'http://www.nowvideo.sx/api/player.api.php?user=undefined&pass=undefined&cid3=undefined&numOfErrors=0&cid2=undefined&key=' + token + '&file=' + video_id + '&cid=1'
# Vamos a lanzar una petición HTTP de esa URL
referer = 'http://www.nowvideo.sx/video/b5c8c44fc706f'
request_headers=[]
request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"])
request_headers.append(["Referer",referer])
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
# plugintools.log("data= "+body)
# body= url=http://s173.coolcdn.ch/dl/04318aa973a3320b8ced6734f0c20da3/5440513e/ffe369cb0656c0b8de31f6ef353bcff192.flv&title=The.Black.Rider.Revelation.Road.2014.DVDRip.X264.AC3PLAYNOW.mkv%26asdasdas&site_url=http://www.nowvideo.sx/video/b5c8c44fc706f&seekparm=&enablelimit=0
body = body.replace("url=", "")
body = body.split("&")
if len(body) >= 0:
print 'body',body
url = body[0]
plugintools.play_resolved_url(url)
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('Bassfox', "Cargando vídeo...", 1 , art+'icon.png'))
else:
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('Bassfox', "Error!", 3 , art+'icon.png'))
''' En el navegador...
flashvars.domain="http://www.nowvideo.sx";
flashvars.file="b5c8c44fc706f";
flashvars.filekey=fkzd;
flashvars.advURL="0";
flashvars.autoplay="false";
flashvars.cid="1";
'''
def tumi(params):
    """Resolve a Tumi link and play it.

    params: dict with at least "url". The page packs the stream host as
    pipe-separated tokens; if that parse fails, fall back to a plain
    file:'...' match.
    """
    plugintools.log('[%s %s] Tumi %s' % (addonName, addonVersion, repr(params)))
    html = scrapertools.cache_page(params.get("url"))
    if "Video is processing now" in html:
        xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('Bassfox', "El archivo está en proceso", 3 , art+'icon.png'))
        return
    try:
        # Tokens come pipe-separated between |type| and |file| markers.
        tokens = scrapertools.find_single_match(html, "\|type\|(.*?)\|file\|").replace("||","|").split("|")
        node = scrapertools.find_single_match(html, "//k.j.h.([0-9]+):g/p/v.o")
        template = "http://%s.%s.%s.%s:%s/%s/%s.%s"
        if node:
            url = template % (tokens[3], tokens[2], tokens[1], node, tokens[0], tokens[8], "v", tokens[7])
        else:
            url = template % (tokens[4], tokens[3], tokens[2], tokens[1], tokens[0], tokens[9], "v", tokens[8])
    except:
        # Fallback: the URL is sometimes exposed directly.
        url = scrapertools.find_single_match(html, "file:'([^']+)'")
    plugintools.log("url_final= "+url)
    plugintools.play_resolved_url(url)
def veehd(params):
    """Play a VeeHD video (requires a veehd.com account).

    Logs in with the credentials from the addon settings, walks the
    vpi/va frame chain and extracts either the flash or the divx URL.
    Shows a notification and returns early on any failure.
    """
    plugintools.log('[%s %s] VeeHD %s' % (addonName, addonVersion, repr(params)))
    uname = plugintools.get_setting("veehd_user")
    pword = plugintools.get_setting("veehd_pword")
    if uname == '' or pword == '':
        xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('Bassfox', "Debes configurar el identificador para Veehd.com", 3 , art+'icon.png'))
        return
    url = params.get("url")
    url_login = 'http://veehd.com/login'
    request_headers=[]
    request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"])
    request_headers.append(["Referer",url])
    # Submit the login form; follow_redirects lands us back on the video page.
    post = {'ref': url, 'uname': uname, 'pword': pword, 'submit': 'Login', 'terms': 'on'}
    post = urllib.urlencode(post)
    body,response_headers = plugintools.read_body_and_headers(url_login, post=post, headers=request_headers, follow_redirects=True)
    vpi = plugintools.find_single_match(body, '"/(vpi.+?h=.+?)"')
    if not vpi:
        # If the login form is still present, the credentials were rejected.
        if 'type="submit" value="Login" name="submit"' in body:
            xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('Bassfox', "Error al identificarse en Veehd.com", 3 , art+'icon.png'))
        else:
            xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('Bassfox', "Error buscando el video en Veehd.com", 3 , art+'icon.png'))
        return
    req = urllib2.Request('http://veehd.com/'+vpi)
    for header in request_headers:
        req.add_header(header[0], header[1]) # User-Agent + Referer
    response = urllib2.urlopen(req)
    body = response.read()
    response.close()
    # "va" appears to be an interstitial frame; requesting it once makes the
    # follow-up vpi request return the real player markup — TODO confirm.
    va = plugintools.find_single_match(body, '"/(va/.+?)"')
    if va:
        req = urllib2.Request('http://veehd.com/'+va)
        for header in request_headers:
            req.add_header(header[0], header[1]) # User-Agent + Referer
        urllib2.urlopen(req)
        req = urllib2.Request('http://veehd.com/'+vpi)
        for header in request_headers:
            req.add_header(header[0], header[1]) # User-Agent + Referer
        response = urllib2.urlopen(req)
        body = response.read()
        response.close()
    # The player page embeds either a flash (x-shockwave-flash) or divx source.
    video_url = False
    if 'application/x-shockwave-flash' in body:
        video_url = urllib.unquote(plugintools.find_single_match(body, '"url":"(.+?)"'))
    elif 'video/divx' in body:
        video_url = urllib.unquote(plugintools.find_single_match(body, 'type="video/divx"\s+src="(.+?)"'))
    if not video_url:
        xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('Bassfox', "Error abriendo el video en Veehd.com", 3 , art+'icon.png'))
        return
    plugintools.log("video_url= "+video_url)
    plugintools.play_resolved_url(video_url)
def streaminto(params):
plugintools.log('[%s %s] streaminto %s' % (addonName, addonVersion, repr(params)))
page_url = params.get("url")
if page_url.startswith("http://streamin.to/embed-") == False:
videoid = plugintools.find_single_match(page_url,"streamin.to/([a-z0-9A-Z]+)")
page_url = "http://streamin.to/embed-"+videoid+".html"
plugintools.log("page_url= "+page_url)
# Leemos el código web
headers = {'user-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'}
r = requests.get(page_url, headers=headers)
data = r.text
plugintools.log("data= "+data)
if data == "File was deleted":
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('Bassfox', "Archivo borrado!", 3 , art+'icon.png'))
else:
# TODO: Si "video not found" en data, mostrar mensaje "Archivo borrado!"
patron_flv = 'file: "([^"]+)"'
patron_jpg = 'image: "(http://[^/]+/)'
try:
host = scrapertools.get_match(data, patron_jpg)
plugintools.log("[streaminto.py] host="+host)
flv_url = scrapertools.get_match(data, patron_flv)
plugintools.log("[streaminto.py] flv_url="+flv_url)
flv = host+flv_url.split("=")[1]+"/v.flv"
plugintools.log("[streaminto.py] flv="+flv)
page_url = flv
except:
plugintools.log("[streaminto] opcion 2")
op = plugintools.find_single_match(data,'<input type="hidden" name="op" value="([^"]+)"')
plugintools.log("[streaminto] op="+op)
usr_login = ""
id = plugintools.find_single_match(data,'<input type="hidden" name="id" value="([^"]+)"')
plugintools.log("[streaminto] id="+id)
fname = plugintools.find_single_match(data,'<input type="hidden" name="fname" value="([^"]+)"')
plugintools.log("[streaminto] fname="+fname)
referer = plugintools.find_single_match(data,'<input type="hidden" name="referer" value="([^"]*)"')
plugintools.log("[streaminto] referer="+referer)
hashstring = plugintools.find_single_match(data,'<input type="hidden" name="hash" value="([^"]*)"')
plugintools.log("[streaminto] hashstring="+hashstring)
imhuman = plugintools.find_single_match(data,'<input type="submit" name="imhuman".*?value="([^"]+)"').replace(" ","+")
plugintools.log("[streaminto] imhuman="+imhuman)
import time
time.sleep(10)
# Lo pide una segunda vez, como si hubieras hecho click en el banner
#op=download1&usr_login=&id=z3nnqbspjyne&fname=Coriolanus_DVDrip_Castellano_by_ARKONADA.avi&referer=&hash=nmnt74bh4dihf4zzkxfmw3ztykyfxb24&imhuman=Continue+to+Video
post = "op="+op+"&usr_login="+usr_login+"&id="+id+"&fname="+fname+"&referer="+referer+"&hash="+hashstring+"&imhuman="+imhuman
request_headers.append(["Referer",page_url])
data_video = plugintools.read_body_and_headers( page_url , post=post, headers=request_headers )
data_video = data_video[0]
rtmp = plugintools.find_single_match(data_video, 'streamer: "([^"]+)"')
print 'rtmp',rtmp
video_id = plugintools.find_single_match(data_video, 'file: "([^"]+)"')
print 'video_id',video_id
swf = plugintools.find_single_match(data_video, 'src: "(.*?)"')
print 'swf',swf
page_url = rtmp+' swfUrl='+swf + ' playpath='+video_id+"/v.flv"
plugintools.play_resolved_url(page_url)
def powvideo(params):
    """Resolve a Powvideo link: re-submit the wait-form, then unpack the
    packed JavaScript that carries the final media URL, and play it.

    params: dict with at least "url".
    """
    plugintools.log('[%s %s] Powvideo %s' % (addonName, addonVersion, repr(params)))
    page_url = params.get("url")
    # Fetch the page with a browser-like User-Agent.
    headers = [['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14']]
    data = scrapertools.cache_page( page_url , headers=headers )
    #plugintools.log("data= "+data)
    '''
    <input type="hidden" name="op" value="download1">
    <input type="hidden" name="usr_login" value="">
    <input type="hidden" name="id" value="auoxxtvyquoy">
    <input type="hidden" name="fname" value="Star.Trek.Into.Darkness.2013.HD.m720p.LAT.avi">
    <input type="hidden" name="referer" value="">
    <input type="hidden" name="hash" value="1624-83-46-1377796069-b5e6b8f9759d080a3667adad637f00ac">
    <input type="submit" name="imhuman" value="Continue to Video" id="btn_download">
    '''
    option = plugintools.find_single_match(data,'<input type="hidden" name="op" value="(down[^"]+)')
    usr_login = ""
    id = plugintools.find_single_match(data,'<input type="hidden" name="id" value="([^"]+)')
    fname = plugintools.find_single_match(data,'<input type="hidden" name="fname" value="([^"]+)')
    referer = plugintools.find_single_match(data,'<input type="hidden" name="referer" value="([^"]*)')
    hashvalue = plugintools.find_single_match(data,'<input type="hidden" name="hash" value="([^"]*)')
    submitbutton = plugintools.find_single_match(data,'<input type="submit" name="imhuman" value="([^"]+)').replace(" ","+")
    # Site-enforced countdown before the form may be re-posted.
    # NOTE(review): relies on a module-level `import time` — confirm it exists
    # at the top of this file (sibling resolvers import time locally).
    time.sleep(30)
    # Second request, as if the user clicked through the banner, e.g.
    # op=download1&usr_login=&id=...&fname=...&referer=&hash=...&imhuman=Continue+to+Video
    post = "op="+option+"&usr_login="+usr_login+"&id="+id+"&fname="+fname+"&referer="+referer+"&hash="+hashvalue+"&imhuman="+submitbutton
    plugintools.log("post= "+post)
    headers.append(["Referer",page_url])
    data = scrapertools.cache_page(page_url, post=post, headers=headers)
    plugintools.log("data= "+data)
    # Extract the media URL from the packed (p,a,c,k,e,d) JavaScript block.
    data = plugintools.find_single_match(data,"<script type='text/javascript'>(.*?)</script>")
    plugintools.log("data= "+data)
    from resources.tools.jsunpack import *
    data = unpack(data)
    data = data.replace("\\","")
    media_url = plugintools.find_single_match(data,"file:'([^']+)'")
    plugintools.log("media_url= "+media_url)
    plugintools.play_resolved_url(media_url)
def mailru(params):
    """Resolve a Mail.ru video page and play the available streams.

    params: dict with at least "url". The page URL is rewritten to the
    embed-API form, the metadata JSON is fetched (its session cookie is
    appended to every stream URL), and a quality-labelled list is handed
    to plugintools.play_resolved_url().
    """
    plugintools.log('[%s %s] Mail.ru %s' % (addonName, addonVersion, repr(params)))
    url = params.get("url")
    url = url.replace('/my.mail.ru/video/', '/api.video.mail.ru/videos/embed/')
    url = url.replace('/videoapi.my.mail.ru/', '/api.video.mail.ru/')
    plugintools.log("URL = "+url)
    result = getUrl(url).result
    plugintools.log("result= "+result)
    url = re.compile('metadataUrl":"(.+?)"').findall(result)[0]
    # The stream servers require the cookie handed out by the metadata URL.
    cookie = getUrl(url, output='cookie').result
    h = "|Cookie=%s" % urllib.quote(cookie)
    result = getUrl(url).result
    plugintools.log("result= "+result)
    # BUG FIX: the original referenced an undefined name `data` here (its
    # json.loads line had been commented out), which always raised NameError.
    # Parse the metadata JSON and take its "videos" list.
    import json
    videos = json.loads(result)['videos']
    url = []
    url += [{'quality': '1080p', 'url': i['url'] + h} for i in videos if i['key'] == '1080p']
    url += [{'quality': 'HD', 'url': i['url'] + h} for i in videos if i['key'] == '720p']
    url += [{'quality': 'SD', 'url': i['url'] + h} for i in videos if not (i['key'] == '1080p' or i['key'] == '720p')]
    plugintools.play_resolved_url(url)
def mediafire(params):
    """Scrape a Mediafire page for download identifiers.

    NOTE(review): this resolver looks unfinished — it logs the candidate
    URLs it finds but never calls plugintools.play_resolved_url(), so it
    has no effect beyond logging.
    """
    plugintools.log('[%s %s] Mediafire %s' % (addonName, addonVersion, repr(params)))
    # Fetch the page
    url = params.get("url")
    data = plugintools.read(url)
    # Wait a second and reload — presumably the page needs a second load
    # to populate the kNO variable; TODO confirm why the double read.
    plugintools.log("[Bassfox] Espere un segundo...")
    import time
    time.sleep(1)
    data = plugintools.read(url)
    plugintools.log("data= "+data)
    # kNO — presumably the direct-link variable inside Mediafire's page
    # script; matches are only logged, never used. TODO confirm.
    pattern = 'kNO \= "([^"]+)"'
    matches = re.compile(pattern,re.DOTALL).findall(data)
    for entry in matches:
        plugintools.log("entry= "+entry)
    # Type 1 - http://www.mediafire.com/download.php?4ddm5ddriajn2yo
    pattern = 'mediafire.com/download.php\?([a-z0-9]+)'
    matches = re.compile(pattern,re.DOTALL).findall(data)
    for entry in matches:
        if entry != "":
            url = 'http://www.mediafire.com/?'+entry
            plugintools.log("URL Tipo 1 = "+url)
    '''
    # Tipo 2 - http://www.mediafire.com/?4ckgjozbfid
    pattern = 'http://www.mediafire.com/\?([a-z0-9]+)'
    matches = re.compile(pattern,re.DOTALL).findall(data)
    for entry in matches:
        if entry != "":
            url = 'http://www.mediafire.com/?'+entry
            plugintools.log("URL Tipo 2 = "+url)
    # Tipo 3 - http://www.mediafire.com/file/c0ama0jzxk6pbjl
    pattern = 'http://www.mediafire.com/file/([a-z0-9]+)'
    plugintools.log("[mediafire.py] find_videos #"+pattern+"#")
    matches = re.compile(pattern,re.DOTALL).findall(data)
    for entry in matches:
        if entry != "":
            url = 'http://www.mediafire.com/?'+entry
            plugintools.log("URL Tipo 3 = "+url)
    '''
def novamov(params):
    """Resolve a Novamov page via its player API and play the stream.

    params: dict with at least "url"
    (expected form: http://www.novamov.com/video/<id>).
    Shows a notification and returns early when the file is missing or
    still being converted.
    """
    plugintools.log('[%s %s] Novamov %s' % (addonName, addonVersion, repr(params)))
    page_url = params.get("url")
    media_id = page_url.replace("http://www.novamov.com/video/", "").strip()
    # Check the video still exists before trying to resolve it.
    data = scrapertools.cache_page(page_url)
    if "This file no longer exists on our servers" in data:
        xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('Bassfox', "No existe vídeo en Novamov", 3 , art+'icon.png'))
        # BUG FIX: the original fell through and tried to resolve anyway.
        return
    elif "is being converted" in data:
        xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('Bassfox', "Vídeo no disponible", 3 , art+'icon.png'))
        # BUG FIX: same fall-through as above.
        return
    plugintools.log("[novamov.py] get_video_url(page_url='%s')" % page_url)
    html = scrapertools.cache_page(page_url)
    # The filekey is obfuscated with "unwise" JS; decode it first.
    html = unwise.unwise_process(html)
    filekey = unwise.resolve_var(html, "flashvars.filekey")
    # Get the stream URL from the player API.
    api = 'http://www.novamov.com/api/player.api.php?key=%s&file=%s' % (filekey, media_id)
    data = scrapertools.cache_page(api)
    # Response shape: url=<stream>&title=<name>&site_url=<page>&...
    data = data.replace("url=", "").strip()
    data = data.split("&title=")
    url_final = data[0]+'?client=FLASH'
    plugintools.log("url_final= "+url_final)
    plugintools.play_resolved_url(url_final)
def gamovideo(params):
    """Resolve a Gamovideo link: re-post the human-check form, unpack the
    packed JS player config and build the final RTMP URL, then play it.

    params: dict with at least "url".
    """
    plugintools.log('[%s %s] Gamovideo %s' % (addonName, addonVersion, repr(params)))
    page_url = params.get("url")
    headers = [['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14']]
    data = scrapertools.cache_page( page_url , headers=headers )
    if "is no longer available" in data:
        xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('Bassfox', "Archivo borrado!", 3 , art+'icon.png'))
    else:
        # NOTE(review): this second fetch repeats the one above verbatim —
        # presumably redundant; kept as-is.
        headers = [['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14']]
        data = scrapertools.cache_page( page_url , headers=headers )
        try:
            '''
            <input type="hidden" name="op" value="download1">
            <input type="hidden" name="usr_login" value="">
            <input type="hidden" name="id" value="auoxxtvyquoy">
            <input type="hidden" name="fname" value="Star.Trek.Into.Darkness.2013.HD.m720p.LAT.avi">
            <input type="hidden" name="referer" value="">
            <input type="hidden" name="hash" value="1624-83-46-1377796069-b5e6b8f9759d080a3667adad637f00ac">
            <input type="submit" name="imhuman" value="Continue to Video" id="btn_download">
            '''
            op = scrapertools.get_match(data,'<input type="hidden" name="op" value="(down[^"]+)"')
            usr_login = ""
            id = scrapertools.get_match(data,'<input type="hidden" name="id" value="([^"]+)"')
            fname = scrapertools.get_match(data,'<input type="hidden" name="fname" value="([^"]+)"')
            referer = scrapertools.get_match(data,'<input type="hidden" name="referer"\s+value="([^"]*)"')
            hashvalue = scrapertools.get_match(data,'<input type="hidden" name="hash" value="([^"]*)"')
            submitbutton = scrapertools.get_match(data,'<input type="submit" name="imhuman" value="([^"]+)"').replace(" ","+")
            import time
            # Site-enforced countdown before the form may be re-posted.
            time.sleep(5)
            # Second request, as if the user clicked through the banner, e.g.
            # op=download1&usr_login=&id=...&fname=...&referer=&hash=...&imhuman=Continue+to+Video
            post = "op="+op+"&usr_login="+usr_login+"&id="+id+"&fname="+fname+"&referer="+referer+"&hash="+hashvalue+"&imhuman="+submitbutton
            headers.append(["Referer",page_url])
            data = scrapertools.cache_page( page_url , post=post, headers=headers )
            plugintools.log("data="+data)
        except:
            import traceback
            traceback.print_exc()
        # Extract the URL from the packed (p,a,c,k,e,d) JavaScript block.
        plugintools.log("data="+data)
        data = scrapertools.find_single_match(data,"<script type='text/javascript'>(.*?)</script>")
        plugintools.log("data="+data)
        data = unpackerjs.unpackjs(data)
        plugintools.log("data="+data)
        # Unpacked config looks like:
        #   jwplayer("vplayer").setup({playlist:[{image:"...jpg",
        #     sources:[{file:"rtmp://<host>/vod?h=.../mp4:<path>"},{file:"<path>"}],...}]});
        # Rewriting file:"rtmp:// to streamer:" splits host (streamer) from path (file).
        data = data.replace('file:"rtmp://', 'streamer:"')
        pfile = plugintools.find_single_match(data,'file\s*\:\s*"([^"]+)"')
        pstreamer = 'rtmp://'+plugintools.find_single_match(data,'streamer\s*\:\s*"([^"]+)"')
        media_url = pstreamer + " playpath=" + pfile.replace("playpath=", "").strip()
        plugintools.log("media_url= "+media_url)
        plugintools.play_resolved_url(media_url)
def moevideos(params):
    """Resolve a moevideos.net video through the letitbit preview API.

    params: dict with at least "url". Returns (False, reason) when the
    video no longer exists; otherwise extracts the framevideo uid, asks
    api.letitbit.net for the direct link and plays it.
    """
    plugintools.log('[%s %s] Moevideos %s' % (addonName, addonVersion, repr(params)))
    # Deleted example: http://www.moevideos.net/online/27991
    page_url = params.get("url")
    data = scrapertools.cache_page(page_url)
    plugintools.log("data= "+data)
    if "<span class='tabular'>No existe</span>" in data:
        return False,"No existe o ha sido borrado de moevideos"
    else:
        # Working example: http://www.moevideos.net/online/18998
        headers = []
        headers.append(['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'])
        data = scrapertools.cache_page( page_url , headers=headers )
        # Post the "ver video" form, mainly to pick up the session cookies.
        headers.append(['Referer',page_url])
        post = "id=1&enviar2=ver+video"
        data = scrapertools.cache_page( page_url , post=post, headers=headers )
        # Changed 12-6-2014: the uid now comes from the framevideo iframe, e.g.
        # <iframe ... src="http://moevideo.net/framevideo/16363.1856374b43bbd40c7f8d2b25b8e5?width=860&height=440" ...>
        code = scrapertools.get_match(data,'<iframe width="860" height="440" src="http://moevideo.net/framevideo/([^\?]+)\?width=860\&height=440" frameborder="0" allowfullscreen ></iframe>')
        plugintools.log("code="+code)
        # letitbit preview API
        headers2 = []
        headers2.append(['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'])
        url = "http://api.letitbit.net"
        post = 'r=["tVL0gjqo5",["preview/flv_image",{"uid":"'+code+'"}],["preview/flv_link",{"uid":"'+code+'"}]]'
        data = scrapertools.cache_page(url,headers=headers2,post=post)
        plugintools.log("data="+data)
        if ',"not_found"' in data:
            xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('Bassfox', "Archivo borrado!", 3 , art+'icon.png'))
        else:
            data = data.replace("\\","")
            plugintools.log("data="+data)
            patron = '"link"\:"([^"]+)"'
            matches = re.compile(patron,re.DOTALL).findall(data)
            video_url = matches[0]+"?ref=www.moevideos.net|User-Agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:15.0) Gecko/20100101 Firefox/15.0.1&Range=bytes:0-"
            plugintools.log("[moevideos.py] video_url="+video_url)
            # BUG FIX: the original called play_resolved_url(video_url[1]),
            # which passed only the second *character* of the URL string
            # (video_url is a str, not a [label, url] pair). Play the full
            # URL instead; the intermediate video_urls list was unused.
            plugintools.play_resolved_url(video_url)
def movshare(params):
    """Resolve a movshare.net video via its player API and play it.

    params: dict with at least "url"
    (expected form: http://www.movshare.net/video/<id>).
    """
    plugintools.log('[%s %s] Movshare %s' % (addonName, addonVersion, repr(params)))
    page_url = params.get("url")
    data = scrapertools.cache_page(page_url)
    if "This file no longer exists on our servers" in data:
        xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('Bassfox', "Archivo borrado!", 3 , art+'icon.png'))
    else:
        videoid = scrapertools.get_match(page_url,"http://www.movshare.net/video/([a-z0-9]+)")
        video_urls = []
        # Fetch the page with a browser-like User-Agent.
        headers = []
        headers.append( ['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'] )
        data = scrapertools.cache_page(page_url , headers = headers)
        # Re-fetch as if the button had been clicked
        #html = scrapertools.cache_page(page_url , headers = headers)
        filekey = plugintools.find_single_match(data,'flashvars.filekey="([^"]+)"')
        # Ask the player API for the stream URL.
        api = 'http://www.movshare.net/api/player.api.php?key=%s&file=%s' % (filekey, videoid)
        headers.append( ['Referer',page_url] )
        html = scrapertools.cache_page(api,headers=headers)
        plugintools.log("html="+html)
        # Response shape: url=<stream>&title=<name>&...
        stream_url = plugintools.find_single_match(html,'url=(.+?)&title')
        if stream_url!="":
            video_urls.append( [ scrapertools.get_filename_from_url(stream_url)[-4:]+" [movshare]" , stream_url ] )
        # NOTE(review): assuming the log/play lines sit inside the loop body,
        # so nothing is played when no stream was found — confirm indentation.
        for video_url in video_urls:
            plugintools.log("[movshare.py] %s - %s" % (video_url[0],video_url[1]))
            plugintools.log("url_final= "+video_url[1])
            plugintools.play_resolved_url(video_url[1])
def movreel(params):
    """Fetch a Movreel embed and extract the media URL from its packed JS.

    NOTE(review): unfinished — the media URL is collected into video_urls
    and logged, but play_resolved_url() is never called (see trailing TODO).
    """
    plugintools.log('[%s %s] Movreel %s' % (addonName, addonVersion, repr(params)))
    page_url = params.get("url")
    video_urls = []
    data = scrapertools.cache_page(page_url)
    op = plugintools.find_single_match(data,'<input type="hidden" name="op" value="([^"]+)">')
    file_code = plugintools.find_single_match(data,'<input type="hidden" name="file_code" value="([^"]+)">')
    w = plugintools.find_single_match(data,'<input type="hidden" name="w" value="([^"]+)">')
    h = plugintools.find_single_match(data,'<input type="hidden" name="h" value="([^"]+)">')
    method_free = plugintools.find_single_match(data,'<input type="submit" name="method_free" value="([^"]+)">')
    # Re-post the "Close Ad and Watch as Free User" form, e.g.
    # op=video_embed&file_code=yrwo5dotp1xy&w=600&h=400&method_free=Close+Ad+and+Watch+as+Free+User
    post = urllib.urlencode( {"op":op,"file_code":file_code,"w":w,"h":h,"method_free":method_free} )
    print 'post',post
    data = scrapertools.cache_page(page_url,post=post)
    #plugintools.log("data="+data)
    # Unpack the packed (p,a,c,k,e,d) JavaScript and pull the file URL.
    data = unpackerjs.unpackjs(data)
    plugintools.log("data="+data)
    media_url = plugintools.find_single_match(data,'file\:"([^"]+)"')
    video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:]+" [movreel]",media_url])
    for video_url in video_urls:
        plugintools.log("[movreel.py] %s - %s" % (video_url[0],video_url[1]))
    print video_urls
    # TODO: URL STILL NOT RESOLVED — nothing is ever played here.
def videobam(params):
    """Resolve a Videobam page: prefer the 'high' stream, fall back to
    'low', and finally to the JSON "scaling":"fit" URL, then play it.

    params: dict with at least "url".
    """
    plugintools.log('[%s %s] Videobam %s' % (addonName, addonVersion, repr(params)))
    page = scrapertools.cache_page(params.get("url"))
    stream = ""
    hit = ""
    if "Video is processing" in page:
        xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('Bassfox', "Archivo no disponible temporalmente!", 3 , art+'icon.png'))
    else:
        # First choice: the high-quality stream.
        hits = re.compile(" high: '([^']+)'", re.DOTALL).findall(page)
        for hit in hits:
            stream = hit
            plugintools.log("Videobam HQ :"+hit)
        # Second choice: the low-quality stream.
        if stream == "":
            hits = re.compile(" low: '([^']+)'", re.DOTALL).findall(page)
            for hit in hits:
                stream = hit
                plugintools.log("Videobam LQ :"+hit)
        # Last resort: the escaped URL in the player JSON, e.g.
        # "scaling":"fit","url":"http:\/\/f10.videobam.com\/...\/encoded.mp4
        if hit == "":
            if len(hits) == 0:
                fit_pattern = '[\W]scaling[\W]:[\W]fit[\W],[\W]url"\:"([^"]+)"'
                for hit in re.compile(fit_pattern, re.DOTALL).findall(page):
                    stream = urllib.unquote(hit.replace('\/', '/'))
                    plugintools.log("Videobam scaling: "+stream)
        # Original code played the URL in both branches of a redundant
        # if/else; a single unconditional call is behaviorally identical.
        plugintools.play_resolved_url(stream)
def vimeo(params):
    """Resolve a Vimeo page into playable media URLs.

    params: dict with at least "url" (the Vimeo page URL).
    Returns a list of [label, media_url] pairs, HD first when available.
    """
    plugintools.log("servers.vimeo get_video_url(page_url='%s')" % repr(params))
    page_url = params.get("url")
    # Vimeo only serves the player markup to browser-like user agents.
    headers = []
    headers.append(['User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'])
    data = scrapertools.cache_page(page_url, headers=headers)
    # The player <div> carries a data-config-url attribute pointing at a
    # JSON config document that contains the actual stream URLs.
    url = scrapertools.find_single_match(data, '<div class="player" style="[^"]+" id="[^"]+" data-config-url="([^"]+)"')
    # BUG FIX: the attribute value is HTML-escaped; the previous
    # replace("&", "&") was a no-op -- restore literal ampersands.
    url = url.replace("&amp;", "&")
    headers.append(['Referer', page_url])
    data = scrapertools.cache_page(url, headers=headers)
    json_object = jsontools.load_json(data)
    # BUG FIX: video_urls was never initialised (NameError on first append)
    # and the function returned nothing; collect and return the URLs.
    video_urls = []
    files = json_object['request']['files']['h264']
    # Not every clip has both renditions; append only what is present.
    if 'hd' in files:
        video_urls.append(["HD [vimeo]", files['hd']['url']])
    if 'sd' in files:
        video_urls.append(["SD [vimeo]", files['sd']['url']])
    return video_urls
| gpl-2.0 |
piquadrat/django | tests/many_to_one/models.py | 63 | 2926 | """
Many-to-one relationships
To define a many-to-one relationship, use ``ForeignKey()``.
"""
from django.db import models
class Reporter(models.Model):
    """A reporter; the "one" side of the Article -> Reporter relation."""
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    email = models.EmailField()
    def __str__(self):
        return "%s %s" % (self.first_name, self.last_name)
class Article(models.Model):
    """An article written by a single Reporter (many-to-one via ForeignKey)."""
    headline = models.CharField(max_length=100)
    pub_date = models.DateField()
    # Deleting a Reporter cascades to its articles.
    reporter = models.ForeignKey(Reporter, models.CASCADE)
    def __str__(self):
        return self.headline
    class Meta:
        ordering = ('headline',)
class City(models.Model):
    """City with an explicit BigAutoField primary key."""
    id = models.BigAutoField(primary_key=True)
    name = models.CharField(max_length=50)
    def __str__(self):
        return self.name
class District(models.Model):
    """District optionally belonging to a City (nullable many-to-one)."""
    city = models.ForeignKey(City, models.CASCADE, related_name='districts', null=True)
    name = models.CharField(max_length=50)
    def __str__(self):
        return self.name
# If ticket #1578 ever slips back in, these models will not be able to be
# created (the field names being lower-cased versions of their opposite
# classes is important here).
class First(models.Model):
    # Field deliberately named like the lower-cased related class (see the
    # ticket #1578 note above).
    second = models.IntegerField()
class Second(models.Model):
    # FK field named after the lower-cased target class on purpose.
    first = models.ForeignKey(First, models.CASCADE, related_name='the_first')
# Protect against repetition of #1839, #2415 and #2536.
class Third(models.Model):
    """Self-referential, nullable FK (regression guard for #1839/#2415/#2536)."""
    name = models.CharField(max_length=20)
    third = models.ForeignKey('self', models.SET_NULL, null=True, related_name='child_set')
class Parent(models.Model):
    """Parent with a forward (string) reference to Child, declared below."""
    name = models.CharField(max_length=20, unique=True)
    bestchild = models.ForeignKey('Child', models.SET_NULL, null=True, related_name='favored_by')
class Child(models.Model):
    """Child pointing back at Parent via its (integer) primary key."""
    name = models.CharField(max_length=20)
    parent = models.ForeignKey(Parent, models.CASCADE)
class ToFieldChild(models.Model):
    """Child whose FK targets Parent.name (a non-PK unique field) via to_field."""
    parent = models.ForeignKey(Parent, models.CASCADE, to_field='name')
# Multiple paths to the same model (#7110, #7125)
class Category(models.Model):
    """Category referenced by Record (see the #7110/#7125 note above)."""
    name = models.CharField(max_length=20)
    def __str__(self):
        return self.name
class Record(models.Model):
    """Record in a Category; joined to itself through Relation below."""
    category = models.ForeignKey(Category, models.CASCADE)
class Relation(models.Model):
    """Two FKs to the same model, giving multiple join paths to Record."""
    left = models.ForeignKey(Record, models.CASCADE, related_name='left_set')
    right = models.ForeignKey(Record, models.CASCADE, related_name='right_set')
    def __str__(self):
        return "%s - %s" % (self.left.category.name, self.right.category.name)
# Test related objects visibility.
class SchoolManager(models.Manager):
    """Default manager that hides non-public schools from every queryset."""
    def get_queryset(self):
        return super().get_queryset().filter(is_public=True)
class School(models.Model):
    """School whose default manager filters to is_public=True rows only."""
    is_public = models.BooleanField(default=False)
    objects = SchoolManager()
class Student(models.Model):
    """Student pointing at a School (whose manager hides non-public rows)."""
    school = models.ForeignKey(School, models.CASCADE)
| bsd-3-clause |
yahman72/robotframework | atest/testdata/standard_libraries/remote/arguments.py | 25 | 2920 | import sys
from xmlrpclib import Binary
from remoteserver import RemoteServer
class Arguments(object):
    """Remote-server test library exercising argument passing over XML-RPC."""
    def argument_should_be(self, argument, expected, binary=False):
        """Assert that the received `argument` equals the eval()'d `expected`
        expression; with binary=True, Binary wrappers are unwrapped first."""
        if binary:
            argument = self._handle_binary(argument)
        expected = eval(expected)
        assert argument == expected, '%r != %r' % (argument, expected)
    def _handle_binary(self, arg, required=True):
        # Recursively unwrap xmlrpclib.Binary values (also inside lists and
        # dicts) into plain strings so they can be compared with `expected`.
        if isinstance(arg, list):
            return self._handle_binary_in_list(arg)
        if isinstance(arg, dict):
            return self._handle_binary_in_dict(arg)
        assert isinstance(arg, Binary) or not required, 'Non-binary argument'
        return str(arg) if isinstance(arg, Binary) else arg
    def _handle_binary_in_list(self, arg):
        # At least one element must be Binary; others pass through unchanged.
        assert any(isinstance(a, Binary) for a in arg)
        return [self._handle_binary(a, required=False) for a in arg]
    def _handle_binary_in_dict(self, arg):
        # At least one key or value must be Binary.
        assert any(isinstance(key, Binary) or isinstance(value, Binary)
                   for key, value in arg.items())
        return dict((self._handle_binary(key, required=False),
                     self._handle_binary(value, required=False))
                    for key, value in arg.items())
    def kwarg_should_be(self, **kwargs):
        """Keyword-argument variant of `argument_should_be`."""
        self.argument_should_be(**kwargs)
    # The keywords below simply echo their arguments back (via _format_args)
    # so tests can verify how each signature shape is marshalled.
    def no_arguments(self):
        return self._format_args()
    def one_argument(self, arg):
        return self._format_args(arg)
    def two_arguments(self, arg1, arg2):
        return self._format_args(arg1, arg2)
    def five_arguments(self, arg1, arg2, arg3, arg4, arg5):
        return self._format_args(arg1, arg2, arg3, arg4, arg5)
    def arguments_with_default_values(self, arg1, arg2=2, arg3='3'):
        return self._format_args(arg1, arg2, arg3)
    def varargs(self, *args):
        return self._format_args(*args)
    def required_defaults_and_varargs(self, req, default='world', *varargs):
        return self._format_args(req, default, *varargs)
    def kwargs(self, **kwargs):
        return self._format_args(**kwargs)
    def args_and_kwargs(self, arg1='default1', arg2='default2', **kwargs):
        return self._format_args(arg1, arg2, **kwargs)
    def varargs_and_kwargs(self, *varargs, **kwargs):
        return self._format_args(*varargs, **kwargs)
    def args_varargs_and_kwargs(self, arg1='default1', arg2='default2',
                                *varargs, **kwargs):
        return self._format_args(arg1, arg2, *varargs, **kwargs)
    def _format_args(self, *args, **kwargs):
        # Render positional args first, then kwargs sorted by name as
        # 'name:value', all joined with ', '.
        args += tuple('%s:%s' % (k, self._type(v))
                      for k, v in sorted(kwargs.items()))
        return ', '.join(self._type(a) for a in args)
    def _type(self, arg):
        # Non-string values are rendered with their type name, e.g. '1 (int)'.
        if not isinstance(arg, basestring):
            return '%s (%s)' % (arg, type(arg).__name__)
        return arg
if __name__ == '__main__':
    # Serve this library over XML-RPC; extra CLI args go to RemoteServer.
    RemoteServer(Arguments(), *sys.argv[1:])
| apache-2.0 |
mxcube/mxcube | mxcubeqt/utils/widget_utils.py | 1 | 9801 | #
# Project: MXCuBE
# https://github.com/mxcube
#
# This file is part of MXCuBE software.
#
# MXCuBE is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MXCuBE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with MXCuBE. If not, see <http://www.gnu.org/licenses/>.
from mxcubeqt.utils import colors, qt_import
from mxcubecore.dispatcher import dispatcher
from mxcubecore.ConvertUtils import string_types
__credits__ = ["MXCuBE collaboration"]
__license__ = "LGPLv3+"
class DataModelInputBinder(object):
    """Two-way binder between a data-model object and Qt input widgets.

    Model attributes are bound to widgets (QLineEdit, QComboBox,
    QCheckBox/QRadioButton, QLabel); widget edits are validated, written
    back to the model and broadcast via the "model_update" dispatcher
    signal so every binder attached to the same model stays in sync.
    """
    def __init__(self, obj):
        object.__init__(self)
        self.__model = obj
        # Key - field name/attribute name of the persistant object.
        # Value - The tuple (widget, validator, type_fn)
        self.bindings = {}
        dispatcher.connect(self._update_widget, "model_update", dispatcher.Any)
    def __checkbox_update_value(self, field_name, new_value):
        # Write the checkbox state straight to the model and notify listeners.
        setattr(self.__model, field_name, new_value)
        dispatcher.send("model_update", self.__model, field_name, self)
    def __combobox_update_value(self, field_name, new_value):
        # Store the selected index on the model and notify listeners.
        setattr(self.__model, field_name, new_value)
        dispatcher.send("model_update", self.__model, field_name, self)
    def __ledit_update_value(self, field_name, widget, new_value, type_fn, validator):
        # Reacts to textChanged; only handles programmatic updates (the
        # "edited" flag, bindings[field_name][3], is False).
        if not self.bindings[field_name][3]:
            origin_value = new_value
            if type_fn == float and validator:
                # Normalise to the validator's decimal count for validation.
                pattern = "%." + str(validator.decimals()) + "f"
                new_value = pattern % float(new_value)
            # fix validation if PyQt4 and sipapi 1 is used
            if isinstance(new_value, string_types):
                if "QString" in globals():
                    new_value = qt_import.QString(new_value)
            self.__validated(
                field_name, validator, self.bindings[field_name][0], new_value
            )
            if isinstance(widget, qt_import.QLineEdit):
                if type_fn is float and validator:
                    widget.setText(
                        "{:g}".format(
                            round(float(origin_value), validator.decimals())
                        )
                    )
            try:
                setattr(self.__model, field_name, type_fn(origin_value))
            except ValueError:
                # Empty input is tolerated; anything else is a real error.
                if origin_value != "":
                    raise
            else:
                dispatcher.send("model_update", self.__model, field_name, self)
    def __ledit_text_edited(self, field_name, widget, new_value, type_fn, validator):
        # Reacts to textEdited (interactive user edits); marks the field
        # as edited so validation colours it yellow instead of white.
        self.bindings[field_name][3] = True
        if self.__validated(
            field_name, validator, self.bindings[field_name][0], new_value
        ):
            try:
                setattr(self.__model, field_name, type_fn(new_value))
            except ValueError:
                if new_value != "":
                    raise
            else:
                dispatcher.send("model_update", self.__model, field_name, self)
    def __validated(self, field_name, validator, widget, new_value):
        """Validate new_value against the field's validator and colour the
        widget background (white/yellow = valid, red = invalid).

        Returns True when the value is acceptable, False otherwise.
        """
        if validator:
            try:
                flt_value = float(new_value)
            except BaseException:
                colors.set_widget_color(
                    widget, colors.LIGHT_RED, qt_import.QPalette.Base
                )
                return False
            if flt_value >= min(
                validator.bottom(), validator.top()
            ) and flt_value <= max(validator.bottom(), validator.top()):
                # if validator.validate(new_value, widget.cursorPosition())[0] \
                # == QValidator.Acceptable:
                if self.bindings[field_name][3]:
                    # Yellow marks a field edited by the user this session.
                    colors.set_widget_color(
                        widget, colors.LIGHT_YELLOW, qt_import.QPalette.Base
                    )
                else:
                    colors.set_widget_color(
                        widget, colors.WHITE, qt_import.QPalette.Base
                    )
                return True
            else:
                colors.set_widget_color(
                    widget, colors.LIGHT_RED, qt_import.QPalette.Base
                )
                return False
        else:
            # No validator: always valid, just reflect the edited state.
            if self.bindings[field_name][3]:
                colors.set_widget_color(
                    widget, colors.LIGHT_YELLOW, qt_import.QPalette.Base
                )
            else:
                colors.set_widget_color(widget, colors.WHITE, qt_import.QPalette.Base)
            return True
    def get_model(self):
        """Return the currently bound model object."""
        return self.__model
    def set_model(self, obj):
        """Replace the bound model and refresh/validate all bound widgets."""
        self.__model = obj
        self.init_bindings()
        self.clear_edit()
        self.validate_all()
    def init_bindings(self):
        """Push current model values into every bound widget."""
        for field_name in self.bindings.keys():
            self._update_widget(field_name, None)
    def _update_widget(self, field_name, data_binder):
        # Refresh the widget bound to field_name from the model; updates
        # that originated from this very binder are ignored to avoid loops.
        if data_binder == self:
            return
        try:
            widget, validator, type_fn, edited = self.bindings[field_name]
        except KeyError:
            return
        try:
            # Block signals so the refresh does not feed back into the model.
            widget.blockSignals(True)
            if isinstance(widget, qt_import.QLineEdit):
                if type_fn is float and validator:
                    if getattr(self.__model, field_name):
                        value = float(getattr(self.__model, field_name))
                        widget.setText(
                            "{:g}".format(round(float(value), validator.decimals()))
                        )
                elif type_fn is int and validator:
                    value = int(getattr(self.__model, field_name))
                    widget.setText("%d" % value)
                else:
                    widget.setText(str(getattr(self.__model, field_name)))
            elif isinstance(widget, qt_import.QLabel):
                widget.setText(str(getattr(self.__model, field_name)))
            elif isinstance(widget, qt_import.QComboBox):
                widget.setCurrentIndex(int(getattr(self.__model, field_name)))
            elif isinstance(widget, qt_import.QCheckBox) or isinstance(
                widget, qt_import.QRadioButton
            ):
                widget.setChecked(bool(getattr(self.__model, field_name)))
        finally:
            widget.blockSignals(False)
    def bind_value_update(self, field_name, widget, type_fn, validator=None):
        """Bind model attribute `field_name` to `widget`.

        type_fn converts widget text to the model's type; validator (a Qt
        validator) constrains line-edit input and drives the tooltip.
        """
        self.bindings[field_name] = [widget, validator, type_fn, False]
        if isinstance(widget, qt_import.QLineEdit):
            widget.textChanged.connect(
                lambda new_value: self.__ledit_update_value(
                    field_name, widget, new_value, type_fn, validator
                )
            )
            widget.textEdited.connect(
                lambda new_value: self.__ledit_text_edited(
                    field_name, widget, new_value, type_fn, validator
                )
            )
            if type_fn is float and validator:
                pattern = "%." + str(validator.decimals()) + "f"
                if getattr(self.__model, field_name):
                    widget.setText(pattern % float(getattr(self.__model, field_name)))
            else:
                widget.setText(str(getattr(self.__model, field_name)))
        elif isinstance(widget, qt_import.QLabel):
            widget.setText(str(getattr(self.__model, field_name)))
        elif isinstance(widget, qt_import.QComboBox):
            widget.activated.connect(
                lambda new_value: self.__combobox_update_value(field_name, new_value)
            )
            widget.setCurrentIndex(int(getattr(self.__model, field_name)))
        elif isinstance(widget, qt_import.QCheckBox) or isinstance(
            widget, qt_import.QRadioButton
        ):
            widget.toggled.connect(
                lambda new_value: self.__checkbox_update_value(field_name, new_value)
            )
            widget.setChecked(bool(getattr(self.__model, field_name)))
        if validator and not widget.toolTip():
            # Auto-generate a "<Field> limits low : high" tooltip.
            if isinstance(validator, qt_import.QDoubleValidator):
                tooltip = "%s limits %.2f : %.2f" % (
                    field_name.replace("_", " ").capitalize(),
                    validator.bottom(),
                    validator.top(),
                )
            else:
                tooltip = "%s limits %d : %d" % (
                    field_name.replace("_", " ").capitalize(),
                    validator.bottom(),
                    validator.top(),
                )
            widget.setToolTip(tooltip)
    def validate_all(self):
        """Re-validate every bound line edit; return the field names that
        failed validation (other widget types always pass)."""
        result = []
        for item in self.bindings.items():
            key = item[0]
            widget = item[1][0]
            validator = item[1][1]
            # if validator:
            if isinstance(widget, qt_import.QLineEdit):
                if not self.__validated(key, validator, widget, widget.text()):
                    result.append(key)
            elif isinstance(widget, qt_import.QComboBox):
                pass
            elif isinstance(widget, qt_import.QCheckBox) or isinstance(
                widget, qt_import.QRadioButton
            ):
                pass
        return result
    def clear_edit(self):
        """Reset every field's "edited" flag (used after set_model)."""
        for key in self.bindings.keys():
            self.bindings[key][3] = False
| lgpl-3.0 |
2013Commons/HUE-SHARK | desktop/core/ext-py/python-ldap-2.3.13/build/lib.linux-i686-2.7/ldap/dn.py | 45 | 2793 | """
dn.py - misc stuff for handling distinguished names (see RFC 4514)
See http://www.python-ldap.org/ for details.
\$Id: dn.py,v 1.11 2010/06/03 12:26:39 stroeder Exp $
Compability:
- Tested with Python 2.0+
"""
from ldap import __version__
import _ldap
import ldap.functions
def escape_dn_chars(s):
    """
    Escape all DN special characters found in s
    with a back-slash (see RFC 4514, section 2.4)
    """
    if not s:
        return s
    # Double existing back-slashes first so later escapes stay unambiguous.
    escaped = s.replace('\\', '\\\\')
    for special in (',', '+', '"', '<', '>', ';', '=', '\000'):
        escaped = escaped.replace(special, '\\' + special)
    # A leading '#' or space must be escaped ...
    if escaped[0] == '#' or escaped[0] == ' ':
        escaped = '\\' + escaped
    # ... and so must a trailing space.
    if escaped[-1] == ' ':
        escaped = escaped[:-1] + '\\ '
    return escaped
def str2dn(dn,flags=0):
    """
    This function takes a DN as string as parameter and returns
    a decomposed DN. It's the inverse to dn2str().
    flags describes the format of the dn
    See also the OpenLDAP man-page ldap_str2dn(3)
    """
    # An empty/None DN decomposes to an empty list; otherwise delegate to
    # the C extension via the common call wrapper.
    if not dn:
        return []
    return ldap.functions._ldap_function_call(None,_ldap.str2dn,dn,flags)
def dn2str(dn):
    """
    This function takes a decomposed DN as parameter and returns
    a single string. It's the inverse to str2dn() but will always
    return a DN in LDAPv3 format compliant to RFC 4514.
    """
    rdn_strings = []
    for rdn in dn:
        # Each RDN may be multi-valued: join its AVAs with '+'.
        ava_strings = []
        for atype, avalue, dummy in rdn:
            ava_strings.append('='.join((atype, escape_dn_chars(avalue or ''))))
        rdn_strings.append('+'.join(ava_strings))
    # RDNs themselves are separated by ','.
    return ','.join(rdn_strings)
def explode_dn(dn, notypes=0, flags=0):
    """
    explode_dn(dn [, notypes=0]) -> list
    This function takes a DN and breaks it up into its component parts.
    The notypes parameter is used to specify that only the component's
    attribute values be returned and not the attribute types.
    """
    if not dn:
        return []
    exploded = []
    for rdn in str2dn(dn, flags):
        parts = []
        for atype, avalue, dummy in rdn:
            value = escape_dn_chars(avalue or '')
            # With notypes, emit just the value; otherwise 'type=value'.
            parts.append(value if notypes else '='.join((atype, value)))
        exploded.append('+'.join(parts))
    return exploded
def explode_rdn(rdn, notypes=0, flags=0):
    """
    explode_rdn(rdn [, notypes=0]) -> list
    This function takes a RDN and breaks it up into its component parts
    if it is a multi-valued RDN.
    The notypes parameter is used to specify that only the component's
    attribute values be returned and not the attribute types.
    """
    if not rdn:
        return []
    result = []
    # Only the first (and only) RDN of the decomposed string is relevant.
    for atype, avalue, dummy in str2dn(rdn, flags)[0]:
        if notypes:
            result.append(avalue or '')
        else:
            result.append('='.join((atype, escape_dn_chars(avalue or ''))))
    return result
| apache-2.0 |
eHealthAfrica/rapidpro | temba/flows/migrations/0099_populate_recent_message.py | 3 | 2250 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-26 08:33
from __future__ import unicode_literals
from django.db import migrations
def get_recent_messages_for_segment(FlowPathRecentStep, from_uuid, to_uuid):
    """Return up to 5 visible messages for the flow path segment
    (from_uuid -> to_uuid), walking recent steps newest-first.

    Each returned message gets a run_id attribute copied from its step.
    """
    recent_steps = FlowPathRecentStep.objects.filter(from_uuid=from_uuid, to_uuid=to_uuid)
    recent_steps = recent_steps.prefetch_related('step__messages').order_by('-left_on')
    messages = []
    for r in recent_steps:
        for msg in r.step.messages.all():
            # Only visible ('V') messages count toward the 5-message cap.
            if msg.visibility == 'V':
                msg.run_id = r.step.run_id
                messages.append(msg)
                if len(messages) >= 5:
                    return messages
    return messages
def populate_recent_message(FlowPathRecentStep, FlowPathRecentMessage):
    """Backfill FlowPathRecentMessage rows from existing FlowPathRecentStep
    data, one bulk insert per unique (from_uuid, to_uuid) segment."""
    # get the unique flow path segments
    segments = list(FlowPathRecentStep.objects.values_list('from_uuid', 'to_uuid').distinct('from_uuid', 'to_uuid'))
    for s, segment in enumerate(segments):
        from_uuid = segment[0]
        to_uuid = segment[1]
        messages = get_recent_messages_for_segment(FlowPathRecentStep, from_uuid, to_uuid)
        recent_messages = []
        for msg in messages:
            # text is truncated to fit the 640-char column
            r = FlowPathRecentMessage(from_uuid=from_uuid, to_uuid=to_uuid,
                                      run_id=msg.run_id, text=msg.text[:640], created_on=msg.created_on)
            recent_messages.append(r)
        FlowPathRecentMessage.objects.bulk_create(recent_messages)
        # progress output every 100 segments
        if (s + 1) % 100 == 0:
            print("Converted recent steps to recent messages for %d of %d segments" % (s + 1, len(segments)))
def apply_manual():
    """Run this data migration by hand (e.g. from a Django shell)."""
    from temba.flows.models import FlowPathRecentStep, FlowPathRecentMessage
    populate_recent_message(FlowPathRecentStep, FlowPathRecentMessage)
def apply_as_migration(apps, schema_editor):
    """RunPython entry point: use historical models from the app registry."""
    FlowPathRecentStep = apps.get_model('flows', 'FlowPathRecentStep')
    FlowPathRecentMessage = apps.get_model('flows', 'FlowPathRecentMessage')
    populate_recent_message(FlowPathRecentStep, FlowPathRecentMessage)
class Migration(migrations.Migration):
    # Data migration: copies recent-step data into the FlowPathRecentMessage
    # table introduced by migration 0098.
    dependencies = [
        ('flows', '0098_flowpathrecentmessage'),
    ]
    operations = [
        migrations.RunPython(apply_as_migration)
    ]
| agpl-3.0 |
huguesv/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32/Demos/winprocess.py | 6 | 7326 | """
Windows Process Control
winprocess.run launches a child process and returns the exit code.
Optionally, it can:
redirect stdin, stdout & stderr to files
run the command as another user
limit the process's running time
control the process window (location, size, window state, desktop)
Works on Windows NT, 2000 & XP. Requires Mark Hammond's win32
extensions.
This code is free for any purpose, with no warranty of any kind.
-- John B. Dell'Aquila <jbd@alum.mit.edu>
"""
import win32api, win32process, win32security
import win32event, win32con, msvcrt, win32gui
import os
def logonUser(loginString):
    """
    Login as specified user and return handle.
    loginString: 'Domain\nUser\nPassword'; for local
    login use . or empty string as domain
    e.g. '.\nadministrator\nsecret_password'
    """
    domain, user, passwd = loginString.split('\n')
    # Interactive logon with the default authentication provider.
    return win32security.LogonUser(
        user,
        domain,
        passwd,
        win32con.LOGON32_LOGON_INTERACTIVE,
        win32con.LOGON32_PROVIDER_DEFAULT
        )
class Process:
    """
    A Windows process.
    """
    def __init__(self, cmd, login=None,
                 hStdin=None, hStdout=None, hStderr=None,
                 show=1, xy=None, xySize=None,
                 desktop=None):
        """
        Create a Windows process.
        cmd: command to run
        login: run as user 'Domain\nUser\nPassword'
        hStdin, hStdout, hStderr:
            handles for process I/O; default is caller's stdin,
            stdout & stderr
        show: wShowWindow (0=SW_HIDE, 1=SW_NORMAL, ...)
        xy: window offset (x, y) of upper left corner in pixels
        xySize: window size (width, height) in pixels
        desktop: lpDesktop - name of desktop e.g. 'winsta0\\default'
            None = inherit current desktop
            '' = create new desktop if necessary
        User calling login requires additional privileges:
            Act as part of the operating system [not needed on Windows XP]
            Increase quotas
            Replace a process level token
        Login string must EITHER be an administrator's account
        (ordinary user can't access current desktop - see Microsoft
        Q165194) OR use desktop='' to run another desktop invisibly
        (may be very slow to startup & finalize).
        """
        si = win32process.STARTUPINFO()
        # NOTE(review): '^' (XOR) is used where '|' (OR) is presumably
        # intended; the result is the same only because the flag bits are
        # distinct and each is applied once.
        si.dwFlags = (win32con.STARTF_USESTDHANDLES ^
                      win32con.STARTF_USESHOWWINDOW)
        # Fall back to the caller's standard handles when none are given.
        if hStdin is None:
            si.hStdInput = win32api.GetStdHandle(win32api.STD_INPUT_HANDLE)
        else:
            si.hStdInput = hStdin
        if hStdout is None:
            si.hStdOutput = win32api.GetStdHandle(win32api.STD_OUTPUT_HANDLE)
        else:
            si.hStdOutput = hStdout
        if hStderr is None:
            si.hStdError = win32api.GetStdHandle(win32api.STD_ERROR_HANDLE)
        else:
            si.hStdError = hStderr
        si.wShowWindow = show
        if xy is not None:
            si.dwX, si.dwY = xy
            si.dwFlags ^= win32con.STARTF_USEPOSITION
        if xySize is not None:
            si.dwXSize, si.dwYSize = xySize
            si.dwFlags ^= win32con.STARTF_USESIZE
        if desktop is not None:
            si.lpDesktop = desktop
        procArgs = (None, # appName
                    cmd, # commandLine
                    None, # processAttributes
                    None, # threadAttributes
                    1, # bInheritHandles
                    win32process.CREATE_NEW_CONSOLE, # dwCreationFlags
                    None, # newEnvironment
                    None, # currentDirectory
                    si) # startupinfo
        if login is not None:
            # Run as another user: impersonate just long enough for the
            # CreateProcessAsUser call, then drop back to our own token.
            hUser = logonUser(login)
            win32security.ImpersonateLoggedOnUser(hUser)
            procHandles = win32process.CreateProcessAsUser(hUser, *procArgs)
            win32security.RevertToSelf()
        else:
            procHandles = win32process.CreateProcess(*procArgs)
        self.hProcess, self.hThread, self.PId, self.TId = procHandles
    def wait(self, mSec=None):
        """
        Wait for process to finish or for specified number of
        milliseconds to elapse.
        """
        if mSec is None:
            mSec = win32event.INFINITE
        return win32event.WaitForSingleObject(self.hProcess, mSec)
    def kill(self, gracePeriod=5000):
        """
        Kill process. Try for an orderly shutdown via WM_CLOSE. If
        still running after gracePeriod (5 sec. default), terminate.
        """
        win32gui.EnumWindows(self.__close__, 0)
        if self.wait(gracePeriod) != win32event.WAIT_OBJECT_0:
            win32process.TerminateProcess(self.hProcess, 0)
            win32api.Sleep(100) # wait for resources to be released
    def __close__(self, hwnd, dummy):
        """
        EnumWindows callback - sends WM_CLOSE to any window
        owned by this process.
        """
        TId, PId = win32process.GetWindowThreadProcessId(hwnd)
        if PId == self.PId:
            win32gui.PostMessage(hwnd, win32con.WM_CLOSE, 0, 0)
    def exitCode(self):
        """
        Return process exit code.
        """
        return win32process.GetExitCodeProcess(self.hProcess)
def run(cmd, mSec=None, stdin=None, stdout=None, stderr=None, **kw):
    """
    Run cmd as a child process and return exit code.
    mSec: terminate cmd after specified number of milliseconds
    stdin, stdout, stderr:
        file objects for child I/O (use hStdin etc. to attach
        handles instead of files); default is caller's stdin,
        stdout & stderr;
    kw: see Process.__init__ for more keyword options
    """
    # Translate Python file objects into Win32 OS handles for Process.
    if stdin is not None:
        kw['hStdin'] = msvcrt.get_osfhandle(stdin.fileno())
    if stdout is not None:
        kw['hStdout'] = msvcrt.get_osfhandle(stdout.fileno())
    if stderr is not None:
        kw['hStderr'] = msvcrt.get_osfhandle(stderr.fileno())
    child = Process(cmd, **kw)
    if child.wait(mSec) != win32event.WAIT_OBJECT_0:
        # Timed out: kill the child and surface the timeout to the caller.
        child.kill()
        raise WindowsError('process timeout exceeded')
    return child.exitCode()
if __name__ == '__main__':
    # Self-test: pipe commands to a shell and display the output in notepad.
    print('Testing winprocess.py...')
    import tempfile
    timeoutSeconds = 15
    cmdString = """\
REM Test of winprocess.py piping commands to a shell.\r
REM This 'notepad' process will terminate in %d seconds.\r
vol\r
net user\r
_this_is_a_test_of_stderr_\r
""" % timeoutSeconds
    cmd_name = tempfile.mktemp()
    out_name = cmd_name + '.txt'
    try:
        cmd = open(cmd_name, "w+b")
        out = open(out_name, "w+b")
        cmd.write(cmdString.encode('mbcs'))
        cmd.seek(0)
        print('CMD.EXE exit code:', run('cmd.exe', show=0, stdin=cmd,
                                        stdout=out, stderr=out))
        cmd.close()
        print('NOTEPAD exit code:', run('notepad.exe %s' % out.name,
                                        show=win32con.SW_MAXIMIZE,
                                        mSec=timeoutSeconds*1000))
        out.close()
    finally:
        # BUG FIX: the original loop called os.unlink(cmd_name) on every
        # iteration, deleting the command file twice and never removing the
        # output file; unlink each temp file via the loop variable instead.
        for n in (cmd_name, out_name):
            try:
                os.unlink(n)
            except os.error:
                pass
mozilla/make.mozilla.org | vendor-local/lib/python/amqplib/client_0_8/abstract_channel.py | 22 | 3464 | """
Code common to Connection and Channel objects.
"""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
from serialization import AMQPWriter
# The 'bytes' builtin only appeared in Python 2.6; alias it to str on
# older interpreters so the rest of the module can use it unconditionally.
try:
    bytes
except NameError:
    # Python 2.5 and lower
    bytes = str
# Public API of this module.
__all__ = [
    'AbstractChannel',
]
class AbstractChannel(object):
    """
    Superclass for both the Connection, which is treated
    as channel 0, and other user-created Channel objects.
    The subclasses must have a _METHOD_MAP class property, mapping
    between AMQP method signatures and Python methods.
    """
    def __init__(self, connection, channel_id):
        self.connection = connection
        self.channel_id = channel_id
        # Register this channel in the connection's channel table.
        connection.channels[channel_id] = self
        self.method_queue = [] # Higher level queue for methods
        self.auto_decode = False
    def __enter__(self):
        """
        Support for Python >= 2.5 'with' statements.
        """
        return self
    def __exit__(self, type, value, traceback):
        """
        Support for Python >= 2.5 'with' statements.
        """
        self.close()
    def _send_method(self, method_sig, args=bytes(), content=None):
        """
        Send a method for our channel.
        """
        # Accept either a pre-serialized byte string or an AMQPWriter.
        if isinstance(args, AMQPWriter):
            args = args.getvalue()
        self.connection.method_writer.write_method(self.channel_id,
            method_sig, args, content)
    def close(self):
        """
        Close this Channel or Connection
        """
        raise NotImplementedError('Must be overriden in subclass')
    def wait(self, allowed_methods=None):
        """
        Wait for a method that matches our allowed_methods parameter (the
        default value of None means match any method), and dispatch to it.
        """
        method_sig, args, content = self.connection._wait_method(
            self.channel_id, allowed_methods)
        return self.dispatch_method(method_sig, args, content)
    def dispatch_method(self, method_sig, args, content):
        """Look up method_sig in _METHOD_MAP and invoke its handler."""
        if content \
        and self.auto_decode \
        and hasattr(content, 'content_encoding'):
            try:
                # Best-effort decode of the body; leave it as-is on failure.
                content.body = content.body.decode(content.content_encoding)
            except Exception:
                pass
        amqp_method = self._METHOD_MAP.get(method_sig, None)
        if amqp_method is None:
            raise Exception('Unknown AMQP method (%d, %d)' % method_sig)
        if content is None:
            return amqp_method(self, args)
        else:
            return amqp_method(self, args, content)
    #
    # Placeholder, the concrete implementations will have to
    # supply their own versions of _METHOD_MAP
    #
    _METHOD_MAP = {}
| bsd-3-clause |
cnbin/linux | scripts/gdb/linux/cpus.py | 997 | 3560 | #
# gdb helper commands and functions for Linux kernel debugging
#
# per-cpu tools
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import tasks, utils
MAX_CPUS = 4096
def get_current_cpu():
    """Return the number of the CPU the debugger is currently stopped on."""
    if utils.get_gdbserver_type() == utils.GDBSERVER_QEMU:
        # QEMU numbers its gdb threads 1..N in CPU order.
        return gdb.selected_thread().num - 1
    elif utils.get_gdbserver_type() == utils.GDBSERVER_KGDB:
        tid = gdb.selected_thread().ptid[2]
        if tid > (0x100000000 - MAX_CPUS - 2):
            # KGDB encodes per-CPU shadow threads near the top of the
            # 32-bit tid space; recover the CPU number from the tid.
            return 0x100000000 - tid - 2
        else:
            # Otherwise tid is a real task pid; read its thread_info.
            return tasks.get_thread_info(tasks.get_task_by_pid(tid))['cpu']
    else:
        raise gdb.GdbError("Sorry, obtaining the current CPU is not yet "
                           "supported with this gdb server.")
def per_cpu(var_ptr, cpu):
    """Return the value of a per-cpu variable for the given CPU.

    var_ptr is a gdb.Value pointing at the per-cpu variable; cpu == -1
    means the CPU of the current debugger context.
    """
    if cpu == -1:
        cpu = get_current_cpu()
    if utils.is_target_arch("sparc:v9"):
        # sparc64 keeps the per-cpu base in the trap block rather than
        # in __per_cpu_offset.
        offset = gdb.parse_and_eval(
            "trap_block[{0}].__per_cpu_base".format(str(cpu)))
    else:
        try:
            offset = gdb.parse_and_eval(
                "__per_cpu_offset[{0}]".format(str(cpu)))
        except gdb.error:
            # !CONFIG_SMP case
            offset = 0
    # Relocate the pointer into this CPU's per-cpu area, preserving type.
    pointer = var_ptr.cast(utils.get_long_type()) + offset
    return pointer.cast(var_ptr.type).dereference()
# Cache of parsed cpumask values, keyed by mask variable name.
cpu_mask = {}


def cpu_mask_invalidate(event):
    """gdb event hook: drop the cpumask cache once the target state may
    have changed (target resumed or a new objfile was loaded)."""
    global cpu_mask
    cpu_mask = {}
    gdb.events.stop.disconnect(cpu_mask_invalidate)
    if hasattr(gdb.events, 'new_objfile'):
        gdb.events.new_objfile.disconnect(cpu_mask_invalidate)
def cpu_list(mask_name):
    """Yield the number of every CPU set in the named kernel cpumask.

    mask_name is the name of a struct cpumask variable in the kernel.
    The parsed mask is cached until the target resumes or a new objfile
    is loaded (see cpu_mask_invalidate).
    """
    global cpu_mask
    mask = None
    if mask_name in cpu_mask:
        mask = cpu_mask[mask_name]
    if mask is None:
        mask = gdb.parse_and_eval(mask_name + ".bits")
        if hasattr(gdb, 'events'):
            cpu_mask[mask_name] = mask
            gdb.events.stop.connect(cpu_mask_invalidate)
            if hasattr(gdb.events, 'new_objfile'):
                gdb.events.new_objfile.connect(cpu_mask_invalidate)

    bits_per_entry = mask[0].type.sizeof * 8
    # Floor division: under Python 3 "/" would make num_entries a float.
    num_entries = mask.type.sizeof * 8 // bits_per_entry
    entry = -1
    bits = 0

    while True:
        # Skip ahead to the next non-zero mask word.
        while bits == 0:
            entry += 1
            if entry == num_entries:
                return
            bits = mask[entry]
            if bits != 0:
                bit = 0
                break

        # Skip ahead to the next set bit within the word.
        while bits & 1 == 0:
            bits >>= 1
            bit += 1

        cpu = entry * bits_per_entry + bit

        bits >>= 1
        bit += 1

        yield cpu
class PerCpu(gdb.Function):
    """Return per-cpu variable.

$lx_per_cpu("VAR"[, CPU]): Return the per-cpu variable called VAR for the
given CPU number. If CPU is omitted, the CPU of the current context is used.

Note that VAR has to be quoted as string."""

    def __init__(self):
        super(PerCpu, self).__init__("lx_per_cpu")

    def invoke(self, var_name, cpu=-1):
        # Evaluate "&VAR" to get a pointer, then relocate it into the
        # requested CPU's per-cpu area.
        var_ptr = gdb.parse_and_eval("&" + var_name.string())
        return per_cpu(var_ptr, cpu)


# Register the convenience function with gdb at import time.
PerCpu()
class LxCurrentFunc(gdb.Function):
    """Return current task.

$lx_current([CPU]): Return the per-cpu task variable for the given CPU
number. If CPU is omitted, the CPU of the current context is used."""

    def __init__(self):
        super(LxCurrentFunc, self).__init__("lx_current")

    def invoke(self, cpu=-1):
        # current_task is a per-cpu task_struct pointer; dereference it
        # to return the task itself.
        var_ptr = gdb.parse_and_eval("&current_task")
        return per_cpu(var_ptr, cpu).dereference()


# Register the convenience function with gdb at import time.
LxCurrentFunc()
| gpl-2.0 |
rue89-tech/edx-platform | common/test/acceptance/tests/studio/test_studio_bad_data.py | 173 | 3970 | from base_studio_test import ContainerBase
from ...fixtures.course import XBlockFixtureDesc
from ...pages.studio.utils import verify_ordering
class BadComponentTest(ContainerBase):
    """
    Tests that components with bad content do not break the Unit page.
    """
    # Abstract base: excluded from test collection.  Concrete subclasses
    # set __test__ = True and override get_bad_html_content().
    __test__ = False

    def get_bad_html_content(self):
        """
        Return the "bad" HTML content that has been problematic for Studio.
        """
        pass

    def populate_course_fixture(self, course_fixture):
        """
        Sets up a course structure with a unit and a HTML component with bad data and a properly constructed problem.
        """
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('html', 'Unit HTML', data=self.get_bad_html_content()),
                        XBlockFixtureDesc('problem', 'Unit Problem', data='<problem></problem>')
                    )
                )
            )
        )

    def test_html_comp_visible(self):
        """
        Tests that bad HTML data within an HTML component doesn't prevent Studio from
        displaying the components on the unit page.
        """
        unit = self.go_to_unit_page()
        # Both components must still render, in their authored order.
        verify_ordering(self, unit, [{"": ["Unit HTML", "Unit Problem"]}])
class CopiedFromLmsBadContentTest(BadComponentTest):
    """
    Tests that components with HTML copied from the LMS (LmsRuntime) do not break the Unit page.
    """
    __test__ = True

    def get_bad_html_content(self):
        """
        Return the "bad" HTML content that has been problematic for Studio.
        """
        # Markup pasted from a rendered LMS page: the xblock wrapper div
        # carries LmsRuntime data- attributes that Studio must tolerate.
        return """
            <div class="xblock xblock-student_view xmodule_display xmodule_HtmlModule xblock-initialized"
                 data-runtime-class="LmsRuntime" data-init="XBlockToXModuleShim" data-block-type="html"
                 data-runtime-version="1" data-type="HTMLModule" data-course-id="GeorgetownX/HUMW-421-01"
                 data-request-token="thisIsNotARealRequestToken"
                 data-usage-id="i4x:;_;_GeorgetownX;_HUMW-421-01;_html;_3010cbbecaa1484da6cf8ba01362346a">
            <p>Copied from LMS HTML component</p></div>
            """
class CopiedFromStudioBadContentTest(BadComponentTest):
    """
    Tests that components with HTML copied from the Studio (containing "ui-sortable" class) do not break the Unit page.
    """
    __test__ = True

    def get_bad_html_content(self):
        """
        Return the "bad" HTML content that has been problematic for Studio.
        """
        # Markup pasted from a Studio page: contains the jQuery UI
        # "ui-sortable" wrapper list that has confused Studio before.
        return """
            <ol class="components ui-sortable">
            <li class="component" data-locator="i4x://Wellesley_College/100/html/6390f1fd3fe640d49580b8415fe1330b"
                data-course-key="Wellesley_College/100/2014_Summer">
            <div class="xblock xblock-student_view xmodule_display xmodule_HtmlModule xblock-initialized"
                 data-runtime-class="PreviewRuntime" data-init="XBlockToXModuleShim" data-runtime-version="1"
                 data-request-token="thisIsNotARealRequestToken"
                 data-usage-id="i4x://Wellesley_College/100/html/6390f1fd3fe640d49580b8415fe1330b"
                 data-type="HTMLModule" data-block-type="html">
            <h2>VOICE COMPARISON </h2>
            <p>You can access the experimental <strong >Voice Comparison</strong> tool at the link below.</p>
            </div>
            </li>
            </ol>
            """
class JSErrorBadContentTest(BadComponentTest):
    """
    Tests that components that throw JS errors do not break the Unit page.
    """
    __test__ = True

    def get_bad_html_content(self):
        """
        Return the "bad" HTML content that has been problematic for Studio.
        """
        # The embedded script references an undefined global, so it
        # throws when the browser evaluates it.
        return "<script>var doesNotExist = BadGlobal.foo;</script>"
| agpl-3.0 |
ivanhorvath/openshift-tools | openshift/installer/vendored/openshift-ansible-3.6.173.0.59/roles/lib_openshift/src/ansible/oc_secret.py | 10 | 1157 | # pylint: skip-file
# flake8: noqa
def main():
    '''
    ansible oc module for managing OpenShift Secrets
    '''
    module = AnsibleModule(
        argument_spec=dict(
            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
            state=dict(default='present', type='str',
                       choices=['present', 'absent', 'list']),
            debug=dict(default=False, type='bool'),
            namespace=dict(default='default', type='str'),
            name=dict(default=None, type='str'),
            type=dict(default=None, type='str'),
            files=dict(default=None, type='list'),
            delete_after=dict(default=False, type='bool'),
            contents=dict(default=None, type='list'),
            force=dict(default=False, type='bool'),
            decode=dict(default=False, type='bool'),
        ),
        # A secret's payload comes from literal contents or from files
        # on disk, never both.
        mutually_exclusive=[["contents", "files"]],
        supports_check_mode=True,
    )

    # Delegate the real work to OCSecret (assembled into this module by
    # the repository's build step); report its result to Ansible.
    rval = OCSecret.run_ansible(module.params, module.check_mode)
    if 'failed' in rval:
        module.fail_json(**rval)

    module.exit_json(**rval)


if __name__ == '__main__':
    main()
| apache-2.0 |
azureplus/hue | desktop/core/ext-py/Paste-2.0.1/tests/test_request_form.py | 47 | 1183 | import six
from paste.request import *
from paste.util.multidict import MultiDict
def test_parse_querystring():
    """parse_querystring returns ordered (key, value) pairs, keeps
    duplicate keys, maps bare/empty keys to '', and caches the parse in
    the environ."""
    e = {'QUERY_STRING': 'a=1&b=2&c=3&b=4'}
    d = parse_querystring(e)
    assert d == [('a', '1'), ('b', '2'), ('c', '3'), ('b', '4')]
    # Result is memoized alongside the query string it was parsed from.
    assert e['paste.parsed_querystring'] == (
        (d, e['QUERY_STRING']))
    e = {'QUERY_STRING': 'a&b&c=&d=1'}
    d = parse_querystring(e)
    assert d == [('a', ''), ('b', ''), ('c', ''), ('d', '1')]
def make_post(body):
    """Build a minimal WSGI environ for a form-encoded POST of *body*."""
    environ = {
        'REQUEST_METHOD': 'POST',
        'CONTENT_TYPE': 'application/x-www-form-urlencoded',
    }
    environ['CONTENT_LENGTH'] = str(len(body))
    environ['wsgi.input'] = six.BytesIO(body)
    return environ
def test_parsevars():
    """parse_formvars decodes a POST body into a MultiDict (duplicate
    keys preserved) and caches the result in the environ."""
    e = make_post(b'a=1&b=2&c=3&b=4')
    #cur_input = e['wsgi.input']
    d = parse_formvars(e)
    assert isinstance(d, MultiDict)
    assert d == MultiDict([('a', '1'), ('b', '2'), ('c', '3'), ('b', '4')])
    # Parsed result is memoized alongside the input stream.
    assert e['paste.parsed_formvars'] == (
        (d, e['wsgi.input']))
    # XXX: http://trac.pythonpaste.org/pythonpaste/ticket/125
    #assert e['wsgi.input'] is not cur_input
    #cur_input.seek(0)
    #assert e['wsgi.input'].read() == cur_input.read()
| apache-2.0 |
tangyiyong/odoo | addons/account_payment/wizard/account_payment_order.py | 338 | 5906 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
from openerp.tools.translate import _
class payment_order_create(osv.osv_memory):
    """
    Create a payment object with lines corresponding to the account move line
    to pay according to the date and the mode provided by the user.
    Hypothesis:
    - Small number of non-reconciled move line, payment mode and bank account type,
    - Big number of partner and bank account.
    If a type is given, unsuitable account Entry lines are ignored.
    """

    _name = 'payment.order.create'
    _description = 'payment.order.create'
    _columns = {
        'duedate': fields.date('Due Date', required=True),
        'entries': fields.many2many('account.move.line', 'line_pay_rel', 'pay_id', 'line_id', 'Entries')
    }
    _defaults = {
        # Default the due date to "today".
        'duedate': lambda *a: time.strftime('%Y-%m-%d'),
    }

    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        # Restrict the 'entries' widget to the move lines preselected by
        # search_entries(), passed through the context as 'line_ids'.
        # NOTE(review): the super() call passes submenu=False instead of
        # forwarding the submenu argument -- verify this is intentional.
        if not context: context = {}
        res = super(payment_order_create, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False)
        if context and 'line_ids' in context:
            doc = etree.XML(res['arch'])
            nodes = doc.xpath("//field[@name='entries']")
            for node in nodes:
                node.set('domain', '[("id", "in", '+ str(context['line_ids'])+')]')
            res['arch'] = etree.tostring(doc)
        return res

    def create_payment(self, cr, uid, ids, context=None):
        # Turn each selected move line into a payment.line on the order
        # found in context['active_id'].
        order_obj = self.pool.get('payment.order')
        line_obj = self.pool.get('account.move.line')
        payment_obj = self.pool.get('payment.line')
        if context is None:
            context = {}
        data = self.browse(cr, uid, ids, context=context)[0]
        line_ids = [entry.id for entry in data.entries]
        if not line_ids:
            return {'type': 'ir.actions.act_window_close'}

        payment = order_obj.browse(cr, uid, context['active_id'], context=context)
        t = None
        line2bank = line_obj.line2bank(cr, uid, line_ids, t, context)

        ## Finally populate the current payment with new lines:
        for line in line_obj.browse(cr, uid, line_ids, context=context):
            if payment.date_prefered == "now":
                #no payment date => immediate payment
                date_to_pay = False
            elif payment.date_prefered == 'due':
                date_to_pay = line.date_maturity
            elif payment.date_prefered == 'fixed':
                date_to_pay = payment.date_scheduled
            # NOTE(review): date_to_pay is unbound if date_prefered holds
            # any other value -- the selection field presumably only
            # allows the three cases above; confirm.
            payment_obj.create(cr, uid,{
                'move_line_id': line.id,
                'amount_currency': line.amount_residual_currency,
                'bank_id': line2bank.get(line.id),
                'order_id': payment.id,
                'partner_id': line.partner_id and line.partner_id.id or False,
                'communication': line.ref or '/',
                'state': line.invoice and line.invoice.reference_type != 'none' and 'structured' or 'normal',
                'date': date_to_pay,
                'currency': (line.invoice and line.invoice.currency_id.id) or line.journal_id.currency.id or line.journal_id.company_id.currency_id.id,
            }, context=context)
        return {'type': 'ir.actions.act_window_close'}

    def search_entries(self, cr, uid, ids, context=None):
        # Find payable, unreconciled move lines due on or before the
        # chosen date, then reopen this wizard with them preselected.
        line_obj = self.pool.get('account.move.line')
        mod_obj = self.pool.get('ir.model.data')
        if context is None:
            context = {}
        data = self.browse(cr, uid, ids, context=context)[0]
        search_due_date = data.duedate
        # payment = self.pool.get('payment.order').browse(cr, uid, context['active_id'], context=context)

        # Search for move line to pay:
        domain = [('reconcile_id', '=', False), ('account_id.type', '=', 'payable'), ('credit', '>', 0), ('account_id.reconcile', '=', True)]
        domain = domain + ['|', ('date_maturity', '<=', search_due_date), ('date_maturity', '=', False)]
        line_ids = line_obj.search(cr, uid, domain, context=context)
        context = dict(context, line_ids=line_ids)
        model_data_ids = mod_obj.search(cr, uid,[('model', '=', 'ir.ui.view'), ('name', '=', 'view_create_payment_order_lines')], context=context)
        resource_id = mod_obj.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
        return {'name': _('Entry Lines'),
                'context': context,
                'view_type': 'form',
                'view_mode': 'form',
                'res_model': 'payment.order.create',
                'views': [(resource_id,'form')],
                'type': 'ir.actions.act_window',
                'target': 'new',
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
open-homeautomation/home-assistant | homeassistant/components/switch/hikvisioncam.py | 23 | 3253 | """
Support turning on/off motion detection on Hikvision cameras.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.hikvision/
"""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, CONF_HOST, CONF_PASSWORD, CONF_USERNAME, CONF_PORT, STATE_OFF,
STATE_ON)
from homeassistant.helpers.entity import ToggleEntity
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['hikvision==0.4']
_LOGGING = logging.getLogger(__name__)
DEFAULT_NAME = 'Hikvision Camera Motion Detection'
DEFAULT_PASSWORD = '12345'
DEFAULT_PORT = 80
DEFAULT_USERNAME = 'admin'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
vol.Optional(CONF_PORT): cv.port,
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup Hikvision camera."""
    import hikvision.api
    from hikvision.error import HikvisionError, MissingParamError

    host = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    name = config.get(CONF_NAME)
    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)

    try:
        # Connect to the camera's HTTP API (https not supported here).
        hikvision_cam = hikvision.api.CreateDevice(
            host, port=port, username=username, password=password,
            is_https=False)
    except MissingParamError as param_err:
        _LOGGING.error("Missing required param: %s", param_err)
        return False
    except HikvisionError as conn_err:
        _LOGGING.error("Unable to connect: %s", conn_err)
        return False

    # Register a single switch entity wrapping this camera.
    add_devices([HikvisionMotionSwitch(name, hikvision_cam)])
class HikvisionMotionSwitch(ToggleEntity):
    """Representation of a switch to toggle on/off motion detection."""

    def __init__(self, name, hikvision_cam):
        """Initialize the switch."""
        self._name = name
        self._hikvision_cam = hikvision_cam
        # Assume off until the first update() polls the camera.
        self._state = STATE_OFF

    @property
    def should_poll(self):
        """Poll for status regularly."""
        return True

    @property
    def name(self):
        """Return the name of the device if any."""
        return self._name

    @property
    def state(self):
        """Return the state of the device if any."""
        return self._state

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state == STATE_ON

    def turn_on(self, **kwargs):
        """Turn the device on."""
        # State is not updated here; the next poll reflects the change.
        _LOGGING.info("Turning on Motion Detection ")
        self._hikvision_cam.enable_motion_detection()

    def turn_off(self, **kwargs):
        """Turn the device off."""
        _LOGGING.info("Turning off Motion Detection ")
        self._hikvision_cam.disable_motion_detection()

    def update(self):
        """Update Motion Detection state."""
        enabled = self._hikvision_cam.is_motion_detection_enabled()
        _LOGGING.info("enabled: %s", enabled)

        self._state = STATE_ON if enabled else STATE_OFF
| apache-2.0 |
sogelink/ansible | test/units/modules/network/ios/test_ios_banner.py | 62 | 2314 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.ios import ios_banner
from .ios_module import TestIosModule, load_fixture, set_module_args
class TestIosBannerModule(TestIosModule):
    # Unit tests for the ios_banner Ansible module, with device I/O
    # (exec_command / load_config) patched out.

    module = ios_banner

    def setUp(self):
        self.mock_exec_command = patch('ansible.modules.network.ios.ios_banner.exec_command')
        self.exec_command = self.mock_exec_command.start()

        self.mock_load_config = patch('ansible.modules.network.ios.ios_banner.load_config')
        self.load_config = self.mock_load_config.start()

    def tearDown(self):
        self.mock_exec_command.stop()
        self.mock_load_config.stop()

    def load_fixtures(self, commands=None):
        # The device "shows" the canned banner fixture; config loads
        # succeed without a diff.
        self.exec_command.return_value = (0, load_fixture('ios_banner_show_banner.txt').strip(), None)
        self.load_config.return_value = dict(diff=None, session='session')

    def test_ios_banner_create(self):
        set_module_args(dict(banner='login', text='test\nbanner\nstring'))
        commands = ['banner login @\ntest\nbanner\nstring\n@']
        self.execute_module(changed=True, commands=commands)

    def test_ios_banner_remove(self):
        set_module_args(dict(banner='login', state='absent'))
        commands = ['no banner login']
        self.execute_module(changed=True, commands=commands)

    def test_ios_banner_nochange(self):
        # Setting the banner to what the device already has is a no-op.
        banner_text = load_fixture('ios_banner_show_banner.txt').strip()
        set_module_args(dict(banner='login', text=banner_text))
        self.execute_module()
| gpl-3.0 |
google/tink | python/tink/jwt/_jwt_key_templates.py | 1 | 4813 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
"""Pre-generated JWT KeyTemplate."""
from tink.proto import jwt_ecdsa_pb2
from tink.proto import jwt_hmac_pb2
from tink.proto import jwt_rsa_ssa_pkcs1_pb2
from tink.proto import jwt_rsa_ssa_pss_pb2
from tink.proto import tink_pb2
_F4 = 65537
# TODO(juerg): Add TINK key templates.
def _create_jwt_hmac_template(algorithm: jwt_hmac_pb2.JwtHmacAlgorithm,
                              key_size: int) -> tink_pb2.KeyTemplate:
    """Build a KeyTemplate for a JwtHmacKey with the given algorithm and
    key size (in bytes), using the RAW output prefix."""
    key_format = jwt_hmac_pb2.JwtHmacKeyFormat(
        algorithm=algorithm, key_size=key_size)
    return tink_pb2.KeyTemplate(
        type_url='type.googleapis.com/google.crypto.tink.JwtHmacKey',
        value=key_format.SerializeToString(),
        output_prefix_type=tink_pb2.RAW)
def _create_jwt_ecdsa_template(
    algorithm: jwt_ecdsa_pb2.JwtEcdsaAlgorithm) -> tink_pb2.KeyTemplate:
  """Build a KeyTemplate for a JwtEcdsaPrivateKey with the given
  algorithm, using the RAW output prefix."""
  key_format = jwt_ecdsa_pb2.JwtEcdsaKeyFormat(
      algorithm=algorithm)
  return tink_pb2.KeyTemplate(
      type_url='type.googleapis.com/google.crypto.tink.JwtEcdsaPrivateKey',
      value=key_format.SerializeToString(),
      output_prefix_type=tink_pb2.RAW)
# TODO(juerg): Move this function into a util lib.
def _num_to_bytes(n: int) -> bytes:
"""Converts a number to bytes."""
if n < 0:
raise OverflowError("number can't be negative")
if n == 0:
return b'\x00'
octets = bytearray()
while n:
octets.append(n % 256)
n //= 256
return bytes(octets[::-1])
def _create_jwt_rsa_ssa_pkcs1_template(
    algorithm: jwt_rsa_ssa_pkcs1_pb2.JwtRsaSsaPkcs1Algorithm,
    modulus_size: int
) -> tink_pb2.KeyTemplate:
  """Build a KeyTemplate for a JwtRsaSsaPkcs1PrivateKey with the given
  algorithm and modulus size (bits); public exponent is fixed to F4."""
  key_format = jwt_rsa_ssa_pkcs1_pb2.JwtRsaSsaPkcs1KeyFormat(
      algorithm=algorithm,
      modulus_size_in_bits=modulus_size,
      public_exponent=_num_to_bytes(_F4))
  return tink_pb2.KeyTemplate(
      type_url='type.googleapis.com/google.crypto.tink.JwtRsaSsaPkcs1PrivateKey',
      value=key_format.SerializeToString(),
      output_prefix_type=tink_pb2.RAW)
def _create_jwt_rsa_ssa_pss_template(
    algorithm: jwt_rsa_ssa_pss_pb2.JwtRsaSsaPssAlgorithm,
    modulus_size: int
) -> tink_pb2.KeyTemplate:
  """Build a KeyTemplate for a JwtRsaSsaPssPrivateKey with the given
  algorithm and modulus size (bits); public exponent is fixed to F4."""
  key_format = jwt_rsa_ssa_pss_pb2.JwtRsaSsaPssKeyFormat(
      algorithm=algorithm,
      modulus_size_in_bits=modulus_size,
      public_exponent=_num_to_bytes(_F4))
  return tink_pb2.KeyTemplate(
      type_url='type.googleapis.com/google.crypto.tink.JwtRsaSsaPssPrivateKey',
      value=key_format.SerializeToString(),
      output_prefix_type=tink_pb2.RAW)
# Hmac Templates (symmetric MAC-based JWTs).
def jwt_hs256_template() -> tink_pb2.KeyTemplate:
  return _create_jwt_hmac_template(jwt_hmac_pb2.HS256, 32)


def jwt_hs384_template() -> tink_pb2.KeyTemplate:
  return _create_jwt_hmac_template(jwt_hmac_pb2.HS384, 48)


def jwt_hs512_template() -> tink_pb2.KeyTemplate:
  return _create_jwt_hmac_template(jwt_hmac_pb2.HS512, 64)


# ECDSA Templates (elliptic-curve signature JWTs).
def jwt_es256_template() -> tink_pb2.KeyTemplate:
  return _create_jwt_ecdsa_template(jwt_ecdsa_pb2.ES256)


def jwt_es384_template() -> tink_pb2.KeyTemplate:
  return _create_jwt_ecdsa_template(jwt_ecdsa_pb2.ES384)


def jwt_es512_template() -> tink_pb2.KeyTemplate:
  return _create_jwt_ecdsa_template(jwt_ecdsa_pb2.ES512)


# RSA SSA PKCS1 Templates (RSxxx; name encodes hash and modulus bits).
def jwt_rs256_2048_f4_template() -> tink_pb2.KeyTemplate:
  return _create_jwt_rsa_ssa_pkcs1_template(jwt_rsa_ssa_pkcs1_pb2.RS256, 2048)


def jwt_rs256_3072_f4_template() -> tink_pb2.KeyTemplate:
  return _create_jwt_rsa_ssa_pkcs1_template(jwt_rsa_ssa_pkcs1_pb2.RS256, 3072)


def jwt_rs384_3072_f4_template() -> tink_pb2.KeyTemplate:
  return _create_jwt_rsa_ssa_pkcs1_template(jwt_rsa_ssa_pkcs1_pb2.RS384, 3072)


def jwt_rs512_4096_f4_template() -> tink_pb2.KeyTemplate:
  return _create_jwt_rsa_ssa_pkcs1_template(jwt_rsa_ssa_pkcs1_pb2.RS512, 4096)


# RSA SSA PSS Templates (PSxxx; name encodes hash and modulus bits).
def jwt_ps256_2048_f4_template() -> tink_pb2.KeyTemplate:
  return _create_jwt_rsa_ssa_pss_template(jwt_rsa_ssa_pss_pb2.PS256, 2048)


def jwt_ps256_3072_f4_template() -> tink_pb2.KeyTemplate:
  return _create_jwt_rsa_ssa_pss_template(jwt_rsa_ssa_pss_pb2.PS256, 3072)


def jwt_ps384_3072_f4_template() -> tink_pb2.KeyTemplate:
  return _create_jwt_rsa_ssa_pss_template(jwt_rsa_ssa_pss_pb2.PS384, 3072)


def jwt_ps512_4096_f4_template() -> tink_pb2.KeyTemplate:
  return _create_jwt_rsa_ssa_pss_template(jwt_rsa_ssa_pss_pb2.PS512, 4096)
| apache-2.0 |
xujun10110/w3a_Scan_Console | plugin/NmapModule.py | 2 | 1554 | #!/usr/bin/python
#-*- encoding: utf-8 -*-
###########################################
#
# Nmap 扫描类(生成扫描结果)
#
###########################################
import nmap
import time
import sys
class Web_Scan_Frame:
    """Nmap scan plugin: runs a port/OS scan and logs the results."""

    def setScan_Main(self, scan_main):
        # Keep a reference to the framework object used for logging.
        self.scan_main = scan_main

    def start(self, target, temple):
        # 'temple' is the scan template: if the template enables this
        # module the scan should run, otherwise it should be skipped.
        # (Template filtering is not implemented yet.)
        self.__object_do(target)

    # Arguments: scan target(s); the scan template should also be
    # consulted here to decide whether this module is enabled.
    def __object_do(self, target):
        self.scan_main.print_log('Nmap Scan Target: %s' % target)
        nm = nmap.PortScanner()
        # Accept either one target or several ';'-separated ones.
        # BUGFIX: the original compared the split list itself against 2
        # (always true on Python 2, TypeError on Python 3); compare its
        # length instead.
        if len(target.split(';')) >= 2:
            self.target_list = " ".join(target.split(';'))
        else:
            self.target_list = target
        nm.scan(hosts=self.target_list, arguments='-T4 -O')
        # Walk the scan results and log them.
        for ip in nm.all_hosts():
            self.scan_main.print_log('Result ip: %s' % ip)
            for item in nm[ip].all_protocols():
                # OS fingerprint matches
                if item == "osmatch":
                    for os in nm[ip]['osmatch']:
                        self.scan_main.print_log("os name: %s, persend: %s%% " % (os['name'], os['accuracy']))
                elif item == "tcp":
                    for port in nm[ip]['tcp'].keys():
                        self.scan_main.print_log("port: %s ,status: %s , servie: %s" % (port, nm[ip]['tcp'][port]['state'], nm[ip]['tcp'][port]['name']))

    def stop(self):
        self.scan_main.print_log("Finish Nmap target:%s" % self.target_list)
def getPluginClass():
    # Plugin entry point: the loader calls this to obtain the scan class.
    return Web_Scan_Frame
| gpl-2.0 |
abhinavp13/IITBX-edx-platform-dev | common/lib/xmodule/xmodule/modulestore/tests/django_utils.py | 2 | 5804 |
import copy
from uuid import uuid4
from django.test import TestCase
from django.conf import settings
import xmodule.modulestore.django
from xmodule.templates import update_templates
def mongo_store_config(data_dir):
    """
    Build a MODULESTORE setting that uses MongoModuleStore.

    'default' and 'direct' share the very same configuration dict, and
    each call generates a fresh, uniquely named mongo collection.
    Use of this config requires mongo to be running.
    """
    options = {
        'default_class': 'xmodule.raw_module.RawDescriptor',
        'host': 'localhost',
        'db': 'test_xmodule',
        'collection': 'modulestore_%s' % uuid4().hex,
        'fs_root': data_dir,
        'render_template': 'mitxmako.shortcuts.render_to_string',
    }
    default = {
        'ENGINE': 'xmodule.modulestore.mongo.MongoModuleStore',
        'OPTIONS': options,
    }
    return {'default': default, 'direct': default}
def draft_mongo_store_config(data_dir):
    """
    Build a MODULESTORE setting that uses DraftMongoModuleStore.

    'default' goes through the draft-aware store while 'direct' talks to
    the plain MongoModuleStore; both entries share one options dict with
    a uniquely named collection per call.
    """
    options = {
        'default_class': 'xmodule.raw_module.RawDescriptor',
        'host': 'localhost',
        'db': 'test_xmodule',
        'collection': 'modulestore_%s' % uuid4().hex,
        'fs_root': data_dir,
        'render_template': 'mitxmako.shortcuts.render_to_string',
    }
    return {
        'default': {
            'ENGINE': 'xmodule.modulestore.mongo.DraftMongoModuleStore',
            'OPTIONS': options,
        },
        'direct': {
            'ENGINE': 'xmodule.modulestore.mongo.MongoModuleStore',
            'OPTIONS': options,
        },
    }
def xml_store_config(data_dir):
    """
    Build a MODULESTORE setting that reads courses from XML on disk via
    the read-only XMLModuleStore.
    """
    engine = 'xmodule.modulestore.xml.XMLModuleStore'
    options = {
        'data_dir': data_dir,
        'default_class': 'xmodule.hidden_module.HiddenDescriptor',
    }
    return {'default': {'ENGINE': engine, 'OPTIONS': options}}
class ModuleStoreTestCase(TestCase):
    """ Subclass for any test case that uses the mongodb
    module store. This populates a uniquely named modulestore
    collection with templates before running the TestCase
    and drops it they are finished. """

    @staticmethod
    def update_course(course, data):
        """
        Updates the version of course in the modulestore
        with the metadata in 'data' and returns the updated version.

        'course' is an instance of CourseDescriptor for which we want
        to update metadata.

        'data' is a dictionary with an entry for each CourseField we want to update.
        """
        store = xmodule.modulestore.django.modulestore()
        store.update_metadata(course.location, data)
        # Re-fetch so the caller gets the persisted version.
        updated_course = store.get_instance(course.id, course.location)
        return updated_course

    @staticmethod
    def flush_mongo_except_templates():
        """
        Delete everything in the module store except templates.
        """
        modulestore = xmodule.modulestore.django.modulestore()

        # This query means: every item in the collection
        # that is not a template
        query = {"_id.course": {"$ne": "templates"}}

        # Remove everything except templates
        modulestore.collection.remove(query)
        modulestore.collection.drop()

    @staticmethod
    def load_templates_if_necessary():
        """
        Load templates into the direct modulestore only if they do not already exist.
        We need the templates, because they are copied to create
        XModules such as sections and problems.
        """
        modulestore = xmodule.modulestore.django.modulestore('direct')

        # Count the number of templates
        query = {"_id.course": "templates"}
        num_templates = modulestore.collection.find(query).count()

        if num_templates < 1:
            update_templates(modulestore)

    @classmethod
    def setUpClass(cls):
        """
        Flush the mongo store and set up templates.
        """

        # Use a uuid to differentiate
        # the mongo collections on jenkins.
        cls.orig_modulestore = copy.deepcopy(settings.MODULESTORE)
        if 'direct' not in settings.MODULESTORE:
            settings.MODULESTORE['direct'] = settings.MODULESTORE['default']

        settings.MODULESTORE['default']['OPTIONS']['collection'] = 'modulestore_%s' % uuid4().hex
        settings.MODULESTORE['direct']['OPTIONS']['collection'] = 'modulestore_%s' % uuid4().hex
        # Clear the cached store instances so the new settings take effect.
        xmodule.modulestore.django._MODULESTORES.clear()

        # Debug output (Python 2 print statement).
        print settings.MODULESTORE

        TestCase.setUpClass()

    @classmethod
    def tearDownClass(cls):
        """
        Revert to the old modulestore settings.
        """

        # Clean up by dropping the collection
        modulestore = xmodule.modulestore.django.modulestore()
        modulestore.collection.drop()

        xmodule.modulestore.django._MODULESTORES.clear()

        # Restore the original modulestore settings
        settings.MODULESTORE = cls.orig_modulestore

    def _pre_setup(self):
        """
        Remove everything but the templates before each test.
        """

        # Flush anything that is not a template
        ModuleStoreTestCase.flush_mongo_except_templates()

        # Check that we have templates loaded; if not, load them
        ModuleStoreTestCase.load_templates_if_necessary()

        # Call superclass implementation
        super(ModuleStoreTestCase, self)._pre_setup()

    def _post_teardown(self):
        """
        Flush everything we created except the templates.
        """
        # Flush anything that is not a template
        ModuleStoreTestCase.flush_mongo_except_templates()

        # Call superclass implementation
        super(ModuleStoreTestCase, self)._post_teardown()
| agpl-3.0 |
ramrom/haus | gmail.py | 1 | 2126 | #!/usr/local/bin/python
import httplib2
import os, pdb
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/gmail-python-quickstart.json
SCOPES = 'https://www.googleapis.com/auth/gmail.readonly'
CLIENT_SECRET_FILE = '/Users/smittapalli/.creds/gcloud_oauth2_webapp_haus.json'
APPLICATION_NAME = 'Gmail API Python Quickstart'
def get_credentials():
    """Gets valid user credentials from storage.

    If nothing has been stored, or if the stored credentials are invalid,
    the OAuth2 flow is completed to obtain the new credentials.

    Returns:
        Credentials, the obtained credential.
    """
    # Cached credentials live under ~/.creds/.
    home_dir = os.path.expanduser('~')
    credential_dir = os.path.join(home_dir, '.creds')
    credential_path = os.path.join(credential_dir, 'gmail-python-quickstart.json')

    store = Storage(credential_path)
    credentials = store.get()
    if not credentials or credentials.invalid:
        # No usable cached credentials: run the interactive OAuth2 flow.
        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        flow.user_agent = APPLICATION_NAME
        if flags:
            credentials = tools.run_flow(flow, store, flags)
        else:  # Needed only for compatibility with Python 2.6
            credentials = tools.run(flow, store)
        print('Storing credentials to ' + credential_path)
    return credentials
def main():
    """Shows basic usage of the Gmail API.

    Creates a Gmail API service object and outputs a list of label names
    of the user's Gmail account.
    """
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    service = discovery.build('gmail', 'v1', http=http)

    # 'me' refers to the authenticated user.
    results = service.users().labels().list(userId='me').execute()
    labels = results.get('labels', [])

    if not labels:
        print('No labels found.')
    else:
        print('Labels:')
        for label in labels:
            print(label['name'])
if __name__ == '__main__':
    # Run the Gmail label listing.  (Previously this dropped into
    # pdb.set_trace() with main() commented out -- leftover debug code.)
    main()
| mit |
Charlotte-Morgan/inasafe | safe_extras/raven/utils/compat.py | 12 | 5676 | """
raven.utils.compat
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2016 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
Utilities for writing code that runs on Python 2 and 3
"""
# flake8: noqa
# Copyright (c) 2010-2013 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import
import operator
import sys
import types
# True on the corresponding major version of the running interpreter.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
    # Python 3: text/binary split is str/bytes; single int and class type.
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes
else:
    # Python 2: cover str/unicode, int/long and old-style classes.
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str
try:
    # Python 2.6+ / 3: the builtin next() exists.
    advance_iterator = next
except NameError:
    # Very old Python 2: fall back to calling the .next() method.
    def advance_iterator(it):
        return it.next()
next = advance_iterator
try:
    # callable() is absent on Python 3.0/3.1; re-use the builtin if present.
    callable = callable
except NameError:
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
    # Python 3 iterators implement __next__ natively; no shim needed.
    Iterator = object
else:
    # Base class mapping the Python 2 .next() protocol onto __next__.
    class Iterator(object):
        def next(self):
            return type(self).__next__(self)
if PY3:
    # Lazy dict-iteration helpers; on Python 3 the view methods are
    # already lazy, wrap in iter() only for interface uniformity.
    def iterkeys(d, **kw):
        return iter(d.keys(**kw))
    def itervalues(d, **kw):
        return iter(d.values(**kw))
    def iteritems(d, **kw):
        return iter(d.items(**kw))
    def iterlists(d, **kw):
        return iter(d.lists(**kw))
else:
    # Python 2 exposes the lazy variants under the iter* names directly.
    def iterkeys(d, **kw):
        return d.iterkeys(**kw)
    def itervalues(d, **kw):
        return d.itervalues(**kw)
    def iteritems(d, **kw):
        return d.iteritems(**kw)
    def iterlists(d, **kw):
        return d.iterlists(**kw)
if PY3:
    # b() builds a byte string from a latin-1 literal; u() is a no-op.
    def b(s):
        return s.encode("latin-1")
    def u(s):
        return s
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
else:
    # Python 2: str literals are already bytes.
    def b(s):
        return s
    # Workaround for standalone backslash
    def u(s):
        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
if PY3:
    # exec is an ordinary builtin function on Python 3.
    exec_ = getattr(__import__('builtins'), 'exec')
    def reraise(tp, value, tb=None):
        # Re-raise `value` with traceback `tb`; the finally block clears
        # locals to break reference cycles through the traceback frame.
        try:
            if value is None:
                value = tp()
            if value.__traceback__ is not tb:
                raise value.with_traceback(tb)
            raise value
        finally:
            value = None
            tb = None
else:
    # Python 2: exec is a statement; emulate the 3-argument function form.
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")
    # Defined via exec_ because the 3-argument raise statement is a
    # syntax error under Python 3.
    exec_("""def reraise(tp, value, tb=None):
    try:
        raise tp, value, tb
    finally:
        tb = None
""")
if sys.version_info[:2] == (3, 2):
    # `raise ... from ...` is Python 3 syntax; build it via exec_ so this
    # module still byte-compiles on Python 2.
    exec_("""def raise_from(value, from_value):
    try:
        if from_value is None:
            raise value
        raise value from from_value
    finally:
        value = None
""")
elif sys.version_info[:2] > (3, 2):
    exec_("""def raise_from(value, from_value):
    try:
        raise value from from_value
    finally:
        value = None
""")
else:
    # Python 2: exception chaining is unavailable; drop the cause.
    def raise_from(value, from_value):
        raise value
if PY3:
    # Unified names for the modules relocated in the Python 3 stdlib.
    from urllib.error import HTTPError
    from http import client as httplib
    import urllib.request as urllib2
    from queue import Queue
    from urllib.parse import quote as urllib_quote
    from urllib import parse as urlparse
else:
    from urllib2 import HTTPError
    import httplib
    import urllib2
    from Queue import Queue
    from urllib import quote as urllib_quote
    import urlparse
def get_code(func):
    """Return the code object behind *func*.

    Looks for the Python 3 attribute name first, then the Python 2 one,
    and raises TypeError when neither is present.
    """
    for attr in ('__code__', 'func_code'):
        code = getattr(func, attr, None)
        if code is not None:
            return code
    raise TypeError('Could not get code from %r' % type(func).__name__)
def check_threads():
    """Warn when running under uwsgi with threading disabled.

    Does nothing outside of uwsgi, or when the ``threads`` option is
    passed (which implies ``enable-threads``).
    """
    try:
        from uwsgi import opt
    except ImportError:
        # Not running under uwsgi at all.
        return
    # When `threads` is passed in as a uwsgi option,
    # `enable-threads` is implied on.
    if 'threads' in opt:
        return
    enabled = str(opt.get('enable-threads', '0')).lower()
    if enabled not in ('false', 'off', 'no', '0'):
        return
    from warnings import warn
    warn(Warning('We detected the use of uwsgi with disabled threads. '
                 'This will cause issues with the transport you are '
                 'trying to use. Please enable threading for uwsgi. '
                 '(Enable the "enable-threads" flag).'))
| gpl-3.0 |
jandecaluwe/myhdl | example/arith_lib/test_LeadZeroDet.py | 4 | 1809 | import unittest
from unittest import TestCase
import myhdl
from myhdl import *
from arith_utils import BEHAVIOR, STRUCTURE
from arith_utils import SLOW, FAST
from LeadZeroDet import LeadZeroDet
import random
# Seed the module RNG so the randomly sampled test vectors are
# reproducible across runs. (The previous `random.seed = 1` merely
# rebound the `seed` attribute to the integer 1 and never actually
# seeded the generator.)
random.seed(1)
from random import random
class LeadZeroDetTest(TestCase):
    """Leading zeroes detector unit test class."""
    def bench(self, width, speed, nrsamples=0):
        """Leading zeroes detector test bench.

        width     -- detector input bit width
        speed     -- SLOW, MEDIUM or FAST
        nrsamples -- required number of random samples, or exhaustive
                     test if not set (default)
        """
        A = Signal(intbv(0))
        ZS = Signal(intbv(0))
        ZB = Signal(intbv(0))
        behavioral = LeadZeroDet(width, speed, A, ZB, architecture=BEHAVIOR)
        # Renamed from `str`, which shadowed the builtin.
        structural = LeadZeroDet(width, speed, A, ZS, architecture=STRUCTURE)
        @instance
        def stimulus():
            if nrsamples:
                vals = [int(random()*(2**width)) for i in range(nrsamples)]
            else:
                vals = range(2**width)
            for i in vals:
                A.next = intbv(i)
                yield delay(10)
                # Both architectures must agree on every input.
                self.assertEqual(ZS, ZB)
        return (behavioral, structural, stimulus)
    def testLeadZeroDetSmallSlow(self):
        Simulation(self.bench(width=8, speed=SLOW)).run()
    def testLeadZeroDetLargeSlow(self):
        Simulation(self.bench(width=39, speed=SLOW, nrsamples=16)).run()
    def testLeadZeroDetSmallFast(self):
        Simulation(self.bench(width=8, speed=FAST)).run()
    def testLeadZeroDetLargeFast(self):
        Simulation(self.bench(width=39, speed=FAST, nrsamples=16)).run()
if __name__ == "__main__":
    # Run the full test suite when executed directly.
    unittest.main()
| lgpl-2.1 |
jgeskens/django | tests/model_fields/models.py | 6 | 7517 | import os
import tempfile
from django.core.exceptions import ImproperlyConfigured
try:
from django.utils.image import Image
except ImproperlyConfigured:
Image = None
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.db.models.fields.files import ImageFieldFile, ImageField
class Foo(models.Model):
    # Simple model used as the FK target for Bar below.
    a = models.CharField(max_length=10)
    d = models.DecimalField(max_digits=5, decimal_places=3)
def get_foo():
    # Callable default for Bar.a; evaluated lazily at instance creation.
    return Foo.objects.get(id=1)
class Bar(models.Model):
    b = models.CharField(max_length=10)
    a = models.ForeignKey(Foo, default=get_foo)
class Whiz(models.Model):
    # Exercises grouped choices (optgroups) plus a flat choice.
    CHOICES = (
        ('Group 1', (
                (1,'First'),
                (2,'Second'),
            )
        ),
        ('Group 2', (
                (3,'Third'),
                (4,'Fourth'),
            )
        ),
        (0,'Other'),
    )
    c = models.IntegerField(choices=CHOICES, null=True)
class BigD(models.Model):
    # Large-precision decimal.
    d = models.DecimalField(max_digits=38, decimal_places=30)
class BigS(models.Model):
    # Slug longer than the default 50-char max_length.
    s = models.SlugField(max_length=255)
class BigInt(models.Model):
    value = models.BigIntegerField()
    null_value = models.BigIntegerField(null = True, blank = True)
class Post(models.Model):
    title = models.CharField(max_length=100)
    body = models.TextField()
class NullBooleanModel(models.Model):
    nbfield = models.NullBooleanField()
class BooleanModel(models.Model):
    bfield = models.BooleanField()
    string = models.CharField(max_length=10, default='abc')
class FksToBooleans(models.Model):
    """Model with FKs to models with {Null,}BooleanField's, #15040"""
    bf = models.ForeignKey(BooleanModel)
    nbf = models.ForeignKey(NullBooleanModel)
class RenamedField(models.Model):
    # Field whose attribute name differs from its column/name argument.
    modelname = models.IntegerField(name="fieldname", choices=((1,'One'),))
class VerboseNameField(models.Model):
    # One field per built-in field type, each with an explicit verbose
    # name, to test verbose_name propagation.
    id = models.AutoField("verbose pk", primary_key=True)
    field1 = models.BigIntegerField("verbose field1")
    field2 = models.BooleanField("verbose field2")
    field3 = models.CharField("verbose field3", max_length=10)
    field4 = models.CommaSeparatedIntegerField("verbose field4", max_length=99)
    field5 = models.DateField("verbose field5")
    field6 = models.DateTimeField("verbose field6")
    field7 = models.DecimalField("verbose field7", max_digits=6, decimal_places=1)
    field8 = models.EmailField("verbose field8")
    field9 = models.FileField("verbose field9", upload_to="unused")
    field10 = models.FilePathField("verbose field10")
    field11 = models.FloatField("verbose field11")
    # Don't want to depend on Pillow/PIL in this test
    #field_image = models.ImageField("verbose field")
    field12 = models.IntegerField("verbose field12")
    field13 = models.IPAddressField("verbose field13")
    field14 = models.GenericIPAddressField("verbose field14", protocol="ipv4")
    field15 = models.NullBooleanField("verbose field15")
    field16 = models.PositiveIntegerField("verbose field16")
    field17 = models.PositiveSmallIntegerField("verbose field17")
    field18 = models.SlugField("verbose field18")
    field19 = models.SmallIntegerField("verbose field19")
    field20 = models.TextField("verbose field20")
    field21 = models.TimeField("verbose field21")
    field22 = models.URLField("verbose field22")
# This model isn't used in any test, just here to ensure it validates successfully.
# See ticket #16570.
class DecimalLessThanOne(models.Model):
    d = models.DecimalField(max_digits=3, decimal_places=3)
class DataModel(models.Model):
    # BinaryField round-trip model; default is raw bytes.
    short_data = models.BinaryField(max_length=10, default=b'\x08')
    data = models.BinaryField()
###############################################################################
# FileField
class Document(models.Model):
    myfile = models.FileField(upload_to='unused')
###############################################################################
# ImageField
# If Pillow/PIL available, do these tests.
if Image:
    # These models are only defined when Pillow/PIL is importable, since
    # ImageField requires it.
    class TestImageFieldFile(ImageFieldFile):
        """
        Custom Field File class that records whether or not the underlying file
        was opened.
        """
        def __init__(self, *args, **kwargs):
            self.was_opened = False
            super(TestImageFieldFile, self).__init__(*args,**kwargs)
        def open(self):
            self.was_opened = True
            super(TestImageFieldFile, self).open()
    class TestImageField(ImageField):
        attr_class = TestImageFieldFile
    # Set up a temp directory for file storage.
    temp_storage_dir = tempfile.mkdtemp()
    temp_storage = FileSystemStorage(temp_storage_dir)
    temp_upload_to_dir = os.path.join(temp_storage.location, 'tests')
    class Person(models.Model):
        """
        Model that defines an ImageField with no dimension fields.
        """
        name = models.CharField(max_length=50)
        mugshot = TestImageField(storage=temp_storage, upload_to='tests')
    class PersonWithHeight(models.Model):
        """
        Model that defines an ImageField with only one dimension field.
        """
        name = models.CharField(max_length=50)
        mugshot = TestImageField(storage=temp_storage, upload_to='tests',
                                 height_field='mugshot_height')
        mugshot_height = models.PositiveSmallIntegerField()
    class PersonWithHeightAndWidth(models.Model):
        """
        Model that defines height and width fields after the ImageField.
        """
        name = models.CharField(max_length=50)
        mugshot = TestImageField(storage=temp_storage, upload_to='tests',
                                 height_field='mugshot_height',
                                 width_field='mugshot_width')
        mugshot_height = models.PositiveSmallIntegerField()
        mugshot_width = models.PositiveSmallIntegerField()
    class PersonDimensionsFirst(models.Model):
        """
        Model that defines height and width fields before the ImageField.
        """
        name = models.CharField(max_length=50)
        mugshot_height = models.PositiveSmallIntegerField()
        mugshot_width = models.PositiveSmallIntegerField()
        mugshot = TestImageField(storage=temp_storage, upload_to='tests',
                                 height_field='mugshot_height',
                                 width_field='mugshot_width')
    class PersonTwoImages(models.Model):
        """
        Model that:
        * Defines two ImageFields
        * Defines the height/width fields before the ImageFields
        * Has a nullable ImageField
        """
        name = models.CharField(max_length=50)
        mugshot_height = models.PositiveSmallIntegerField()
        mugshot_width = models.PositiveSmallIntegerField()
        mugshot = TestImageField(storage=temp_storage, upload_to='tests',
                                 height_field='mugshot_height',
                                 width_field='mugshot_width')
        headshot_height = models.PositiveSmallIntegerField(
            blank=True, null=True)
        headshot_width = models.PositiveSmallIntegerField(
            blank=True, null=True)
        headshot = TestImageField(blank=True, null=True,
                                  storage=temp_storage, upload_to='tests',
                                  height_field='headshot_height',
                                  width_field='headshot_width')
###############################################################################
| bsd-3-clause |
reyoung/Paddle | python/paddle/fluid/tests/unittests/test_beam_search_decode_op.py | 5 | 3938 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.op import Operator
class TestBeamSearchDecodeOp(unittest.TestCase):
    """unittest of beam_search_decode_op"""
    def setUp(self):
        self.scope = core.Scope()
        self.place = core.CPUPlace()
    def append_lod_tensor(self, tensor_array, lod, data):
        """Append one LoDTensor built from (lod, data) to tensor_array."""
        lod_tensor = core.LoDTensor()
        lod_tensor.set_lod(lod)
        lod_tensor.set(data, self.place)
        tensor_array.append(lod_tensor)
    def _append_step(self, ids, scores, lod, values):
        # Append one beam-search step: the same lod/values go into both the
        # ids array (as int64) and the scores array (as float32). Replaces
        # the previous list comprehensions that were used purely for their
        # side effects and built throwaway lists.
        for array, dtype in ((ids, "int64"), (scores, "float32")):
            self.append_lod_tensor(array, lod, np.array(values, dtype=dtype))
    def test_get_set(self):
        ids = self.scope.var("ids").get_lod_tensor_array()
        scores = self.scope.var("scores").get_lod_tensor_array()
        # Construct sample data with 5 steps and 2 source sentences
        # beam_size = 2, end_id = 1
        # start with start_id
        self._append_step(ids, scores, [[0, 1, 2], [0, 1, 2]], [0, 0])
        self._append_step(ids, scores, [[0, 1, 2], [0, 2, 4]], [2, 3, 4, 5])
        self._append_step(ids, scores, [[0, 2, 4], [0, 2, 2, 4, 4]],
                          [3, 1, 5, 4])
        self._append_step(ids, scores, [[0, 2, 4], [0, 1, 2, 3, 4]],
                          [1, 1, 3, 5])
        self._append_step(ids, scores, [[0, 2, 4], [0, 0, 0, 2, 2]], [5, 1])
        sentence_ids = self.scope.var("sentence_ids").get_tensor()
        sentence_scores = self.scope.var("sentence_scores").get_tensor()
        beam_search_decode_op = Operator(
            "beam_search_decode",
            # inputs
            Ids="ids",
            Scores="scores",
            # outputs
            SentenceIds="sentence_ids",
            SentenceScores="sentence_scores",
            beam_size=2,
            end_id=1, )
        beam_search_decode_op.run(self.scope, self.place)
        expected_lod = [[0, 2, 4], [0, 4, 7, 12, 17]]
        self.assertEqual(sentence_ids.lod(), expected_lod)
        self.assertEqual(sentence_scores.lod(), expected_lod)
        expected_data = np.array(
            [0, 2, 3, 1, 0, 2, 1, 0, 4, 5, 3, 5, 0, 4, 5, 3, 1], "int64")
        self.assertTrue(np.array_equal(np.array(sentence_ids), expected_data))
        self.assertTrue(
            np.array_equal(np.array(sentence_scores), expected_data))
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestBeamSearchDecodeOpGPU(TestBeamSearchDecodeOp):
    # Re-runs the inherited CPU test case on the first CUDA device.
    def setUp(self):
        self.scope = core.Scope()
        self.place = core.CUDAPlace(0)
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
| apache-2.0 |
efortuna/AndroidSDKClone | ndk_experimental/prebuilt/linux-x86_64/lib/python2.7/test/test_pdb.py | 71 | 11225 | # A test suite for pdb; at the moment, this only validates skipping of
# specified test modules (RFE #5142).
import imp
import sys
import os
import unittest
import subprocess
import textwrap
from test import test_support
# This little helper class is essential for testing pdb under doctest.
from test_doctest import _FakeInput
class PdbTestCase(unittest.TestCase):
    # Drives pdb as a subprocess against a generated script and captures
    # the interactive session output for assertions.
    def run_pdb(self, script, commands):
        """Run 'script' lines with pdb and the pdb 'commands'."""
        filename = 'main.py'
        with open(filename, 'w') as f:
            f.write(textwrap.dedent(script))
        self.addCleanup(test_support.unlink, filename)
        cmd = [sys.executable, '-m', 'pdb', filename]
        stdout = stderr = None
        # stderr is merged into stdout so the whole session is one stream.
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stdin=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                )
        stdout, stderr = proc.communicate(commands)
        proc.stdout.close()
        proc.stdin.close()
        return stdout, stderr
    def test_issue13183(self):
        # Regression test: stepping after a return must land in the
        # caller's frame (issue #13183).
        script = """
            from bar import bar
            def foo():
                bar()
            def nope():
                pass
            def foobar():
                foo()
                nope()
            foobar()
        """
        commands = """
            from bar import bar
            break bar
            continue
            step
            step
            quit
        """
        bar = """
            def bar():
                pass
        """
        with open('bar.py', 'w') as f:
            f.write(textwrap.dedent(bar))
        self.addCleanup(test_support.unlink, 'bar.py')
        stdout, stderr = self.run_pdb(script, commands)
        self.assertTrue(
            any('main.py(5)foo()->None' in l for l in stdout.splitlines()),
            'Fail to step into the caller after a return')
class PdbTestInput(object):
    """Context manager that makes testing Pdb in doctests easier.

    Temporarily swaps ``sys.stdin`` for a fake stream yielding the given
    canned lines; the real stdin is restored on exit.
    """
    def __init__(self, input):
        self._lines = input
    def __enter__(self):
        self._saved_stdin = sys.stdin
        sys.stdin = _FakeInput(self._lines)
    def __exit__(self, *exc):
        sys.stdin = self._saved_stdin
def write(x):
    # Tiny helper used from inside the doctests (Python 2 print statement).
    print x
# Doctest: the session transcript in the docstring IS the test and must
# not be edited.
def test_pdb_displayhook():
    """This tests the custom displayhook for pdb.
    >>> def test_function(foo, bar):
    ...     import pdb; pdb.Pdb().set_trace()
    ...     pass
    >>> with PdbTestInput([
    ...     'foo',
    ...     'bar',
    ...     'for i in range(5): write(i)',
    ...     'continue',
    ... ]):
    ...     test_function(1, None)
    > <doctest test.test_pdb.test_pdb_displayhook[0]>(3)test_function()
    -> pass
    (Pdb) foo
    1
    (Pdb) bar
    (Pdb) for i in range(5): write(i)
    0
    1
    2
    3
    4
    (Pdb) continue
    """
# Doctest: exercises break/disable/ignore/condition/clear/commands/tbreak.
# The transcript in the docstring IS the test and must not be edited.
def test_pdb_breakpoint_commands():
    """Test basic commands related to breakpoints.
    >>> def test_function():
    ...     import pdb; pdb.Pdb().set_trace()
    ...     print(1)
    ...     print(2)
    ...     print(3)
    ...     print(4)
    First, need to clear bdb state that might be left over from previous tests.
    Otherwise, the new breakpoints might get assigned different numbers.
    >>> from bdb import Breakpoint
    >>> Breakpoint.next = 1
    >>> Breakpoint.bplist = {}
    >>> Breakpoint.bpbynumber = [None]
    Now test the breakpoint commands.  NORMALIZE_WHITESPACE is needed because
    the breakpoint list outputs a tab for the "stop only" and "ignore next"
    lines, which we don't want to put in here.
    >>> with PdbTestInput([  # doctest: +NORMALIZE_WHITESPACE
    ...     'break 3',
    ...     'disable 1',
    ...     'ignore 1 10',
    ...     'condition 1 1 < 2',
    ...     'break 4',
    ...     'break 4',
    ...     'break',
    ...     'clear 3',
    ...     'break',
    ...     'condition 1',
    ...     'enable 1',
    ...     'clear 1',
    ...     'commands 2',
    ...     'print 42',
    ...     'end',
    ...     'continue',  # will stop at breakpoint 2 (line 4)
    ...     'clear',  # clear all!
    ...     'y',
    ...     'tbreak 5',
    ...     'continue',  # will stop at temporary breakpoint
    ...     'break',  # make sure breakpoint is gone
    ...     'continue',
    ... ]):
    ...    test_function()
    > <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(3)test_function()
    -> print(1)
    (Pdb) break 3
    Breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
    (Pdb) disable 1
    (Pdb) ignore 1 10
    Will ignore next 10 crossings of breakpoint 1.
    (Pdb) condition 1 1 < 2
    (Pdb) break 4
    Breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    (Pdb) break 4
    Breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    (Pdb) break
    Num Type         Disp Enb   Where
    1   breakpoint   keep no    at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
            stop only if 1 < 2
            ignore next 10 hits
    2   breakpoint   keep yes   at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    3   breakpoint   keep yes   at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    (Pdb) clear 3
    Deleted breakpoint 3
    (Pdb) break
    Num Type         Disp Enb   Where
    1   breakpoint   keep no    at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
            stop only if 1 < 2
            ignore next 10 hits
    2   breakpoint   keep yes   at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
    (Pdb) condition 1
    Breakpoint 1 is now unconditional.
    (Pdb) enable 1
    (Pdb) clear 1
    Deleted breakpoint 1
    (Pdb) commands 2
    (com) print 42
    (com) end
    (Pdb) continue
    1
    42
    > <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(4)test_function()
    -> print(2)
    (Pdb) clear
    Clear all breaks? y
    (Pdb) tbreak 5
    Breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
    (Pdb) continue
    2
    Deleted breakpoint 4
    > <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(5)test_function()
    -> print(3)
    (Pdb) break
    (Pdb) continue
    3
    4
    """
# Doctest: the transcript in the docstring IS the test; do not edit it.
def test_pdb_skip_modules():
    """This illustrates the simple case of module skipping.
    >>> def skip_module():
    ...     import string
    ...     import pdb; pdb.Pdb(skip=['string*']).set_trace()
    ...     string.lower('FOO')
    >>> with PdbTestInput([
    ...     'step',
    ...     'continue',
    ... ]):
    ...     skip_module()
    > <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()
    -> string.lower('FOO')
    (Pdb) step
    --Return--
    > <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()->None
    -> string.lower('FOO')
    (Pdb) continue
    """
# Module for testing skipping of module that makes a callback
# (built dynamically so its name matches the Pdb skip pattern below;
# Python 2 `exec ... in` statement).
mod = imp.new_module('module_to_skip')
exec 'def foo_pony(callback): x = 1; callback(); return None' in mod.__dict__
# Doctest: the transcript in the docstring IS the test; do not edit it.
def test_pdb_skip_modules_with_callback():
    """This illustrates skipping of modules that call into other code.
    >>> def skip_module():
    ...     def callback():
    ...         return None
    ...     import pdb; pdb.Pdb(skip=['module_to_skip*']).set_trace()
    ...     mod.foo_pony(callback)
    >>> with PdbTestInput([
    ...     'step',
    ...     'step',
    ...     'step',
    ...     'step',
    ...     'step',
    ...     'continue',
    ... ]):
    ...     skip_module()
    ...     pass  # provides something to "step" to
    > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()
    -> mod.foo_pony(callback)
    (Pdb) step
    --Call--
    > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(2)callback()
    -> def callback():
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()
    -> return None
    (Pdb) step
    --Return--
    > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()->None
    -> return None
    (Pdb) step
    --Return--
    > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()->None
    -> mod.foo_pony(callback)
    (Pdb) step
    > <doctest test.test_pdb.test_pdb_skip_modules_with_callback[1]>(10)<module>()
    -> pass  # provides something to "step" to
    (Pdb) continue
    """
# Doctest: the transcript in the docstring IS the test; do not edit it.
def test_pdb_continue_in_bottomframe():
    """Test that "continue" and "next" work properly in bottom frame (issue #5294).
    >>> def test_function():
    ...     import pdb, sys; inst = pdb.Pdb()
    ...     inst.set_trace()
    ...     inst.botframe = sys._getframe()  # hackery to get the right botframe
    ...     print(1)
    ...     print(2)
    ...     print(3)
    ...     print(4)
    First, need to clear bdb state that might be left over from previous tests.
    Otherwise, the new breakpoints might get assigned different numbers.
    >>> from bdb import Breakpoint
    >>> Breakpoint.next = 1
    >>> Breakpoint.bplist = {}
    >>> Breakpoint.bpbynumber = [None]
    >>> with PdbTestInput([
    ...     'next',
    ...     'break 7',
    ...     'continue',
    ...     'next',
    ...     'continue',
    ...     'continue',
    ... ]):
    ...    test_function()
    > <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(4)test_function()
    -> inst.botframe = sys._getframe()  # hackery to get the right botframe
    (Pdb) next
    > <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(5)test_function()
    -> print(1)
    (Pdb) break 7
    Breakpoint 1 at <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>:7
    (Pdb) continue
    1
    2
    > <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(7)test_function()
    -> print(3)
    (Pdb) next
    3
    > <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(8)test_function()
    -> print(4)
    (Pdb) continue
    4
    """
class ModuleInitTester(unittest.TestCase):
    def test_filename_correct(self):
        """
        In issue 7750, it was found that if the filename has a sequence that
        resolves to an escape character in a Python string (such as \t), it
        will be treated as the escaped character.
        """
        # the test_fn must contain something like \t
        # on Windows, this will create 'test_mod.py' in the current directory.
        # on Unix, this will create '.\test_mod.py' in the current directory.
        test_fn = '.\\test_mod.py'
        code = 'print("testing pdb")'
        with open(test_fn, 'w') as f:
            f.write(code)
        self.addCleanup(os.remove, test_fn)
        cmd = [sys.executable, '-m', 'pdb', test_fn,]
        # Feed 'quit' so the pdb session terminates immediately; the banner
        # pdb prints must contain the unmangled source line.
        proc = subprocess.Popen(cmd,
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            )
        stdout, stderr = proc.communicate('quit\n')
        self.assertIn(code, stdout, "pdb munged the filename")
def test_main():
    # Run both the doctests in this module and the unittest classes.
    from test import test_pdb
    test_support.run_doctest(test_pdb, verbosity=True)
    test_support.run_unittest(
        PdbTestCase,
        ModuleInitTester)
if __name__ == '__main__':
    test_main()
| apache-2.0 |
Moriadry/tensorflow | tensorflow/python/kernel_tests/functional_ops_test.py | 78 | 18206 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.bcast_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def simple_scoped_fn(a, x):
  """Simple function: (a, x) -> 2(x+a), but with "2" as a variable in scope."""
  with variable_scope.variable_scope("body"):
    # Dummy variable, just to check that scoping works as intended.
    two = variable_scope.get_variable(
        "two", [],
        dtype=dtypes.int32,
        initializer=init_ops.constant_initializer(2))
    return math_ops.multiply(math_ops.add(a, x), two)
class FunctionalOpsTest(test.TestCase):
  def testFoldl_Simple(self):
    # foldl over [1..6] with f(a,x) = 2*(a+x), with and without an
    # explicit initializer.
    with self.test_session():
      elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
      r = functional_ops.foldl(
          lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
          elems)
      self.assertAllEqual(208, r.eval())
      r = functional_ops.foldl(
          lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
          elems,
          initializer=10)
      self.assertAllEqual(880, r.eval())
  def testFoldl_Scoped(self):
    # Verifies the fold body's variables land in the enclosing scope and
    # are reused across calls.
    with self.test_session() as sess:
      with variable_scope.variable_scope("root") as varscope:
        elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
        r = functional_ops.foldl(simple_scoped_fn, elems)
        # Check that we have the one variable we asked for here.
        self.assertEqual(len(variables.trainable_variables()), 1)
        self.assertEqual(variables.trainable_variables()[0].name,
                         "root/body/two:0")
        sess.run([variables.global_variables_initializer()])
        self.assertAllEqual(208, r.eval())
        # Now let's reuse our single variable.
        varscope.reuse_variables()
        r = functional_ops.foldl(simple_scoped_fn, elems, initializer=10)
        self.assertEqual(len(variables.trainable_variables()), 1)
        self.assertAllEqual(880, r.eval())
  def testFoldr_Simple(self):
    # Right fold mirrors testFoldl_Simple but accumulates from the right.
    with self.test_session():
      elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
      r = functional_ops.foldr(
          lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
          elems)
      self.assertAllEqual(450, r.eval())
      r = functional_ops.foldr(
          lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
          elems,
          initializer=10)
      self.assertAllEqual(1282, r.eval())
  def testFoldr_Scoped(self):
    # Same variable-scoping checks as testFoldl_Scoped, for foldr.
    with self.test_session() as sess:
      with variable_scope.variable_scope("root") as varscope:
        elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
        r = functional_ops.foldr(simple_scoped_fn, elems)
        # Check that we have the one variable we asked for here.
        self.assertEqual(len(variables.trainable_variables()), 1)
        self.assertEqual(variables.trainable_variables()[0].name,
                         "root/body/two:0")
        sess.run([variables.global_variables_initializer()])
        self.assertAllEqual(450, r.eval())
        # Now let's reuse our single variable.
        varscope.reuse_variables()
        r = functional_ops.foldr(simple_scoped_fn, elems, initializer=10)
        self.assertEqual(len(variables.trainable_variables()), 1)
        self.assertAllEqual(1282, r.eval())
  # pylint: disable=unnecessary-lambda
  def testFold_Grad(self):
    # Gradient of prod([1..6]) w.r.t. the initializer v is 6! = 720 for
    # both fold directions.
    with self.test_session():
      elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
      v = constant_op.constant(2.0, name="v")
      r = functional_ops.foldl(
          lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
      r = gradients_impl.gradients(r, v)[0]
      self.assertAllEqual(720.0, r.eval())
      r = functional_ops.foldr(
          lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
      r = gradients_impl.gradients(r, v)[0]
      self.assertAllEqual(720.0, r.eval())
  # pylint: enable=unnecessary-lambda
  def testMap_Simple(self):
    # Elementwise map: x -> 2*(x+3).
    with self.test_session():
      nums = [1, 2, 3, 4, 5, 6]
      elems = constant_op.constant(nums, name="data")
      r = functional_ops.map_fn(
          lambda x: math_ops.multiply(math_ops.add(x, 3), 2), elems)
      self.assertAllEqual(np.array([(x + 3) * 2 for x in nums]), r.eval())
  def testMapSparseTensor(self):
    # map_fn rejects SparseTensor inputs with a TypeError.
    with self.test_session():
      with self.assertRaises(TypeError):
        functional_ops.map_fn(
            lambda x: x,
            sparse_tensor.SparseTensor(
                indices=[[0, 0], [0, 1], [1, 0]],
                values=constant_op.constant([0, 1, 2]),
                dense_shape=[2, 2]))
  def testMap_Scoped(self):
    # Variable-scoping checks for map_fn, mirroring the fold variants.
    with self.test_session() as sess:
      def double_scoped(x):
        """2x with a dummy 2 that is scoped."""
        with variable_scope.variable_scope("body"):
          # Dummy variable, just to check that scoping works as intended.
          two = variable_scope.get_variable(
              "two", [],
              dtype=dtypes.int32,
              initializer=init_ops.constant_initializer(2))
          return math_ops.multiply(x, two)
      with variable_scope.variable_scope("root") as varscope:
        elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
        doubles = np.array([2 * x for x in [1, 2, 3, 4, 5, 6]])
        r = functional_ops.map_fn(double_scoped, elems)
        # Check that we have the one variable we asked for here.
        self.assertEqual(len(variables.trainable_variables()), 1)
        self.assertEqual(variables.trainable_variables()[0].name,
                         "root/body/two:0")
        sess.run([variables.global_variables_initializer()])
        self.assertAllEqual(doubles, r.eval())
        # Now let's reuse our single variable.
        varscope.reuse_variables()
        r = functional_ops.map_fn(double_scoped, elems)
        self.assertEqual(len(variables.trainable_variables()), 1)
        self.assertAllEqual(doubles, r.eval())
  def testMap_Grad(self):
    # Gradients of y = map(x -> x^2 * param) w.r.t. param and elems.
    with self.test_session():
      param = constant_op.constant(2.0)
      elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")
      y = functional_ops.map_fn(
          lambda x: math_ops.multiply(math_ops.square(x), param), elems)
      r = gradients_impl.gradients(y, param)[0]
      self.assertAllEqual(91.0, r.eval())
      r = gradients_impl.gradients(y, elems)[0]
      self.assertAllEqual([4.0, 8.0, 12.0, 16.0, 20.0, 24.0], r.eval())
  def testMap_SimpleNotTensor(self):
    # map_fn also accepts a plain numpy array as input.
    with self.test_session():
      nums = np.array([1, 2, 3, 4, 5, 6])
      r = functional_ops.map_fn(
          lambda x: math_ops.multiply(math_ops.add(x, 3), 2), nums)
      self.assertAllEqual(np.array([(x + 3) * 2 for x in nums]), r.eval())
  def testMap_SingleInputMultiOutput(self):
    # A single input mapped to a 2-tuple output requires an explicit
    # dtype structure matching the fn's output.
    with self.test_session() as sess:
      nums = np.array([1, 2, 3, 4, 5, 6])
      r = functional_ops.map_fn(
          lambda x: ((x + 3) * 2, -(x + 3) * 2),
          nums,
          dtype=(dtypes.int64, dtypes.int64))
      self.assertEqual(2, len(r))
      self.assertEqual((6,), r[0].get_shape())
      self.assertEqual((6,), r[1].get_shape())
      received = sess.run(r)
      self.assertAllEqual((nums + 3) * 2, received[0])
      self.assertAllEqual(-(nums + 3) * 2, received[1])
  def testMap_MultiOutputMismatchedDtype(self):
    # The dtype structure must match the fn's output structure exactly
    # (tuple vs list raises).
    with self.test_session():
      nums = np.array([1, 2, 3, 4, 5, 6])
      with self.assertRaisesRegexp(
          TypeError, r"two structures don't have the same sequence type."):
        # lambda emits tuple, but dtype is a list
        functional_ops.map_fn(
            lambda x: ((x + 3) * 2, -(x + 3) * 2),
            nums,
            dtype=[dtypes.int64, dtypes.int64])
def testMap_MultiInputSingleOutput(self):
with self.test_session():
nums = np.array([1, 2, 3, 4, 5, 6])
r = functional_ops.map_fn(
lambda x: x[0] * x[1][0] + x[1][1], (nums, (nums, -nums)),
dtype=dtypes.int64)
self.assertEqual((6,), r.get_shape())
received = r.eval()
self.assertAllEqual(nums * nums + (-nums), received)
def testMap_MultiInputSameStructureOutput(self):
  """When fn output mirrors the input structure, `dtype` may be omitted."""
  with self.test_session() as sess:
    nums = np.array([1, 2, 3, 4, 5, 6])
    # fn permutes the nested input structure (a, (b, c)) -> (b, (c, a)).
    r = functional_ops.map_fn(lambda x: (x[1][0], (x[1][1], x[0])),
                              (nums, (2 * nums, -nums)))
    r = [r[0], r[1][0], r[1][1]]
    self.assertEqual((6,), r[0].get_shape())
    self.assertEqual((6,), r[1].get_shape())
    self.assertEqual((6,), r[2].get_shape())
    received = sess.run(r)
    self.assertAllEqual(2 * nums, received[0])
    self.assertAllEqual(-nums, received[1])
    self.assertAllEqual(nums, received[2])
def testScan_Simple(self):
  """scan computes cumulative products, with and without an initializer."""
  with self.test_session():
    elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
    v = constant_op.constant(2.0, name="v")

    # pylint: disable=unnecessary-lambda
    r = functional_ops.scan(lambda a, x: math_ops.multiply(a, x), elems)
    self.assertAllEqual([1., 2., 6., 24., 120., 720.], r.eval())

    # With initializer=2 every cumulative product is doubled.
    r = functional_ops.scan(
        lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
    self.assertAllEqual([2., 4., 12., 48., 240., 1440.], r.eval())
    # pylint: enable=unnecessary-lambda
def testScan_SingleInputMultiOutput(self):
  """scan threads a tuple accumulator over a single input sequence."""
  with self.test_session() as sess:
    elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    initializer = (np.array(1.0), np.array(-1.0))
    # First slot: running product; second slot: sign-alternating product.
    r = functional_ops.scan(lambda a, x: (a[0] * x, -a[1] * x), elems,
                            initializer)
    r_value = sess.run(r)

    self.assertAllEqual([1.0, 2.0, 6.0, 24.0, 120.0, 720.0], r_value[0])
    self.assertAllEqual([1.0, -2.0, 6.0, -24.0, 120.0, -720.0], r_value[1])
def testScan_MultiInputSingleOutput(self):
  """scan over a tuple of sequences with a scalar accumulator."""
  with self.test_session():
    elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    initializer = np.array(1.0)
    # (elems + 1) + (-elems) == 1 at every step, so a is multiplied by 1.
    r = functional_ops.scan(lambda a, x: a * (x[0] + x[1]),
                            (elems + 1, -elems), initializer)
    self.assertAllEqual([1.0, 1.0, 1.0, 1.0, 1.0, 1.0], r.eval())
def testScan_MultiInputSameTypeOutput(self):
  """With matching in/out structures, scan needs no explicit initializer."""
  with self.test_session() as sess:
    elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    # Element-wise cumulative sums over both sequences simultaneously.
    r = functional_ops.scan(lambda a, x: (a[0] + x[0], a[1] + x[1]),
                            (elems, -elems))
    r_value = sess.run(r)
    self.assertAllEqual(np.cumsum(elems), r_value[0])
    self.assertAllEqual(np.cumsum(-elems), r_value[1])
def testScan_MultiOutputMismatchedInitializer(self):
  """scan rejects an fn whose output structure mismatches the initializer."""
  with self.test_session():
    elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    initializer = np.array(1.0)
    # fn returns a 2-tuple but the initializer is a scalar -> structure error.
    with self.assertRaisesRegexp(
        ValueError, "two structures don't have the same number of elements"):
      functional_ops.scan(lambda a, x: (a, -a), elems, initializer)
def testScan_Scoped(self):
  """Variables created inside the scan fn live in (and can reuse) the scope."""
  with self.test_session() as sess:
    with variable_scope.variable_scope("root") as varscope:
      elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")

      r = functional_ops.scan(simple_scoped_fn, elems)
      # Check that we have the one variable we asked for here.
      self.assertEqual(len(variables.trainable_variables()), 1)
      self.assertEqual(variables.trainable_variables()[0].name,
                       "root/body/two:0")
      sess.run([variables.global_variables_initializer()])
      results = np.array([1, 6, 18, 44, 98, 208])
      self.assertAllEqual(results, r.eval())

      # Now let's reuse our single variable.
      varscope.reuse_variables()
      r = functional_ops.scan(simple_scoped_fn, elems, initializer=2)
      # Still exactly one trainable variable: it was reused, not recreated.
      self.assertEqual(len(variables.trainable_variables()), 1)
      results = np.array([6, 16, 38, 84, 178, 368])
      self.assertAllEqual(results, r.eval())
def testScanFoldl_Nested(self):
  """A foldl nested inside a scan fn composes correctly."""
  with self.test_session():
    elems = constant_op.constant([1.0, 2.0, 3.0, 4.0], name="data")
    inner_elems = constant_op.constant([0.5, 0.5], name="data")

    def r_inner(a, x):
      # Inner fold closes over the outer scan element x.
      return functional_ops.foldl(
          lambda b, y: b * y * x, inner_elems, initializer=a)

    r = functional_ops.scan(r_inner, elems)

    # Step-by-step trace of the nested computation:
    # t == 0 (returns 1)
    # t == 1, a == 1, x == 2 (returns 1)
    #   t_0 == 0, b == a == 1, y == 0.5, returns b * y * x = 1
    #   t_1 == 1, b == 1, y == 0.5, returns b * y * x = 1
    # t == 2, a == 1, x == 3 (returns 1.5*1.5 == 2.25)
    #   t_0 == 0, b == a == 1, y == 0.5, returns b * y * x = 1.5
    #   t_1 == 1, b == 1.5, y == 0.5, returns b * y * x = 1.5*1.5
    # t == 3, a == 2.25, x == 4 (returns 9)
    #   t_0 == 0, b == a == 2.25, y == 0.5, returns b * y * x = 4.5
    #   t_1 == 1, b == 4.5, y == 0.5, returns b * y * x = 9
    self.assertAllClose([1., 1., 2.25, 9.], r.eval())
def testScan_Control(self):
  """scan works when built under an external control dependency."""
  with self.test_session() as sess:
    s = array_ops.placeholder(dtypes.float32, shape=[None])
    b = array_ops.placeholder(dtypes.bool)

    with ops.control_dependencies([b]):
      c = functional_ops.scan(lambda a, x: x * a, s)
    # Cumulative product of [1, 3, 3] is [1, 3, 9].
    self.assertAllClose(
        np.array([1.0, 3.0, 9.0]), sess.run(c, {s: [1, 3, 3],
                                                b: True}))
def testScan_Grad(self):
  """Gradients flow through scan to the initializer."""
  with self.test_session():
    elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
    v = constant_op.constant(2.0, name="v")

    # pylint: disable=unnecessary-lambda
    r = functional_ops.scan(
        lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
    # pylint: enable=unnecessary-lambda

    # r = v * cumprod(elems); d/dv sum(r) = 1+2+6+24+120+720 = 873.
    r = gradients_impl.gradients(r, v)[0]
    self.assertAllEqual(873.0, r.eval())
def testFoldShape(self):
  """foldl's static shape inference matches the evaluated result's shape."""
  with self.test_session():
    elems = constant_op.constant([[1, 2, 3], [4, 5, 6]])

    def keep_current(_, current_input):
      # Accumulator is discarded; the fold simply returns the last row.
      return current_input

    init = constant_op.constant([0, 0, 0])
    folded = functional_ops.foldl(keep_current, elems, initializer=init)
    self.assertAllEqual(folded.get_shape(), folded.eval().shape)
def testMapShape(self):
  """map_fn's static shape inference matches the evaluated result's shape."""
  with self.test_session():
    source = constant_op.constant([[1, 2, 3], [4, 5, 6]])
    identity_mapped = functional_ops.map_fn(lambda e: e, source)
    self.assertAllEqual(identity_mapped.get_shape(),
                        identity_mapped.eval().shape)
def testMapUnknownShape(self):
  """map_fn over a fully-unknown-shape input yields an unknown output shape."""
  x = array_ops.placeholder(dtypes.float32)
  y = functional_ops.map_fn(lambda e: e, x)
  self.assertIs(None, y.get_shape().dims)
def testMapEmptyScalar(self):
  """Mapping a scalar-producing fn over an empty tensor yields shape [0]."""
  with self.test_session():
    map_return = functional_ops.map_fn(lambda x: 1, constant_op.constant([]))
    self.assertAllEqual([0], map_return.get_shape().dims)
    self.assertAllEqual([0], map_return.eval().shape)
def testMapEmptyTensor(self):
  """Mapping over an empty tensor keeps the fn output shape: [0, 3, 2]."""
  with self.test_session():
    map_return = functional_ops.map_fn(lambda x: array_ops.zeros([3, 2]),
                                       constant_op.constant([]))
    self.assertAllEqual([0, 3, 2], map_return.get_shape().dims)
    self.assertAllEqual([0, 3, 2], map_return.eval().shape)
def testScanShape(self):
  """scan's static shape inference matches the evaluated result's shape."""
  with self.test_session():
    elems = constant_op.constant([[1, 2, 3], [4, 5, 6]])

    def keep_current(_, current_input):
      # Ignore the accumulator; emit each input row unchanged.
      return current_input

    init = constant_op.constant([0, 0, 0])
    scanned = functional_ops.scan(keep_current, elems, initializer=init)
    self.assertAllEqual(scanned.get_shape(), scanned.eval().shape)
def testScanEmptyTensor(self):
  """scan over an empty range yields a [0, ...] result with static shape."""
  with self.test_session():
    x = functional_ops.scan(
        lambda x, _: x, math_ops.range(0), initializer=array_ops.ones([2, 4]))
    self.assertAllEqual([0, 2, 4], x.get_shape())
    self.assertAllEqual(x.get_shape(), x.eval().shape)
def testScanUnknownShape(self):
  """scan with unknown-shape input and initializer yields unknown shape."""
  x = array_ops.placeholder(dtypes.float32)
  initializer = array_ops.placeholder(dtypes.float32)

  def fn(_, current_input):
    return current_input

  y = functional_ops.scan(fn, x, initializer=initializer)
  self.assertIs(None, y.get_shape().dims)
def testScanVaryingShape(self):
  """scan handles a leading dimension that is statically unknown."""
  with self.test_session() as sess:
    x = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 2])
    x_t = array_ops.transpose(x)
    # scan over dimension 0 (with shape None)
    result = functional_ops.scan(lambda a, x: a + x, x)
    # scanned over transposed dimension 0 (with shape 2)
    result_t = functional_ops.scan(lambda a, x: a + x, x_t, infer_shape=False)
    # ensure gradients can be calculated
    result_grad = gradients_impl.gradients(result, [x])[0]
    result_t_grad = gradients_impl.gradients(result_t, [x_t])[0]

    # smoke test to ensure they all evaluate
    sess.run([result, result_t, result_grad, result_t_grad],
             feed_dict={x: [[1.0, 2.0]]})
# Standard TensorFlow test entry point: run all test cases in this module.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
ESOedX/edx-platform | common/test/acceptance/tests/lms/test_problem_types.py | 1 | 72649 | """
Bok choy acceptance and a11y tests for problem types in the LMS
"""
from __future__ import absolute_import
import random
import textwrap
from abc import ABCMeta, abstractmethod
import ddt
import pytest
import six
from bok_choy.promise import BrokenPromise
from selenium.webdriver import ActionChains
from six.moves import range
from capa.tests.response_xml_factory import (
AnnotationResponseXMLFactory,
ChoiceResponseXMLFactory,
ChoiceTextResponseXMLFactory,
CodeResponseXMLFactory,
CustomResponseXMLFactory,
FormulaResponseXMLFactory,
ImageResponseXMLFactory,
JSInputXMLFactory,
MultipleChoiceResponseXMLFactory,
NumericalResponseXMLFactory,
OptionResponseXMLFactory,
StringResponseXMLFactory,
SymbolicResponseXMLFactory
)
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.pages.lms.problem import ProblemPage
from common.test.acceptance.tests.helpers import EventsTestMixin, select_option_by_text
from common.test.acceptance.tests.lms.test_lms_problems import ProblemsTest
from openedx.core.lib.tests import attr
class ProblemTypeTestBaseMeta(ABCMeta):
    """
    MetaClass for ProblemTypeTestBase to ensure that the required attributes
    are defined (and non-None) on instances of the inheriting classes.

    Raises:
        NotImplementedError: on instantiation, if any required attribute is
            missing or still set to None.
    """
    def __call__(cls, *args, **kwargs):
        obj = type.__call__(cls, *args, **kwargs)

        required_attrs = [
            'problem_name',
            'problem_type',
            'factory',
            'factory_kwargs',
            'status_indicators',
        ]

        for required_attr in required_attrs:
            msg = (u'{} is a required attribute for {}').format(
                required_attr, str(cls)
            )
            # getattr with a None default folds the missing-attribute case
            # into the is-None case, replacing the old try/except around
            # obj.__getattribute__().
            if getattr(obj, required_attr, None) is None:
                raise NotImplementedError(msg)

        return obj
class ProblemTypeTestBase(six.with_metaclass(ProblemTypeTestBaseMeta, ProblemsTest, EventsTestMixin)):
    """
    Base class for testing assessment problem types in bok choy.

    This inherits from ProblemsTest, which has capabilities for testing problem
    features that are not problem type specific (checking, hinting, etc.).

    The following attributes must be explicitly defined when inheriting from
    this class (enforced by ProblemTypeTestBaseMeta):
        problem_name (str)
        problem_type (str)
        factory (ResponseXMLFactory subclass instance)

    Additionally, the default values for factory_kwargs and status_indicators
    may need to be overridden for some problem types.
    """
    problem_name = None
    problem_type = None
    problem_points = 1
    factory = None
    factory_kwargs = {}
    # CSS selectors used to detect each grading state on the rendered problem.
    status_indicators = {
        'correct': ['span.correct'],
        'incorrect': ['span.incorrect'],
        'unanswered': ['span.unanswered'],
        'submitted': ['span.submitted'],
        'unsubmitted': ['.unsubmitted']
    }

    def setUp(self):
        """
        Visits courseware_page and defines self.problem_page.
        """
        super(ProblemTypeTestBase, self).setUp()
        self.courseware_page.visit()
        self.problem_page = ProblemPage(self.browser)

    def get_sequential(self):
        """ Allow any class in the inheritance chain to customize subsection metadata."""
        return XBlockFixtureDesc('sequential', 'Test Subsection', metadata=getattr(self, 'sequential_metadata', {}))

    def get_problem(self):
        """
        Creates a {problem_type} problem
        """
        # Generate the problem XML using capa.tests.response_xml_factory
        return XBlockFixtureDesc(
            'problem',
            self.problem_name,
            data=self.factory.build_xml(**self.factory_kwargs),
            metadata={'rerandomize': 'always', 'show_reset_button': True}
        )

    def wait_for_status(self, status):
        """
        Waits for the expected status indicator.

        Args:
            status: one of ("correct", "incorrect", "unanswered", "submitted")
        """
        msg = u"Wait for status to be {}".format(status)
        selector = ', '.join(self.status_indicators[status])
        self.problem_page.wait_for_element_visibility(selector, msg)

    def problem_status(self, status):
        """
        Returns the status of problem

        Args:
            status(string): status of the problem which is to be checked

        Returns:
            True: If provided status is present on the page
            False: If provided status is not present on the page
        """
        selector = ', '.join(self.status_indicators[status])
        try:
            # Short timeout: this is a polling check, not a hard wait.
            self.problem_page.wait_for_element_visibility(selector, 'Status not present', timeout=10)
            return True
        except BrokenPromise:
            return False

    @abstractmethod
    def answer_problem(self, correctness):
        """
        Args:
            correctness (str): one of 'correct', 'incorrect' or
                'partially-correct'; subclasses input an answer of the
                requested correctness.
        """
        raise NotImplementedError()
class ProblemTypeA11yTestMixin(object):
    """
    Shared a11y tests for all problem types.
    """
    @attr('a11y')
    def test_problem_type_a11y(self):
        """
        Run accessibility audit for the problem type.
        """
        self.problem_page.wait_for(
            lambda: self.problem_page.problem_name == self.problem_name,
            "Make sure the correct problem is on the page"
        )

        # Set the scope to the problem container
        self.problem_page.a11y_audit.config.set_scope(
            include=['div#seq_content']
        )

        # Run the accessibility audit.
        self.problem_page.a11y_audit.check_for_accessibility_errors()
@ddt.ddt
class ProblemTypeTestMixin(ProblemTypeA11yTestMixin):
    """
    Test cases shared amongst problem types.
    """
    # Whether this problem type accepts an empty (blank) submission.
    can_submit_blank = False
    # Whether changing the answer clears the "saved" notification.
    can_update_save_notification = True

    @attr(shard=11)
    def test_answer_correctly(self):
        """
        Scenario: I can answer a problem correctly
        Given External graders respond "correct"
        And I am viewing a "<ProblemType>" problem
        When I answer a "<ProblemType>" problem "correctly"
        Then my "<ProblemType>" answer is marked "correct"
        And The "<ProblemType>" problem displays a "correct" answer
        And a success notification is shown
        And clicking on "Review" moves focus to the problem meta area
        And a "problem_check" server event is emitted
        And a "problem_check" browser event is emitted
        """
        # Make sure we're looking at the right problem
        self.problem_page.wait_for(
            lambda: self.problem_page.problem_name == self.problem_name,
            "Make sure the correct problem is on the page"
        )

        # Answer the problem correctly
        self.answer_problem(correctness='correct')
        self.problem_page.click_submit()
        self.wait_for_status('correct')
        self.problem_page.wait_success_notification()
        # Check that clicking on "Review" goes to the problem meta location
        self.problem_page.click_review_in_notification(notification_type='submit')
        self.problem_page.wait_for_focus_on_problem_meta()

        # Check for corresponding tracking event
        expected_events = [
            {
                'event_source': 'server',
                'event_type': 'problem_check',
                'username': self.username,
            }, {
                'event_source': 'browser',
                'event_type': 'problem_check',
                'username': self.username,
            },
        ]

        for event in expected_events:
            self.wait_for_events(event_filter=event, number_of_matches=1)

    @attr(shard=11)
    def test_answer_incorrectly(self):
        """
        Scenario: I can answer a problem incorrectly
        Given External graders respond "incorrect"
        And I am viewing a "<ProblemType>" problem
        When I answer a "<ProblemType>" problem "incorrectly"
        Then my "<ProblemType>" answer is marked "incorrect"
        And The "<ProblemType>" problem displays a "incorrect" answer
        """
        self.problem_page.wait_for(
            lambda: self.problem_page.problem_name == self.problem_name,
            "Make sure the correct problem is on the page"
        )

        # Answer the problem incorrectly
        self.answer_problem(correctness='incorrect')
        self.problem_page.click_submit()
        self.wait_for_status('incorrect')
        self.problem_page.wait_incorrect_notification()

    @attr(shard=11)
    def test_submit_blank_answer(self):
        """
        Scenario: I can submit a blank answer
        Given I am viewing a "<ProblemType>" problem
        When I submit a problem
        Then my "<ProblemType>" answer is marked "incorrect"
        And The "<ProblemType>" problem displays a "blank" answer
        """
        if not self.can_submit_blank:
            pytest.skip("Test incompatible with the current problem type")

        self.problem_page.wait_for(
            lambda: self.problem_page.problem_name == self.problem_name,
            "Make sure the correct problem is on the page"
        )
        # Leave the problem unchanged and check that Submit is NOT disabled
        # (problem types that reach this point accept blank submissions).
        self.wait_for_status('unanswered')
        self.assertFalse(self.problem_page.is_submit_disabled())
        self.problem_page.click_submit()
        self.wait_for_status('incorrect')

    @attr(shard=11)
    def test_cant_submit_blank_answer(self):
        """
        Scenario: I can't submit a blank answer
        When I try to submit blank answer
        Then I can't submit a problem
        """
        if self.can_submit_blank:
            pytest.skip("Test incompatible with the current problem type")

        self.problem_page.wait_for(
            lambda: self.problem_page.problem_name == self.problem_name,
            "Make sure the correct problem is on the page"
        )
        self.assertTrue(self.problem_page.is_submit_disabled())

    @attr(shard=12)
    def test_can_show_answer(self):
        """
        Scenario: Verifies that show answer button is working as expected.

        Given that I am on courseware page
        And I can see a CAPA problem with show answer button
        When I click "Show Answer" button
        And I should see question's solution
        And I should see the problem title is focused
        """
        self.problem_page.click_show()
        self.problem_page.wait_for_show_answer_notification()

    @attr(shard=12)
    def test_save_reaction(self):
        """
        Scenario: Verify that the save button performs as expected with problem types

        Given that I am on a problem page
        And I can see a CAPA problem with the Save button present
        When I select and answer and click the "Save" button
        Then I should see the Save notification
        And the Save button should not be disabled
        And clicking on "Review" moves focus to the problem meta area
        And if I change the answer selected
        Then the Save notification should be removed
        """
        self.problem_page.wait_for(
            lambda: self.problem_page.problem_name == self.problem_name,
            "Make sure the correct problem is on the page"
        )
        self.problem_page.wait_for_page()
        self.answer_problem(correctness='correct')
        self.assertTrue(self.problem_page.is_save_button_enabled())
        self.problem_page.click_save()
        # Ensure "Save" button is enabled after save is complete.
        self.assertTrue(self.problem_page.is_save_button_enabled())
        self.problem_page.wait_for_save_notification()
        # Check that clicking on "Review" goes to the problem meta location
        self.problem_page.click_review_in_notification(notification_type='save')
        self.problem_page.wait_for_focus_on_problem_meta()

        # Not all problems will detect the change and remove the save notification
        if self.can_update_save_notification:
            self.answer_problem(correctness='incorrect')
            self.assertFalse(self.problem_page.is_save_notification_visible())

    @attr(shard=12)
    def test_reset_shows_errors(self):
        """
        Scenario: Reset will show server errors

        If I reset a problem without first answering it
        Then a "gentle notification" is shown
        And the focus moves to the "gentle notification"
        """
        self.problem_page.wait_for(
            lambda: self.problem_page.problem_name == self.problem_name,
            "Make sure the correct problem is on the page"
        )
        self.wait_for_status('unanswered')
        self.assertFalse(self.problem_page.is_gentle_alert_notification_visible())
        # Click reset without first answering the problem (possible because show_reset_button is set to True)
        self.problem_page.click_reset()
        self.problem_page.wait_for_gentle_alert_notification()

    @attr(shard=12)
    def test_partially_complete_notifications(self):
        """
        Scenario: If a partially correct problem is submitted the correct notification is shown

        If I submit an answer that is partially correct
        Then the partially correct notification should be shown
        """
        # Not all problems have partially correct solutions configured
        if not self.partially_correct:
            pytest.skip("Test incompatible with the current problem type")

        self.problem_page.wait_for(
            lambda: self.problem_page.problem_name == self.problem_name,
            "Make sure the correct problem is on the page"
        )
        self.wait_for_status('unanswered')
        # Set an answer
        self.answer_problem(correctness='partially-correct')
        self.problem_page.click_submit()
        self.problem_page.wait_partial_notification()

    @ddt.data('correct', 'incorrect')
    def test_reset_problem(self, correctness):
        """
        Scenario: I can reset a problem

        Given I am viewing a problem with randomization: always and with reset button: on
        And I answer a problem as <correctness>
        When I reset the problem
        Then my answer is marked "unanswered"
        And The problem displays a "blank" answer
        """
        self.answer_problem(correctness)
        self.problem_page.click_submit()
        self.problem_page.click_reset()
        self.assertTrue(self.problem_status('unanswered'))
@ddt.ddt
class ChangingAnswerOfProblemTestMixin(object):
    """
    Test the effect of changing the answers of problem
    """
    @ddt.data(['correct', '1/1 point (ungraded)'], ['incorrect', '0/1 point (ungraded)'])
    @ddt.unpack
    def test_checkbox_score_after_answer_and_reset(self, correctness, score):
        """
        Scenario: I can see my score on problem when I answer it and after I reset it

        Given I am viewing problem
        When I answer problem with <correctness>
        Then I should see a <score>
        When I reset the problem
        Then I should see a score of points possible: 0/1 point (ungraded)
        """
        self.answer_problem(correctness)
        self.problem_page.click_submit()
        self.assertEqual(self.problem_page.problem_progress_graded_value, score)
        self.problem_page.click_reset()
        # Resetting always returns the score to zero.
        self.assertEqual(self.problem_page.problem_progress_graded_value, '0/1 point (ungraded)')

    @ddt.data(['correct', 'incorrect'], ['incorrect', 'correct'])
    @ddt.unpack
    def test_reset_correctness_after_changing_answer(self, initial_correctness, other_correctness):
        """
        Scenario: I can reset the correctness of a problem after changing my answer

        Given I am viewing problem
        Then my problem's answer is marked "unanswered"
        When I answer and submit the problem with <initial correctness>
        Then my problem's answer is marked with <initial correctness>
        And I input an answer as <other correctness>
        Then my problem's answer is marked "unanswered"
        """
        self.assertTrue(self.problem_status('unanswered'))
        self.answer_problem(initial_correctness)
        self.problem_page.click_submit()

        self.assertTrue(self.problem_status(initial_correctness))
        # Merely changing the answer (without submitting) clears correctness.
        self.answer_problem(other_correctness)
        self.assertTrue(self.problem_status('unanswered'))
@ddt.ddt
class NonRandomizedProblemTypeTestMixin(ProblemTypeA11yTestMixin):
    """
    Test the effect of 'randomization: never'
    """
    can_submit_blank = False
    can_update_save_notification = True

    def test_non_randomized_problem_correctly(self):
        """
        Scenario: The reset button doesn't show up

        Given I am viewing a problem with "randomization": never and with "reset button": on
        And I answer the problem correctly
        Then The "Reset" button does not appear
        """
        self.answer_problem("correct")
        self.problem_page.click_submit()
        self.assertFalse(self.problem_page.is_reset_button_present())

    def test_non_randomized_problem_incorrectly(self):
        """
        Scenario: I can reset a non-randomized problem that I answered incorrectly

        Given I am viewing a problem with "randomization": never and with "reset button": on
        And I answer the problem incorrectly
        When I reset the problem
        Then my problem answer is marked "unanswered"
        And the problem displays a "blank" answer
        """
        self.answer_problem("incorrect")
        self.problem_page.click_submit()
        self.problem_page.click_reset()
        self.assertTrue(self.problem_status('unanswered'))
@ddt.ddt
class ProblemNeverShowCorrectnessMixin(object):
    """
    Tests the effect of adding `show_correctness: never` to the sequence metadata
    for subclasses of ProblemTypeTestMixin.
    """
    # Picked up by ProblemTypeTestBase.get_sequential() for the subsection.
    sequential_metadata = {'show_correctness': 'never'}

    @attr(shard=7)
    @ddt.data('correct', 'incorrect', 'partially-correct')
    def test_answer_says_submitted(self, correctness):
        """
        Scenario: I can answer a problem <Correctness>ly

        Given External graders respond "<Correctness>"
        And I am viewing a "<ProblemType>" problem
        in a subsection with show_correctness set to "never"
        Then I should see a score of "N point(s) possible (ungraded, results hidden)"
        When I answer a "<ProblemType>" problem "<Correctness>ly"
        And the "<ProblemType>" problem displays only a "submitted" notification.
        And I should see a score of "N point(s) possible (ungraded, results hidden)"
        And a "problem_check" server event is emitted
        And a "problem_check" browser event is emitted
        """
        # Not all problems have partially correct solutions configured
        if correctness == 'partially-correct' and not self.partially_correct:
            pytest.skip("Test incompatible with the current problem type")

        # Problem progress text depends on points possible
        possible = 'possible (ungraded, results hidden)'
        if self.problem_points == 1:
            problem_progress = u'1 point {}'.format(possible)
        else:
            problem_progress = u'{} points {}'.format(self.problem_points, possible)

        # Make sure we're looking at the right problem
        self.problem_page.wait_for(
            lambda: self.problem_page.problem_name == self.problem_name,
            "Make sure the correct problem is on the page"
        )

        # Learner can see that score will be hidden prior to submitting answer
        self.assertEqual(self.problem_page.problem_progress_graded_value, problem_progress)

        # Answer the problem correctly
        self.answer_problem(correctness=correctness)
        self.problem_page.click_submit()
        self.wait_for_status('submitted')
        self.problem_page.wait_submitted_notification()

        # Score is still hidden after submitting answer
        self.assertEqual(self.problem_page.problem_progress_graded_value, problem_progress)

        # Check for corresponding tracking event
        expected_events = [
            {
                'event_source': 'server',
                'event_type': 'problem_check',
                'username': self.username,
            }, {
                'event_source': 'browser',
                'event_type': 'problem_check',
                'username': self.username,
            },
        ]

        for event in expected_events:
            self.wait_for_events(event_filter=event, number_of_matches=1)
class AnnotationProblemTypeBase(ProblemTypeTestBase):
    """
    ProblemTypeTestBase specialization for Annotation Problem Type
    """
    problem_name = 'ANNOTATION TEST PROBLEM'
    problem_type = 'annotationresponse'
    problem_points = 2
    factory = AnnotationResponseXMLFactory()
    partially_correct = True

    can_submit_blank = True
    can_update_save_notification = False

    factory_kwargs = {
        'title': 'Annotation Problem',
        'text': 'The text being annotated',
        'comment': 'What do you think the about this text?',
        'comment_prompt': 'Type your answer below.',
        'tag_prompt': 'Which of these items most applies to the text?',
        'options': [
            ('dog', 'correct'),
            ('cat', 'incorrect'),
            ('fish', 'partially-correct'),
        ]
    }

    status_indicators = {
        'correct': ['span.correct'],
        'incorrect': ['span.incorrect'],
        'partially-correct': ['span.partially-correct'],
        'unanswered': ['span.unanswered'],
        'submitted': ['span.submitted'],
    }

    def setUp(self, *args, **kwargs):
        """
        Additional setup for AnnotationProblemTypeBase
        """
        super(AnnotationProblemTypeBase, self).setUp(*args, **kwargs)

        self.problem_page.a11y_audit.config.set_rules({
            "ignore": [
                'label',  # TODO: AC-491
                'label-title-only',  # TODO: AC-493
            ]
        })

    def answer_problem(self, correctness):
        """
        Answer annotation problem.

        Args:
            correctness (str): 'correct', 'partially-correct', or anything
                else for an incorrect answer. Fills the comment box and
                clicks the tag option that produces the requested outcome
                (see the 'options' entry in factory_kwargs).
        """
        if correctness == 'correct':
            choice = 0
        elif correctness == 'partially-correct':
            choice = 2
        else:
            choice = 1
        answer = 'Student comment'

        self.problem_page.q(css='div.problem textarea.comment').fill(answer)
        # Fix: the selector previously called `.format(choice=choice)` on a
        # string with no replacement fields — a no-op. `.nth(choice)` alone
        # selects the desired tag element.
        self.problem_page.q(css='div.problem span.tag').nth(choice).click()
class AnnotationProblemTypeTest(AnnotationProblemTypeBase, ProblemTypeTestMixin):
    """
    Standard tests for the Annotation Problem Type
    """
    # Bok-choy shard this test class runs in; the trailing `pass` that
    # previously followed was redundant and has been removed.
    shard = 24
class AnnotationProblemTypeNeverShowCorrectnessTest(AnnotationProblemTypeBase, ProblemNeverShowCorrectnessMixin):
    """
    Ensure that correctness can be withheld for Annotation Problem Type problems.

    All behavior comes from the two base classes; the docstring alone is a
    valid class body, so the redundant `pass` was removed.
    """
class CheckboxProblemTypeBase(ProblemTypeTestBase):
    """
    ProblemTypeTestBase specialization for the Checkbox problem type.
    """
    problem_name = 'CHECKBOX TEST PROBLEM'
    problem_type = 'checkbox'
    partially_correct = True

    factory = ChoiceResponseXMLFactory()

    factory_kwargs = {
        'question_text': 'The correct answer is Choice 0 and Choice 2, Choice 1 and Choice 3 together are incorrect.',
        'choice_type': 'checkbox',
        'credit_type': 'edc',
        'choices': [True, False, True, False],
        'choice_names': ['Choice 0', 'Choice 1', 'Choice 2', 'Choice 3'],
        'explanation_text': 'This is explanation text'
    }

    # Checkbox combinations that produce each grading outcome; anything not
    # listed here is answered with the fully-incorrect combination.
    _CLICKS_FOR = {
        'correct': ('choice_0', 'choice_2'),
        'partially-correct': ('choice_2',),
    }

    def answer_problem(self, correctness):
        """
        Tick the checkbox combination yielding the requested `correctness`
        ('correct', 'partially-correct'; any other value answers incorrectly).
        """
        for choice in self._CLICKS_FOR.get(correctness, ('choice_1', 'choice_3')):
            self.problem_page.click_choice(choice)
@ddt.ddt
class CheckboxProblemTypeTest(CheckboxProblemTypeBase, ProblemTypeTestMixin, ChangingAnswerOfProblemTestMixin):
    """
    Standard tests for the Checkbox Problem Type
    """
    shard = 24

    def test_can_show_answer(self):
        """
        Scenario: Verifies that show answer button is working as expected.

        Given that I am on courseware page
        And I can see a CAPA problem with show answer button
        When I click "Show Answer" button
        And I should see question's solution
        And I should see correct choices highlighted
        """
        self.problem_page.click_show()
        self.assertTrue(self.problem_page.is_solution_tag_present())
        # NOTE(review): [1, 3] appear to be 1-based positions for Choice 0
        # and Choice 2 (the correct checkboxes) — confirm against ProblemPage.
        self.assertTrue(self.problem_page.is_correct_choice_highlighted(correct_choices=[1, 3]))
        self.problem_page.wait_for_show_answer_notification()
class CheckboxProblemTypeTestNonRandomized(CheckboxProblemTypeBase, NonRandomizedProblemTypeTestMixin):
    """
    Tests for the non-randomized checkbox problem
    """
    def get_problem(self):
        """
        Creates a checkbox problem with randomization disabled
        (rerandomize: never), unlike the base class default of 'always'.
        """
        # Generate the problem XML using capa.tests.response_xml_factory
        return XBlockFixtureDesc(
            'problem',
            self.problem_name,
            data=self.factory.build_xml(**self.factory_kwargs),
            metadata={'rerandomize': 'never', 'show_reset_button': True}
        )
class CheckboxProblemTypeNeverShowCorrectnessTest(CheckboxProblemTypeBase, ProblemNeverShowCorrectnessMixin):
    """
    Ensure that correctness can be withheld for Checkbox Problem Type problems.

    All behavior comes from the two base classes; the docstring alone is a
    valid class body, so the redundant `pass` was removed.
    """
@ddt.ddt
class MultipleChoiceProblemTypeBase(ProblemTypeTestBase):
    """
    ProblemTypeTestBase specialization Multiple Choice Problem Type
    """
    problem_name = 'MULTIPLE CHOICE TEST PROBLEM'
    problem_type = 'multiple choice'

    factory = MultipleChoiceResponseXMLFactory()
    partially_correct = False

    factory_kwargs = {
        'question_text': 'The correct answer is Choice 2',
        'choices': [False, False, True, False],
        'choice_names': ['choice_0', 'choice_1', 'choice_2', 'choice_3'],
    }
    status_indicators = {
        'correct': ['label.choicegroup_correct'],
        'incorrect': ['label.choicegroup_incorrect', 'span.incorrect'],
        'unanswered': ['span.unanswered'],
        'submitted': ['label.choicegroup_submitted', 'span.submitted'],
    }

    # NOTE: problem_status() is inherited from ProblemTypeTestBase; a
    # byte-for-byte copy of that method previously re-defined here was
    # redundant and has been removed.

    def answer_problem(self, correctness):
        """
        Answer multiple choice problem.

        Args:
            correctness (str): 'incorrect' selects a wrong choice; any other
                value selects the correct choice (choice 2).
        """
        if correctness == 'incorrect':
            self.problem_page.click_choice("choice_choice_1")
        else:
            self.problem_page.click_choice("choice_choice_2")
@ddt.ddt
class MultipleChoiceProblemTypeTest(MultipleChoiceProblemTypeBase, ProblemTypeTestMixin):
    """
    Standard tests for the Multiple Choice Problem Type
    """
    shard = 24

    def test_can_show_answer(self):
        """
        Scenario: Verifies that show answer button is working as expected.

        Given that I am on courseware page
        And I can see a CAPA problem with show answer button
        When I click "Show Answer" button
        The correct answer is displayed with a single correctness indicator.
        """
        # Click the correct answer, but don't submit yet. No correctness shows.
        self.answer_problem('correct')
        self.assertFalse(self.problem_page.is_correct_choice_highlighted(correct_choices=[3]))

        # After submit, the answer should be marked as correct.
        self.problem_page.click_submit()
        self.assertTrue(self.problem_page.is_correct_choice_highlighted(correct_choices=[3], show_answer=False))

        # Switch to an incorrect answer. This will hide the correctness indicator.
        self.answer_problem('incorrect')
        self.assertFalse(self.problem_page.is_correct_choice_highlighted(correct_choices=[3]))

        # Now click Show Answer. A single correctness indicator should be shown.
        self.problem_page.click_show()
        self.assertTrue(self.problem_page.is_correct_choice_highlighted(correct_choices=[3]))

        # Finally, make sure that clicking Show Answer moved focus to the correct place.
        self.problem_page.wait_for_show_answer_notification()
@ddt.ddt
class MultipleChoiceProblemResetCorrectnessAfterChangingAnswerTest(MultipleChoiceProblemTypeBase):
    """
    Tests for Multiple choice problem with changing answers
    """
    shard = 24

    @ddt.data(['correct', '1/1 point (ungraded)'], ['incorrect', '0/1 point (ungraded)'])
    @ddt.unpack
    def test_mcq_score_after_answer_and_reset(self, correctness, score):
        """
        Scenario: I can see my score on a multiple choice problem when I answer it and after I reset it

        Given I am viewing a multiple choice problem
        When I answer a multiple choice problem <correctness>
        Then I should see a <score>
        When I reset the problem
        Then I should see a score of points possible: 0/1 point (ungraded)
        """
        self.answer_problem(correctness)
        self.problem_page.click_submit()
        self.assertEqual(self.problem_page.problem_progress_graded_value, score)
        self.problem_page.click_reset()
        # Resetting always returns the score to zero.
        self.assertEqual(self.problem_page.problem_progress_graded_value, '0/1 point (ungraded)')

    @ddt.data(['correct', 'incorrect'], ['incorrect', 'correct'])
    @ddt.unpack
    def test_reset_correctness_after_changing_answer(self, initial_correctness, other_correctness):
        """
        Scenario: I can reset the correctness of a multiple choice problem after changing my answer

        Given I am viewing a multiple choice problem
        When I answer a multiple choice problem <initial_correctness>
        Then my multiple choice answer is marked <initial_correctness>
        And I reset the problem
        Then my multiple choice answer is NOT marked <initial_correctness>
        And my multiple choice answer is NOT marked <other_correctness>
        """
        self.assertTrue(self.problem_status("unanswered"))
        self.answer_problem(initial_correctness)
        self.problem_page.click_submit()
        self.assertTrue(self.problem_status(initial_correctness))
        self.problem_page.click_reset()
        # After reset, neither correctness state should be displayed.
        self.assertFalse(self.problem_status(initial_correctness))
        self.assertFalse(self.problem_status(other_correctness))
@ddt.ddt
class MultipleChoiceProblemTypeTestNonRandomized(MultipleChoiceProblemTypeBase, NonRandomizedProblemTypeTestMixin):
    """
    Multiple choice problem with randomization disabled.
    """
    shard = 24

    def get_problem(self):
        """
        Create a non-randomized multiple choice problem with three attempts
        and a reset button.
        """
        # Build the problem XML via capa.tests.response_xml_factory.
        problem_xml = self.factory.build_xml(**self.factory_kwargs)
        return XBlockFixtureDesc(
            'problem',
            self.problem_name,
            data=problem_xml,
            metadata={'rerandomize': 'never', 'show_reset_button': True, 'max_attempts': 3},
        )

    def test_non_randomized_multiple_choice_with_multiple_attempts(self):
        """
        Scenario: I can answer a problem with multiple attempts correctly but
        cannot reset because randomization is off.

        Given I am viewing a randomization "never" "multiple choice" problem with "3" attempts with reset
        Then I should see "You have used 0 of 3 attempts" somewhere in the page
        When I answer a "multiple choice" problem "correctly"
        Then The "Reset" button does not appear
        """
        feedback = self.problem_page.submission_feedback
        self.assertEqual(feedback, "You have used 0 of 3 attempts", "All 3 attempts are not available")
        self.answer_problem("correct")
        self.problem_page.click_submit()
        self.assertFalse(self.problem_page.is_reset_button_present())
class MultipleChoiceProblemTypeTestOneAttempt(MultipleChoiceProblemTypeBase):
    """
    Multiple choice problem limited to a single attempt.
    """
    def get_problem(self):
        """
        Create a non-randomized multiple choice problem capped at one attempt.
        """
        # Build the problem XML via capa.tests.response_xml_factory.
        problem_xml = self.factory.build_xml(**self.factory_kwargs)
        return XBlockFixtureDesc(
            'problem',
            self.problem_name,
            data=problem_xml,
            metadata={'rerandomize': 'never', 'show_reset_button': True, 'max_attempts': 1},
        )

    def test_answer_with_one_attempt_correctly(self):
        """
        Scenario: I can answer a problem with one attempt correctly and can not reset

        Given I am viewing a "multiple choice" problem with "1" attempt
        When I answer a "multiple choice" problem "correctly"
        Then The "Reset" button does not appear
        """
        self.answer_problem("correct")
        self.problem_page.click_submit()
        self.assertFalse(self.problem_page.is_reset_button_present())
class MultipleChoiceProblemTypeTestMultipleAttempt(MultipleChoiceProblemTypeBase):
    """
    Test Multiple choice problem with multiple attempts
    """
    def get_problem(self):
        """
        Creates a {problem_type} problem with three attempts, randomization
        on every attempt, and a reset button.
        """
        # Generate the problem XML using capa.tests.response_xml_factory
        return XBlockFixtureDesc(
            'problem',
            self.problem_name,
            data=self.factory.build_xml(**self.factory_kwargs),
            metadata={'rerandomize': 'always', 'show_reset_button': True, 'max_attempts': 3}
        )

    def test_answer_with_multiple_attempt_correctly(self):
        """
        Scenario: I can answer a problem with multiple attempts correctly and still reset the problem

        Given I am viewing a "multiple choice" problem with "3" attempts
        Then I should see "You have used 0 of 3 attempts" somewhere in the page
        When I answer a "multiple choice" problem "correctly"
        Then The "Reset" button does appear
        """
        self.assertEqual(
            self.problem_page.submission_feedback,
            "You have used 0 of 3 attempts",
            "All 3 attempts are not available"
        )
        self.answer_problem("correct")
        self.problem_page.click_submit()
        self.assertTrue(self.problem_page.is_reset_button_present())

    def test_learner_can_see_attempts_left(self):
        """
        Scenario: I can view how many attempts I have left on a problem

        Given I am viewing a "multiple choice" problem with "3" attempts
        Then I should see "You have used 0 of 3 attempts" somewhere in the page
        When I answer a "multiple choice" problem "incorrectly"
        And I reset the problem
        Then I should see "You have used 1 of 3 attempts" somewhere in the page
        When I answer a "multiple choice" problem "incorrectly"
        And I reset the problem
        Then I should see "You have used 2 of 3 attempts" somewhere in the page
        And The "Submit" button does appear
        When I answer a "multiple choice" problem "correctly"
        Then The "Reset" button does not appear
        """
        # Burn two attempts incorrectly (resetting each time), then answer the
        # final attempt correctly; the feedback string tracks attempts used.
        for attempts_used in range(3):
            self.assertEqual(
                self.problem_page.submission_feedback,
                u"You have used {} of 3 attempts".format(str(attempts_used)),
                "All 3 attempts are not available"
            )
            if attempts_used == 2:
                # Last attempt: submit stays disabled until an answer is picked,
                # and no reset is offered after the final submission.
                self.assertTrue(self.problem_page.is_submit_disabled())
                self.answer_problem("correct")
                self.problem_page.click_submit()
                self.assertFalse(self.problem_page.is_reset_button_present())
            else:
                self.answer_problem("incorrect")
                self.problem_page.click_submit()
                self.problem_page.click_reset()
class MultipleChoiceProblemTypeNeverShowCorrectnessTest(MultipleChoiceProblemTypeBase,
                                                        ProblemNeverShowCorrectnessMixin):
    """
    Verify that correctness feedback can be withheld for multiple choice problems.
    """
class RadioProblemTypeBase(ProblemTypeTestBase):
    """
    ProblemTypeTestBase specialization for the Radio problem type.
    """
    problem_name = 'RADIO TEST PROBLEM'
    problem_type = 'radio'
    partially_correct = False

    factory = ChoiceResponseXMLFactory()

    factory_kwargs = {
        'question_text': 'The correct answer is Choice 2',
        'choice_type': 'radio',
        'choices': [False, False, True, False],
        'choice_names': ['Choice 0', 'Choice 1', 'Choice 2', 'Choice 3'],
    }

    status_indicators = {
        'correct': ['label.choicegroup_correct'],
        'incorrect': ['label.choicegroup_incorrect', 'span.incorrect'],
        'unanswered': ['span.unanswered'],
        'submitted': ['label.choicegroup_submitted', 'span.submitted'],
    }

    def problem_status(self, status):
        """
        Report whether an indicator for *status* is visible on the page.

        Arguments:
            status (string): one of the keys of ``status_indicators``.

        Returns:
            bool: True if the indicator became visible within 10 seconds,
            False otherwise.
        """
        css = ', '.join(self.status_indicators[status])
        try:
            self.problem_page.wait_for_element_visibility(css, 'Status not present', timeout=10)
        except BrokenPromise:
            return False
        return True

    def answer_problem(self, correctness):
        """
        Choose an answer in the radio problem: choice 2 (the correct one)
        when *correctness* is 'correct', otherwise choice 1.
        """
        choice = "choice_2" if correctness == 'correct' else "choice_1"
        self.problem_page.click_choice(choice)
@ddt.ddt
class RadioProblemTypeTest(RadioProblemTypeBase, ProblemTypeTestMixin):
    """
    Run the standard problem-type test suite against the Radio problem type.
    """
    shard = 24
@ddt.ddt
class RadioProblemResetCorrectnessAfterChangingAnswerTest(RadioProblemTypeBase):
    """
    Tests for Radio problem with changing answers
    """
    shard = 24

    @ddt.data(['correct', '1/1 point (ungraded)'], ['incorrect', '0/1 point (ungraded)'])
    @ddt.unpack
    def test_radio_score_after_answer_and_reset(self, correctness, score):
        """
        Scenario: I can see my score on a radio problem when I answer it and after I reset it

        Given I am viewing a radio problem
        When I answer a radio problem <correctness>
        Then I should see a <score>
        When I reset the problem
        Then I should see a score of points possible: 0/1 point (ungraded)
        """
        self.answer_problem(correctness)
        self.problem_page.click_submit()
        # Score reflects the submitted answer (per ddt data above).
        self.assertEqual(self.problem_page.problem_progress_graded_value, score)
        self.problem_page.click_reset()
        # Resetting always returns the displayed score to zero.
        self.assertEqual(self.problem_page.problem_progress_graded_value, '0/1 point (ungraded)')

    @ddt.data(['correct', 'incorrect'], ['incorrect', 'correct'])
    @ddt.unpack
    def test_reset_correctness_after_changing_answer(self, initial_correctness, other_correctness):
        """
        Scenario: I can reset the correctness of a radio problem after changing my answer

        Given I am viewing a radio problem
        When I answer a radio problem with <initial_correctness>
        Then my radio answer is marked <initial_correctness>
        And I reset the problem
        Then my radio problem's answer is NOT marked <initial_correctness>
        And my radio problem's answer is NOT marked <other_correctness>
        """
        self.assertTrue(self.problem_status("unanswered"))
        self.answer_problem(initial_correctness)
        self.problem_page.click_submit()
        self.assertTrue(self.problem_status(initial_correctness))
        self.problem_page.click_reset()
        # After reset, neither correctness indicator should be visible.
        self.assertFalse(self.problem_status(initial_correctness))
        self.assertFalse(self.problem_status(other_correctness))
class RadioProblemTypeTestNonRandomized(RadioProblemTypeBase, NonRandomizedProblemTypeTestMixin):
    """
    Radio problem with randomization disabled.
    """
    shard = 24

    def get_problem(self):
        """
        Create a non-randomized radio problem with a reset button.
        """
        # Build the problem XML via capa.tests.response_xml_factory.
        problem_xml = self.factory.build_xml(**self.factory_kwargs)
        return XBlockFixtureDesc(
            'problem',
            self.problem_name,
            data=problem_xml,
            metadata={'rerandomize': 'never', 'show_reset_button': True},
        )
class RadioProblemTypeNeverShowCorrectnessTest(RadioProblemTypeBase, ProblemNeverShowCorrectnessMixin):
    """
    Verify that correctness feedback can be withheld for radio problems.
    """
class DropDownProblemTypeBase(ProblemTypeTestBase):
    """
    ProblemTypeTestBase specialization for the Drop Down problem type.
    """
    problem_name = 'DROP DOWN TEST PROBLEM'
    problem_type = 'drop down'
    partially_correct = False

    factory = OptionResponseXMLFactory()

    factory_kwargs = {
        'question_text': 'The correct answer is Option 2',
        'options': ['Option 1', 'Option 2', 'Option 3', 'Option 4'],
        'correct_option': 'Option 2'
    }

    def answer_problem(self, correctness):
        """
        Pick an option from the drop down: 'Option 2' (the correct one) when
        *correctness* is 'correct', otherwise 'Option 3'.
        """
        if correctness == 'correct':
            chosen = 'Option 2'
        else:
            chosen = 'Option 3'
        dropdown = self.problem_page.q(css='.problem .option-input select')
        select_option_by_text(dropdown, chosen)
@ddt.ddt
class DropdownProblemTypeTest(DropDownProblemTypeBase, ProblemTypeTestMixin, ChangingAnswerOfProblemTestMixin):
    """
    Run the standard and answer-changing test suites against the Dropdown problem.
    """
    shard = 24
@ddt.ddt
class DropDownProblemTypeTestNonRandomized(DropDownProblemTypeBase, NonRandomizedProblemTypeTestMixin):
    """
    Dropdown problem with randomization disabled.
    """
    shard = 24

    def get_problem(self):
        """
        Create a non-randomized dropdown problem with a reset button.
        """
        # Build the problem XML via capa.tests.response_xml_factory.
        problem_xml = self.factory.build_xml(**self.factory_kwargs)
        return XBlockFixtureDesc(
            'problem',
            self.problem_name,
            data=problem_xml,
            metadata={'rerandomize': 'never', 'show_reset_button': True},
        )
class DropDownProblemTypeNeverShowCorrectnessTest(DropDownProblemTypeBase, ProblemNeverShowCorrectnessMixin):
    """
    Verify that correctness feedback can be withheld for dropdown problems.
    """
class StringProblemTypeBase(ProblemTypeTestBase):
    """
    ProblemTypeTestBase specialization for the String problem type.
    """
    problem_name = 'STRING TEST PROBLEM'
    problem_type = 'string'
    partially_correct = False

    factory = StringResponseXMLFactory()

    factory_kwargs = {
        'question_text': 'The answer is "correct string"',
        'case_sensitive': False,
        'answer': 'correct string',
    }

    status_indicators = {
        'correct': ['div.correct'],
        'incorrect': ['div.incorrect'],
        'unanswered': ['div.unanswered', 'div.unsubmitted'],
        'submitted': ['span.submitted'],
    }

    def problem_status(self, status):
        """
        Report whether an indicator for *status* is visible on the page.

        Arguments:
            status (string): one of the keys of ``status_indicators``.

        Returns:
            bool: True if the indicator became visible within 10 seconds,
            False otherwise.
        """
        css = ', '.join(self.status_indicators[status])
        try:
            self.problem_page.wait_for_element_visibility(css, 'Status not present', timeout=10)
        except BrokenPromise:
            return False
        return True

    def answer_problem(self, correctness):
        """
        Type an answer into the string problem: the expected string when
        *correctness* is 'correct', a different string otherwise.
        """
        answer_text = 'correct string' if correctness == 'correct' else 'incorrect string'
        self.problem_page.fill_answer(answer_text)
class StringProblemTypeTest(StringProblemTypeBase, ProblemTypeTestMixin):
    """
    Run the standard problem-type test suite against the String problem type.
    """
    shard = 24
class StringProblemTypeNeverShowCorrectnessTest(StringProblemTypeBase, ProblemNeverShowCorrectnessMixin):
    """
    Verify that correctness feedback can be withheld for string problems.
    """
class NumericalProblemTypeBase(ProblemTypeTestBase):
    """
    ProblemTypeTestBase specialization for Numerical Problem Type
    """
    problem_name = 'NUMERICAL TEST PROBLEM'
    problem_type = 'numerical'
    partially_correct = False

    factory = NumericalResponseXMLFactory()

    factory_kwargs = {
        'question_text': 'The answer is pi + 1',
        'answer': '4.14159',
        'tolerance': '0.00001',
        'math_display': True,
    }

    status_indicators = {
        'correct': ['div.correct'],
        'incorrect': ['div.incorrect'],
        'unanswered': ['div.unanswered', 'div.unsubmitted'],
        'submitted': ['div.submitted'],
        'unsubmitted': ['div.unsubmitted']
    }

    def problem_status(self, status):
        """
        Returns the status of problem
        Args:
            status(string): status of the problem which is to be checked
        Returns:
            True: If provided status is present on the page
            False: If provided status is not present on the page
        """
        selector = ', '.join(self.status_indicators[status])
        try:
            self.problem_page.wait_for_element_visibility(selector, 'Status not present', timeout=10)
            return True
        except BrokenPromise:
            return False

    def answer_problem(self, correctness):
        """
        Fill in an answer for the numerical problem.

        'correct' submits the exact expression; 'error' submits a
        non-numeric string (triggers an input error); anything else
        submits a small random integer, which is well outside the
        4.14159 +/- 0.00001 tolerance and therefore incorrect.
        """
        # Fix: the previous version pre-assigned textvalue = '' which was
        # dead code -- every branch below assigns it unconditionally.
        if correctness == 'correct':
            textvalue = "pi + 1"
        elif correctness == 'error':
            textvalue = 'notNum'
        else:
            textvalue = str(random.randint(-2, 2))
        self.problem_page.fill_answer(textvalue)
@ddt.ddt
class NumericalProblemTypeTest(NumericalProblemTypeBase, ProblemTypeTestMixin, ChangingAnswerOfProblemTestMixin):
    """
    Standard tests for the Numerical Problem Type
    """
    shard = 12

    def test_error_input_gentle_alert(self):
        """
        Scenario: I can answer a problem with erroneous input and will see a gentle alert

        Given a Numerical Problem type
        I can input a string answer
        Then I will see a Gentle alert notification
        And focus will shift to that notification
        And clicking on "Review" moves focus to the problem meta area
        """
        # Make sure we're looking at the right problem
        self.problem_page.wait_for(
            lambda: self.problem_page.problem_name == self.problem_name,
            "Make sure the correct problem is on the page"
        )

        # Answer the problem with an erroneous input to cause a gentle alert
        # (answer_problem('error') submits the non-numeric string 'notNum').
        self.assertFalse(self.problem_page.is_gentle_alert_notification_visible())
        self.answer_problem(correctness='error')
        self.problem_page.click_submit()
        self.problem_page.wait_for_gentle_alert_notification()

        # Check that clicking on "Review" goes to the problem meta location
        self.problem_page.click_review_in_notification(notification_type='gentle-alert')
        self.problem_page.wait_for_focus_on_problem_meta()
@ddt.ddt
class NumericalProblemTypeTestNonRandomized(NumericalProblemTypeBase, NonRandomizedProblemTypeTestMixin):
    """
    Numerical problem with randomization disabled.
    """
    shard = 12

    def get_problem(self):
        """
        Create a non-randomized numerical problem with a reset button.
        """
        # Build the problem XML via capa.tests.response_xml_factory.
        problem_xml = self.factory.build_xml(**self.factory_kwargs)
        return XBlockFixtureDesc(
            'problem',
            self.problem_name,
            data=problem_xml,
            metadata={'rerandomize': 'never', 'show_reset_button': True},
        )
class NumericalProblemTypeTestViewAnswer(NumericalProblemTypeBase):
    """
    Verify that a learner can view a numerical problem's answer.
    """
    def get_problem(self):
        """
        Create a numerical problem whose answer is always viewable.
        """
        # Build the problem XML via capa.tests.response_xml_factory.
        problem_xml = self.factory.build_xml(**self.factory_kwargs)
        return XBlockFixtureDesc(
            'problem',
            self.problem_name,
            data=problem_xml,
            metadata={'showanswer': 'always'},
        )

    def test_learner_can_view_answer(self):
        """
        Scenario: I can view the answer if the problem has it:

        Given I am viewing a "numerical" that shows the answer "always"
        When I press the button with the label "Show Answer"
        And I should see "4.14159" somewhere in the page
        """
        self.problem_page.click_show()
        self.assertEqual(self.problem_page.answer, '4.14159')
class NumericalProblemTypeNeverShowCorrectnessTest(NumericalProblemTypeBase, ProblemNeverShowCorrectnessMixin):
    """
    Verify that correctness feedback can be withheld for numerical problems.
    """
@ddt.ddt
class FormulaProblemTypeBase(ProblemTypeTestBase):
    """
    ProblemTypeTestBase specialization for the Formula problem type.
    """
    problem_name = 'FORMULA TEST PROBLEM'
    problem_type = 'formula'
    partially_correct = False

    factory = FormulaResponseXMLFactory()

    factory_kwargs = {
        'question_text': 'The solution is [mathjax]x^2+2x+y[/mathjax]',
        'sample_dict': {'x': (-100, 100), 'y': (-100, 100)},
        'num_samples': 10,
        'tolerance': 0.00001,
        'math_display': True,
        'answer': 'x^2+2*x+y',
    }

    status_indicators = {
        'correct': ['div.correct'],
        'incorrect': ['div.incorrect'],
        'unanswered': ['div.unanswered', 'div.unsubmitted'],
        'submitted': ['div.submitted'],
    }

    def problem_status(self, status):
        """
        Report whether an indicator for *status* is visible on the page.

        Arguments:
            status (string): one of the keys of ``status_indicators``.

        Returns:
            bool: True if the indicator became visible within 10 seconds,
            False otherwise.
        """
        css = ', '.join(self.status_indicators[status])
        try:
            self.problem_page.wait_for_element_visibility(css, 'Status not present', timeout=10)
        except BrokenPromise:
            return False
        return True

    def answer_problem(self, correctness):
        """
        Type a formula answer: the expected expression when *correctness*
        is 'correct', otherwise a non-equivalent expression.
        """
        expression = "x^2+2*x+y" if correctness == 'correct' else 'x^2'
        self.problem_page.fill_answer(expression)
@ddt.ddt
class FormulaProblemTypeTest(FormulaProblemTypeBase, ProblemTypeTestMixin, ChangingAnswerOfProblemTestMixin):
    """
    Run the standard and answer-changing test suites against the Formula problem.
    """
    shard = 24
class FormulaProblemTypeTestNonRandomized(FormulaProblemTypeBase, NonRandomizedProblemTypeTestMixin):
    """
    Formula problem with randomization disabled.
    """
    shard = 24

    def get_problem(self):
        """
        Create a non-randomized formula problem with a reset button.
        """
        # Build the problem XML via capa.tests.response_xml_factory.
        problem_xml = self.factory.build_xml(**self.factory_kwargs)
        return XBlockFixtureDesc(
            'problem',
            self.problem_name,
            data=problem_xml,
            metadata={'rerandomize': 'never', 'show_reset_button': True},
        )
class FormulaProblemTypeNeverShowCorrectnessTest(FormulaProblemTypeBase, ProblemNeverShowCorrectnessMixin):
    """
    Verify that correctness feedback can be withheld for formula problems.
    """
@ddt.ddt
class ScriptProblemTypeBase(ProblemTypeTestBase):
    """
    ProblemTypeTestBase specialization for Script Problem Type
    """
    problem_name = 'SCRIPT TEST PROBLEM'
    problem_type = 'script'
    problem_points = 2
    partially_correct = False

    factory = CustomResponseXMLFactory()

    factory_kwargs = {
        'cfn': 'test_add_to_ten',
        'expect': '10',
        'num_inputs': 2,
        'question_text': 'Enter two integers that sum to 10.',
        'input_element_label': 'Enter an integer',
        'script': textwrap.dedent("""
            def test_add_to_ten(expect,ans):
                try:
                    a1=int(ans[0])
                    a2=int(ans[1])
                except ValueError:
                    a1=0
                    a2=0
                return (a1+a2)==int(expect)
        """),
    }

    status_indicators = {
        'correct': ['div.correct'],
        'incorrect': ['div.incorrect'],
        'unanswered': ['div.unanswered', 'div.unsubmitted'],
        'submitted': ['div.submitted'],
    }

    def problem_status(self, status):
        """
        Returns the status of problem
        Args:
            status(string): status of the problem which is to be checked
        Returns:
            True: If provided status is present on the page
            False: If provided status is not present on the page
        """
        selector = ', '.join(self.status_indicators[status])
        try:
            # Fixed: the failure description previously read 'Status is
            # present', which is the opposite of what a timeout here means
            # and inconsistent with every sibling problem_status method.
            self.problem_page.wait_for_element_visibility(selector, 'Status not present', timeout=10)
            return True
        except BrokenPromise:
            return False

    def answer_problem(self, correctness):
        """
        Answer script problem.
        """
        # Correct answer is any two integers that sum to 10
        first_addend = random.randint(-100, 100)
        second_addend = 10 - first_addend

        # If we want an incorrect answer, then change
        # the second addend so they no longer sum to 10
        if not correctness == 'correct':
            second_addend += random.randint(1, 10)

        self.problem_page.fill_answer(first_addend, input_num=0)
        self.problem_page.fill_answer(second_addend, input_num=1)
@ddt.ddt
class ScriptProblemTypeTest(ScriptProblemTypeBase, ProblemTypeTestMixin):
    """
    Run the standard problem-type test suite against the Script problem type.
    """
    shard = 24
@ddt.ddt
class ScriptProblemResetAfterAnswerTest(ScriptProblemTypeBase):
    """
    Test Script problem by resetting answers
    """
    shard = 24

    @ddt.data(['correct', 'incorrect'], ['incorrect', 'correct'])
    @ddt.unpack
    def test_reset_correctness_after_changing_answer(self, initial_correctness, other_correctness):
        """
        Scenario: I can reset the correctness of a problem after changing my answer

        Given I am viewing a script problem
        Then my script problem's answer is marked "unanswered"
        When I answer a script problem initial correctness
        And I input an answer on a script problem other correctness
        Then my script problem answer is marked "unanswered"
        """
        self.assertTrue(self.problem_status('unanswered'))
        self.answer_problem(initial_correctness)
        self.problem_page.click_submit()
        self.assertTrue(self.problem_status(initial_correctness))
        # Merely changing the inputs (without submitting) clears the
        # previous correctness indicator back to "unanswered".
        self.answer_problem(other_correctness)
        self.assertTrue(self.problem_status('unanswered'))

    @ddt.data(['correct', '2/2 points (ungraded)'], ['incorrect', '0/2 points (ungraded)'])
    @ddt.unpack
    def test_script_score_after_answer_and_reset(self, correctness, score):
        """
        Scenario: I can see my score on a script problem when I answer it and after I reset it

        Given I am viewing a script problem
        When I answer a script problem correct/incorrect
        Then I should see a score
        When I reset the problem
        Then I should see a score of points possible: 0/2 points (ungraded)
        """
        self.answer_problem(correctness)
        self.problem_page.click_submit()
        # Score reflects the submitted answer (per ddt data above).
        self.assertEqual(self.problem_page.problem_progress_graded_value, score)
        self.problem_page.click_reset()
        # Resetting always returns the displayed score to zero.
        self.assertEqual(self.problem_page.problem_progress_graded_value, '0/2 points (ungraded)')
class ScriptProblemTypeTestNonRandomized(ScriptProblemTypeBase, NonRandomizedProblemTypeTestMixin):
    """
    Script problem with randomization disabled.
    """
    shard = 24

    def get_problem(self):
        """
        Create a non-randomized script problem with a reset button.
        """
        # Build the problem XML via capa.tests.response_xml_factory.
        problem_xml = self.factory.build_xml(**self.factory_kwargs)
        return XBlockFixtureDesc(
            'problem',
            self.problem_name,
            data=problem_xml,
            metadata={'rerandomize': 'never', 'show_reset_button': True},
        )
class ScriptProblemTypeNeverShowCorrectnessTest(ScriptProblemTypeBase, ProblemNeverShowCorrectnessMixin):
    """
    Verify that correctness feedback can be withheld for script problems.
    """
class JSInputTypeTest(ProblemTypeTestBase, ProblemTypeA11yTestMixin):
    """
    TestCase Class for jsinput (custom JavaScript) problem type.

    Right now the only test point that is executed is the a11y test.
    This is because the factory simply creates an empty iframe.
    """
    problem_name = 'JSINPUT PROBLEM'
    problem_type = 'customresponse'

    factory = JSInputXMLFactory()

    factory_kwargs = {
        'question_text': 'IFrame shows below (but has no content)'
    }

    def answer_problem(self, correctness):
        """
        Problem is not set up to work (displays an empty iframe), but this method must
        be extended because the parent class has marked it as abstract.

        Raises:
            NotImplementedError: always -- the empty iframe cannot be answered.
        """
        raise NotImplementedError()
class CodeProblemTypeBase(ProblemTypeTestBase):
    """
    ProblemTypeTestBase specialization for Code Problem Type
    """
    problem_name = 'CODE TEST PROBLEM'
    problem_type = 'code'
    partially_correct = False
    can_update_save_notification = False

    factory = CodeResponseXMLFactory()

    factory_kwargs = {
        'question_text': 'Submit code to an external grader',
        'initial_display': 'print "Hello world!"',
        'grader_payload': '{"grader": "ps1/Spring2013/test_grader.py"}',
    }

    status_indicators = {
        'correct': ['.grader-status .correct ~ .debug'],
        'incorrect': ['.grader-status .incorrect ~ .debug'],
        'unanswered': ['.grader-status .unanswered ~ .debug'],
        'submitted': ['.grader-status .submitted ~ .debug'],
    }

    def answer_problem(self, correctness):
        """
        Answer code problem.

        Intentionally a no-op; see the explanation below.
        """
        # The fake xqueue server is configured to respond
        # correct / incorrect no matter what we submit.
        # Furthermore, since the inline code response uses
        # JavaScript to make the code display nicely, it's difficult
        # to programatically input text
        # (there's not <textarea> we can just fill text into)
        # For this reason, we submit the initial code in the response
        # (configured in the problem XML above)
        pass
class CodeProblemTypeTest(CodeProblemTypeBase, ProblemTypeTestMixin):
    """
    Standard tests for the Code Problem Type
    """
    shard = 12

    def test_answer_incorrectly(self):
        """
        Overridden as a no-op for the code problem because the testing
        grader always responds with "correct".
        """
        pass

    def test_submit_blank_answer(self):
        """
        Overridden as a no-op for the code problem because the testing
        grader always responds with "correct".
        """
        pass

    def test_cant_submit_blank_answer(self):
        """
        Overridden as a no-op for the code problem because the testing
        grader always responds with "correct".
        """
        pass

    def wait_for_status(self, status):
        """
        Overridden as a no-op for the code problem because the testing
        grader always responds with "correct".
        """
        pass
class CodeProblemTypeNeverShowCorrectnessTest(CodeProblemTypeBase, ProblemNeverShowCorrectnessMixin):
    """
    Verify that correctness feedback can be withheld for code problems.
    """
class ChoiceTextProblemTypeTestBase(ProblemTypeTestBase):
    """
    Base class for "Choice + Text" Problem Types.
    (e.g. RadioText, CheckboxText)
    """
    # Subclasses set choice_type to 'radio' or 'checkbox'.
    choice_type = None
    partially_correct = False
    can_update_save_notification = False

    def _select_choice(self, input_num):
        """
        Selects the nth (where n == input_num) choice of the problem.
        """
        self.problem_page.q(
            css=u'div.problem input.ctinput[type="{}"]'.format(self.choice_type)
        ).nth(input_num).click()

    def _fill_input_text(self, value, input_num):
        """
        Fills the nth (where n == input_num) text input field of the problem
        with value.
        """
        self.problem_page.q(
            css='div.problem input.ctinput[type="text"]'
        ).nth(input_num).fill(value)

    def answer_problem(self, correctness):
        """
        Answer the choice + text problem (radio text or checkbox text).

        'correct' selects choice 0 and fills in "8"; anything else selects
        choice 1 and fills in "5".  The subclasses' factory kwargs declare
        answer "8" with tolerance "1", so "5" falls outside the tolerance.
        """
        choice = 0 if correctness == 'correct' else 1
        input_value = "8" if correctness == 'correct' else "5"

        self._select_choice(choice)
        self._fill_input_text(input_value, choice)
class RadioTextProblemTypeBase(ChoiceTextProblemTypeTestBase):
    """
    ProblemTypeTestBase specialization for Radio Text Problem Type
    """
    problem_name = 'RADIO TEXT TEST PROBLEM'
    problem_type = 'radio_text'
    choice_type = 'radio'
    partially_correct = False
    can_update_save_notification = False

    factory = ChoiceTextResponseXMLFactory()

    factory_kwargs = {
        'question_text': 'The correct answer is Choice 0 and input 8',
        'type': 'radiotextgroup',
        'choices': [
            # (is_correct, numeric answer spec) -- choice 0 is correct.
            ("true", {"answer": "8", "tolerance": "1"}),
            ("false", {"answer": "8", "tolerance": "1"}),
        ],
    }

    status_indicators = {
        'correct': ['section.choicetextgroup_correct'],
        'incorrect': ['section.choicetextgroup_incorrect', 'span.incorrect'],
        'unanswered': ['span.unanswered'],
        'submitted': ['section.choicetextgroup_submitted', 'span.submitted'],
    }

    def problem_status(self, status):
        """
        Returns the status of problem
        Args:
            status(string): status of the problem which is to be checked
        Returns:
            True: If provided status is present on the page
            False: If provided status is not present on the page
        """
        selector = ', '.join(self.status_indicators[status])
        try:
            self.problem_page.wait_for_element_visibility(selector, 'Status not present', timeout=10)
            return True
        except BrokenPromise:
            return False

    def setUp(self, *args, **kwargs):
        """
        Additional setup for RadioTextProblemTypeBase: relax a11y rules
        that these legacy markup patterns violate.
        """
        super(RadioTextProblemTypeBase, self).setUp(*args, **kwargs)
        self.problem_page.a11y_audit.config.set_rules({
            "ignore": [
                'radiogroup',  # TODO: AC-491
                'label',  # TODO: AC-491
                'section',  # TODO: AC-491
            ]
        })
@ddt.ddt
class RadioTextProblemTypeTest(RadioTextProblemTypeBase, ProblemTypeTestMixin):
    """
    Run the standard problem-type test suite against the Radio + Text problem.
    """
    shard = 24
@ddt.ddt
class RadioTextProblemResetCorrectnessAfterChangingAnswerTest(RadioTextProblemTypeBase):
    """
    Tests for Radio Text problem with changing answers
    """
    shard = 24

    # NOTE(review): method name says "mcq" but this tests the radio-text
    # problem; kept as-is since renaming would change the test ID.
    @ddt.data(['correct', '1/1 point (ungraded)'], ['incorrect', '0/1 point (ungraded)'])
    @ddt.unpack
    def test_mcq_score_after_answer_and_reset(self, correctness, score):
        """
        Scenario: I can see my score on a radio text problem when I answer it and after I reset it

        Given I am viewing a radio text problem
        When I answer a radio text problem correct/incorrect
        Then I should see a score
        When I reset the problem
        Then I should see a score of points possible: (1/1 point (ungraded) -- 0/1 point (ungraded)
        """
        self.answer_problem(correctness)
        self.problem_page.click_submit()
        # Score reflects the submitted answer (per ddt data above).
        self.assertEqual(self.problem_page.problem_progress_graded_value, score)
        self.problem_page.click_reset()
        # Resetting always returns the displayed score to zero.
        self.assertEqual(self.problem_page.problem_progress_graded_value, '0/1 point (ungraded)')

    @ddt.data(['correct', 'incorrect'], ['incorrect', 'correct'])
    @ddt.unpack
    def test_reset_correctness_after_changing_answer(self, initial_correctness, other_correctness):
        """
        Scenario: I can reset the correctness of a multiple choice problem after changing my answer

        Given I am viewing a radio text problem
        When I answer a radio text problem InitialCorrectness
        Then my radio text answer is marked InitialCorrectness
        And I reset the problem
        Then my answer is NOT marked InitialCorrectness
        And my answer is NOT marked OtherCorrectness
        """
        self.assertTrue(self.problem_status("unanswered"))
        self.answer_problem(initial_correctness)
        self.problem_page.click_submit()
        self.assertTrue(self.problem_status(initial_correctness))
        self.problem_page.click_reset()
        # After reset, neither correctness indicator should be visible.
        self.assertFalse(self.problem_status(initial_correctness))
        self.assertFalse(self.problem_status(other_correctness))
class RadioTextProblemTypeTestNonRandomized(RadioTextProblemTypeBase, NonRandomizedProblemTypeTestMixin):
    """
    Radio + Text problem with randomization disabled.
    """
    shard = 24

    def get_problem(self):
        """
        Create a non-randomized radio text problem with a reset button.
        """
        # Build the problem XML via capa.tests.response_xml_factory.
        problem_xml = self.factory.build_xml(**self.factory_kwargs)
        return XBlockFixtureDesc(
            'problem',
            self.problem_name,
            data=problem_xml,
            metadata={'rerandomize': 'never', 'show_reset_button': True},
        )
class RadioTextProblemTypeNeverShowCorrectnessTest(RadioTextProblemTypeBase, ProblemNeverShowCorrectnessMixin):
    """
    Verify that correctness feedback can be withheld for radio + text problems.
    """
class CheckboxTextProblemTypeBase(ChoiceTextProblemTypeTestBase):
    """
    ProblemTypeTestBase specialization for Checkbox Text Problem Type
    """
    problem_name = 'CHECKBOX TEXT TEST PROBLEM'
    problem_type = 'checkbox_text'
    choice_type = 'checkbox'
    factory = ChoiceTextResponseXMLFactory()
    partially_correct = False
    can_update_save_notification = False

    factory_kwargs = {
        'question_text': 'The correct answer is Choice 0 and input 8',
        'type': 'checkboxtextgroup',
        'choices': [
            # (is_correct, numeric answer spec) -- choice 0 is correct.
            ("true", {"answer": "8", "tolerance": "1"}),
            ("false", {"answer": "8", "tolerance": "1"}),
        ],
    }

    def setUp(self, *args, **kwargs):
        """
        Additional setup for CheckboxTextProblemTypeBase: relax a11y rules
        that these legacy markup patterns violate.
        """
        super(CheckboxTextProblemTypeBase, self).setUp(*args, **kwargs)
        self.problem_page.a11y_audit.config.set_rules({
            "ignore": [
                'checkboxgroup',  # TODO: AC-491
                'label',  # TODO: AC-491
                'section',  # TODO: AC-491
            ]
        })
class CheckboxTextProblemTypeTest(CheckboxTextProblemTypeBase, ProblemTypeTestMixin):
    """
    Run the standard problem-type test suite against the Checkbox + Text problem.
    """
class CheckboxTextProblemTypeTestNonRandomized(CheckboxTextProblemTypeBase, NonRandomizedProblemTypeTestMixin):
    """
    Non-randomized variant of the checkbox + text problem type tests.
    """
    def get_problem(self):
        """
        Build the problem XBlock fixture with randomization disabled.
        """
        # Build the capa XML via capa.tests.response_xml_factory.
        problem_xml = self.factory.build_xml(**self.factory_kwargs)
        # 'rerandomize': 'never' keeps the same variant between attempts,
        # so the reset button must be enabled explicitly for reset tests.
        fixture_metadata = {'rerandomize': 'never', 'show_reset_button': True}
        return XBlockFixtureDesc(
            'problem',
            self.problem_name,
            data=problem_xml,
            metadata=fixture_metadata,
        )
class CheckboxTextProblemTypeNeverShowCorrectnessTest(CheckboxTextProblemTypeBase, ProblemNeverShowCorrectnessMixin):
    """
    Ensure that correctness can be withheld for Checkbox + Text Problem Type problems.

    All test behavior comes from ProblemNeverShowCorrectnessMixin; this class
    only binds the mixin to the checkbox + text problem fixture.
    """
    pass
class ImageProblemTypeBase(ProblemTypeTestBase):
    """
    ProblemTypeTestBase specialization for Image Problem Type
    """
    problem_name = 'IMAGE TEST PROBLEM'
    problem_type = 'image'
    partially_correct = False
    factory = ImageResponseXMLFactory()
    can_submit_blank = True
    can_update_save_notification = False
    # The clickable correct region is the 50x50 rectangle anchored at the
    # image's top-left corner.
    factory_kwargs = {
        'src': '/static/images/placeholder-image.png',
        'rectangle': '(0,0)-(50,50)',
    }
    def answer_problem(self, correctness):
        """
        Click inside (for 'correct') or outside (otherwise) the target rectangle.
        """
        # move_to_element centers the cursor on the image; +25/+25 lands
        # inside the (0,0)-(50,50) rectangle, -25/-25 lands outside it.
        if correctness == 'correct':
            offset = 25
        else:
            offset = -25
        target = self.problem_page.q(css=".imageinput [id^='imageinput_'] img")[0]
        actions = ActionChains(self.browser)
        actions.move_to_element(target).move_by_offset(offset, offset).click()
        actions.perform()
@ddt.ddt
class ImageProblemTypeTest(ImageProblemTypeBase, ProblemTypeTestMixin):
    """
    Standard tests for the Image Problem Type
    """
    def test_image_problem_score_with_blank_answer(self):
        """
        Scenario: I can see my score on a problem to which I submit a blank answer
        Given I am viewing an image problem
        When I submit a problem
        Then I should see a score of Points Possible: 0/1 point (ungraded)
        """
        # Submit without clicking the image at all (can_submit_blank is True).
        self.problem_page.click_submit()
        self.assertEqual(self.problem_page.problem_progress_graded_value, '0/1 point (ungraded)')
class ImageProblemTypeTestNonRandomized(ImageProblemTypeBase, NonRandomizedProblemTypeTestMixin):
    """
    Non-randomized variant of the image problem type tests.
    """
    def get_problem(self):
        """
        Build the problem XBlock fixture with randomization disabled.
        """
        # Build the capa XML via capa.tests.response_xml_factory.
        problem_xml = self.factory.build_xml(**self.factory_kwargs)
        # 'rerandomize': 'never' keeps the same variant between attempts,
        # so the reset button must be enabled explicitly for reset tests.
        fixture_metadata = {'rerandomize': 'never', 'show_reset_button': True}
        return XBlockFixtureDesc(
            'problem',
            self.problem_name,
            data=problem_xml,
            metadata=fixture_metadata,
        )
class ImageProblemTypeNeverShowCorrectnessTest(ImageProblemTypeBase, ProblemNeverShowCorrectnessMixin):
    """
    Ensure that correctness can be withheld for Image Problem Type problems.

    All test behavior comes from ProblemNeverShowCorrectnessMixin; this class
    only binds the mixin to the image problem fixture.
    """
    pass
class SymbolicProblemTypeBase(ProblemTypeTestBase):
    """
    ProblemTypeTestBase specialization for Symbolic Problem Type
    """
    problem_name = 'SYMBOLIC TEST PROBLEM'
    problem_type = 'symbolicresponse'
    partially_correct = False
    factory = SymbolicResponseXMLFactory()
    factory_kwargs = {
        'expect': '2*x+3*y',
        'question_text': 'Enter a value'
    }
    # Symbolic problems render status inside div.capa_inputtype rather than
    # the default locations used by other problem types.
    status_indicators = {
        'correct': ['div.capa_inputtype div.correct'],
        'incorrect': ['div.capa_inputtype div.incorrect'],
        'unanswered': ['div.capa_inputtype div.unanswered'],
        'submitted': ['div.capa_inputtype div.submitted'],
    }
    def answer_problem(self, correctness):
        """
        Fill in an expression matching (or differing from) the expected answer.
        """
        if correctness == 'correct':
            # Matches factory_kwargs['expect'] exactly.
            expression = "2*x+3*y"
        else:
            expression = "3*a+4*b"
        self.problem_page.fill_answer(expression)
class SymbolicProblemTypeTest(SymbolicProblemTypeBase, ProblemTypeTestMixin):
    """
    Standard tests for the Symbolic Problem Type

    All test behavior is inherited from ProblemTypeTestMixin; this class only
    binds the mixin to the symbolic problem fixture.
    """
    pass
class SymbolicProblemTypeNeverShowCorrectnessTest(SymbolicProblemTypeBase, ProblemNeverShowCorrectnessMixin):
    """
    Ensure that correctness can be withheld for Symbolic Problem Type problems.

    All test behavior comes from ProblemNeverShowCorrectnessMixin; this class
    only binds the mixin to the symbolic problem fixture.
    """
    pass
| agpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.