gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Retrieve and interpolate data for Earth Orientation and timescales conversions
"""
import logging
from pathlib import Path
from inspect import isclass
from pkg_resources import iter_entry_points
from ..config import config
from ..errors import EopError, ConfigError
__all__ = ["register", "EopDb", "TaiUtc", "Finals", "Finals2000A"]
log = logging.getLogger(__name__)
class TaiUtc:
    """TAI-UTC leap-second history, parsed from a ``tai-utc.dat`` file.

    The file could be retrieved `here <http://maia.usno.navy.mil/ser7/tai-utc.dat>`__,
    but this server seems discontinued.
    """

    def __init__(self, path, encoding="ascii"):
        """Args:
            path (str or Path): location of the tai-utc.dat file
            encoding (str): encoding used to read the file
        """
        self.path = Path(path)
        self.data = []

        with self.path.open(encoding=encoding) as fhandler:
            for raw in fhandler.read().splitlines():
                if not raw:
                    continue
                fields = raw.split()
                # Column 4 holds a Julian Date; convert it to Modified Julian Date
                mjd = int(float(fields[4]) - 2400000.5)
                self.data.append((mjd, float(fields[6])))

    def __getitem__(self, date):
        """TAI-UTC offset in effect at *date* (MJD), or ``None`` when the
        date predates every known leap second."""
        for mjd, value in reversed(self.data):
            if mjd <= date:
                return value

    def get_last_next(self, date):
        """Provide the last and next leap-second events relative to a date.

        Args:
            date (float): Date in MJD
        Return:
            tuple: two ``(mjd, value)`` pairs; ``(None, None)`` stands for
            "no event on that side of *date*"
        """
        past, future = (None, None), (None, None)
        for event in reversed(self.data):
            if event[0] <= date:
                past = event
                break
            future = event
        return past, future
class Finals2000A:
    """History of Earth orientation correction for IAU2000 model.

    Three files are available `here <https://datacenter.iers.org/eop.php>`__
    for this model:

    - **finals2000A.all**, from 1976-01-02 to present, updated weekly
    - **finals2000A.data**, from 1992-01-01 to present, updated weekly
    - **finals2000A.daily**, last 90 days + 90 days of prediction, updated daily

    See the associated metadata for more information about the content of
    these files.
    """

    # Names under which the model's correction offsets are stored.
    # The Finals subclass overrides this with ("dpsi", "deps") for IAU1980.
    deltas = ("dx", "dy")

    def __init__(self, path, encoding="ascii"):
        """Parse a finals fixed-width file.

        Args:
            path (str or Path): location of the file
            encoding (str): encoding used to read the file
        """
        self.path = Path(path)
        d1, d2 = self.deltas

        with self.path.open(encoding=encoding) as fp:
            lines = fp.read().splitlines()

        # One record per integer MJD; other columns of the file
        # (flag at 16, X/Y errors at 27:36 and 46:55) are ignored.
        self.data = {}
        for line in lines:
            line = line.rstrip()
            mjd = int(float(line[7:15]))

            try:
                self.data[mjd] = {
                    "mjd": mjd,
                    "x": float(line[18:27]),
                    d1: None,
                    "y": float(line[37:46]),
                    d2: None,
                    "lod": None,
                    "ut1_utc": float(line[58:68]),
                }
            except ValueError:
                # Common values (X, Y, UT1-UTC) are not available anymore;
                # everything after this point is empty prediction lines
                break
            else:
                try:
                    self.data[mjd][d1] = float(line[97:106])
                    self.data[mjd][d2] = float(line[116:125])
                except ValueError:
                    # dX and dY are not available for this date, so we take
                    # the last value available (None when there is none,
                    # instead of crashing on the very first record)
                    previous = self.data.get(mjd - 1, {})
                    self.data[mjd][d1] = previous.get(d1)
                    self.data[mjd][d2] = previous.get(d2)

                try:
                    self.data[mjd]["lod"] = float(line[79:86])
                except ValueError:
                    # LOD is not available for this date; same fallback
                    self.data[mjd]["lod"] = self.data.get(mjd - 1, {}).get("lod")

    def __getitem__(self, key):
        """Record (dict) for the given integer MJD; KeyError when absent."""
        return self.data[key]

    def items(self):
        """(mjd, record) pairs, like dict.items()."""
        return self.data.items()

    def dates(self):
        """All MJDs covered by the file.

        BUG FIX: this used to call ``self.data.dates()``, which raised
        AttributeError because dict has no ``dates`` method.
        """
        return self.data.keys()
class Finals(Finals2000A):
    """History of Earth orientation correction for IAU1980 model.

    Same file format and parsing as :class:`Finals2000A`, but the correction
    offsets are named 'dpsi'/'deps' instead of 'dx'/'dy'.

    Three files are available `here <https://datacenter.iers.org/eop.php>`__
    for this model:

    - **finals.all**, from 1976-01-02 to present, updated weekly
    - **finals.data**, from 1992-01-01 to present, updated weekly
    - **finals.daily**, last 90 days + 90 days of prediction, updated daily

    See the associated metadata for more information about the content of
    these files.
    """

    deltas = ("dpsi", "deps")
class Eop:
    """Earth Orientation Parameters"""

    # Mandatory fields; the order also fixes the repr layout
    _FIELDS = ("x", "y", "dx", "dy", "deps", "dpsi", "lod", "ut1_utc", "tai_utc")

    def __init__(self, **kwargs):
        # A missing keyword raises KeyError, exactly like direct indexing would
        for field in self._FIELDS:
            setattr(self, field, kwargs[field])

    def __repr__(self):
        args = ", ".join("{}={}".format(f, getattr(self, f)) for f in self._FIELDS)
        return "{}({})".format(type(self).__name__, args)
class EopDb:
    """Class handling the different EOP databases available, in a simple abstraction layer.

    By defining a simple parameter in the config dict, this class will handle the instanciation
    of the database and queries in a transparent manner.

    see :ref:`dbname <eop-dbname>` and :ref:`missing policy <eop-missing-policy>` configurations.
    """

    # Registry: name -> database class (before first use), then either the
    # instance (after successful instanciation) or the raised Exception
    # (after a failed one) — see db() below.
    _dbs = {}

    DEFAULT_DBNAME = "default"
    """Default name used for EOP database lookup."""

    # Accepted values for the 'eop.missing_policy' configuration field
    PASS = "pass"
    WARN = "warning"
    ERROR = "error"
    MIS_DEFAULT = PASS
    """Default behaviour in case of missing value"""

    @classmethod
    def _load_entry_points(cls):
        # Register external databases declared via the 'beyond.eopdb'
        # entry point, exactly once per process.
        if not hasattr(cls, "_entry_points_loaded"):
            # Loading external DB, via entry points
            for entry in iter_entry_points("beyond.eopdb"):
                EopDb.register(entry.load(), entry.name)
            cls._entry_points_loaded = True

    @classmethod
    def db(cls, dbname=None):
        """Retrieve the database

        Args:
            dbname: Specify the name of the database to retrieve. If set to `None`, take the name
                from the configuration (see :ref:`configuration <eop-dbname>`)
        Return:
            object

        Raises:
            EopError: when the name is unknown or the database failed to instanciate
        """
        cls._load_entry_points()

        dbname = dbname or config.get("eop", "dbname", fallback=cls.DEFAULT_DBNAME)

        if dbname not in cls._dbs.keys():
            raise EopError(f"Unknown database '{dbname}'")

        if isclass(cls._dbs[dbname]):
            # Instanciation
            try:
                cls._dbs[dbname] = cls._dbs[dbname]()
            except Exception as e:
                # Keep the exception in cache in order to not retry instanciation
                # every single time EopDb.db() is called, as instanciation
                # of database is generally a time consumming operation.
                # If it failed once, it will most probably fail again
                cls._dbs[dbname] = e

        # A cached Exception means a previous instanciation failed
        if isinstance(cls._dbs[dbname], Exception):
            raise EopError("Problem at database instanciation") from cls._dbs[dbname]

        return cls._dbs[dbname]

    @classmethod
    def get(cls, mjd: float, dbname: str = None) -> Eop:
        """Retrieve Earth Orientation Parameters and timescales differences
        for a given date

        Args:
            mjd: Date expressed as Mean Julian Date
            dbname: Name of the database to use
        Return:
            Eop: Interpolated data for this particuliar MJD
        """
        try:
            value = cls.db(dbname)[mjd]
        except (EopError, KeyError) as e:
            if isinstance(e, KeyError):
                msg = f"Missing EOP data for mjd = '{e}'"
            else:
                msg = str(e)
            # Missing data handling is configurable: ignore, warn, or re-raise
            if cls.policy() == cls.WARN:
                log.warning(msg)
            elif cls.policy() == cls.ERROR:
                raise

            # PASS/WARN policies fall back on neutral (all-zero) parameters
            value = Eop(
                x=0, y=0, dx=0, dy=0, deps=0, dpsi=0, lod=0, ut1_utc=0, tai_utc=0
            )

        return value

    @classmethod
    def policy(cls):
        # Read and validate the configured missing-value policy
        pol = config.get("eop", "missing_policy", fallback=cls.MIS_DEFAULT)
        if pol not in (cls.PASS, cls.WARN, cls.ERROR):
            raise ConfigError("Unknown config value for 'eop.missing_policy'")
        return pol

    @classmethod
    def register(cls, klass, name=DEFAULT_DBNAME):
        """Register an Eop Database

        The only requirement of this database is that it should have ``__getitem__``
        method accepting MJD as float.
        """
        if name in cls._dbs:
            # First registration wins; duplicates are ignored with a warning
            msg = f"'{name}' is already registered for an Eop database. Skipping"
            log.warning(msg)
        else:
            cls._dbs[name] = klass
def register(name=EopDb.DEFAULT_DBNAME):
    """Decorator registering an Eop database class with :class:`EopDb`.

    Usable both as a bare decorator (``@register``, registered under the
    default name) and as a decorator factory (``@register('mydatabase')``):
    when called without parentheses, *name* is actually the class being
    decorated, so the type of *name* disambiguates the two modes.

    Example:

    .. code-block:: python

        @register
        class SqliteEnvDatabase:
            # sqlite implementation
            # this database will be known as 'default'

        @register('json')
        class JsonEnvDatabase:
            # JSON implementation

        EopDb.get(58090.2)                   # get Eop from SqliteEnvDatabase
        EopDb.get(58090.2, dbname='default') # same as above
        EopDb.get(58090.2, dbname='json')    # get Eop from JsonEnvDatabase
    """
    if not isinstance(name, str):
        # Bare decorator mode: 'name' is the class itself
        klass = name
        EopDb.register(klass)
        return klass

    # Decorator-with-argument mode: return the actual decorator
    def wrapper(klass):
        EopDb.register(klass, name)
        return klass

    return wrapper
@register
class SimpleEopDatabase:
    """Simple implementation of database.

    Uses ``tai-utc.dat``, ``finals.all`` and ``finals2000A.all`` files directly
    without caching nor interpolation.

    In order to use these files, you have to provide the directory containing them as a config
    variable. Optionally, you can provide the type of data you want to extract from finals files
    ('all', 'data' or 'daily').

    .. code-block:: python

        from beyond.config import config
        config.update({
            'eop': {
                'folder': "/path/to/eop/data/",
                'type': "all"
            }
        })
    """

    def __init__(self):
        folder = Path(config.get("eop", "folder", fallback=Path.cwd()))
        ftype = config.get("eop", "type", fallback="all")

        # Data reading
        finals1980 = Finals(folder / f"finals.{ftype}")
        finals2000a = Finals2000A(folder / f"finals2000A.{ftype}")
        leap_seconds = TaiUtc(folder / "tai-utc.dat")

        # Merge both finals files into a single dict keyed by MJD
        self._finals = {}
        for date, values in finals1980.items():
            merged = values
            merged.update(finals2000a[date])
            self._finals[date] = merged

        self._tai_utc = leap_seconds.data.copy()

    def __getitem__(self, mjd):
        """Eop record for *mjd*; KeyError when no data covers that date."""
        record = self.finals(mjd)
        record["tai_utc"] = self.tai_utc(mjd)
        return Eop(**record)

    def finals(self, mjd: float):
        """Copy of the merged finals record for the integer part of *mjd*."""
        return self._finals[int(mjd)].copy()

    def tai_utc(self, mjd: float):
        """TAI-UTC offset applicable at *mjd*; KeyError when *mjd* predates
        every known leap second."""
        for date, value in reversed(self._tai_utc):
            if date <= mjd:
                return value
        raise KeyError(mjd)
| |
# Copyright (c) 2012-2019, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
#
from . import AWSObject
from . import AWSProperty
from troposphere import Tags
from .validators import integer
class AudioLanguageSelection(AWSProperty):
    """MediaLive ``AudioLanguageSelection`` property.

    Per troposphere convention, ``props`` maps each CloudFormation property
    name to a ``(type, required)`` pair; ``False`` marks it optional.
    ``basestring`` indicates this module targets Python 2.
    """
    props = {
        'LanguageCode': (basestring, False),
        'LanguageSelectionPolicy': (basestring, False),
    }


class AudioPidSelection(AWSProperty):
    """MediaLive ``AudioPidSelection`` property."""
    props = {
        'Pid': (integer, False),
    }


class AudioSelectorSettings(AWSProperty):
    """Settings container for an ``AudioSelector`` (by language or by PID)."""
    props = {
        'AudioLanguageSelection': (AudioLanguageSelection, False),
        'AudioPidSelection': (AudioPidSelection, False),
    }


class AudioSelector(AWSProperty):
    """MediaLive ``AudioSelector`` property: a named selector plus settings."""
    props = {
        'Name': (basestring, False),
        'SelectorSettings': (AudioSelectorSettings, False),
    }
class AribSourceSettings(AWSProperty):
    """MediaLive ``AribSourceSettings`` property (no configurable fields)."""
    props = {
    }


class DvbSubSourceSettings(AWSProperty):
    """MediaLive ``DvbSubSourceSettings`` property."""
    props = {
        'Pid': (integer, False),
    }


class EmbeddedSourceSettings(AWSProperty):
    """MediaLive ``EmbeddedSourceSettings`` property."""
    props = {
        'Convert608To708': (basestring, False),
        'Scte20Detection': (basestring, False),
        'Source608ChannelNumber': (integer, False),
        'Source608TrackNumber': (integer, False),
    }


class Scte20SourceSettings(AWSProperty):
    """MediaLive ``Scte20SourceSettings`` property."""
    props = {
        'Convert608To708': (basestring, False),
        'Source608ChannelNumber': (integer, False),
    }


class Scte27SourceSettings(AWSProperty):
    """MediaLive ``Scte27SourceSettings`` property."""
    props = {
        'Pid': (integer, False),
    }


class TeletextSourceSettings(AWSProperty):
    """MediaLive ``TeletextSourceSettings`` property."""
    props = {
        'PageNumber': (basestring, False),
    }
class CaptionSelectorSettings(AWSProperty):
    """Settings container for a ``CaptionSelector``; one entry per caption
    source kind defined above."""
    props = {
        'AribSourceSettings': (AribSourceSettings, False),
        'DvbSubSourceSettings': (DvbSubSourceSettings, False),
        'EmbeddedSourceSettings': (EmbeddedSourceSettings, False),
        'Scte20SourceSettings': (Scte20SourceSettings, False),
        'Scte27SourceSettings': (Scte27SourceSettings, False),
        'TeletextSourceSettings': (TeletextSourceSettings, False),
    }


class CaptionSelector(AWSProperty):
    """MediaLive ``CaptionSelector`` property."""
    props = {
        'LanguageCode': (basestring, False),
        'Name': (basestring, False),
        'SelectorSettings': (CaptionSelectorSettings, False),
    }
class HlsInputSettings(AWSProperty):
    """MediaLive ``HlsInputSettings`` property."""
    props = {
        'Bandwidth': (integer, False),
        'BufferSegments': (integer, False),
        'Retries': (integer, False),
        'RetryInterval': (integer, False),
    }


class NetworkInputSettings(AWSProperty):
    """MediaLive ``NetworkInputSettings`` property."""
    props = {
        'HlsInputSettings': (HlsInputSettings, False),
        'ServerValidation': (basestring, False),
    }


class VideoSelectorPid(AWSProperty):
    """MediaLive ``VideoSelectorPid`` property."""
    props = {
        'Pid': (integer, False),
    }


class VideoSelectorProgramId(AWSProperty):
    """MediaLive ``VideoSelectorProgramId`` property."""
    props = {
        'ProgramId': (integer, False),
    }


class VideoSelectorSettings(AWSProperty):
    """Settings container for a ``VideoSelector`` (by PID or by program id)."""
    props = {
        'VideoSelectorPid': (VideoSelectorPid, False),
        'VideoSelectorProgramId': (VideoSelectorProgramId, False),
    }


class VideoSelector(AWSProperty):
    """MediaLive ``VideoSelector`` property."""
    props = {
        'ColorSpace': (basestring, False),
        'ColorSpaceUsage': (basestring, False),
        'SelectorSettings': (VideoSelectorSettings, False),
    }
class InputSettings(AWSProperty):
    """MediaLive ``InputSettings`` property; list-typed entries accept a
    list of the given property class."""
    props = {
        'AudioSelectors': ([AudioSelector], False),
        'CaptionSelectors': ([CaptionSelector], False),
        'DeblockFilter': (basestring, False),
        'DenoiseFilter': (basestring, False),
        'FilterStrength': (integer, False),
        'InputFilter': (basestring, False),
        'NetworkInputSettings': (NetworkInputSettings, False),
        'SourceEndBehavior': (basestring, False),
        'VideoSelector': (VideoSelector, False),
    }


class InputAttachment(AWSProperty):
    """MediaLive ``InputAttachment`` property."""
    props = {
        'InputAttachmentName': (basestring, False),
        'InputId': (basestring, False),
        'InputSettings': (InputSettings, False),
    }


class InputSpecification(AWSProperty):
    """MediaLive ``InputSpecification`` property."""
    props = {
        'Codec': (basestring, False),
        'MaximumBitrate': (basestring, False),
        'Resolution': (basestring, False),
    }
class MediaPackageOutputDestinationSettings(AWSProperty):
    """MediaLive ``MediaPackageOutputDestinationSettings`` property."""
    props = {
        'ChannelId': (basestring, False),
    }


class OutputDestinationSettings(AWSProperty):
    """MediaLive ``OutputDestinationSettings`` property."""
    props = {
        'PasswordParam': (basestring, False),
        'StreamName': (basestring, False),
        'Url': (basestring, False),
        'Username': (basestring, False),
    }


class OutputDestination(AWSProperty):
    """MediaLive ``OutputDestination`` property."""
    props = {
        'Id': (basestring, False),
        'MediaPackageSettings':
            ([MediaPackageOutputDestinationSettings], False),
        'Settings': ([OutputDestinationSettings], False),
    }
class Channel(AWSObject):
    """``AWS::MediaLive::Channel`` resource.

    Note: ``EncoderSettings`` is declared as a raw ``dict``, i.e. it is
    passed through without property-level validation.
    """
    resource_type = "AWS::MediaLive::Channel"

    props = {
        'ChannelClass': (basestring, False),
        'Destinations': ([OutputDestination], False),
        'EncoderSettings': (dict, False),
        'InputAttachments': ([InputAttachment], False),
        'InputSpecification': (InputSpecification, False),
        'LogLevel': (basestring, False),
        'Name': (basestring, False),
        'RoleArn': (basestring, False),
        'Tags': (Tags, False),
    }
class InputDestinationRequest(AWSProperty):
    """MediaLive ``InputDestinationRequest`` property."""
    props = {
        'StreamName': (basestring, False),
    }


class InputSourceRequest(AWSProperty):
    """MediaLive ``InputSourceRequest`` property."""
    props = {
        'PasswordParam': (basestring, False),
        'Url': (basestring, False),
        'Username': (basestring, False),
    }


class InputVpcRequest(AWSProperty):
    """MediaLive ``InputVpcRequest`` property."""
    props = {
        'SecurityGroupIds': ([basestring], False),
        'SubnetIds': ([basestring], False),
    }


class MediaConnectFlowRequest(AWSProperty):
    """MediaLive ``MediaConnectFlowRequest`` property."""
    props = {
        'FlowArn': (basestring, False),
    }
class Input(AWSObject):
    """``AWS::MediaLive::Input`` resource."""
    resource_type = "AWS::MediaLive::Input"

    props = {
        'Destinations': ([InputDestinationRequest], False),
        'InputSecurityGroups': ([basestring], False),
        'MediaConnectFlows': ([MediaConnectFlowRequest], False),
        'Name': (basestring, False),
        'RoleArn': (basestring, False),
        'Sources': ([InputSourceRequest], False),
        'Tags': (Tags, False),
        'Type': (basestring, False),
        'Vpc': (InputVpcRequest, False),
    }
class InputWhitelistRuleCidr(AWSProperty):
    """MediaLive ``InputWhitelistRuleCidr`` property."""
    props = {
        'Cidr': (basestring, False),
    }


class InputSecurityGroup(AWSObject):
    """``AWS::MediaLive::InputSecurityGroup`` resource."""
    resource_type = "AWS::MediaLive::InputSecurityGroup"

    props = {
        'Tags': (Tags, False),
        'WhitelistRules': ([InputWhitelistRuleCidr], False),
    }
| |
#!/usr/bin/env ambari-python-wrap
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.core.logger import Logger
import json
import math
import re
from resource_management.libraries.functions import format
class ADH15StackAdvisor(ADH14StackAdvisor):
    def __init__(self):
        """Initialise the advisor and make sure the shared Logger is ready."""
        super(ADH15StackAdvisor, self).__init__()
        Logger.initialize_logger()
def getServiceConfigurationRecommenderDict(self):
parentRecommendConfDict = super(ADH15StackAdvisor, self).getServiceConfigurationRecommenderDict()
childRecommendConfDict = {
"DRUID": self.recommendDruidConfigurations,
"SUPERSET": self.recommendSupersetConfigurations,
"ATLAS": self.recommendAtlasConfigurations,
"TEZ": self.recommendTezConfigurations,
"RANGER": self.recommendRangerConfigurations,
"RANGER_KMS": self.recommendRangerKMSConfigurations,
"HDFS": self.recommendHDFSConfigurations,
"HIVE": self.recommendHIVEConfigurations,
"HBASE": self.recommendHBASEConfigurations,
"YARN": self.recommendYARNConfigurations,
"KAFKA": self.recommendKAFKAConfigurations,
"STORM": self.recommendSTORMConfigurations,
"SPARK2": self.recommendSPARK2Configurations,
"ZEPPELIN": self.recommendZEPPELINConfigurations
}
parentRecommendConfDict.update(childRecommendConfDict)
return parentRecommendConfDict
def recommendSTORMConfigurations(self, configurations, clusterData, services, hosts):
"""
In HDF-2.6.1 we introduced a new way of doing Auto Credentials with services such as
HDFS, HIVE, HBASE. This method will update the required configs for autocreds if the users installs
STREAMLINE service.
"""
super(ADH15StackAdvisor, self).recommendStormConfigurations(configurations, clusterData, services, hosts)
storm_site = self.getServicesSiteProperties(services, "storm-site")
storm_env = self.getServicesSiteProperties(services, "storm-env")
putStormSiteProperty = self.putProperty(configurations, "storm-site", services)
putStormSiteAttributes = self.putPropertyAttribute(configurations, "storm-site")
security_enabled = self.isSecurityEnabled(services)
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
if storm_env and storm_site and security_enabled and 'STREAMLINE' in servicesList:
storm_nimbus_impersonation_acl = storm_site["nimbus.impersonation.acl"] if "nimbus.impersonation.acl" in storm_site else None
streamline_env = self.getServicesSiteProperties(services, "streamline-env")
if streamline_env:
_streamline_principal_name = streamline_env['streamline_principal_name'] if 'streamline_principal_name' in streamline_env else None
if _streamline_principal_name is not None and storm_nimbus_impersonation_acl is not None:
streamline_bare_principal = get_bare_principal(_streamline_principal_name)
storm_nimbus_impersonation_acl.replace('{{streamline_bare_principal}}', streamline_bare_principal)
putStormSiteProperty('nimbus.impersonation.acl', storm_nimbus_impersonation_acl)
storm_nimbus_autocred_plugin_classes = storm_site["nimbus.autocredential.plugins.classes"] if "nimbus.autocredential.plugins.classes" in storm_site else None
if storm_nimbus_autocred_plugin_classes is not None:
new_storm_nimbus_autocred_plugin_classes = ['org.apache.storm.hdfs.security.AutoHDFS',
'org.apache.storm.hbase.security.AutoHBase',
'org.apache.storm.hive.security.AutoHive']
new_conf = DefaultStackAdvisor.appendToYamlString(storm_nimbus_autocred_plugin_classes,
new_storm_nimbus_autocred_plugin_classes)
putStormSiteProperty("nimbus.autocredential.plugins.classes", new_conf)
else:
putStormSiteProperty("nimbus.autocredential.plugins.classes", "['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']")
storm_nimbus_credential_renewer_classes = storm_site["nimbus.credential.renewers.classes"] if "nimbus.credential.renewers.classes" in storm_site else None
if storm_nimbus_credential_renewer_classes is not None:
new_storm_nimbus_credential_renewer_classes_array = ['org.apache.storm.hdfs.security.AutoHDFS',
'org.apache.storm.hbase.security.AutoHBase',
'org.apache.storm.hive.security.AutoHive']
new_conf = DefaultStackAdvisor.appendToYamlString(storm_nimbus_credential_renewer_classes,
new_storm_nimbus_credential_renewer_classes_array)
putStormSiteProperty("nimbus.autocredential.plugins.classes", new_conf)
else:
putStormSiteProperty("nimbus.credential.renewers.classes", "['org.apache.storm.hdfs.security.AutoHDFS', 'org.apache.storm.hbase.security.AutoHBase', 'org.apache.storm.hive.security.AutoHive']")
putStormSiteProperty("nimbus.credential.renewers.freq.secs", "82800")
pass
    def recommendSPARK2Configurations(self, configurations, clusterData, services, hosts):
        """Extend the parent stack's Spark2 recommendations, then apply the
        Zeppelin/Livy2 superusers adjustment.

        :type configurations dict
        :type clusterData dict
        :type services dict
        :type hosts dict
        """
        super(ADH15StackAdvisor, self).recommendSpark2Configurations(configurations, clusterData, services, hosts)
        # NOTE(review): __addZeppelinToLivy2SuperUsers is not visible in this
        # chunk — presumably defined further down this class; confirm
        self.__addZeppelinToLivy2SuperUsers(configurations, services)
    def recommendZEPPELINConfigurations(self, configurations, clusterData, services, hosts):
        """Extend the parent stack's Zeppelin recommendations, then apply the
        Zeppelin/Livy2 superusers adjustment.

        :type configurations dict
        :type clusterData dict
        :type services dict
        :type hosts dict
        """
        super(ADH15StackAdvisor, self).recommendZeppelinConfigurations(configurations, clusterData, services, hosts)
        # NOTE(review): __addZeppelinToLivy2SuperUsers is not visible in this
        # chunk — presumably defined further down this class; confirm
        self.__addZeppelinToLivy2SuperUsers(configurations, services)
def recommendAtlasConfigurations(self, configurations, clusterData, services, hosts):
super(ADH15StackAdvisor, self).recommendAtlasConfigurations(configurations, clusterData, services, hosts)
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
putAtlasApplicationProperty = self.putProperty(configurations, "application-properties", services)
knox_host = 'localhost'
knox_port = '8443'
if 'KNOX' in servicesList:
knox_hosts = self.getComponentHostNames(services, "KNOX", "KNOX_GATEWAY")
if len(knox_hosts) > 0:
knox_hosts.sort()
knox_host = knox_hosts[0]
if 'gateway-site' in services['configurations'] and 'gateway.port' in services['configurations']["gateway-site"]["properties"]:
knox_port = services['configurations']["gateway-site"]["properties"]['gateway.port']
putAtlasApplicationProperty('atlas.sso.knox.providerurl', 'https://{0}:{1}/gateway/knoxsso/api/v1/websso'.format(knox_host, knox_port))
knox_service_user = ''
if 'KNOX' in servicesList and 'knox-env' in services['configurations']:
knox_service_user = services['configurations']['knox-env']['properties']['knox_user']
else:
knox_service_user = 'knox'
putAtlasApplicationProperty('atlas.proxyusers',knox_service_user)
    def recommendDruidConfigurations(self, configurations, clusterData, services, hosts):
        """Recommend druid-common, druid-env and per-component Druid properties:
        ZooKeeper quorum, metadata storage URI/port, extensions load list,
        deep storage, and processing thread/buffer sizing."""
        # druid is not in list of services to be installed
        if 'druid-common' not in services['configurations']:
            return

        componentsListList = [service["components"] for service in services["services"]]
        componentsList = [item["StackServiceComponents"] for sublist in componentsListList for item in sublist]
        servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
        putCommonProperty = self.putProperty(configurations, "druid-common", services)

        putCommonProperty('druid.zk.service.host', self.getZKHostPortString(services))
        self.recommendDruidMaxMemoryLimitConfigurations(configurations, clusterData, services, hosts)

        # recommending the metadata storage uri
        database_name = services['configurations']["druid-common"]["properties"]["database_name"]
        metastore_hostname = services['configurations']["druid-common"]["properties"]["metastore_hostname"]
        database_type = services['configurations']["druid-common"]["properties"]["druid.metadata.storage.type"]
        # 1527 is used when the type is neither mysql nor postgresql (derby)
        metadata_storage_port = "1527"
        mysql_module_name = "mysql-metadata-storage"
        postgres_module_name = "postgresql-metadata-storage"
        extensions_load_list = services['configurations']['druid-common']['properties']['druid.extensions.loadList']
        # NOTE(review): duplicate of putCommonProperty above (same section);
        # both write to druid-common
        putDruidCommonProperty = self.putProperty(configurations, "druid-common", services)

        # Drop both storage modules, then re-add only the configured one
        extensions_load_list = self.removeFromList(extensions_load_list, mysql_module_name)
        extensions_load_list = self.removeFromList(extensions_load_list, postgres_module_name)

        if database_type == 'mysql':
            metadata_storage_port = "3306"
            extensions_load_list = self.addToList(extensions_load_list, mysql_module_name)

        if database_type == 'postgresql':
            extensions_load_list = self.addToList(extensions_load_list, postgres_module_name)
            metadata_storage_port = "5432"

        putDruidCommonProperty('druid.metadata.storage.connector.port', metadata_storage_port)
        putDruidCommonProperty('druid.metadata.storage.connector.connectURI',
                               self.getMetadataConnectionString(database_type).format(metastore_hostname, database_name,
                                                                                      metadata_storage_port))
        # HDFS is installed
        if "HDFS" in servicesList and "hdfs-site" in services["configurations"]:
            # recommend HDFS as default deep storage
            extensions_load_list = self.addToList(extensions_load_list, "druid-hdfs-storage")
            putCommonProperty("druid.storage.type", "hdfs")
            putCommonProperty("druid.storage.storageDirectory", "/apps/druid/warehouse")
            # configure indexer logs configs
            putCommonProperty("druid.indexer.logs.type", "hdfs")
            putCommonProperty("druid.indexer.logs.directory", "/user/druid/logs")

        if "KAFKA" in servicesList:
            extensions_load_list = self.addToList(extensions_load_list, "druid-kafka-indexing-service")

        if 'AMBARI_METRICS' in servicesList:
            extensions_load_list = self.addToList(extensions_load_list, "ambari-metrics-emitter")

        putCommonProperty('druid.extensions.loadList', extensions_load_list)

        # JVM Configs go to env properties
        putEnvProperty = self.putProperty(configurations, "druid-env", services)

        # processing thread pool and memory configs
        for component in ['DRUID_HISTORICAL', 'DRUID_BROKER']:
            component_hosts = self.getHostsWithComponent("DRUID", component, services, hosts)
            nodeType = self.DRUID_COMPONENT_NODE_TYPE_MAP[component]
            # format() here is resource_management's, interpolating {nodeType}
            putComponentProperty = self.putProperty(configurations, format("druid-{nodeType}"), services)
            if (component_hosts is not None and len(component_hosts) > 0):
                totalAvailableCpu = self.getMinCpu(component_hosts)
                # leave one core free when more than one is available
                processingThreads = 1
                if totalAvailableCpu > 1:
                    processingThreads = totalAvailableCpu - 1
                # NOTE: Python 2 integer division intended in these formulas
                numMergeBuffers = max(2, processingThreads/4)
                putComponentProperty('druid.processing.numThreads', processingThreads)
                putComponentProperty('druid.server.http.numThreads', max(10, (totalAvailableCpu * 17) / 16 + 2) + 30)
                putComponentProperty('druid.processing.numMergeBuffers', numMergeBuffers)
                totalAvailableMemInMb = self.getMinMemory(component_hosts) / 1024
                maxAvailableBufferSizeInMb = totalAvailableMemInMb/(processingThreads + numMergeBuffers)
                putComponentProperty('druid.processing.buffer.sizeBytes', self.getDruidProcessingBufferSizeInMb(maxAvailableBufferSizeInMb) * 1024 * 1024)
# returns the recommended druid processing buffer size in Mb.
# the recommended buffer size is kept lower then the max available memory to have enough free memory to load druid data.
# for low memory nodes, the actual allocated buffer size is small to keep some free memory for memory mapping of segments
# If user installs all druid processes on a single node, memory available for loading segments will be further decreased.
def getDruidProcessingBufferSizeInMb(self, maxAvailableBufferSizeInMb):
if maxAvailableBufferSizeInMb <= 256:
return min(64, maxAvailableBufferSizeInMb)
elif maxAvailableBufferSizeInMb <= 1024:
return 128
elif maxAvailableBufferSizeInMb <= 2048:
return 256
elif maxAvailableBufferSizeInMb <= 6144:
return 512
# High Memory nodes below
else :
return 1024
def recommendSupersetConfigurations(self, configurations, clusterData, services, hosts):
# superset is in list of services to be installed
if 'superset' in services['configurations']:
# Recommendations for Superset
superset_database_type = services['configurations']["superset"]["properties"]["SUPERSET_DATABASE_TYPE"]
putSupersetProperty = self.putProperty(configurations, "superset", services)
if superset_database_type == "mysql":
putSupersetProperty("SUPERSET_DATABASE_PORT", "3306")
elif superset_database_type == "postgresql":
putSupersetProperty("SUPERSET_DATABASE_PORT", "5432")
    def recommendYARNConfigurations(self, configurations, clusterData, services, hosts):
        """Stack-specific YARN recommendations layered on the parent advisor's:
        Tez ATS plugin, hive kill-escape user, preemption knobs, Ranger repo
        user, ATS cache/heap sizing, llap queue handling and timeline plugins."""
        super(ADH15StackAdvisor, self).recommendYARNConfigurations(configurations, clusterData, services, hosts)
        putYarnSiteProperty = self.putProperty(configurations, "yarn-site", services)
        putYarnEnvProperty = self.putProperty(configurations, "yarn-env", services)
        servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
        # ('poperties' typo kept — local name only)
        hsi_env_poperties = self.getServicesSiteProperties(services, "hive-interactive-env")

        # Register the Tez ATS cache plugin only when tez-site is present
        if "tez-site" not in services["configurations"]:
            putYarnSiteProperty('yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes', '')
        else:
            putYarnSiteProperty('yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes', 'org.apache.tez.dag.history.logging.ats.TimelineCachePluginImpl')

        # Keep the hive user listed in yarn.nodemanager.kill-escape.user,
        # replacing a previously configured (renamed) hive user
        if 'HIVE' in servicesList and "yarn-site" in services["configurations"] and "yarn.nodemanager.kill-escape.user" in \
                services["configurations"]["yarn-site"]["properties"] and 'hive-env' in services['configurations'] and \
                'hive_user' in services['configurations']['hive-env']['properties']:
            hive_user_name = services['configurations']['hive-env']['properties']['hive_user']
            # NOTE(review): getOldValue is not defined in this chunk — presumably a module-level helper; confirm
            old_hive_user_name = getOldValue(self, services, "hive-env", "hive_user")
            yarn_nm_kill_escape_user = services["configurations"]["yarn-site"]["properties"]["yarn.nodemanager.kill-escape.user"]
            if not hive_user_name in yarn_nm_kill_escape_user:
                if not yarn_nm_kill_escape_user or yarn_nm_kill_escape_user.strip() == "":
                    yarn_nm_kill_escape_user = hive_user_name
                else:
                    escape_user_names = yarn_nm_kill_escape_user.split(",")
                    if old_hive_user_name in escape_user_names:
                        escape_user_names.remove(old_hive_user_name)
                    escape_user_names.append(hive_user_name)
                    yarn_nm_kill_escape_user = ",".join(escape_user_names)
                putYarnSiteProperty("yarn.nodemanager.kill-escape.user", yarn_nm_kill_escape_user)

        # Mirror the scheduler-monitor switch onto the underutilized-preemption flag
        if "yarn-site" in services["configurations"] and \
                "yarn.resourcemanager.scheduler.monitor.enable" in services["configurations"]["yarn-site"]["properties"]:
            scheduler_monitor_enabled = services["configurations"]["yarn-site"]["properties"]["yarn.resourcemanager.scheduler.monitor.enable"]
            if scheduler_monitor_enabled.lower() == 'true':
                putYarnSiteProperty('yarn.scheduler.capacity.ordering-policy.priority-utilization.underutilized-preemption.enabled', "true")
            else:
                putYarnSiteProperty('yarn.scheduler.capacity.ordering-policy.priority-utilization.underutilized-preemption.enabled', "false")

        # calculate total_preemption_per_round: 1/cluster-size, floored at 0.1
        total_preemption_per_round = str(round(max(float(1)/len(hosts['items']), 0.1),2))
        putYarnSiteProperty('yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round', total_preemption_per_round)

        if 'yarn-env' in services['configurations'] and 'yarn_user' in services['configurations']['yarn-env']['properties']:
            yarn_user = services['configurations']['yarn-env']['properties']['yarn_user']
        else:
            yarn_user = 'yarn'

        # Ranger plugin flag: prefer freshly recommended configs over service configs
        if 'ranger-yarn-plugin-properties' in configurations and 'ranger-yarn-plugin-enabled' in configurations['ranger-yarn-plugin-properties']['properties']:
            ranger_yarn_plugin_enabled = (configurations['ranger-yarn-plugin-properties']['properties']['ranger-yarn-plugin-enabled'].lower() == 'Yes'.lower())
        elif 'ranger-yarn-plugin-properties' in services['configurations'] and 'ranger-yarn-plugin-enabled' in services['configurations']['ranger-yarn-plugin-properties']['properties']:
            ranger_yarn_plugin_enabled = (services['configurations']['ranger-yarn-plugin-properties']['properties']['ranger-yarn-plugin-enabled'].lower() == 'Yes'.lower())
        else:
            ranger_yarn_plugin_enabled = False

        if ranger_yarn_plugin_enabled and 'ranger-yarn-plugin-properties' in services['configurations'] and 'REPOSITORY_CONFIG_USERNAME' in services['configurations']['ranger-yarn-plugin-properties']['properties']:
            Logger.info("Setting Yarn Repo user for Ranger.")
            putRangerYarnPluginProperty = self.putProperty(configurations, "ranger-yarn-plugin-properties", services)
            putRangerYarnPluginProperty("REPOSITORY_CONFIG_USERNAME",yarn_user)
        else:
            Logger.info("Not setting Yarn Repo user for Ranger.")

        # --- ATS app-cache-size / heapsize sizing ---
        yarn_timeline_app_cache_size = None
        host_mem = None
        # memory of the first host stands in for the cluster's hosts
        for host in hosts["items"]:
            host_mem = host["Hosts"]["total_mem"]
            break
        # Check if 'yarn.timeline-service.entity-group-fs-store.app-cache-size' in changed configs.
        changed_configs_has_ats_cache_size = self.isConfigPropertiesChanged(
            services, "yarn-site", ['yarn.timeline-service.entity-group-fs-store.app-cache-size'], False)
        # Check if it's : 1. 'apptimelineserver_heapsize' changed detected in changed-configurations)
        # OR 2. cluster initialization (services['changed-configurations'] should be empty in this case)
        if changed_configs_has_ats_cache_size:
            yarn_timeline_app_cache_size = self.read_yarn_apptimelineserver_cache_size(services)
        elif 0 == len(services['changed-configurations']):
            # Fetch host memory from 1st host, to be used for ATS config calculations below.
            if host_mem is not None:
                yarn_timeline_app_cache_size = self.calculate_yarn_apptimelineserver_cache_size(host_mem)
                putYarnSiteProperty('yarn.timeline-service.entity-group-fs-store.app-cache-size', yarn_timeline_app_cache_size)
                Logger.info("Updated YARN config 'yarn.timeline-service.entity-group-fs-store.app-cache-size' as : {0}, "
                            "using 'host_mem' = {1}".format(yarn_timeline_app_cache_size, host_mem))
            else:
                Logger.info("Couldn't update YARN config 'yarn.timeline-service.entity-group-fs-store.app-cache-size' as "
                            "'host_mem' read = {0}".format(host_mem))

        if yarn_timeline_app_cache_size is not None:
            # Calculation for 'ats_heapsize' is in MB.
            ats_heapsize = self.calculate_yarn_apptimelineserver_heapsize(host_mem, yarn_timeline_app_cache_size)
            putYarnEnvProperty('apptimelineserver_heapsize', ats_heapsize) # Value in MB
            Logger.info("Updated YARN config 'apptimelineserver_heapsize' as : {0}, ".format(ats_heapsize))

        # Queue 'llap' creation/removal logic (Used by Hive Interactive server and associated LLAP)
        if hsi_env_poperties and 'enable_hive_interactive' in hsi_env_poperties:
            enable_hive_interactive = hsi_env_poperties['enable_hive_interactive']
            LLAP_QUEUE_NAME = 'llap'
            # Hive Server interactive is already added or getting added
            if enable_hive_interactive == 'true':
                self.updateLlapConfigs(configurations, services, hosts, LLAP_QUEUE_NAME)
            else: # When Hive Interactive Server is in 'off/removed' state.
                self.checkAndStopLlapQueue(services, configurations, LLAP_QUEUE_NAME)

        # NOTE(review): redundant re-binding — putYarnSiteProperty was already created above
        putYarnSiteProperty = self.putProperty(configurations, "yarn-site", services)
        timeline_plugin_classes_values = []
        timeline_plugin_classpath_values = []
        if self.__isServiceDeployed(services, "TEZ"):
            timeline_plugin_classes_values.append('org.apache.tez.dag.history.logging.ats.TimelineCachePluginImpl')
        if self.__isServiceDeployed(services, "SPARK"):
            timeline_plugin_classes_values.append('org.apache.spark.deploy.history.yarn.plugin.SparkATSPlugin')
            # NOTE(review): 'stack_root' is not defined in this method or chunk —
            # this raises NameError when SPARK is deployed; confirm its origin
            timeline_plugin_classpath_values.append(stack_root + "/spark/jars/*")
        putYarnSiteProperty('yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes', ",".join(timeline_plugin_classes_values))
        putYarnSiteProperty('yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath', ":".join(timeline_plugin_classpath_values))
"""
Calculate YARN config 'apptimelineserver_heapsize' in MB.
"""
def calculate_yarn_apptimelineserver_heapsize(self, host_mem, yarn_timeline_app_cache_size):
ats_heapsize = None
if host_mem < 4096:
ats_heapsize = 1024
else:
ats_heapsize = long(min(math.floor(host_mem/2), long(yarn_timeline_app_cache_size) * 500 + 3072))
return ats_heapsize
"""
Calculates for YARN config 'yarn.timeline-service.entity-group-fs-store.app-cache-size', based on YARN's NodeManager size.
"""
def calculate_yarn_apptimelineserver_cache_size(self, host_mem):
yarn_timeline_app_cache_size = None
if host_mem < 4096:
yarn_timeline_app_cache_size = 3
elif host_mem >= 4096 and host_mem < 8192:
yarn_timeline_app_cache_size = 7
elif host_mem >= 8192:
yarn_timeline_app_cache_size = 10
Logger.info("Calculated and returning 'yarn_timeline_app_cache_size' : {0}".format(yarn_timeline_app_cache_size))
return yarn_timeline_app_cache_size
"""
Reads YARN config 'yarn.timeline-service.entity-group-fs-store.app-cache-size'.
"""
def read_yarn_apptimelineserver_cache_size(self, services):
"""
:type services dict
:rtype str
"""
yarn_ats_app_cache_size = None
yarn_ats_app_cache_size_config = "yarn.timeline-service.entity-group-fs-store.app-cache-size"
yarn_site_in_services = self.getServicesSiteProperties(services, "yarn-site")
if yarn_site_in_services and yarn_ats_app_cache_size_config in yarn_site_in_services:
yarn_ats_app_cache_size = yarn_site_in_services[yarn_ats_app_cache_size_config]
Logger.info("'yarn.scheduler.minimum-allocation-mb' read from services as : {0}".format(yarn_ats_app_cache_size))
if not yarn_ats_app_cache_size:
Logger.error("'{0}' was not found in the services".format(yarn_ats_app_cache_size_config))
return yarn_ats_app_cache_size
def getMetadataConnectionString(self, database_type):
    """Return the JDBC connection-string template for *database_type*
    (case-insensitive), or None for an unknown database type.
    """
    templates = {
        'mysql': 'jdbc:mysql://{0}:{2}/{1}?createDatabaseIfNotExist=true',
        'derby': 'jdbc:derby://{0}:{2}/{1};create=true',
        'postgresql': 'jdbc:postgresql://{0}:{2}/{1}'
    }
    return templates.get(database_type.lower())
def addToList(self, json_list, word):
    """Return *json_list* (a JSON array string) with *word* appended if absent."""
    items = json.loads(json_list)
    if word not in items:
        items.append(word)
    return json.dumps(items)
def removeFromList(self, json_list, word):
    """Return *json_list* (a JSON array string) with *word* removed if present."""
    items = json.loads(json_list)
    if word in items:
        items.remove(word)
    return json.dumps(items)
def recommendDruidMaxMemoryLimitConfigurations(self, configurations, clusterData, services, hosts):
    # Cap each Druid component's JVM heap 'maximum' attribute at the smallest
    # amount of RAM (converted to MB) available on any host running that component,
    # never below 1024 MB.
    putEnvPropertyAttribute = self.putPropertyAttribute(configurations, "druid-env")
    for component in ["DRUID_HISTORICAL", "DRUID_MIDDLEMANAGER", "DRUID_BROKER", "DRUID_OVERLORD",
                      "DRUID_COORDINATOR"]:
        component_hosts = self.getHostsWithComponent("DRUID", component, services, hosts)
        if component_hosts is not None and len(component_hosts) > 0:
            totalAvailableMem = self.getMinMemory(component_hosts) / 1024  # In MB
            nodeType = self.DRUID_COMPONENT_NODE_TYPE_MAP[component]
            # NOTE(review): 'format' here appears to be Ambari's template helper
            # that substitutes the local 'nodeType' variable (not str.format) --
            # confirm before renaming any local used inside the template.
            putEnvPropertyAttribute(format('druid.{nodeType}.jvm.heap.memory'), 'maximum',
                                    max(totalAvailableMem, 1024))
# Maps Druid component names (as they appear in the stack definition) to the
# node-type token used inside druid-env property names.
DRUID_COMPONENT_NODE_TYPE_MAP = {
    'DRUID_BROKER': 'broker',
    'DRUID_COORDINATOR': 'coordinator',
    'DRUID_HISTORICAL': 'historical',
    'DRUID_MIDDLEMANAGER': 'middlemanager',
    'DRUID_OVERLORD': 'overlord',
    'DRUID_ROUTER': 'router'
}
def getMinMemory(self, component_hosts):
    """Return the smallest 'total_mem' (in KB) among *component_hosts*, capped at 1 TiB."""
    # Seed with the 1 TiB sentinel so an empty host list yields the cap,
    # exactly as the original running-minimum loop did.
    ram_values = [host['Hosts']['total_mem'] for host in component_hosts]
    return min(ram_values + [1073741824])
def getMinCpu(self, component_hosts):
    """Return the smallest 'cpu_count' among *component_hosts*, capped at 256."""
    # Seed with the 256 sentinel so an empty host list yields the cap.
    cpu_values = [host['Hosts']['cpu_count'] for host in component_hosts]
    return min(cpu_values + [256])
def getServiceConfigurationValidators(self):
    """Extend the parent stack's validator map with this stack's validators."""
    validators = super(ADH15StackAdvisor, self).getServiceConfigurationValidators()
    stack_validators = {
        "DRUID": {"druid-env": self.validateDruidEnvConfigurations,
                  "druid-historical": self.validateDruidHistoricalConfigurations,
                  "druid-broker": self.validateDruidBrokerConfigurations},
        "RANGER": {"ranger-ugsync-site": self.validateRangerUsersyncConfigurations},
        "YARN": {"yarn-site": self.validateYarnSiteConfigurations}
    }
    self.mergeValidators(validators, stack_validators)
    return validators
def validateDruidEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
    """Validate druid-env: each node type needs enough JVM direct memory for
    (numThreads + 1) processing buffers.
    """
    validationItems = []
    # Minimum Direct memory Validation
    envProperties = services['configurations']['druid-env']['properties']
    for nodeType in ['broker', 'historical']:
        properties = services['configurations'][format('druid-{nodeType}')]['properties']
        intermediateBufferSize = int(properties['druid.processing.buffer.sizeBytes']) / (1024 * 1024)  # In MBs
        processingThreads = int(properties['druid.processing.numThreads'])
        directMemory = int(envProperties[format('druid.{nodeType}.jvm.direct.memory')])
        if directMemory < (processingThreads + 1) * intermediateBufferSize:
            # BUG FIX: extend() with a dict iterated the dict and appended its
            # *keys* ("config-name", "item") instead of the validation entry;
            # append() adds the dict itself.
            validationItems.append(
                {"config-name": format("druid.{nodeType}.jvm.direct.memory"), "item": self.getErrorItem(
                    format(
                        "Not enough direct memory available for {nodeType} Node."
                        "Please adjust druid.{nodeType}.jvm.direct.memory, druid.processing.buffer.sizeBytes, druid.processing.numThreads"
                    )
                )
                })
    return self.toConfigurationValidationProblems(validationItems, "druid-env")
def validateYarnSiteConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
    """Validate yarn-site: the NM kill-escape user list should contain the Hive
    user, and yarn.log.server.web-service.url should match the timeline
    service webapp address (scheme chosen by yarn.http.policy).
    """
    validationItems = []
    siteProperties = services["configurations"]["yarn-site"]["properties"]
    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
    if 'HIVE' in servicesList and "yarn-site" in services["configurations"] and "yarn.nodemanager.kill-escape.user" in \
            services["configurations"]["yarn-site"]["properties"] and 'hive-env' in services['configurations'] and \
            'hive_user' in services['configurations']['hive-env']['properties']:
        hive_user = services['configurations']['hive-env']['properties']['hive_user']
        yarn_nm_kill_escape_user = services["configurations"]["yarn-site"]["properties"]["yarn.nodemanager.kill-escape.user"]
        # idiomatic 'not in' (was 'not hive_user in ...')
        if hive_user not in yarn_nm_kill_escape_user:
            validationItems.append(
                {"config-name": "yarn.nodemanager.kill-escape.user",
                 "item": self.getWarnItem("Value should contain %s" % hive_user)})
    if services["configurations"]["yarn-site"]["properties"]["yarn.http.policy"] == 'HTTP_ONLY':
        webapp_address = services["configurations"]["yarn-site"]["properties"]["yarn.timeline-service.webapp.address"]
        propertyValue = "http://"+webapp_address+"/ws/v1/applicationhistory"
    else:
        webapp_address = services["configurations"]["yarn-site"]["properties"]["yarn.timeline-service.webapp.https.address"]
        propertyValue = "https://"+webapp_address+"/ws/v1/applicationhistory"
    Logger.info("validateYarnSiteConfigurations: recommended value for webservice url"+services["configurations"]["yarn-site"]["properties"]["yarn.log.server.web-service.url"])
    if services["configurations"]["yarn-site"]["properties"]["yarn.log.server.web-service.url"] != propertyValue:
        # BUG FIX: this previously *reassigned* validationItems, silently
        # discarding the kill-escape.user warning collected above.
        validationItems.append(
            {"config-name": "yarn.log.server.web-service.url",
             "item": self.getWarnItem("Value should be %s" % propertyValue)})
    return self.toConfigurationValidationProblems(validationItems, "yarn-site")
def validateDruidHistoricalConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
    """Check that druid.processing.numThreads matches the recommended value."""
    item = self.validatorEqualsToRecommendedItem(properties, recommendedDefaults,
                                                 "druid.processing.numThreads")
    return self.toConfigurationValidationProblems(
        [{"config-name": "druid.processing.numThreads", "item": item}], "druid-historical")
def validateDruidBrokerConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
    """Check that druid.processing.numThreads matches the recommended value."""
    item = self.validatorEqualsToRecommendedItem(properties, recommendedDefaults,
                                                 "druid.processing.numThreads")
    return self.toConfigurationValidationProblems(
        [{"config-name": "druid.processing.numThreads", "item": item}], "druid-broker")
def recommendTezConfigurations(self, configurations, clusterData, services, hosts):
    """Recommend tez-site launch options, choosing GC flags from the Ambari
    server JDK version (G1 for JDK >= 1.8, ParallelGC otherwise).
    """
    super(ADH15StackAdvisor, self).recommendTezConfigurations(configurations, clusterData, services, hosts)
    putTezProperty = self.putProperty(configurations, "tez-site")
    # TEZ JVM options
    jvmGCParams = "-XX:+UseParallelGC"
    if "ambari-server-properties" in services and "java.home" in services["ambari-server-properties"]:
        # JDK8 needs different parameters
        # FIX: raw strings -- \/ \- \_ are regex escapes, invalid as string
        # escapes (SyntaxWarning on modern Python); the pattern is unchanged.
        match = re.match(r".*\/jdk(1\.\d+)[\-\_\.][^/]*$", services["ambari-server-properties"]["java.home"])
        if match and len(match.groups()) > 0:
            # Is version >= 1.8
            versionSplits = re.split(r"\.", match.group(1))
            if versionSplits and len(versionSplits) > 1 and int(versionSplits[0]) > 0 and int(versionSplits[1]) > 7:
                jvmGCParams = "-XX:+UseG1GC -XX:+ResizeTLAB"
    tez_jvm_opts = "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA "
    # Append 'jvmGCParams' and 'Heap Dump related option' (({{heap_dump_opts}}) Expanded while writing the
    # configurations at start/restart time).
    tez_jvm_updated_opts = tez_jvm_opts + jvmGCParams + "{{heap_dump_opts}}"
    putTezProperty('tez.am.launch.cmd-opts', tez_jvm_updated_opts)
    putTezProperty('tez.task.launch.cmd-opts', tez_jvm_updated_opts)
    Logger.info("Updated 'tez-site' config 'tez.task.launch.cmd-opts' and 'tez.am.launch.cmd-opts' as "
                ": {0}".format(tez_jvm_updated_opts))
def recommendRangerConfigurations(self, configurations, clusterData, services, hosts):
    """Keep 'ranger.usersync.group.searchenabled' in lockstep with LDAP delta-sync."""
    super(ADH15StackAdvisor, self).recommendRangerConfigurations(configurations, clusterData, services, hosts)
    putRangerUgsyncSite = self.putProperty(configurations, 'ranger-ugsync-site', services)
    delta_sync_enabled = False
    if 'ranger-ugsync-site' in services['configurations'] and 'ranger.usersync.ldap.deltasync' in services['configurations']['ranger-ugsync-site']['properties']:
        delta_sync_enabled = services['configurations']['ranger-ugsync-site']['properties']['ranger.usersync.ldap.deltasync'] == "true"
    putRangerUgsyncSite("ranger.usersync.group.searchenabled",
                        "true" if delta_sync_enabled else "false")
def validateRangerUsersyncConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
    """Warn when the LDAP usersync source has delta-sync on but group search off."""
    props = properties
    validationItems = []
    # .get with '' default is equivalent to the original key-presence checks.
    delta_sync_enabled = props.get('ranger.usersync.ldap.deltasync', '').lower() == 'true'
    group_sync_enabled = props.get('ranger.usersync.group.searchenabled', '').lower() == 'true'
    usersync_source_ldap_enabled = props.get('ranger.usersync.source.impl.class') == 'org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder'
    if usersync_source_ldap_enabled and delta_sync_enabled and not group_sync_enabled:
        validationItems.append({"config-name": "ranger.usersync.group.searchenabled",
                                "item": self.getWarnItem(
                                    "Need to set ranger.usersync.group.searchenabled as true, as ranger.usersync.ldap.deltasync is enabled")})
    return self.toConfigurationValidationProblems(validationItems, "ranger-ugsync-site")
def recommendRangerKMSConfigurations(self, configurations, clusterData, services, hosts):
    """Recommend the Ranger KMS port: the configured HTTPS port when SSL is on,
    otherwise the plain-HTTP default 9292."""
    super(ADH15StackAdvisor, self).recommendRangerKMSConfigurations(configurations, clusterData, services, hosts)
    putRangerKmsEnvProperty = self.putProperty(configurations, "kms-env", services)
    ssl_enabled = False
    ssl_port = "9393"
    if 'ranger-kms-site' in services['configurations']:
        kms_site = services['configurations']['ranger-kms-site']['properties']
        if 'ranger.service.https.attrib.ssl.enabled' in kms_site:
            ssl_enabled = kms_site['ranger.service.https.attrib.ssl.enabled'].lower() == "true"
        if 'ranger.service.https.port' in kms_site:
            ssl_port = kms_site['ranger.service.https.port']
    putRangerKmsEnvProperty("kms_port", ssl_port if ssl_enabled else "9292")
def recommendHDFSConfigurations(self, configurations, clusterData, services, hosts):
    """Recommend HDFS configs; when the Ranger HDFS plugin is enabled, point the
    Ranger repository config user at the cluster's hdfs user."""
    super(ADH15StackAdvisor, self).recommendHDFSConfigurations(configurations, clusterData, services, hosts)
    hdfs_user = 'hadoop'
    if 'hadoop-env' in services['configurations'] and 'hdfs_user' in services['configurations']['hadoop-env']['properties']:
        hdfs_user = services['configurations']['hadoop-env']['properties']['hdfs_user']
    # The freshly recommended value (in 'configurations') wins over the
    # currently deployed one (in services['configurations']).
    ranger_hdfs_plugin_enabled = False
    if 'ranger-hdfs-plugin-properties' in configurations and 'ranger-hdfs-plugin-enabled' in configurations['ranger-hdfs-plugin-properties']['properties']:
        ranger_hdfs_plugin_enabled = configurations['ranger-hdfs-plugin-properties']['properties']['ranger-hdfs-plugin-enabled'].lower() == 'yes'
    elif 'ranger-hdfs-plugin-properties' in services['configurations'] and 'ranger-hdfs-plugin-enabled' in services['configurations']['ranger-hdfs-plugin-properties']['properties']:
        ranger_hdfs_plugin_enabled = services['configurations']['ranger-hdfs-plugin-properties']['properties']['ranger-hdfs-plugin-enabled'].lower() == 'yes'
    if ranger_hdfs_plugin_enabled and 'ranger-hdfs-plugin-properties' in services['configurations'] and 'REPOSITORY_CONFIG_USERNAME' in services['configurations']['ranger-hdfs-plugin-properties']['properties']:
        Logger.info("Setting HDFS Repo user for Ranger.")
        putRangerHDFSPluginProperty = self.putProperty(configurations, "ranger-hdfs-plugin-properties", services)
        putRangerHDFSPluginProperty("REPOSITORY_CONFIG_USERNAME", hdfs_user)
    else:
        Logger.info("Not setting HDFS Repo user for Ranger.")
def recommendHIVEConfigurations(self, configurations, clusterData, services, hosts):
    """Recommend Hive configs: Ranger repository user, Atlas-hook JAAS
    settings, and Druid endpoints for hive-interactive-site."""
    super(ADH15StackAdvisor, self).recommendHIVEConfigurations(configurations, clusterData, services, hosts)
    putHiveAtlasHookProperty = self.putProperty(configurations, "hive-atlas-application.properties", services)
    putHiveAtlasHookPropertyAttribute = self.putPropertyAttribute(configurations, "hive-atlas-application.properties")
    if 'hive-env' in services['configurations'] and 'hive_user' in services['configurations']['hive-env']['properties']:
        hive_user = services['configurations']['hive-env']['properties']['hive_user']
    else:
        hive_user = 'hive'
    if 'hive-env' in configurations and 'hive_security_authorization' in configurations['hive-env']['properties']:
        ranger_hive_plugin_enabled = (configurations['hive-env']['properties']['hive_security_authorization'].lower() == 'ranger')
    elif 'hive-env' in services['configurations'] and 'hive_security_authorization' in services['configurations']['hive-env']['properties']:
        ranger_hive_plugin_enabled = (services['configurations']['hive-env']['properties']['hive_security_authorization'].lower() == 'ranger')
    else:
        ranger_hive_plugin_enabled = False
    if ranger_hive_plugin_enabled and 'ranger-hive-plugin-properties' in services['configurations'] and 'REPOSITORY_CONFIG_USERNAME' in services['configurations']['ranger-hive-plugin-properties']['properties']:
        Logger.info("Setting Hive Repo user for Ranger.")
        putRangerHivePluginProperty = self.putProperty(configurations, "ranger-hive-plugin-properties", services)
        putRangerHivePluginProperty("REPOSITORY_CONFIG_USERNAME", hive_user)
    else:
        Logger.info("Not setting Hive Repo user for Ranger.")
    security_enabled = self.isSecurityEnabled(services)
    enable_atlas_hook = False
    if 'hive-env' in configurations and 'hive.atlas.hook' in configurations['hive-env']['properties']:
        enable_atlas_hook = configurations['hive-env']['properties']['hive.atlas.hook'].lower() == 'true'
    elif 'hive-env' in services['configurations'] and 'hive.atlas.hook' in services['configurations']['hive-env']['properties']:
        enable_atlas_hook = services['configurations']['hive-env']['properties']['hive.atlas.hook'].lower() == 'true'
    if 'hive-atlas-application.properties' in services['configurations']:
        if security_enabled and enable_atlas_hook:
            putHiveAtlasHookProperty('atlas.jaas.ticketBased-KafkaClient.loginModuleControlFlag', 'required')
            putHiveAtlasHookProperty('atlas.jaas.ticketBased-KafkaClient.loginModuleName', 'com.sun.security.auth.module.Krb5LoginModule')
            putHiveAtlasHookProperty('atlas.jaas.ticketBased-KafkaClient.option.useTicketCache', 'true')
        else:
            putHiveAtlasHookPropertyAttribute('atlas.jaas.ticketBased-KafkaClient.loginModuleControlFlag', 'delete', 'true')
            putHiveAtlasHookPropertyAttribute('atlas.jaas.ticketBased-KafkaClient.loginModuleName', 'delete', 'true')
            putHiveAtlasHookPropertyAttribute('atlas.jaas.ticketBased-KafkaClient.option.useTicketCache', 'delete', 'true')
    # druid is not in list of services to be installed
    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
    if 'DRUID' in servicesList:
        putHiveInteractiveSiteProperty = self.putProperty(configurations, "hive-interactive-site", services)
        # BUG FIX: defaults are now set up front. Previously
        # druid_coordinator_host_port was unbound when 'druid-coordinator' was
        # absent, and druid_broker_host_port was unbound when 'druid-router'
        # was present but had no hosts -- either case raised NameError below.
        druid_coordinator_host_port = "localhost:8081"
        druid_broker_host_port = "localhost:8083"
        if 'druid-coordinator' in services['configurations']:
            component_hosts = self.getHostsWithComponent("DRUID", 'DRUID_COORDINATOR', services, hosts)
            if component_hosts is not None and len(component_hosts) > 0:
                # pick the first
                host = component_hosts[0]
                druid_coordinator_host_port = str(host['Hosts']['host_name']) + ":" + str(
                    services['configurations']['druid-coordinator']['properties']['druid.port'])
        # prefer the router as the broker endpoint when it is configured
        if 'druid-router' in services['configurations']:
            component_hosts = self.getHostsWithComponent("DRUID", 'DRUID_ROUTER', services, hosts)
            if component_hosts is not None and len(component_hosts) > 0:
                # pick the first
                host = component_hosts[0]
                druid_broker_host_port = str(host['Hosts']['host_name']) + ":" + str(
                    services['configurations']['druid-router']['properties']['druid.port'])
        elif 'druid-broker' in services['configurations']:
            component_hosts = self.getHostsWithComponent("DRUID", 'DRUID_BROKER', services, hosts)
            if component_hosts is not None and len(component_hosts) > 0:
                # pick the first
                host = component_hosts[0]
                druid_broker_host_port = str(host['Hosts']['host_name']) + ":" + str(
                    services['configurations']['druid-broker']['properties']['druid.port'])
        druid_metadata_uri = ""
        druid_metadata_user = ""
        druid_metadata_type = ""
        if 'druid-common' in services['configurations']:
            druid_metadata_uri = services['configurations']['druid-common']['properties']['druid.metadata.storage.connector.connectURI']
            druid_metadata_type = services['configurations']['druid-common']['properties']['druid.metadata.storage.type']
            if 'druid.metadata.storage.connector.user' in services['configurations']['druid-common']['properties']:
                druid_metadata_user = services['configurations']['druid-common']['properties']['druid.metadata.storage.connector.user']
        putHiveInteractiveSiteProperty('hive.druid.broker.address.default', druid_broker_host_port)
        putHiveInteractiveSiteProperty('hive.druid.coordinator.address.default', druid_coordinator_host_port)
        putHiveInteractiveSiteProperty('hive.druid.metadata.uri', druid_metadata_uri)
        putHiveInteractiveSiteProperty('hive.druid.metadata.username', druid_metadata_user)
        putHiveInteractiveSiteProperty('hive.druid.metadata.db.type', druid_metadata_type)
def recommendHBASEConfigurations(self, configurations, clusterData, services, hosts):
    """Recommend HBase configs; when the Ranger HBase plugin is enabled, point
    the Ranger repository config user at the cluster's hbase user."""
    super(ADH15StackAdvisor, self).recommendHBASEConfigurations(configurations, clusterData, services, hosts)
    hbase_user = 'hbase'
    if 'hbase-env' in services['configurations'] and 'hbase_user' in services['configurations']['hbase-env']['properties']:
        hbase_user = services['configurations']['hbase-env']['properties']['hbase_user']
    ranger_hbase_plugin_enabled = False
    if 'ranger-hbase-plugin-properties' in configurations and 'ranger-hbase-plugin-enabled' in configurations['ranger-hbase-plugin-properties']['properties']:
        ranger_hbase_plugin_enabled = configurations['ranger-hbase-plugin-properties']['properties']['ranger-hbase-plugin-enabled'].lower() == 'yes'
    elif 'ranger-hbase-plugin-properties' in services['configurations'] and 'ranger-hbase-plugin-enabled' in services['configurations']['ranger-hbase-plugin-properties']['properties']:
        ranger_hbase_plugin_enabled = services['configurations']['ranger-hbase-plugin-properties']['properties']['ranger-hbase-plugin-enabled'].lower() == 'yes'
    if ranger_hbase_plugin_enabled and 'ranger-hbase-plugin-properties' in services['configurations'] and 'REPOSITORY_CONFIG_USERNAME' in services['configurations']['ranger-hbase-plugin-properties']['properties']:
        Logger.info("Setting Hbase Repo user for Ranger.")
        putRangerHbasePluginProperty = self.putProperty(configurations, "ranger-hbase-plugin-properties", services)
        putRangerHbasePluginProperty("REPOSITORY_CONFIG_USERNAME", hbase_user)
    else:
        Logger.info("Not setting Hbase Repo user for Ranger.")
def recommendKAFKAConfigurations(self, configurations, clusterData, services, hosts):
    """Recommend Kafka configs; when the Ranger Kafka plugin is enabled, point
    the Ranger repository config user at the cluster's kafka user."""
    super(ADH15StackAdvisor, self).recommendKAFKAConfigurations(configurations, clusterData, services, hosts)
    kafka_user = "kafka"
    if 'kafka-env' in services['configurations'] and 'kafka_user' in services['configurations']['kafka-env']['properties']:
        kafka_user = services['configurations']['kafka-env']['properties']['kafka_user']
    ranger_kafka_plugin_enabled = False
    if 'ranger-kafka-plugin-properties' in configurations and 'ranger-kafka-plugin-enabled' in configurations['ranger-kafka-plugin-properties']['properties']:
        ranger_kafka_plugin_enabled = configurations['ranger-kafka-plugin-properties']['properties']['ranger-kafka-plugin-enabled'].lower() == 'yes'
    elif 'ranger-kafka-plugin-properties' in services['configurations'] and 'ranger-kafka-plugin-enabled' in services['configurations']['ranger-kafka-plugin-properties']['properties']:
        ranger_kafka_plugin_enabled = services['configurations']['ranger-kafka-plugin-properties']['properties']['ranger-kafka-plugin-enabled'].lower() == 'yes'
    if ranger_kafka_plugin_enabled and 'ranger-kafka-plugin-properties' in services['configurations'] and 'REPOSITORY_CONFIG_USERNAME' in services['configurations']['ranger-kafka-plugin-properties']['properties']:
        Logger.info("Setting Kafka Repo user for Ranger.")
        putRangerKafkaPluginProperty = self.putProperty(configurations, "ranger-kafka-plugin-properties", services)
        putRangerKafkaPluginProperty("REPOSITORY_CONFIG_USERNAME", kafka_user)
    else:
        Logger.info("Not setting Kafka Repo user for Ranger.")
def __addZeppelinToLivy2SuperUsers(self, configurations, services):
    """
    If Kerberos is enabled AND Zeppelin is installed AND Spark2 Livy Server is installed, then set
    livy2-conf/livy.superusers to contain the Zeppelin principal name from
    zeppelin-env/zeppelin.server.kerberos.principal
    :param configurations:
    :param services:
    """
    if self.isSecurityEnabled(services):
        zeppelin_env = self.getServicesSiteProperties(services, "zeppelin-env")
        if zeppelin_env and 'zeppelin.server.kerberos.principal' in zeppelin_env:
            zeppelin_principal = zeppelin_env['zeppelin.server.kerberos.principal']
            # user part of user@REALM
            zeppelin_user = zeppelin_principal.split('@')[0] if zeppelin_principal else None
            if zeppelin_user:
                livy2_conf = self.getServicesSiteProperties(services, 'livy2-conf')
                if livy2_conf:
                    superusers = livy2_conf['livy.superusers'] if livy2_conf and 'livy.superusers' in livy2_conf else None
                    # add the Zeppelin user to the set of users
                    if superusers:
                        _superusers = superusers.split(',')
                        _superusers = [x.strip() for x in _superusers]
                        # BUG FIX: filter() returns an iterator on Python 3 which has
                        # no append(); materialize it (no-op on Python 2).
                        _superusers = list(filter(None, _superusers))  # Removes empty string elements from array
                    else:
                        _superusers = []
                    if zeppelin_user not in _superusers:
                        _superusers.append(zeppelin_user)
                    putLivy2ConfProperty = self.putProperty(configurations, 'livy2-conf', services)
                    putLivy2ConfProperty('livy.superusers', ','.join(_superusers))
def __isServiceDeployed(self, services, serviceName):
    """Return True when *serviceName* appears in the request's service list."""
    return any(service["StackServices"]["service_name"] == serviceName
               for service in services["services"])
| |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 30 10:18:12 2014
%reset -f
%clear
%pylab
%load_ext autoreload
%autoreload 2
@author: isaac
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import UnivariateSpline
import eqns
reload(eqns)
# setup better plots
import plots
reload(plots)
from plots import bmap, rcj, tl
# %% Load in the data
def load_song(fname, data_dir='./Data/Song2008'):
    """Load a digitized curve named *fname* (without extension) as an array.

    Generalized: *data_dir* lets callers load from a different directory; the
    default preserves the original hard-coded location.
    """
    return np.genfromtxt('{0}/{1}.csv'.format(data_dir, fname), delimiter=',')
# Digitized lift (cl) and drag (cd) curves for the 0.10 mm and 0.25 mm membranes.
cl_010 = load_song('cl_010mm')
cl_025 = load_song('cl_025mm')
cd_010 = load_song('cd_010mm')
cd_025 = load_song('cd_025mm')
# %% Interpolate them to the same degree vector
def fix_angles(data, mod_rng=False, low=-20, high=60):
    """Convert the angle column data[:, 0] from degrees to radians, in place.

    When *mod_rng* is True, clamp the first/last angles outward to *low*/*high*
    (given in degrees) so the curve spans the full range. Returns *data*.
    """
    data[:, 0] = np.radians(data[:, 0])
    if not mod_rng:
        return data
    low = np.radians(low)
    high = np.radians(high)
    if data[0, 0] > low:
        print('Fixing low by {0:.6f}'.format(data[0, 0] - low))
        data[0, 0] = low
    if data[-1, 0] < high:
        print('Fixing high by {0:.6f}'.format(data[-1, 0] - high))
        data[-1, 0] = high
    return data
# Convert the angle columns from degrees to radians (in place; the *_0xx
# arrays are mutated, the short names are just aliases).
cl_10 = fix_angles(cl_010)
cl_25 = fix_angles(cl_025)
cd_10 = fix_angles(cd_010)
cd_25 = fix_angles(cd_025)
# %%
# spline fit
# Smoothing quintic splines (s=.03, k=5) of the force-coefficient curves.
ss, kk = .03, 5
cl_fun_10 = UnivariateSpline(*cl_10.T, s=ss, k=kk)
cl_fun_25 = UnivariateSpline(*cl_25.T, s=ss, k=kk)
cd_fun_10 = UnivariateSpline(*cd_10.T, s=ss, k=kk)
cd_fun_25 = UnivariateSpline(*cd_25.T, s=ss, k=kk)
clprime_fun_10 = cl_fun_10.derivative()
clprime_fun_25 = cl_fun_25.derivative()
cdprime_fun_10 = cd_fun_10.derivative()
cdprime_fun_25 = cd_fun_25.derivative()
# linear interpolation
# s=0, k=1 makes UnivariateSpline an exact piecewise-linear interpolant.
ss, kk = 0, 1
Cl_fun_10 = UnivariateSpline(*cl_10.T, s=ss, k=kk)
Cl_fun_25 = UnivariateSpline(*cl_25.T, s=ss, k=kk)
Cd_fun_10 = UnivariateSpline(*cd_10.T, s=ss, k=kk)
Cd_fun_25 = UnivariateSpline(*cd_25.T, s=ss, k=kk)
Clprime_fun_10 = Cl_fun_10.derivative()
Clprime_fun_25 = Cl_fun_25.derivative()
Cdprime_fun_10 = Cd_fun_10.derivative()
Cdprime_fun_25 = Cd_fun_25.derivative()
# evaluate the fits
angle_min = np.deg2rad(-20)
angle_max = np.deg2rad(60)
al_inp = np.deg2rad(np.arange(-20, 61, 2))  # coarse grid at the digitized angles
al_spl = np.linspace(angle_min, angle_max, 501)  # dense grid for smooth curves
cl10 = cl_fun_10(al_spl)
cl25 = cl_fun_25(al_spl)
cd10 = cd_fun_10(al_spl)
cd25 = cd_fun_25(al_spl)
# NOTE(review): the capitalized values below come from the *smoothing-spline*
# fits (cl_fun_*) evaluated at the coarse grid, not from the linear
# interpolants (Cl_fun_*) that the capitalization convention suggests --
# confirm this is intended.
Cl10 = cl_fun_10(al_inp)
Cl25 = cl_fun_25(al_inp)
Cd10 = cd_fun_10(al_inp)
Cd25 = cd_fun_25(al_inp)
clprime10 = clprime_fun_10(al_spl)
clprime25 = clprime_fun_25(al_spl)
cdprime10 = cdprime_fun_10(al_spl)
cdprime25 = cdprime_fun_25(al_spl)
Clprime10 = clprime_fun_10(al_inp)
Clprime25 = clprime_fun_25(al_inp)
Cdprime10 = cdprime_fun_10(al_inp)
Cdprime25 = cdprime_fun_25(al_inp)
# %% Look at the data
# Overlay the digitized points (markers) and the smoothing-spline fits (lines).
fig, ax = plt.subplots()
ax.plot(al_inp, Cl10, 'o')
ax.plot(al_inp, Cl25, 'o')
ax.plot(al_inp, Cd10, 'o')
ax.plot(al_inp, Cd25, 'o')
ax.plot(al_spl, cl10, c=bmap[0])
ax.plot(al_spl, cl25, c=bmap[1])
ax.plot(al_spl, cd10, c=bmap[2])
ax.plot(al_spl, cd25, c=bmap[3])
ax.axhline(0, color='gray', lw=.75)
ax.set_xlabel(r'$\alpha$', fontsize=16)
ax.set_ylabel('force coefficients')
rcj(ax)
tl(fig)
# %% Cl, Cd, and ClCd curves for paper (updated)
# use the .25 mm membrane for the paper
rd = np.rad2deg
fig, ax = plt.subplots()
ax.axvline(0, color='gray', lw=1)
ax.axhline(0, color='gray', lw=1)
ax.plot(rd(al_inp), Cl25, 'o', ms=6, label=r'$C_L$')
ax.plot(rd(al_inp), Cd25, 's', ms=6, label=r'$C_D$')
ax.plot(rd(al_inp), Cl25 / Cd25, '^', ms=6, label=r'$C_L/C_D$')
ax.plot(rd(al_spl), cl25, color=bmap[0], lw=1.5)
ax.plot(rd(al_spl), cd25, color=bmap[1], lw=1.5)
ax.plot(rd(al_spl), cl25 / cd25, color=bmap[2])
ax.set_xlim(-15, 65)
ax.set_ylim(-2, 3)
ax.legend(loc='lower right', frameon=False, fontsize=18)
ax.set_xlabel(r'$\alpha$', fontsize=18)
ax.set_ylabel('force coefficients', fontsize=18)
plt.draw()  # force tick labels to be generated before reading them back
# add degree symbol to angles
ticks = ax.get_xticklabels()
newticks = []
for tick in ticks:
    text = tick.get_text()
    newticks.append(text + u'\u00B0')
ax.set_xticklabels(newticks)
ax.text(35, 2.5, 'airfoil squirrel', {'fontsize': 18})
[ttl.set_size(18) for ttl in ax.get_xticklabels()]
[ttl.set_size(18) for ttl in ax.get_yticklabels()]
rcj(ax)
tl(fig)
fig.savefig('Figures/figure4a_airfoil_squirrel.pdf', transparent=True)
# %% Find equilibrium points
# Sweep pitch and glide angle to locate equilibrium glides for both the
# linear (capitalized) and smoothing-spline (lowercase) aerodynamic fits.
pitches = np.deg2rad(np.linspace(-25, 25, 4000))
gammas = np.deg2rad(np.linspace(0, 60, 1000))
arng = (angle_min, angle_max)  # valid angle-of-attack range of the data
from eqns import pitch_bifurcation as ptbi
so_Equil_10 = ptbi(pitches, gammas, Cl_fun_10, Cd_fun_10, angle_rng=arng)
so_equil_10 = ptbi(pitches, gammas, cl_fun_10, cd_fun_10, angle_rng=arng)
so_Equil_25 = ptbi(pitches, gammas, Cl_fun_25, Cd_fun_25, angle_rng=arng)
so_equil_25 = ptbi(pitches, gammas, cl_fun_25, cd_fun_25, angle_rng=arng)
# %% Classify the stability of fixed points
# tau_delta returns trace/determinant data and eigenvalues of the Jacobian
# at each equilibrium (per eqns' interface).
from eqns import tau_delta as td
so_TD_10, so_EV_10 = td(so_Equil_10, Cl_fun_10, Cd_fun_10, Clprime_fun_10,
                        Cdprime_fun_10, angle_rng=arng)
so_TD_25, so_EV_25 = td(so_Equil_25, Cl_fun_25, Cd_fun_25, Clprime_fun_25,
                        Cdprime_fun_25, angle_rng=arng)
so_td_10, so_ev_10 = td(so_equil_10, cl_fun_10, cd_fun_10, clprime_fun_10,
                        cdprime_fun_10, angle_rng=arng)
so_td_25, so_ev_25 = td(so_equil_25, cl_fun_25, cd_fun_25, clprime_fun_25,
                        cdprime_fun_25, angle_rng=arng)
_, _, so_Class_10 = eqns.classify_fp(so_TD_10)
_, _, so_Class_25 = eqns.classify_fp(so_TD_25)
_, _, so_class_10 = eqns.classify_fp(so_td_10)
_, _, so_class_25 = eqns.classify_fp(so_td_25)
# Fixed-point categories and the color assigned to each in later plots.
possible_class = ['saddle point', 'unstable focus', 'unstable node',
                  'stable focus', 'stable node']
bfbmap = [bmap[0], bmap[4], bmap[2], bmap[3], bmap[1]]
# %% Spline bifurcation plot (deg) for paper
rd = np.rad2deg
# Shaded bands mark glide angles where the angle of attack would fall outside
# the measured range (alpha = gamma + pitch must stay within arng).
gam_high = arng[0] - pitches  # closer to 0
gam_low = arng[1] - pitches  # closer to 90
fig, ax = plt.subplots()
ax.fill_between(rd(pitches), rd(gam_high), 0, color='gray', alpha=.1, lw=0)
ax.fill_between(rd(pitches), rd(gam_low), 60, color='gray', alpha=.1, lw=0)
# vertical guides at the two pitch values used for the phase-space figures
ax.axvline(0, color='gray')
ax.axvline(2, color='gray')
for ii, fp_kind in enumerate(possible_class):
    idx = np.where(so_class_25 == fp_kind)[0]
    if len(idx) == 0:
        continue
    ax.plot(rd(so_equil_25[idx, 0]), rd(so_equil_25[idx, 1]), 'o',
            c=bfbmap[ii], ms=2.5, label=fp_kind)
_leg = ax.legend(loc='upper right', markerscale=3, fancybox=True, framealpha=.75,
                 frameon=True, fontsize=16)
_leg.get_frame().set_color('w')
ax.set_xlim(-15, 15)
ax.set_ylim(60, 0)  # inverted y-axis: glide angle increases downward
#ax.set_ylabel(r'$\gamma^*$, equilibrium glide angle', fontsize=18)
#ax.set_xlabel(r'$\theta$, pitch angle', fontsize=18)
ax.set_ylabel(r'$\gamma^*$ ', fontsize=18, rotation=0)
ax.set_xlabel(r'$\theta$', fontsize=18)
ax.text(-13, 5, 'airfoil squirrel', {'fontsize': 18})
plt.draw()  # force tick labels to be generated before reading them back
# add degree symbol to angles
ticks = ax.get_xticklabels()
newticks = []
for tick in ticks:
    text = tick.get_text()
    newticks.append(text + u'\u00B0')
ax.set_xticklabels(newticks)
ticks = ax.get_yticklabels()
newticks = []
for tick in ticks:
    text = tick.get_text()
    newticks.append(text + u'\u00B0')
ax.set_yticklabels(newticks)
[ttl.set_size(18) for ttl in ax.get_xticklabels()]
[ttl.set_size(18) for ttl in ax.get_yticklabels()]
rcj(ax)
tl(fig)
fig.savefig('Figures/figure6a_bifurcation_airfoil_squirrel.pdf',
            transparent=True)
# %% Plot the phase space, pitch = 0
# Aerodynamic coefficient interpolators for the Re = 25k airfoil data;
# the Re = 10k variant is kept commented for reference.
#afdict_10 = dict(cli=cl_fun_10, cdi=cd_fun_10,
#                 clip=clprime_fun_10, cdip=cdprime_fun_10)
afdict_25 = dict(cli=cl_fun_25, cdi=cd_fun_25,
                 clip=clprime_fun_25, cdip=cdprime_fun_25)
pitch = 0
# Velocity-space plotting window (non-dimensional); vz is negative downward.
lims = (vxlim, vzlim) = np.r_[0, 1.25], np.r_[0, -1.25]
normalize = True
tvec = np.linspace(0, 30, 251)  # integration times for the trajectories
import plots
reload(plots)  # NOTE(review): Python 2 builtin; needs importlib.reload on Py3
from plots import phase_plotter as ppr
# Velocity phase portrait with equilibria and sample trajectories.
fig, ax = ppr(afdict_25, pitch, lims, arng, tvec, ngrid=201,
              nseed=501, nseed_skip=25, quiver=False, skip=10, seed=False,
              timer=True, gamtest=gammas, traj=plots.ps_traj_dp5,
              fig=None, ax=None)
lab = 'airfoil squirrel, ' + r'$\theta=$0' + u'\u00B0'
ax.text(.05, -1, lab, fontsize=16)
fig.savefig('Figures/figure5ai_vpd0_airfoil_squirrel.pdf', transparent=True)
# %% Plot the phase space, pitch = 2
# Same phase portrait as above, with the airfoil pitched up 2 degrees.
pitch = np.deg2rad(2)
fig, ax = ppr(afdict_25, pitch, lims, arng, tvec, ngrid=201,
              nseed=501, nseed_skip=25, quiver=False, skip=10, seed=False,
              timer=True, gamtest=gammas, traj=plots.ps_traj_dp5,
              fig=None, ax=None)
lab = 'airfoil squirrel, ' + r'$\theta=$2' + u'\u00B0'
ax.text(.05, -1, lab, fontsize=16)
fig.savefig('Figures/figure5aii_vpd2_airfoil_squirrel.pdf', transparent=True)
# %% Additional plots
# Appendix figure: equilibria from the bifurcation diagram mapped into
# velocity space (vx, vz), colored by stability class.
fig, ax = plt.subplots()
for ii, fp_kind in enumerate(possible_class):
    idx = np.where(so_class_25 == fp_kind)[0]
    if len(idx) > 0:
        geq = so_equil_25[idx, 1]  # equilibrium glide angles
        # Equilibrium speed from the force balance, then resolved into
        # horizontal/vertical components (vz negative downward).
        veq = eqns.v_equil(geq, cl_fun_25, cd_fun_25)
        vxeq = veq * np.cos(geq)
        vzeq = -veq * np.sin(geq)
        ax.plot(vxeq, vzeq, 'o', c=bfbmap[ii], ms=2, label=fp_kind,
                mec=bfbmap[ii])
ax.set_xlim(0, 1.5)
ax.set_ylim(-1.5, 0)
ax.set_xlabel(r"$\hat{v}_x$", fontsize=18)
ax.set_ylabel(r"$\hat{v}_z$", fontsize=18)
_leg = ax.legend(loc='best', markerscale=4, frameon=False, framealpha=.75)
# vx axis drawn on top since vz is negative (plot reads downward).
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
rcj(ax, ['bottom', 'right'])
tl(fig)
fig.savefig('Figures/appendix_bifurcation_airfoil_squirrel.pdf',
            transparent=True)
| |
# Copyright (c) 2015 The New Mexico Consortium
#
# {{{NMC-LICENSE
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# }}}
import re
import subprocess
from rtslib_fb import FabricModule, Target, TPG, BlockStorageObject
from rtslib_fb import NetworkPortal, NodeACL, LUN, MappedLUN, RTSRoot
from nmc_probe.log import Log
class TargetManager:
    """Manages ZVOL based iSCSI targets for Emulab diskless booting.

    Wraps rtslib_fb and mirrors the live kernel target configuration in
    an in-memory index for fast lookups when creating shares for lots of
    nodes. The index is shaped like the ``targetcli`` ``ls`` output:

        self.block_store[name]                               = BlockStorageObject
        self.target[wwn]['target']                           = Target
        self.target[wwn]['tpg'][tag]['tpg']                  = TPG
        self.target[wwn]['tpg'][tag]['acl'][initiator_name]  = NodeACL
        self.target[wwn]['tpg'][tag]['lun'][storage_name]    = LUN
        self.target[wwn]['tpg'][tag]['portal']['ip:port']    = NetworkPortal

    The wwn (World Wide Name) uniquely identifies each target; clients
    learn it from kernel parameters, the BIOS, or, most commonly,
    /etc/iscsi/initiatorname.iscsi.
    """

    def __init__(self):
        # In-memory mirrors of the live rtslib state, populated below.
        self.block_store = {}
        self.target = {}
        self.root = RTSRoot()
        self.iscsi = FabricModule('iscsi')
        self.mapped_luns = {}
        self.get_block_store_objects()
        self.get_targets()

    def save(self):
        '''Persist the current kernel target configuration to disk.'''
        self.root.save_to_file()

    def get_block_store_objects(self):
        '''(Re)build the index of existing block backstores, keyed by name.'''
        self.block_store = {}
        for storage_object in self.root.storage_objects:
            # Only "block" backstores back ZVOL targets; skip fileio etc.
            if storage_object.plugin == "block":
                self.block_store[storage_object.name] = storage_object

    def get_targets(self):
        '''(Re)build the per-target index of TPGs, ACLs, LUNs and portals.

        Assumes a single TPG per target (see create_iscsi_target).
        '''
        for target in list(self.iscsi.targets):
            wwn = target.wwn
            self.target[wwn] = {'target': target, 'tpg': {}}
            for tpg in target.tpgs:
                self.target[wwn]['tpg'][tpg.tag] = {
                    'tpg': tpg,
                    'acl': {},
                    'lun': {},
                    'portal': {}
                }
                tpg_tag = self.target[wwn]['tpg'][tpg.tag]
                for acl in tpg.node_acls:
                    tpg_tag['acl'][acl.node_wwn] = acl
                for lun in tpg.luns:
                    tpg_tag['lun'][lun.storage_object.name] = lun
                for portal in tpg.network_portals:
                    # Use the one canonical key builder so lookups in
                    # get_portal() always hit entries built here.
                    portal_id = self.get_portal_id(portal.ip_address,
                                                   portal.port)
                    tpg_tag['portal'][portal_id] = portal

    def create_iscsi_target(self, params):
        """Create an iSCSI target (idempotent).

        Parameters
        ----------
        params : dict
            wwn: The World Wide Name of the share, eg, the IQN
            device: the backing block device
            initiators: list of initiator names allowed to log in
            ip: portal listen address (default '0.0.0.0')
            port: portal listen port (default 3260)

        Returns
        -------
        True if any object had to be created, a falsy value if the
        target was already fully configured.

        Raises
        ------
        ValueError
            If wwn or device is missing.
        """
        wwn = params.get('wwn', None)
        device = params.get('device', None)
        initiators = params.get('initiators', None)
        ip = params.get('ip', '0.0.0.0')
        port = params.get('port', 3260)
        # Fail fast on missing required parameters instead of letting
        # rtslib raise something obscure later (matches the ValueError
        # behavior of delete_target_and_block_store).
        if wwn is None:
            raise ValueError('No wwn specified')
        if device is None:
            raise ValueError('No device specified')
        # Something outside this library lowercases the wwn, so
        # we lowercase the input to stay consistent
        wwn = wwn.lower()
        # Track whether any step had to create something, so the caller
        # can tell a no-op (everything already existed) from real work.
        created = False
        # Create blockstore, if needed
        blockstore = self.get_block_store(wwn)
        if blockstore is None:
            blockstore = self.create_block_store(wwn, device)
            created = True
        else:
            Log.info('block backstore %s already exists, not creating' % (wwn))
        # Create target
        target = self.get_target(wwn)
        if target is None:
            target = self.create_target(wwn)
            created = True
        else:
            Log.info('target %s already exists, not creating' % (wwn))
        # Create TPG; this code assumes a single TPG (tag 1) per target.
        tag = 1
        tpg = self.get_tpg(target, tag)
        if tpg is None:
            tpg = self.create_tpg(target, tag)
            created = True
        else:
            Log.info('tpg (%s, %s) already exists, not creating' %
                     (target, tag))
        # Create LUN.
        # Only one LUN is supported; reuse the last existing LUN if any
        # (more than one is undefined behavior), otherwise create LUN 0.
        lun = None
        for lun in tpg.luns:
            pass
        if lun is None:
            lun = self.create_lun(tpg, blockstore)
            created = True
        else:
            Log.info('lun %s already exists, not creating' % (blockstore.name))
        # Create portal
        portal = self.get_portal(tpg, ip, port)
        if portal is None:
            portal = self.create_portal(tpg, ip, port)
            created = True
        else:
            Log.info('portal %s already exists, not creating' %
                     (self.get_portal_id(ip, port)))
        # Set up ACLs and mapped LUNs; tolerate a missing initiator list.
        for initiator in (initiators or []):
            acl = self.get_acl(tpg, initiator)
            if acl is None:
                acl = self.create_acl(tpg, initiator)
                created = True
            else:
                Log.info('acl (%s, %s) already exists, not creating' %
                         (tpg, initiator))
            # Like LUNs, only one mapped LUN (number 0) is supported;
            # reuse the last existing one if any.
            num = 0
            mapped_lun = None
            for mapped_lun in acl.mapped_luns:
                pass
            if mapped_lun is None:
                mapped_lun = self.create_mapped_lun(acl, num, lun)
                created = True
            else:
                Log.info('mapped lun (%s, %s, %s) already exists' %
                         (acl, num, lun))
        return created

    def delete_target_and_block_store(self, params):
        """Delete an iSCSI target and its block store. This does not
        delete the underlying storage.

        Parameters
        ----------
        params : dict
            wwn: The world wide name of the share to remove

        Raises
        ------
        ValueError
            If no wwn is given.
        """
        wwn = params.get('wwn', None)
        if wwn is None:
            raise ValueError('No wwn specified')
        # Delete the target first, then the backstore it references.
        self.delete_target(wwn)
        self.delete_block_store(wwn)

    def get_block_store(self, wwn):
        """Return the block store named wwn, or None if it does not exist.

        Parameters
        ----------
        wwn : string
            World Wide Name for the block store
        """
        return self.block_store.get(wwn, None)

    def create_block_store(self, wwn, device):
        """Create a blockstore with the given wwn.

        Assumes the blockstore does not already exist; calling this when
        it does may raise from rtslib. Call get_block_store first.

        Parameters
        ----------
        wwn : string
            World Wide Name for the block store
        device : string
            Path to a block device

        Returns
        -------
        The new BlockStorageObject.
        """
        Log.info('creating block backstore %s for device %s' % (wwn, device))
        storage = BlockStorageObject(wwn, device, wwn)
        self.block_store[wwn] = storage
        return storage

    def delete_block_store(self, name):
        '''Delete the named blockstore, if it exists. The backing
        device, file, etc, still exists afterwards.'''
        store = self.block_store.get(name)
        if store is None:
            # Nothing to do; stay idempotent.
            Log.info('No block store %s. Not deleting' % name)
            return
        Log.info('deleting block store %s' % (name))
        store.delete()
        del self.block_store[name]

    def delete_target(self, wwn):
        '''Delete the target with the given wwn, if it exists.'''
        target_dict = self.target.get(wwn, None)
        if target_dict is None:
            Log.info('No target %s. Not deleting' % wwn)
            return
        target = target_dict.get('target', None)
        # Surprising, but possible, because processes can die
        # and the state can be strange
        if target is None:
            return
        Log.info('deleting target %s' % (wwn))
        target.delete()
        del self.target[wwn]

    def get_target(self, wwn):
        '''Return the Target object for wwn, or None if it does not exist.

        Parameters
        ----------
        wwn : string
            The wwn of the target
        '''
        target_dict = self.target.get(wwn, None)
        if target_dict is None:
            return None
        return target_dict['target']

    def create_target(self, wwn):
        '''Return the target for wwn, creating it if needed.'''
        target_dict = self.target.get(wwn, None)
        if target_dict is None:
            Log.info('creating target with wwn %s' % (wwn))
            # The wwn will be lowercased automatically by something
            # outside this library. I'm not sure if its RTSLib or
            # the underlying Linux target system
            target = Target(self.iscsi, wwn)
            # Add target to the index, initialize empty child nodes
            self.target[wwn] = {'target': target, 'tpg': {}}
        else:
            Log.info('target %s already exists, not creating' % (wwn))
            target = target_dict['target']
        return target

    def get_tpg(self, target, tag):
        '''Return the target portal group with the given tag on target,
        or None if it does not exist.

        Parameters
        ----------
        target: Target
        tag: int
        '''
        tpg_entry = self.target[target.wwn]['tpg'].get(tag, None)
        if tpg_entry is None:
            return None
        return tpg_entry['tpg']

    def create_tpg(self, target, tag):
        '''Return the TPG with the given tag on target, creating and
        enabling it (authentication disabled) if needed.'''
        tpg_list = self.target[target.wwn]['tpg']
        if tag not in tpg_list:
            Log.info('creating tpg (%s, %s)' % (target, tag))
            # Create and configure the target portal group
            tpg = TPG(target, tag)
            tpg.set_attribute("authentication", 0)
            tpg.enable = 1
            # Index entry shaped exactly like the ones built by
            # get_targets(): 'acl' maps initiator name -> NodeACL.
            # (Previously this seeded a stray 'mapped_lun' key into the
            # acl dict, which get_acl would have treated as an initiator.)
            tpg_list[tag] = {
                'tpg': tpg,
                'acl': {},
                'lun': {},
                'portal': {}
            }
        else:
            Log.info('tpg (%s, %s) already exists, not creating' %
                     (target, tag))
            tpg = tpg_list[tag]['tpg']
        return tpg

    def create_lun(self, tpg, blockstore):
        '''Return the LUN for blockstore on tpg, creating LUN 0 if needed.'''
        wwn = tpg.parent_target.wwn
        lun_list = self.target[wwn]['tpg'][tpg.tag]['lun']
        lun = lun_list.get(blockstore.name, None)
        if lun is None:
            Log.info('creating lun %s, blockstore %s' % (tpg, blockstore))
            # Create the LUN and track it in the index
            lun = LUN(tpg, 0, blockstore)
            lun_list[blockstore.name] = lun
        else:
            Log.info('lun %s already exists, not creating' % (blockstore.name))
        return lun

    def get_portal_id(self, ip, port):
        '''Return the canonical "ip:port" index key for a portal.'''
        # int() keeps this working whether port arrives as int or string
        # (params dicts often carry it as a string).
        return '%s:%d' % (ip, int(port))

    def get_portal(self, tpg, ip, port):
        '''Return the portal at ip:port on tpg, or None if it does not
        exist.'''
        portal_id = self.get_portal_id(ip, port)
        wwn = tpg.parent_target.wwn
        portal_list = self.target[wwn]['tpg'][tpg.tag]['portal']
        return portal_list.get(portal_id, None)

    def create_portal(self, tpg, ip, port):
        '''Return the portal at ip:port on tpg, creating it if needed.'''
        portal_id = self.get_portal_id(ip, port)
        wwn = tpg.parent_target.wwn
        portal_list = self.target[wwn]['tpg'][tpg.tag]['portal']
        if portal_id in portal_list:
            Log.info('portal %s already exists, not creating' % (portal_id))
            portal = portal_list[portal_id]
        else:
            Log.info('creating portal (%s, %s, %s)' % (tpg, ip, port))
            portal = NetworkPortal(tpg, ip, port)
            portal_list[portal_id] = portal
        return portal

    def get_acl(self, tpg, initiator_name):
        '''Return the ACL for initiator_name on tpg, or None if it does
        not exist.'''
        wwn = tpg.parent_target.wwn
        acl_list = self.target[wwn]['tpg'][tpg.tag]['acl']
        return acl_list.get(initiator_name, None)

    def create_acl(self, tpg, initiator_name):
        '''Return the ACL for initiator_name on tpg, creating it if
        needed.'''
        wwn = tpg.parent_target.wwn
        acl_list = self.target[wwn]['tpg'][tpg.tag]['acl']
        if initiator_name in acl_list:
            Log.info('acl (%s, %s) already exists, not creating' %
                     (tpg, initiator_name))
            acl = acl_list[initiator_name]
        else:
            Log.info('creating acl (%s, %s)' % (tpg, initiator_name))
            acl = NodeACL(tpg, initiator_name)
            acl_list[initiator_name] = acl
        return acl

    def create_mapped_lun(self, acl, num, lun):
        '''Map lun as LUN number num under acl, if acl has no mapped
        LUNs yet. Returns the new MappedLUN, or None if one already
        existed (only a single mapped LUN per ACL is supported).'''
        mapped_lun = None
        if not list(acl.mapped_luns):
            Log.info('creating mapped lun (%s, %s, %s)' % (acl, num, lun))
            mapped_lun = MappedLUN(acl, num, lun)
        else:
            Log.info('mapped lun (%s, %s, %s) already exists' %
                     (acl, num, lun))
        return mapped_lun
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteGatewaysOperations:
"""ExpressRouteGatewaysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Thin container wiring the shared pipeline client, serializers,
        # and client configuration into this operation group.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def list_by_subscription(
        self,
        **kwargs: Any
    ) -> "_models.ExpressRouteGatewayList":
        """Lists ExpressRoute gateways under a given subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ExpressRouteGatewayList, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_11_01.models.ExpressRouteGatewayList
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteGatewayList"]
        # Map auth/404/409 responses to typed exceptions; callers may
        # extend or override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        accept = "application/json"

        # Construct URL
        url = self.list_by_subscription.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ExpressRouteGatewayList', pipeline_response)

        if cls:
            # Hand the raw response to the caller-supplied callback.
            return cls(pipeline_response, deserialized, {})

        return deserialized
    list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteGateways'}  # type: ignore
    async def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> "_models.ExpressRouteGatewayList":
        """Lists ExpressRoute gateways in a given resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ExpressRouteGatewayList, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_11_01.models.ExpressRouteGatewayList
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteGatewayList"]
        # Map auth/404/409 responses to typed exceptions; callers may
        # extend or override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        accept = "application/json"

        # Construct URL
        url = self.list_by_resource_group.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ExpressRouteGatewayList', pipeline_response)

        if cls:
            # Hand the raw response to the caller-supplied callback.
            return cls(pipeline_response, deserialized, {})

        return deserialized
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways'}  # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        express_route_gateway_name: str,
        put_express_route_gateway_parameters: "_models.ExpressRouteGateway",
        **kwargs: Any
    ) -> "_models.ExpressRouteGateway":
        """Issue the initial PUT request for the create-or-update
        long-running operation and return the deserialized gateway.

        Used by begin_create_or_update; not part of the public surface.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteGateway"]
        # Map auth/404/409 responses to typed exceptions; callers may
        # extend or override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the request body and issue the PUT.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(put_express_route_gateway_parameters, 'ExpressRouteGateway')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # 200 (updated) and 201 (created) both carry the gateway body.
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteGateway', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('ExpressRouteGateway', pipeline_response)

        if cls:
            # Hand the raw response to the caller-supplied callback.
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        express_route_gateway_name: str,
        put_express_route_gateway_parameters: "_models.ExpressRouteGateway",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ExpressRouteGateway"]:
        """Creates or updates a ExpressRoute gateway in a specified resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param express_route_gateway_name: The name of the ExpressRoute gateway.
        :type express_route_gateway_name: str
        :param put_express_route_gateway_parameters: Parameters required in an ExpressRoute gateway PUT
         operation.
        :type put_express_route_gateway_parameters: ~azure.mgmt.network.v2019_11_01.models.ExpressRouteGateway
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ExpressRouteGateway or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_11_01.models.ExpressRouteGateway]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteGateway"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial PUT. cls=lambda returns
            # the raw PipelineResponse so the poller can drive the LRO.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                express_route_gateway_name=express_route_gateway_name,
                put_express_route_gateway_parameters=put_express_route_gateway_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These kwargs were consumed by the initial call; do not forward
        # them to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final response once polling completes.
            deserialized = self._deserialize('ExpressRouteGateway', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume an in-flight operation from its saved token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        express_route_gateway_name: str,
        **kwargs: Any
    ) -> "_models.ExpressRouteGateway":
        """Fetches the details of a ExpressRoute gateway in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param express_route_gateway_name: The name of the ExpressRoute gateway.
        :type express_route_gateway_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ExpressRouteGateway, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_11_01.models.ExpressRouteGateway
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteGateway"]
        # Map auth/404/409 responses to typed exceptions; callers may
        # extend or override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ExpressRouteGateway', pipeline_response)

        if cls:
            # Hand the raw response to the caller-supplied callback.
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}'}  # type: ignore
    async def _delete_initial(
        self,
        resource_group_name: str,
        express_route_gateway_name: str,
        **kwargs: Any
    ) -> None:
        """Issue the initial DELETE request for the delete long-running
        operation.

        Used by begin_delete; not part of the public surface.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map auth/404/409 responses to typed exceptions; callers may
        # extend or override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202 (accepted, polling continues) and 204 (already gone)
        # are all success for DELETE.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            # Hand the raw response to the caller-supplied callback.
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}'}  # type: ignore
async def begin_delete(
    self,
    resource_group_name: str,
    express_route_gateway_name: str,
    **kwargs: Any
) -> AsyncLROPoller[None]:
    """Deletes the specified ExpressRoute gateway in a resource group. An ExpressRoute gateway
    resource can only be deleted when there are no connection subresources.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param express_route_gateway_name: The name of the ExpressRoute gateway.
    :type express_route_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved poller state: fire the initial DELETE request now.
        # cls=lambda keeps the raw pipeline response for the poller.
        raw_result = await self._delete_initial(
            resource_group_name=resource_group_name,
            express_route_gateway_name=express_route_gateway_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs were consumed by the initial call; the poller must not see them.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # DELETE returns no body; only invoke the caller's hook, if given.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)  # type: AsyncPollingMethod
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}'}  # type: ignore
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
"""
GeneticCode objects contain the rules for translating cDNA into a protein
sequence: the set of valid start and stop codons, as well as which
amino acid each DNA triplet is translated into.
"""
class GeneticCode(object):
    """
    Represents distinct translation tables to go from cDNA triplets to amino
    acids.
    """

    def __init__(self, name, start_codons, stop_codons, codon_table):
        """
        Parameters
        ----------
        name : str
            Identifier for this genetic code (e.g. "standard").
        start_codons : iterable of str
            Codons that may initiate translation.
        stop_codons : iterable of str
            Codons that terminate translation (translated as '*').
        codon_table : dict
            Mapping from each of the 64 DNA triplets to a one-letter
            amino acid code.
        """
        self.name = name
        self.start_codons = set(start_codons)
        self.stop_codons = set(stop_codons)
        # Copy so that adding missing stop codons below cannot mutate the
        # caller's dictionary.
        self.codon_table = dict(codon_table)
        self._check_codons()

    def _check_codons(self):
        """
        Validate consistency between the codon table and the start/stop
        codon sets. Stop codons missing from the table are added
        (mapped to '*').

        Raises
        ------
        ValueError
            If a declared stop codon does not translate to '*', a start
            codon is missing from the table, a non-stop codon translates
            to '*', or the table does not contain exactly 64 codons.
        """
        for stop_codon in self.stop_codons:
            if stop_codon in self.codon_table:
                if self.codon_table[stop_codon] != "*":
                    # BUG FIX: the original message stated the relationship
                    # backwards ("not found in stop_codons, but codon table
                    # indicates that it should be") -- this branch fires when
                    # the codon IS declared a stop codon but the table
                    # disagrees.
                    raise ValueError(
                        ("Codon '%s' is in stop_codons, but codon table "
                         "does not translate it to '*'") % (stop_codon,))
            else:
                self.codon_table[stop_codon] = "*"

        for start_codon in self.start_codons:
            if start_codon not in self.codon_table:
                raise ValueError(
                    "Start codon '%s' missing from codon table" % (
                        start_codon,))

        # Only explicitly declared stop codons may translate to '*'.
        for codon, amino_acid in self.codon_table.items():
            if amino_acid == "*" and codon not in self.stop_codons:
                raise ValueError(
                    "Non-stop codon '%s' can't translate to '*'" % (
                        codon,))

        if len(self.codon_table) != 64:
            # BUG FIX: the trailing comma was previously inside the len()
            # call (len(self.codon_table,)) instead of forming the format
            # tuple; harmless at runtime but clearly unintended.
            raise ValueError(
                "Expected 64 codons but found %d in codon table" % (
                    len(self.codon_table),))

    def translate(self, cdna_sequence, first_codon_is_start=False):
        """
        Given a cDNA sequence which is aligned to a reading frame, returns
        the translated protein sequence and a boolean flag indicating whether
        the translated sequence ended on a stop codon (or just ran out of codons).

        Parameters
        ----------
        cdna_sequence : str
            cDNA sequence which is expected to start and end on complete codons.

        first_codon_is_start : bool
            Is the first codon of the sequence a start codon? If so, and the
            first triplet is in ``start_codons``, it is translated as 'M'.

        Returns
        -------
        (str, bool)
            Amino acid string and whether translation hit a stop codon.
        """
        if not isinstance(cdna_sequence, str):
            cdna_sequence = str(cdna_sequence)

        n = len(cdna_sequence)

        # trim to multiple of 3 length, if there are 1 or 2 nucleotides
        # dangling at the end of an mRNA they will not affect translation
        # since ribosome will fall off at that point
        end_idx = 3 * (n // 3)
        codon_table = self.codon_table
        if first_codon_is_start and cdna_sequence[:3] in self.start_codons:
            amino_acid_list = ['M']
            start_index = 3
        else:
            start_index = 0
            amino_acid_list = []

        ends_with_stop_codon = False
        for i in range(start_index, end_idx, 3):
            codon = cdna_sequence[i:i + 3]
            aa = codon_table[codon]
            if aa == "*":
                ends_with_stop_codon = True
                break
            amino_acid_list.append(aa)

        amino_acids = "".join(amino_acid_list)
        return amino_acids, ends_with_stop_codon

    def copy(
            self,
            name,
            start_codons=None,
            stop_codons=None,
            codon_table=None,
            codon_table_changes=None):
        """
        Make copy of this GeneticCode object with optional replacement
        values for all fields.

        ``codon_table_changes`` is applied on top of whichever codon table
        (inherited or replacement) is selected.
        """
        new_start_codons = (
            self.start_codons.copy()
            if start_codons is None
            else start_codons)

        new_stop_codons = (
            self.stop_codons.copy()
            if stop_codons is None
            else stop_codons)

        new_codon_table = (
            self.codon_table.copy()
            if codon_table is None
            else codon_table)

        if codon_table_changes is not None:
            new_codon_table.update(codon_table_changes)

        return GeneticCode(
            name=name,
            start_codons=new_start_codons,
            stop_codons=new_stop_codons,
            codon_table=new_codon_table)
# The standard (nuclear) genetic code. Besides the canonical ATG, CTG and
# TTG are also accepted as initiation codons here.
standard_genetic_code = GeneticCode(
    name="standard",
    start_codons={'ATG', 'CTG', 'TTG'},
    stop_codons={'TAA', 'TAG', 'TGA'},
    codon_table={
        'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L',
        'TCT': 'S', 'TCC': 'S', 'TCA': 'S', 'TCG': 'S',
        'TAT': 'Y', 'TAC': 'Y', 'TAA': '*', 'TAG': '*',
        'TGT': 'C', 'TGC': 'C', 'TGA': '*', 'TGG': 'W',
        'CTT': 'L', 'CTC': 'L', 'CTA': 'L', 'CTG': 'L',
        'CCT': 'P', 'CCC': 'P', 'CCA': 'P', 'CCG': 'P',
        'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',
        'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
        'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M',
        'ACT': 'T', 'ACC': 'T', 'ACA': 'T', 'ACG': 'T',
        'AAT': 'N', 'AAC': 'N', 'AAA': 'K', 'AAG': 'K',
        'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',
        'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
        'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A',
        'GAT': 'D', 'GAC': 'D', 'GAA': 'E', 'GAG': 'E',
        'GGT': 'G', 'GGC': 'G', 'GGA': 'G', 'GGG': 'G'
    }
)
# Non-canonical start sites based on figure 2 of
# "Global mapping of translation initiation sites in mammalian
# cells at single-nucleotide resolution"
# Stop codons and the codon table are inherited from the standard code;
# only the start codon set is widened.
standard_genetic_code_with_extra_start_codons = standard_genetic_code.copy(
    name="standard-with-extra-start-codons",
    start_codons=standard_genetic_code.start_codons.union({
        'GTG',
        'AGG',
        'ACG',
        'AAG',
        'ATC',
        'ATA',
        'ATT'}))
# NOTE(review): "verterbrate" looks like a typo for "vertebrate", but the
# name string is kept as-is in case anything looks this code up by name.
vertebrate_mitochondrial_genetic_code = standard_genetic_code.copy(
    name="verterbrate-mitochondrial",
    # "For thirty years AGA and AGG were considered terminators instead
    # of coding for arginine. However, Temperley (2010) has recently shown
    # that human mitochondria use only UAA and UAG stop codons."
    # (http://mitomap.org/bin/view.pl/MITOMAP/HumanMitoCode)
    stop_codons={'TAA', 'TAG'},
    # "AUU codes for isoleucine during elongation but can code for
    # methionine for initiation (ND2) See Fearnley & Walker (1987) and
    # Peabody (1989)."
    # (http://mitomap.org/bin/view.pl/MITOMAP/HumanMitoCode)
    start_codons=['ATT', 'ATC', 'ATA', 'ATG', 'GTG'],
    # "UGA codes for tryptophan instead of termination and AUA codes for
    # methionine instead of isoleucine."
    # (http://mitomap.org/bin/view.pl/MITOMAP/HumanMitoCode)
    codon_table_changes={'TGA': 'W', 'ATA': 'M'},
)
def translate_cdna(
        cdna_sequence,
        first_codon_is_start=False,
        mitochondrial=False):
    """
    Translate a frame-aligned cDNA sequence into a protein sequence.

    Returns the amino acid string together with a boolean flag telling
    whether translation stopped on a stop codon (as opposed to simply
    running out of codons).

    Parameters
    ----------
    cdna_sequence : str
        cDNA sequence which is expected to start and end on complete codons.

    first_codon_is_start : bool
        Treat the first codon as a potential initiation codon.

    mitochondrial : bool
        Use the mitochondrial codon table instead of standard
        codon to amino acid table.
    """
    # The caller is responsible for aligning the sequence to a reading
    # frame; we just pick the appropriate genetic code and delegate.
    code = (
        vertebrate_mitochondrial_genetic_code
        if mitochondrial
        else standard_genetic_code_with_extra_start_codons)
    return code.translate(
        cdna_sequence=cdna_sequence,
        first_codon_is_start=first_codon_is_start)
| |
# -*- coding: iso-8859-15 -*-
"""Handling for raw EC2 security group rules"""
from sgtools import utils
from operator import itemgetter
from functools import total_ordering
import fileinput
import re
class InvalidRule(Exception):
    """Raised when a rule string cannot be parsed into a Rule."""
    pass
class MultipleNameMatches(Exception):
    """Raised when a security-group name resolves to more than one group."""
    pass
# Module-level cache for group names. In a long-running process, this could be
# problematic as group names can change. When/if such an application arises,
# this simple dictionary-based cache should be replaced with one whose values
# can expire.
# Holds both GroupId -> GroupName and (unambiguous) GroupName -> GroupId
# entries in the same flat dict.
_groupname_cache = {}
def _prime_name_cache(aws):
    """Warm the module-level name cache from all security groups.

    Adds GroupId -> GroupName entries for every named group, and
    GroupName -> GroupId entries only for names mapping to exactly one
    group (ambiguous names are omitted so later lookups hit the API).
    """
    groups = aws.ec2.SecurityGroups.get()
    # add id => name mappings
    _groupname_cache.update(
        dict((g["GroupId"], g["GroupName"]) for g in groups if g["GroupName"]))
    # add name => id mappings
    group_names = {}
    for group in groups:
        group_names.setdefault(group["GroupName"], []).append(group["GroupId"])
    # BUG FIX: iterating the dict directly yields bare keys (strings), so the
    # 2-tuple unpacking raised ValueError; .items() yields (name, ids) pairs.
    for group_name, group_ids in group_names.items():
        if len(group_ids) == 1:
            _groupname_cache[group_name] = group_ids[0]
def sgid_to_name(aws, group_id):
    """Find the group name for the given group ID"""
    global _groupname_cache
    # Fast path: answer from the cache without touching the API.
    if group_id in _groupname_cache:
        return _groupname_cache[group_id]
    matches = aws.ec2.SecurityGroups.get(filters={'group-id': group_id})
    if matches:
        _groupname_cache[group_id] = matches[0]['GroupName']
    return _groupname_cache.get(group_id, None)
def read_rules(filenames):
    """Parse rule lines from *filenames*.

    Blank lines and '#' comments are skipped. Returns a (RuleSet, errors)
    pair where errors is the list of InvalidRule exceptions raised by
    unparseable lines.
    """
    parsed = RuleSet()
    problems = []
    formatter = RuleFormatter()
    for raw_line in fileinput.input(filenames):
        candidate = raw_line.strip()
        if not candidate or candidate.startswith("#"):
            continue
        try:
            parsed.add(formatter.parse_string(candidate))
        except InvalidRule as err:
            problems.append(err)
    return parsed, problems
def name_to_sgid(aws, name):
    """Find the group ID for the named security group. If more than one group
    matches, MultipleNameMatches is raised."""
    global _groupname_cache
    # Fast path: answer from the cache without touching the API.
    if name in _groupname_cache:
        return _groupname_cache[name]
    matches = aws.ec2.SecurityGroups.get(filters={'group-name': name})
    if matches:
        if len(matches) > 1:
            raise MultipleNameMatches("{} has more than one EC2 security group "
                                      "with the name '{}'".format(aws.region, name))
        _groupname_cache[name] = matches[0]['GroupId']
    return _groupname_cache.get(name, None)
class RuleFormatter(object):
    """Parse and format rules."""

    # Named regex fragments for each whitespace-separated rule field.
    patterns = {'Direction': "(?P<Direction>in|out)",
                'Group': "(?P<Group>[^ ]+)",
                'Other': "(?P<Other>[^ ]+)",
                'IpProtocol': "(?P<IpProtocol>tcp|udp|icmp|[0-9-]+)",
                'FromPort': "(?P<FromPort>None|[0-9-]+)",
                'ToPort': "(?P<ToPort>None|[0-9-]+)"}
    default_format = "{Direction} {Group} {Other} {IpProtocol} {FromPort} {ToPort}"

    def __init__(self, for_account=None):
        # Account id used to abbreviate "account/group" references that
        # belong to our own account.
        self.account = for_account

    def parse_group(self, group):
        """Disassemble the group field into a dict of Rule fields."""
        return {"GroupId": group}

    def parse_other(self, other):
        """Disassemble the other field into a dict of Rule fields."""
        if utils.parse_cidr(other):
            return {"OtherCidrIp": other}
        # Either "account/group-id" or a bare group id.
        account_id, _, group_id = other.rpartition("/")
        if account_id and account_id != self.account:
            return {
                'OtherUserId': account_id,
                'OtherGroupId': group_id,
            }
        return {'OtherGroupId': group_id}

    def parse_string(self, rule_string, patt=default_format):
        """Create a Rule from a string"""
        stripped = rule_string.strip()
        matcher = re.compile(patt.format(**self.patterns))
        match = matcher.match(stripped)
        if match is None:
            raise InvalidRule("Rule string format invalid: {}".format(stripped))
        fields = match.groupdict()
        fields.update(self.parse_group(fields["Group"]))
        fields.update(self.parse_other(fields["Other"]))
        return Rule(**fields)

    def format_group(self, rule):
        """Assemble a group string using Rule fields."""
        return rule["GroupId"]

    def format_other(self, rule):
        """Assemble an other string using Rule fields."""
        rendered = rule.other()
        prefix = "{}/".format(self.account)
        # Drop our own account prefix for brevity.
        return rendered[len(prefix):] if rendered.startswith(prefix) else rendered

    def format_rule(self, rule, fmt=default_format):
        """Produce a string representation of a Rule.

        NOTE: mutates *rule* by setting its 'Group' and 'Other' keys.
        """
        rule['Group'] = self.format_group(rule)
        rule['Other'] = self.format_other(rule)
        return fmt.format(**rule)
@total_ordering
class Rule(dict):
    """A distinct security group rule. A Rule may have the following keys:
    - Direction
    - GroupId
    - OtherCidrIp
    - OtherGroupId
    - IpProtocol
    - FromPort
    - ToPort
    """

    def __init__(self, *args, **kwargs):
        self.update(*args, **kwargs)

    def update(self, *args, **kwargs):
        # Funnel every assignment through __setitem__ so that values are
        # always normalized.
        if len(args) > 1:
            raise TypeError("update expected at most 1 arguments, got {}".format(len(args)))
        for key, value in dict(*args, **kwargs).items():
            self[key] = value

    @property
    def _key(self):
        # Canonical identity/sort key: group, then protocol/port triple,
        # then the (direction, source) pairs for CIDR and group sources.
        direction = self["Direction"]
        return (
            self["GroupId"],
            (self["IpProtocol"], self["FromPort"], self["ToPort"]),
            (direction, self.get("OtherCidrIp", '')),
            (direction, self.get("OtherGroupId", '')),
        )

    def __lt__(self, other):
        # note that test coverage for this method is dependent on the operator
        # used in the test. Use < to keep your numbers up.
        return self._key < other._key

    def __le__(self, other):
        return self._key <= other._key

    def __eq__(self, other):
        # Assumes GroupId is unique across all accounts in the region.
        # GroupId / OtherGroupId are the canonical identifiers; Description,
        # GroupName, OwnerId, VpcId, OtherUserId and OtherGroupName are
        # deliberately ignored.
        return self._key == other._key

    def __hash__(self):
        return hash(self._key)

    def __setitem__(self, key, value):
        # Normalize on the way in: ports become ints, well-known protocol
        # numbers become their names.
        if key in ("FromPort", "ToPort"):
            value = int(value)
        elif key == 'IpProtocol':
            value = {'1': 'icmp', '6': 'tcp', '17': 'udp'}.get(value, value)
        super(Rule, self).__setitem__(key, value)

    def other(self):
        """Render the rule's source/destination as a single string."""
        if self.get("OtherCidrIp"):
            return self['OtherCidrIp']
        if self.get("OtherUserId"):
            return "{}/{}".format(self['OtherUserId'], self['OtherGroupId'])
        return self['OtherGroupId']

    def proto_spec(self):
        """Return the (IpProtocol, FromPort, ToPort) triple."""
        return itemgetter("IpProtocol", "FromPort", "ToPort")(self)
class RuleSet(set):
    """A set of Rule objects, with conversions to and from the nested
    group/permission structures used by the EC2 API."""

    # field names copied between flat Rules and the nested API structures
    grp_flds = ('Description', 'GroupId', 'GroupName', 'OwnerId', 'VpcId')
    perm_flds = ('IpProtocol', 'FromPort', 'ToPort')
    perm_dft = {'FromPort': -1, 'ToPort': -1}
    othergrp_flds = ('UserId', 'GroupId', 'GroupName')
    othercidr_flds = ('CidrIp',)
    flat_othergrp_flds = list("Other" + f for f in othergrp_flds)
    flat_othercidr_flds = list("Other" + f for f in othercidr_flds)

    def add(self, rule):
        """Add *rule*, coercing plain mappings to Rule first."""
        if not isinstance(rule, Rule):
            # coerce mapping types to Rules
            rule = Rule(rule)
        super(RuleSet, self).add(rule)

    def flatten_groups(self, groups):
        """Flatten nested EC2 group structures into this set of Rules.

        Each (group, permission, source) combination becomes one Rule.
        Returns self for chaining.
        """
        directional_lists = (("IpPermissions", "in"), ("IpPermissionsEgress", "out"))
        for group in groups:
            for perm_list, direction in directional_lists:
                for perm in group[perm_list]:
                    for other in perm['UserIdGroupPairs']:
                        rule = Rule(Direction=direction)
                        utils.copy_fields(group, rule, self.grp_flds)
                        utils.copy_fields(perm, rule, self.perm_flds, defaults=self.perm_dft)
                        utils.copy_fields(other, rule, self.othergrp_flds, self.flat_othergrp_flds)
                        self.add(rule)
                    for other in perm['IpRanges']:
                        rule = Rule(Direction=direction)
                        utils.copy_fields(group, rule, self.grp_flds)
                        utils.copy_fields(perm, rule, self.perm_flds, defaults=self.perm_dft)
                        utils.copy_fields(other, rule, self.othercidr_flds, self.flat_othercidr_flds)
                        self.add(rule)
        return self

    def render_groups(self):
        """Reassemble this flat set of Rules into nested EC2 group dicts.

        Relies on Rule._key ordering: GroupId, then the protocol/port
        triple, then (Direction, source) pairs.
        """
        groups = []
        rules = sorted(self)
        current_group = None
        current_perm = None
        for rule in rules:
            new_group = rule["GroupId"]
            if new_group != current_group:
                group = {}
                utils.copy_fields(rule, group, self.grp_flds)
                group['IpPermissions'] = []
                group['IpPermissionsEgress'] = []
                groups.append(group)
                current_group = new_group
                # BUG FIX: without resetting here, a new group whose first
                # permission matched the previous group's last one would
                # never get its own perm entry.
                current_perm = None
            # BUG FIX: Direction is now part of the perm-change key; the
            # sort order places perms before directions, so an egress rule
            # sharing a proto/port triple with an ingress rule used to be
            # appended to the ingress perm.
            new_perm = (rule["Direction"], rule["IpProtocol"],
                        rule["FromPort"], rule["ToPort"])
            if new_perm != current_perm:
                perm = dict(zip(self.perm_flds, new_perm[1:]))
                perm['IpRanges'] = []
                perm['UserIdGroupPairs'] = []
                if rule["Direction"] == "in":
                    group['IpPermissions'].append(perm)
                else:
                    group['IpPermissionsEgress'].append(perm)
                current_perm = new_perm
            if "OtherCidrIp" in rule:
                perm['IpRanges'].append({"CidrIp": rule['OtherCidrIp']})
            else:
                # BUG FIX: this dict was previously assigned to ``group``,
                # clobbering the group being built; any later rule in the
                # same group then appended permissions into the pair dict.
                pair = {}
                utils.copy_fields(rule, pair, self.flat_othergrp_flds, self.othergrp_flds)
                perm['UserIdGroupPairs'].append(pair)
        return groups
| |
from decimal import Decimal
from numpy.testing import TestCase, run_module_suite, assert_equal, \
assert_almost_equal, assert_array_equal, assert_array_almost_equal, \
assert_, dec
import scipy.signal as signal
from scipy.signal import lfilter, correlate, convolve, convolve2d, hilbert
from numpy import array, arange
import numpy as np
class _TestConvolve(TestCase):
    """1-D/2-D convolution tests shared by old- and new-behavior subclasses."""

    def test_basic(self):
        a = [3, 4, 5, 6, 5, 4]
        b = [1, 2, 3]
        c = convolve(a, b)
        assert_array_equal(c, array([3, 10, 22, 28, 32, 32, 23, 12]))

    def test_complex(self):
        x = array([1 + 1j, 2 + 1j, 3 + 1j])
        y = array([1 + 1j, 2 + 1j])
        z = convolve(x, y)
        assert_array_equal(z, array([2j, 2 + 6j, 5 + 8j, 5 + 5j]))

    def test_zero_order(self):
        # scalar inputs: convolution degenerates to a simple product
        a = 1289
        b = 4567
        c = convolve(a, b)
        assert_array_equal(c, a * b)

    def test_2d_arrays(self):
        a = [[1, 2, 3], [3, 4, 5]]
        b = [[2, 3, 4], [4, 5, 6]]
        c = convolve(a, b)
        d = array([[2, 7, 16, 17, 12],
                   [10, 30, 62, 58, 38],
                   [12, 31, 58, 49, 30]])
        assert_array_equal(c, d)

    def test_valid_mode(self):
        a = [1, 2, 3, 6, 5, 3]
        b = [2, 3, 4, 5, 3, 4, 2, 2, 1]
        c = convolve(a, b, 'valid')
        assert_array_equal(c, array([70, 78, 73, 65]))
class TestConvolve(_TestConvolve):
    """Overrides for the new convolve behavior."""

    def test_valid_mode(self):
        # 'valid' mode if b.size > a.size does not make sense with the new
        # behavior
        a = [1, 2, 3, 6, 5, 3]
        b = [2, 3, 4, 5, 3, 4, 2, 2, 1]

        def _test():
            convolve(a, b, 'valid')
        self.assertRaises(ValueError, _test)

    def test_same_mode(self):
        a = [1, 2, 3, 3, 1, 2]
        b = [1, 4, 3, 4, 5, 6, 7, 4, 3, 2, 1, 1, 3]
        c = convolve(a, b, 'same')
        # 'same' output has the length of the first input
        d = array([57, 61, 63, 57, 45, 36])
        assert_array_equal(c, d)
class _TestConvolve2d(TestCase):
    """2-D convolution tests covering modes, fill values and boundaries."""

    def test_2d_arrays(self):
        a = [[1, 2, 3], [3, 4, 5]]
        b = [[2, 3, 4], [4, 5, 6]]
        d = array([[2, 7, 16, 17, 12],
                   [10, 30, 62, 58, 38],
                   [12, 31, 58, 49, 30]])
        e = convolve2d(a, b)
        assert_array_equal(e, d)

    def test_valid_mode(self):
        e = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
        f = [[1, 2, 3], [3, 4, 5]]
        g = convolve2d(e, f, 'valid')
        h = array([[62, 80, 98, 116, 134]])
        assert_array_equal(g, h)

    def test_fillvalue(self):
        # pad the boundary with a constant value instead of zeros
        a = [[1, 2, 3], [3, 4, 5]]
        b = [[2, 3, 4], [4, 5, 6]]
        fillval = 1
        c = convolve2d(a, b, 'full', 'fill', fillval)
        d = array([[24, 26, 31, 34, 32],
                   [28, 40, 62, 64, 52],
                   [32, 46, 67, 62, 48]])
        assert_array_equal(c, d)

    def test_wrap_boundary(self):
        # circular (periodic) boundary handling
        a = [[1, 2, 3], [3, 4, 5]]
        b = [[2, 3, 4], [4, 5, 6]]
        c = convolve2d(a, b, 'full', 'wrap')
        d = array([[80, 80, 74, 80, 80],
                   [68, 68, 62, 68, 68],
                   [80, 80, 74, 80, 80]])
        assert_array_equal(c, d)

    def test_sym_boundary(self):
        # symmetric (mirror) boundary handling
        a = [[1, 2, 3], [3, 4, 5]]
        b = [[2, 3, 4], [4, 5, 6]]
        c = convolve2d(a, b, 'full', 'symm')
        d = array([[34, 30, 44, 62, 66],
                   [52, 48, 62, 80, 84],
                   [82, 78, 92, 110, 114]])
        assert_array_equal(c, d)
#class TestConvolve2d(_TestConvolve2d):
# def test_same_mode(self):
# e = [[1,2,3],[3,4,5]]
# f = [[2,3,4,5,6,7,8],[4,5,6,7,8,9,10]]
# g = convolve2d(e,f,'same')
# h = array([[80,98,116],\
# [70,82,94]])
# assert_array_equal(g,h)
#
# def test_valid_mode2(self):
# # Test when in2.size > in1.size
# e = [[1,2,3],[3,4,5]]
# f = [[2,3,4,5,6,7,8],[4,5,6,7,8,9,10]]
# def _test():
# convolve2d(e,f,'valid')
# self.assertRaises(ValueError, _test)
class TestFFTConvolve(TestCase):
    """FFT-based convolution tests: real/complex, 1-D/2-D, all modes."""

    def test_real(self):
        x = array([1, 2, 3])
        assert_array_almost_equal(signal.fftconvolve(x, x), [1, 4, 10, 12, 9.])

    def test_complex(self):
        x = array([1 + 1j, 2 + 2j, 3 + 3j])
        assert_array_almost_equal(signal.fftconvolve(x, x),
                                  [0 + 2.0j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j])

    def test_2d_real_same(self):
        a = array([[1, 2, 3], [4, 5, 6]])
        assert_array_almost_equal(signal.fftconvolve(a, a),
                                  array([[1, 4, 10, 12, 9],
                                         [8, 26, 56, 54, 36],
                                         [16, 40, 73, 60, 36]]))

    def test_2d_complex_same(self):
        a = array([[1 + 2j, 3 + 4j, 5 + 6j], [2 + 1j, 4 + 3j, 6 + 5j]])
        c = signal.fftconvolve(a, a)
        d = array([[-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j],
                   [10j, 44j, 118j, 156j, 122j],
                   [3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j]])
        assert_array_almost_equal(c, d)

    def test_real_same_mode(self):
        a = array([1, 2, 3])
        b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
        c = signal.fftconvolve(a, b, 'same')
        d = array([9., 20., 25., 35., 41., 47., 39., 28., 2.])
        assert_array_almost_equal(c, d)

    def test_real_valid_mode(self):
        a = array([3, 2, 1])
        b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
        c = signal.fftconvolve(a, b, 'valid')
        d = array([24., 31., 41., 43., 49., 25., 12.])
        assert_array_almost_equal(c, d)

    def test_zero_order(self):
        # single-element inputs degenerate to a product
        a = array([4967])
        b = array([3920])
        c = signal.fftconvolve(a, b)
        d = a * b
        assert_equal(c, d)

    def test_random_data(self):
        # cross-check against the direct np.convolve implementation
        np.random.seed(1234)
        a = np.random.rand(1233) + 1j * np.random.rand(1233)
        b = np.random.rand(1321) + 1j * np.random.rand(1321)
        c = signal.fftconvolve(a, b, 'full')
        d = np.convolve(a, b, 'full')
        assert_(np.allclose(c, d, rtol=1e-10))
class TestMedFilt(TestCase):
    """Tests for signal.medfilt and signal.medfilt2d."""

    def test_basic(self):
        """medfilt and medfilt2d agree and match a precomputed result."""
        f = [[50, 50, 50, 50, 50, 92, 18, 27, 65, 46],
             [50, 50, 50, 50, 50, 0, 72, 77, 68, 66],
             [50, 50, 50, 50, 50, 46, 47, 19, 64, 77],
             [50, 50, 50, 50, 50, 42, 15, 29, 95, 35],
             [50, 50, 50, 50, 50, 46, 34, 9, 21, 66],
             [70, 97, 28, 68, 78, 77, 61, 58, 71, 42],
             [64, 53, 44, 29, 68, 32, 19, 68, 24, 84],
             [3, 33, 53, 67, 1, 78, 74, 55, 12, 83],
             [7, 11, 46, 70, 60, 47, 24, 43, 61, 26],
             [32, 61, 88, 7, 39, 4, 92, 64, 45, 61]]

        d = signal.medfilt(f, [7, 3])
        # BUG FIX: np.float was a deprecated alias of the builtin float,
        # removed in NumPy 1.20+; use float directly (same dtype).
        e = signal.medfilt2d(np.array(f, float), [7, 3])
        assert_array_equal(d, [[0, 50, 50, 50, 42, 15, 15, 18, 27, 0],
                               [0, 50, 50, 50, 50, 42, 19, 21, 29, 0],
                               [50, 50, 50, 50, 50, 47, 34, 34, 46, 35],
                               [50, 50, 50, 50, 50, 50, 42, 47, 64, 42],
                               [50, 50, 50, 50, 50, 50, 46, 55, 64, 35],
                               [33, 50, 50, 50, 50, 47, 46, 43, 55, 26],
                               [32, 50, 50, 50, 50, 47, 46, 45, 55, 26],
                               [7, 46, 50, 50, 47, 46, 46, 43, 45, 21],
                               [0, 32, 33, 39, 32, 32, 43, 43, 43, 0],
                               [0, 7, 11, 7, 4, 4, 19, 19, 24, 0]])
        assert_array_equal(d, e)

    def test_none(self):
        """Ticket #1124. Ensure this does not segfault."""
        try:
            signal.medfilt(None)
        # ROBUSTNESS: narrowed from a bare except, which also swallowed
        # SystemExit/KeyboardInterrupt; a Python-level error is expected
        # here, only a hard crash would be a failure.
        except Exception:
            pass
class TestWiener(TestCase):
    """Test signal.wiener adaptive filtering against a precomputed result."""

    def test_basic(self):
        noisy = array([[5, 6, 4, 3],
                       [3, 5, 6, 2],
                       [2, 3, 5, 6],
                       [1, 6, 9, 7]], 'd')
        expected = array([[2.16374269, 3.2222222222, 2.8888888889, 1.6666666667],
                          [2.666666667, 4.33333333333, 4.44444444444, 2.8888888888],
                          [2.222222222, 4.4444444444, 5.4444444444, 4.801066874837],
                          [1.33333333333, 3.92735042735, 6.0712560386, 5.0404040404]])
        filtered = signal.wiener(noisy)
        assert_array_almost_equal(filtered, expected, decimal=6)
class TestCSpline1DEval(TestCase):
    """Cubic-spline coefficients must reproduce the data at knot points."""

    def test_basic(self):
        knots = array([1, 2, 3, 4, 3, 2, 1, 2, 3.0])
        xs = arange(len(knots))
        step = xs[1] - xs[0]
        coeffs = signal.cspline1d(knots)
        # evaluate on a 10x denser grid covering the same interval
        dense = arange(len(knots) * 10.0) / 10.0
        interpolated = signal.cspline1d_eval(coeffs, dense, dx=step, x0=xs[0])
        # make sure interpolated values are on knot points
        assert_array_almost_equal(interpolated[::10], knots, decimal=5)
class TestOrderFilt(TestCase):
    """Smoke test for rank-order filtering."""

    def test_basic(self):
        # domain [1, 0, 1] picks each element's two neighbors; rank 1 takes
        # the larger of the two (zero-padded at the edges).
        result = signal.order_filter([1, 2, 3], [1, 0, 1], 1)
        assert_array_equal(result, [2, 3, 2])
class _TestLinearFilter(TestCase):
    """lfilter tests parameterized over dtype; subclasses set ``dt``."""

    # concrete subclasses set this to the dtype under test
    dt = None

    def test_rank1(self):
        x = np.linspace(0, 5, 6).astype(self.dt)
        b = np.array([1, -1]).astype(self.dt)
        a = np.array([0.5, -0.5]).astype(self.dt)

        # Test simple IIR
        y_r = np.array([0, 2, 4, 6, 8, 10.]).astype(self.dt)
        assert_array_almost_equal(lfilter(b, a, x), y_r)

        # Test simple FIR
        b = np.array([1, 1]).astype(self.dt)
        a = np.array([1]).astype(self.dt)
        y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(self.dt)
        assert_array_almost_equal(lfilter(b, a, x), y_r)

        # Test IIR with initial conditions
        b = np.array([1, 1]).astype(self.dt)
        a = np.array([1]).astype(self.dt)
        zi = np.array([1]).astype(self.dt)
        y_r = np.array([1, 1, 3, 5, 7, 9.]).astype(self.dt)
        zf_r = np.array([5]).astype(self.dt)
        y, zf = lfilter(b, a, x, zi=zi)
        assert_array_almost_equal(y, y_r)
        assert_array_almost_equal(zf, zf_r)

        # longer FIR with two initial-condition values
        b = np.array([1, 1, 1]).astype(self.dt)
        a = np.array([1]).astype(self.dt)
        zi = np.array([1, 1]).astype(self.dt)
        y_r = np.array([1, 2, 3, 6, 9, 12.]).astype(self.dt)
        zf_r = np.array([9, 5]).astype(self.dt)
        y, zf = lfilter(b, a, x, zi=zi)
        assert_array_almost_equal(y, y_r)
        assert_array_almost_equal(zf, zf_r)

    def test_rank2(self):
        shape = (4, 3)
        x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
        x = x.astype(self.dt)

        b = np.array([1, -1]).astype(self.dt)
        a = np.array([0.5, 0.5]).astype(self.dt)

        # expected outputs for filtering along axis 0 and axis 1
        y_r2_a0 = np.array([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6, 4, 2]],
                           dtype=self.dt)
        y_r2_a1 = np.array([[0, 2, 0], [6, -4, 6], [12, -10, 12],
                            [18, -16, 18]], dtype=self.dt)

        y = lfilter(b, a, x, axis=0)
        assert_array_almost_equal(y_r2_a0, y)

        y = lfilter(b, a, x, axis=1)
        assert_array_almost_equal(y_r2_a1, y)

    def test_rank2_init_cond_a1(self):
        # Test initial condition handling along axis 1
        shape = (4, 3)
        x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
        x = x.astype(self.dt)

        b = np.array([1, -1]).astype(self.dt)
        a = np.array([0.5, 0.5]).astype(self.dt)

        y_r2_a0_1 = np.array([[1, 1, 1], [7, -5, 7], [13, -11, 13],
                              [19, -17, 19]], dtype=self.dt)
        zf_r = np.array([-5, -17, -29, -41])[:, np.newaxis].astype(self.dt)
        y, zf = lfilter(b, a, x, axis=1, zi=np.ones((4, 1)))
        assert_array_almost_equal(y_r2_a0_1, y)
        assert_array_almost_equal(zf, zf_r)

    def test_rank2_init_cond_a0(self):
        # Test initial condition handling along axis 0
        shape = (4, 3)
        x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
        x = x.astype(self.dt)

        b = np.array([1, -1]).astype(self.dt)
        a = np.array([0.5, 0.5]).astype(self.dt)

        y_r2_a0_0 = np.array([[1, 3, 5], [5, 3, 1], [1, 3, 5], [5, 3, 1]],
                             dtype=self.dt)
        zf_r = np.array([[-23, -23, -23]], dtype=self.dt)
        y, zf = lfilter(b, a, x, axis=0, zi=np.ones((1, 3)))
        assert_array_almost_equal(y_r2_a0_0, y)
        assert_array_almost_equal(zf, zf_r)

    def test_rank3(self):
        shape = (4, 3, 2)
        x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)

        b = np.array([1, -1]).astype(self.dt)
        a = np.array([0.5, 0.5]).astype(self.dt)

        # Test last axis: 3-D filtering must match per-row 1-D filtering
        y = lfilter(b, a, x)
        for i in range(x.shape[0]):
            for j in range(x.shape[1]):
                assert_array_almost_equal(y[i, j], lfilter(b, a, x[i, j]))

    def test_empty_zi(self):
        """Regression test for #880: empty array for zi crashes."""
        a = np.ones(1).astype(self.dt)
        b = np.ones(1).astype(self.dt)
        x = np.arange(5).astype(self.dt)
        zi = np.ones(0).astype(self.dt)
        y, zf = lfilter(b, a, x, zi=zi)
        assert_array_almost_equal(y, x)
        self.assertTrue(zf.dtype == self.dt)
        self.assertTrue(zf.size == 0)
# Concrete dtype specializations of the linear-filter tests.
class TestLinearFilterFloat32(_TestLinearFilter):
    dt = np.float32

class TestLinearFilterFloat64(_TestLinearFilter):
    dt = np.float64

class TestLinearFilterFloatExtended(_TestLinearFilter):
    dt = np.longdouble

class TestLinearFilterComplex64(_TestLinearFilter):
    dt = np.complex64

class TestLinearFilterComplex128(_TestLinearFilter):
    dt = np.complex128

# NOTE(review): the class name below looks garbled ("...xxiExtended28") but
# is kept unchanged since test classes are discovered, not referenced.
class TestLinearFilterComplexxxiExtended28(_TestLinearFilter):
    # BUG FIX: np.longcomplex was a deprecated alias removed in NumPy 2.0;
    # np.clongdouble is the same extended-precision complex scalar type.
    dt = np.clongdouble

class TestLinearFilterDecimal(_TestLinearFilter):
    # object-dtype path: filtering arrays of Python Decimal values
    dt = np.dtype(Decimal)
class _TestCorrelateReal(TestCase):
    """correlate tests for real dtypes; subclasses set ``dt``."""

    # concrete subclasses set this to the dtype under test
    dt = None

    def _setup_rank1(self):
        # a.size should be greated than b.size for the tests
        a = np.linspace(0, 3, 4).astype(self.dt)
        b = np.linspace(1, 2, 2).astype(self.dt)

        y_r = np.array([0, 2, 5, 8, 3]).astype(self.dt)
        return a, b, y_r

    def test_rank1_valid(self):
        a, b, y_r = self._setup_rank1()
        y = correlate(a, b, 'valid')
        assert_array_almost_equal(y, y_r[1:4])
        self.assertTrue(y.dtype == self.dt)

    def test_rank1_same(self):
        a, b, y_r = self._setup_rank1()
        y = correlate(a, b, 'same')
        assert_array_almost_equal(y, y_r[:-1])
        self.assertTrue(y.dtype == self.dt)

    def test_rank1_full(self):
        a, b, y_r = self._setup_rank1()
        y = correlate(a, b, 'full')
        assert_array_almost_equal(y, y_r)
        self.assertTrue(y.dtype == self.dt)

    def _setup_rank3(self):
        # Fortran-ordered inputs; y_r is the full 3-D correlation result
        a = np.linspace(0, 39, 40).reshape((2, 4, 5), order='F').astype(self.dt)
        b = np.linspace(0, 23, 24).reshape((2, 3, 4), order='F').astype(self.dt)

        y_r = array([[[0., 184., 504., 912., 1360., 888., 472., 160.],
                      [46., 432., 1062., 1840., 2672., 1698., 864., 266.],
                      [134., 736., 1662., 2768., 3920., 2418., 1168., 314.],
                      [260., 952., 1932., 3056., 4208., 2580., 1240., 332.],
                      [202., 664., 1290., 1984., 2688., 1590., 712., 150.],
                      [114., 344., 642., 960., 1280., 726., 296., 38.]],
                     [[23., 400., 1035., 1832., 2696., 1737., 904., 293.],
                      [134., 920., 2166., 3680., 5280., 3306., 1640., 474.],
                      [325., 1544., 3369., 5512., 7720., 4683., 2192., 535.],
                      [571., 1964., 3891., 6064., 8272., 4989., 2324., 565.],
                      [434., 1360., 2586., 3920., 5264., 3054., 1312., 230.],
                      [241., 700., 1281., 1888., 2496., 1383., 532., 39.]],
                     [[22., 214., 528., 916., 1332., 846., 430., 132.],
                      [86., 484., 1098., 1832., 2600., 1602., 772., 206.],
                      [188., 802., 1698., 2732., 3788., 2256., 1018., 218.],
                      [308., 1006., 1950., 2996., 4052., 2400., 1078., 230.],
                      [230., 692., 1290., 1928., 2568., 1458., 596., 78.],
                      [126., 354., 636., 924., 1212., 654., 234., 0.]]],
                    dtype=self.dt)
        return a, b, y_r

    def test_rank3_valid(self):
        a, b, y_r = self._setup_rank3()
        y = correlate(a, b, "valid")
        assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5])
        self.assertTrue(y.dtype == self.dt)

    def test_rank3_same(self):
        a, b, y_r = self._setup_rank3()
        y = correlate(a, b, "same")
        assert_array_almost_equal(y, y_r[0:-1, 1:-1, 1:-2])
        self.assertTrue(y.dtype == self.dt)

    def test_rank3_all(self):
        a, b, y_r = self._setup_rank3()
        y = correlate(a, b)
        assert_array_almost_equal(y, y_r)
        self.assertTrue(y.dtype == self.dt)
def _get_testcorrelate_class(datatype, base):
class TestCorrelateX(base):
dt = datatype
TestCorrelateX.__name__ = "TestCorrelate%s" % datatype.__name__.title()
return TestCorrelateX
# Register a concrete correlate test class per real dtype in module globals.
# BUG FIX: np.int was a deprecated alias of the builtin int (removed in
# NumPy 1.24); np.int_ is the equivalent scalar type. The original list
# also repeated np.ulonglong where the signed np.longlong was clearly
# intended (every other entry comes in an unsigned/signed pair).
for datatype in [np.ubyte, np.byte, np.ushort, np.short, np.uint, np.int_,
                 np.ulonglong, np.longlong, np.float32, np.float64,
                 np.longdouble, Decimal]:
    cls = _get_testcorrelate_class(datatype, _TestCorrelateReal)
    globals()[cls.__name__] = cls
class _TestCorrelateComplex(TestCase):
    """correlate tests for complex dtypes; subclasses set ``dt``/``decimal``."""

    # The numpy data type to use.
    dt = None

    # The decimal precision to be used for comparing results.
    # This value will be passed as the 'decimal' keyword argument of
    # assert_array_almost_equal().
    decimal = None

    def _setup_rank1(self, mode):
        np.random.seed(9)
        a = np.random.randn(10).astype(self.dt)
        a += 1j * np.random.randn(10).astype(self.dt)
        b = np.random.randn(8).astype(self.dt)
        b += 1j * np.random.randn(8).astype(self.dt)

        # reference: complex correlation decomposed into four real ones
        y_r = (correlate(a.real, b.real, mode=mode) +
               correlate(a.imag, b.imag, mode=mode)).astype(self.dt)
        y_r += 1j * (-correlate(a.real, b.imag, mode=mode) +
                     correlate(a.imag, b.real, mode=mode))
        return a, b, y_r

    def test_rank1_valid(self):
        a, b, y_r = self._setup_rank1('valid')
        y = correlate(a, b, 'valid')
        assert_array_almost_equal(y, y_r, decimal=self.decimal)
        self.assertTrue(y.dtype == self.dt)

    def test_rank1_same(self):
        a, b, y_r = self._setup_rank1('same')
        y = correlate(a, b, 'same')
        assert_array_almost_equal(y, y_r, decimal=self.decimal)
        self.assertTrue(y.dtype == self.dt)

    def test_rank1_full(self):
        a, b, y_r = self._setup_rank1('full')
        y = correlate(a, b, 'full')
        assert_array_almost_equal(y, y_r, decimal=self.decimal)
        self.assertTrue(y.dtype == self.dt)

    def test_rank3(self):
        a = np.random.randn(10, 8, 6).astype(self.dt)
        a += 1j * np.random.randn(10, 8, 6).astype(self.dt)
        b = np.random.randn(8, 6, 4).astype(self.dt)
        b += 1j * np.random.randn(8, 6, 4).astype(self.dt)

        y_r = (correlate(a.real, b.real)
               + correlate(a.imag, b.imag)).astype(self.dt)
        y_r += 1j * (-correlate(a.real, b.imag) + correlate(a.imag, b.real))

        y = correlate(a, b, 'full')
        # looser tolerance: 3-D accumulates more rounding error
        assert_array_almost_equal(y, y_r, decimal=self.decimal - 1)
        self.assertTrue(y.dtype == self.dt)
# Generate one test class per complex data type; the class name becomes
# TestCorrelateComplex### where ### reflects the dtype. Comparison precision
# is scaled to two thirds of the dtype's decimal precision.
for complex_type in [np.csingle, np.cdouble, np.clongdouble]:
    testcase = _get_testcorrelate_class(complex_type, _TestCorrelateComplex)
    testcase.decimal = int(2 * np.finfo(complex_type).precision / 3)
    globals()[testcase.__name__] = testcase
class TestFiltFilt:
    """Smoke test: filtfilt with identical numerator and denominator
    coefficients must act as the identity filter."""
    def test_basic(self):
        data = np.arange(12)
        filtered = signal.filtfilt([1, 2, 3], [1, 2, 3], data)
        assert_equal(filtered, arange(12))
class TestDecimate:
    """Smoke test: decimating by 2 with an order-1 filter keeps every
    other sample (after rounding)."""
    def test_basic(self):
        data = np.arange(6)
        decimated = signal.decimate(data, 2, n=1).round()
        assert_array_equal(decimated, data[::2])
class TestHilbert:
    """Tests for scipy.signal.hilbert (analytic signal construction)."""
    def test_hilbert_theoretical(self):
        """Check hilbert() against closed-form analytic signals of
        sin/cos at two frequencies."""
        #test cases by Ariel Rokem
        decimal = 14
        pi = np.pi
        t = np.arange(0, 2*pi, pi/256)
        a0 = np.sin(t)
        a1 = np.cos(t)
        a2 = np.sin(2*t)
        a3 = np.cos(2*t)
        a = np.vstack([a0,a1,a2,a3])
        h = hilbert(a)
        h_abs = np.abs(h)
        h_angle = np.angle(h)
        h_real = np.real(h)
        #The real part should be equal to the original signals:
        assert_almost_equal(h_real, a, decimal)
        #The absolute value should be one everywhere, for this input:
        assert_almost_equal(h_abs, np.ones(a.shape), decimal)
        #For the 'slow' sine - the phase should go from -pi/2 to pi/2 in
        #the first 256 bins:
        assert_almost_equal(h_angle[0,:256], np.arange(-pi/2,pi/2,pi/256),
                            decimal)
        #For the 'slow' cosine - the phase should go from 0 to pi in the
        #same interval:
        assert_almost_equal(h_angle[1,:256], np.arange(0,pi,pi/256), decimal)
        #The 'fast' sine should make this phase transition in half the time:
        assert_almost_equal(h_angle[2,:128], np.arange(-pi/2,pi/2,pi/128),
                            decimal)
        #Ditto for the 'fast' cosine:
        assert_almost_equal(h_angle[3,:128], np.arange(0,pi,pi/128), decimal)
        #The imaginary part of hilbert(cos(t)) = sin(t) Wikipedia
        assert_almost_equal(h[1].imag, a0, decimal)
    def test_hilbert_axisN(self):
        """Check the axis= and N= arguments of hilbert().

        NOTE(review): this is a nose-style generator test (``yield``
        assertions); modern pytest does not collect these — confirm the
        intended test runner before porting.
        """
        # tests for axis and N arguments
        a = np.arange(18).reshape(3,6)
        # test axis
        aa = hilbert(a, axis=-1)
        yield assert_equal, hilbert(a.T, axis=0), aa.T
        # test 1d
        yield assert_equal, hilbert(a[0]), aa[0]
        # test N
        aan = hilbert(a, N=20, axis=-1)
        yield assert_equal, aan.shape, [3,20]
        yield assert_equal, hilbert(a.T, N=20, axis=0).shape, [20,3]
        #the next test is just a regression test,
        #no idea whether numbers make sense
        a0hilb = np.array(
            [ 0.000000000000000e+00-1.72015830311905j ,
              1.000000000000000e+00-2.047794505137069j,
              1.999999999999999e+00-2.244055555687583j,
              3.000000000000000e+00-1.262750302935009j,
              4.000000000000000e+00-1.066489252384493j,
              5.000000000000000e+00+2.918022706971047j,
              8.881784197001253e-17+3.845658908989067j,
              -9.444121133484362e-17+0.985044202202061j,
              -1.776356839400251e-16+1.332257797702019j,
              -3.996802888650564e-16+0.501905089898885j,
              1.332267629550188e-16+0.668696078880782j,
              -1.192678053963799e-16+0.235487067862679j,
              -1.776356839400251e-16+0.286439612812121j,
              3.108624468950438e-16+0.031676888064907j,
              1.332267629550188e-16-0.019275656884536j,
              -2.360035624836702e-16-0.1652588660287j ,
              0.000000000000000e+00-0.332049855010597j,
              3.552713678800501e-16-0.403810179797771j,
              8.881784197001253e-17-0.751023775297729j,
              9.444121133484362e-17-0.79252210110103j ])
        yield assert_almost_equal, aan[0], a0hilb, 14, 'N regression'
if __name__ == "__main__":
    # Allow running this test module directly (numpy/nose-style runner).
    run_module_suite()
| |
# vim:ts=4:sw=4:et:
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
import stat
import string
import sys
import time
import unittest
import pywatchman
from watchman.integration.lib import WatchmanInstance
try:
import grp
except ImportError:
# Windows
pass
@unittest.skipIf(
    os.name == "nt" or sys.platform == "darwin" or os.geteuid() == 0,
    "win or root or bad ldap",
)
class TestSockPerms(unittest.TestCase):
    """Integration tests for watchman's state-dir and unix-socket
    permission handling.

    Each test spawns a private watchman instance with a specific
    ``sock_group``/``sock_access`` config and asserts either a startup
    failure (with a specific message in the CLI/server log) or the
    resulting group ownership and mode bits of the state directory and
    socket file.
    """
    def _new_instance(self, config, expect_success=True):
        """Build (but do not start) a watchman instance using *config*."""
        if expect_success:
            start_timeout = 20
        else:
            # If the instance is going to fail anyway then there's no point
            # waiting so long
            start_timeout = 5
        return WatchmanInstance.InstanceWithStateDir(
            config=config, start_timeout=start_timeout
        )
    def _get_custom_gid(self):
        """Return a gid we belong to that is neither the real nor the
        effective gid, or skip the test."""
        # This is a bit hard to do: we need to find a group the user is a member
        # of that's not the effective or real gid. If there are none then we
        # must skip.
        groups = os.getgroups()
        for gid in groups:
            if gid != os.getgid() and gid != os.getegid():
                return gid
        self.skipTest("no usable groups found")
    def _get_non_member_group(self):
        """Get a group tuple that this user is not a member of."""
        user_groups = set(os.getgroups())
        for group in grp.getgrall():
            if group.gr_gid not in user_groups:
                return group
        self.skipTest("no usable groups found")
    def waitFor(self, cond, timeout=20):
        """Poll *cond* until truthy or *timeout* elapses.

        Returns ``[True, result]`` on success, ``[False, last_result]``
        on timeout. Exceptions raised by *cond* count as "not yet".
        """
        deadline = time.time() + timeout
        res = None
        while time.time() < deadline:
            try:
                res = cond()
                if res:
                    return [True, res]
            except Exception:
                pass
            time.sleep(0.03)
        return [False, res]
    def assertWaitFor(self, cond, timeout=60, message=None, get_debug_output=None):
        """Assert that *cond* becomes truthy within *timeout* seconds,
        appending *get_debug_output()* to the failure message if given."""
        status, res = self.waitFor(cond, timeout)
        if status:
            return res
        if message is None:
            message = "%s was not met in %s seconds: %s" % (cond, timeout, res)
        if get_debug_output is not None:
            message += "\ndebug output:\n%s" % get_debug_output()
        self.fail(message)
    def test_too_open_user_dir(self):
        """Startup must fail when the state dir is world-writable."""
        instance = self._new_instance({}, expect_success=False)
        os.makedirs(instance.user_dir)
        os.chmod(instance.user_dir, 0o777)
        with self.assertRaises(pywatchman.SocketConnectError) as ctx:
            instance.start()
        self.assertEqual(ctx.exception.sockpath, instance.getSockPath().unix_domain)
        wanted = "the permissions on %s allow others to write to it" % (
            instance.user_dir
        )
        self.assertWaitFor(
            lambda: wanted in instance.getCLILogContents(),
            get_debug_output=lambda: instance.getCLILogContents(),
        )
    def test_invalid_sock_group(self):
        """Startup must fail when sock_group names a nonexistent group."""
        # create a random group name
        while True:
            group_name = "".join(
                random.choice(string.ascii_lowercase) for _ in range(8)
            )
            try:
                grp.getgrnam(group_name)
            except KeyError:
                break
        instance = self._new_instance({"sock_group": group_name}, expect_success=False)
        with self.assertRaises(pywatchman.SocketConnectError) as ctx:
            instance.start()
        self.assertEqual(ctx.exception.sockpath, instance.getSockPath().unix_domain)
        # This is the error we expect to find
        wanted = "group '%s' does not exist" % group_name
        # But if the site uses LDAP or YP/NIS or other similar technology for
        # their password database then we might experience other infra flakeyness
        # so we allow for the alternative error case to be present and consider
        # it a pass.
        we_love_ldap = "getting gid for '%s' failed:" % group_name
        self.assertWaitFor(
            lambda: (wanted in instance.getCLILogContents())
            or (we_love_ldap in instance.getCLILogContents()),
            get_debug_output=lambda: str(ctx.exception)
            + "\n"
            + instance.getCLILogContents(),
        )
    def test_user_not_in_sock_group(self):
        """Startup must fail when we are not a member of sock_group."""
        group = self._get_non_member_group()
        instance = self._new_instance(
            {"sock_group": group.gr_name}, expect_success=False
        )
        with self.assertRaises(pywatchman.SocketConnectError) as ctx:
            instance.start()
        self.assertEqual(ctx.exception.sockpath, instance.getSockPath().unix_domain)
        wanted = "setting up group '%s' failed" % group.gr_name
        self.assertWaitFor(
            lambda: wanted in instance.getCLILogContents(),
            get_debug_output=lambda: instance.getCLILogContents(),
        )
    def test_default_sock_group(self):
        # By default the socket group should be the effective gid of the process
        gid = os.getegid()
        instance = self._new_instance({})
        instance.start()
        instance.stop()
        self.assertFileGID(instance.user_dir, gid)
        self.assertFileGID(instance.sock_file, gid)
    def test_custom_sock_group(self):
        """sock_group must be applied to both the dir and the socket."""
        gid = self._get_custom_gid()
        group = grp.getgrgid(gid)
        instance = self._new_instance({"sock_group": group.gr_name})
        instance.start()
        instance.stop()
        self.assertFileGID(instance.user_dir, gid)
        self.assertFileGID(instance.sock_file, gid)
    def test_user_previously_in_sock_group(self):
        """This tests the case where a user was previously in sock_group
        (so Watchman created the directory with that group), but no longer is
        (so the socket is created with a different group)."""
        # Since it's hard to drop a group from a process without being
        # superuser, fake it. Use a private testing-only config option to set
        # up separate groups for the directory and the file.
        gid = self._get_custom_gid()
        group = grp.getgrgid(gid)
        non_member_group = self._get_non_member_group()
        # Need to wait for the server to come up here, can't use
        # expect_success=False.
        instance = self._new_instance(
            {"sock_group": group.gr_name, "__sock_file_group": non_member_group.gr_name}
        )
        with self.assertRaises(pywatchman.SocketConnectError):
            instance.start()
        wanted = (
            "for socket '%s', gid %d doesn't match expected gid %d "
            "(group name %s)."
            % (
                instance.getSockPath().unix_domain,
                gid,
                non_member_group.gr_gid,
                non_member_group.gr_name,
            )
        )
        self.assertWaitFor(lambda: wanted in instance.getServerLogContents())
    def test_invalid_sock_access(self):
        """Malformed sock_access configs must be rejected with clear errors."""
        instance = self._new_instance({"sock_access": "bogus"}, expect_success=False)
        with self.assertRaises(pywatchman.SocketConnectError) as ctx:
            instance.start()
        self.assertEqual(ctx.exception.sockpath, instance.getSockPath().unix_domain)
        wanted = "Expected config value sock_access to be an object"
        self.assertWaitFor(
            lambda: wanted in instance.getCLILogContents(),
            get_debug_output=lambda: instance.getCLILogContents(),
        )
        instance = self._new_instance(
            {"sock_access": {"group": "oui"}}, expect_success=False
        )
        with self.assertRaises(pywatchman.SocketConnectError) as ctx:
            instance.start()
        self.assertEqual(ctx.exception.sockpath, instance.getSockPath().unix_domain)
        wanted = "Expected config value sock_access.group to be a boolean"
        self.assertWaitFor(
            lambda: wanted in instance.getCLILogContents(),
            get_debug_output=lambda: instance.getCLILogContents(),
        )
    def test_default_sock_access(self):
        """Default modes: dir 0700+setgid, socket 0600."""
        instance = self._new_instance({})
        instance.start()
        instance.stop()
        self.assertFileMode(instance.user_dir, 0o700 | stat.S_ISGID)
        self.assertFileMode(instance.sock_file, 0o600)
    def test_custom_sock_access_group(self):
        """sock_access.group grants group read/write bits."""
        instance = self._new_instance({"sock_access": {"group": True}})
        instance.start()
        instance.stop()
        self.assertFileMode(instance.user_dir, 0o750 | stat.S_ISGID)
        self.assertFileMode(instance.sock_file, 0o660)
    def test_custom_sock_access_others(self):
        """sock_access.others additionally grants world access bits."""
        instance = self._new_instance({"sock_access": {"group": True, "others": True}})
        instance.start()
        instance.stop()
        self.assertFileMode(instance.user_dir, 0o755 | stat.S_ISGID)
        self.assertFileMode(instance.sock_file, 0o666)
    def test_sock_access_upgrade(self):
        """Watchman must widen a pre-existing, too-strict state dir."""
        instance = self._new_instance({"sock_access": {"group": True, "others": True}})
        os.makedirs(instance.user_dir)
        os.chmod(instance.user_dir, 0o700)
        instance.start()
        instance.stop()
        self.assertFileMode(instance.user_dir, 0o755 | stat.S_ISGID)
        self.assertFileMode(instance.sock_file, 0o666)
    def test_sock_access_downgrade(self):
        """Watchman must tighten a pre-existing, too-open state dir."""
        instance = self._new_instance({"sock_access": {"group": True}})
        os.makedirs(instance.user_dir)
        os.chmod(instance.user_dir, 0o755 | stat.S_ISGID)
        instance.start()
        instance.stop()
        self.assertFileMode(instance.user_dir, 0o750 | stat.S_ISGID)
        self.assertFileMode(instance.sock_file, 0o660)
    def test_sock_access_group_change(self):
        """Watchman must re-own a pre-existing dir to the configured group."""
        gid = self._get_custom_gid()
        group = grp.getgrgid(gid)
        instance = self._new_instance({"sock_group": group.gr_name})
        os.makedirs(instance.user_dir)
        # ensure that a different group is set
        os.chown(instance.user_dir, -1, os.getegid())
        instance.start()
        instance.stop()
        self.assertFileGID(instance.user_dir, gid)
        self.assertFileGID(instance.sock_file, gid)
    def assertFileMode(self, f, mode):
        """Assert the permission bits of *f* (lstat, no symlink deref)."""
        st = os.lstat(f)
        self.assertEqual(stat.S_IMODE(st.st_mode), mode)
    def assertFileGID(self, f, gid):
        """Assert the group ownership of *f* (lstat, no symlink deref)."""
        st = os.lstat(f)
        self.assertEqual(st.st_gid, gid)
| |
from __future__ import unicode_literals
import hashlib
import time
import re
from xml.sax.saxutils import escape
import boto.sqs
from moto.core import BaseBackend
from moto.core.utils import camelcase_to_underscores, get_random_message_id, unix_time_millis
from .utils import generate_receipt_handle
from .exceptions import (
ReceiptHandleIsInvalid,
MessageNotInflight
)
# Canonical fake AWS account id used by moto (also appears in queue ARNs/URLs).
DEFAULT_ACCOUNT_ID = 123456789012
class Message(object):
    """A single SQS message plus the visibility/delay bookkeeping moto
    needs to emulate receive semantics."""
    def __init__(self, message_id, body):
        self.id = message_id
        self._body = body
        self.message_attributes = {}
        self.receipt_handle = None
        self.sender_id = DEFAULT_ACCOUNT_ID
        self.sent_timestamp = None
        self.approximate_first_receive_timestamp = None
        self.approximate_receive_count = 0
        # Epoch milliseconds; zero means "visible now" / "not delayed".
        self.visible_at = 0
        self.delayed_until = 0
    @property
    def md5(self):
        """Hex MD5 digest of the raw (unescaped) body."""
        return hashlib.md5(self._body.encode('utf-8')).hexdigest()
    @property
    def body(self):
        """Body with XML special characters escaped for API responses."""
        return escape(self._body)
    def mark_sent(self, delay_seconds=None):
        """Record the send time and apply any per-message delivery delay."""
        self.sent_timestamp = unix_time_millis()
        if delay_seconds:
            self.delay(delay_seconds=delay_seconds)
    def mark_received(self, visibility_timeout=None):
        """
        When a message is received we will set the first receive timestamp,
        tap the ``approximate_receive_count`` and the ``visible_at`` time.
        """
        timeout = int(visibility_timeout) if visibility_timeout else 0
        if not self.approximate_first_receive_timestamp:
            self.approximate_first_receive_timestamp = unix_time_millis()
        self.approximate_receive_count += 1
        # Hide the message again for the duration of the timeout, unless
        # the caller requested none at all.
        if timeout:
            self.change_visibility(timeout)
        self.receipt_handle = generate_receipt_handle()
    def change_visibility(self, visibility_timeout):
        # Internal clock is in milliseconds.
        self.visible_at = unix_time_millis() + int(visibility_timeout) * 1000
    def delay(self, delay_seconds):
        self.delayed_until = unix_time_millis() + int(delay_seconds) * 1000
    @property
    def visible(self):
        """True once the visibility timeout (if any) has elapsed."""
        return unix_time_millis() > self.visible_at
    @property
    def delayed(self):
        """True while the delivery delay is still in effect."""
        return unix_time_millis() < self.delayed_until
class Queue(object):
    """In-memory model of a single SQS queue.

    ``camelcase_attributes`` lists the names exposed by the SQS
    GetQueueAttributes API; each maps to a snake_case attribute or
    property on this class via ``camelcase_to_underscores``.
    """
    camelcase_attributes = ['ApproximateNumberOfMessages',
                            'ApproximateNumberOfMessagesDelayed',
                            'ApproximateNumberOfMessagesNotVisible',
                            'CreatedTimestamp',
                            'DelaySeconds',
                            'LastModifiedTimestamp',
                            'MaximumMessageSize',
                            'MessageRetentionPeriod',
                            'QueueArn',
                            'ReceiveMessageWaitTimeSeconds',
                            'VisibilityTimeout',
                            'WaitTimeSeconds']
    def __init__(self, name, visibility_timeout, wait_time_seconds, region):
        self.name = name
        self.visibility_timeout = visibility_timeout or 30
        self.region = region
        # wait_time_seconds will be set to immediate return messages
        self.wait_time_seconds = wait_time_seconds or 0
        self._messages = []
        now = time.time()
        self.created_timestamp = now
        self.delay_seconds = 0
        self.last_modified_timestamp = now
        self.maximum_message_size = 64 << 10
        self.message_retention_period = 86400 * 4  # four days
        # Bug fix: the ARN used to hard-code us-east-1 even for queues in
        # other regions, while the URL below already used the real region.
        # Build the ARN from self.region so the two agree.
        self.queue_arn = 'arn:aws:sqs:%s:123456789012:%s' % (self.region,
                                                             self.name)
        self.receive_message_wait_time_seconds = 0
    @classmethod
    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        """Create a queue from an AWS::SQS::Queue CloudFormation resource."""
        properties = cloudformation_json['Properties']
        sqs_backend = sqs_backends[region_name]
        return sqs_backend.create_queue(
            name=properties['QueueName'],
            visibility_timeout=properties.get('VisibilityTimeout'),
            wait_time_seconds=properties.get('WaitTimeSeconds')
        )
    @classmethod
    def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
        """Apply updated VisibilityTimeout/WaitTimeSeconds from a template."""
        properties = cloudformation_json['Properties']
        queue_name = properties['QueueName']
        sqs_backend = sqs_backends[region_name]
        queue = sqs_backend.get_queue(queue_name)
        if 'VisibilityTimeout' in properties:
            queue.visibility_timeout = int(properties['VisibilityTimeout'])
        if 'WaitTimeSeconds' in properties:
            queue.wait_time_seconds = int(properties['WaitTimeSeconds'])
        return queue
    @classmethod
    def delete_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        """Delete the queue named in a CloudFormation resource."""
        properties = cloudformation_json['Properties']
        queue_name = properties['QueueName']
        sqs_backend = sqs_backends[region_name]
        sqs_backend.delete_queue(queue_name)
    @property
    def approximate_number_of_messages_delayed(self):
        """Count of messages whose delivery delay has not yet expired."""
        return len([m for m in self._messages if m.delayed])
    @property
    def approximate_number_of_messages_not_visible(self):
        """Count of in-flight messages (received, timeout not expired)."""
        return len([m for m in self._messages if not m.visible])
    @property
    def approximate_number_of_messages(self):
        """Count of messages currently deliverable."""
        return len(self.messages)
    @property
    def physical_resource_id(self):
        return self.name
    @property
    def attributes(self):
        """GetQueueAttributes response dict keyed by CamelCase name."""
        result = {}
        for attribute in self.camelcase_attributes:
            result[attribute] = getattr(self, camelcase_to_underscores(attribute))
        return result
    @property
    def url(self):
        return "http://sqs.{0}.amazonaws.com/123456789012/{1}".format(self.region, self.name)
    @property
    def messages(self):
        """Messages that are neither in-flight nor delayed."""
        return [message for message in self._messages if message.visible and not message.delayed]
    def add_message(self, message):
        self._messages.append(message)
    def get_cfn_attribute(self, attribute_name):
        """Resolve a CloudFormation Fn::GetAtt on this queue."""
        from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
        if attribute_name == 'Arn':
            return self.queue_arn
        elif attribute_name == 'QueueName':
            return self.name
        raise UnformattedGetAttTemplateException()
class SQSBackend(BaseBackend):
    """Per-region SQS backend: owns the name -> Queue mapping and implements
    the message-level API operations."""
    def __init__(self, region_name):
        self.region_name = region_name
        self.queues = {}
        super(SQSBackend, self).__init__()
    def reset(self):
        """Wipe all state but keep the region (moto's standard reset idiom)."""
        region_name = self.region_name
        self.__dict__ = {}
        self.__init__(region_name)
    def create_queue(self, name, visibility_timeout, wait_time_seconds):
        """Create a queue, or return the existing one (idempotent, matching
        SQS CreateQueue semantics; existing attributes are NOT updated)."""
        queue = self.queues.get(name)
        if queue is None:
            queue = Queue(name, visibility_timeout, wait_time_seconds, self.region_name)
            self.queues[name] = queue
        return queue
    def list_queues(self, queue_name_prefix):
        """Return queues whose name starts with *queue_name_prefix*
        (all queues when the prefix is falsy)."""
        re_str = '.*'
        if queue_name_prefix:
            re_str = '^{0}.*'.format(queue_name_prefix)
        prefix_re = re.compile(re_str)
        qs = []
        for name, q in self.queues.items():
            if prefix_re.search(name):
                qs.append(q)
        return qs
    def get_queue(self, queue_name):
        """Return the queue or None if it does not exist."""
        return self.queues.get(queue_name, None)
    def delete_queue(self, queue_name):
        """Remove and return the queue; False if it did not exist."""
        if queue_name in self.queues:
            return self.queues.pop(queue_name)
        return False
    def set_queue_attribute(self, queue_name, key, value):
        # key is expected to be the snake_case attribute name already.
        queue = self.get_queue(queue_name)
        setattr(queue, key, value)
        return queue
    def send_message(self, queue_name, message_body, message_attributes=None, delay_seconds=None):
        """Enqueue a new message; an explicit delay overrides the queue's
        default delay_seconds."""
        queue = self.get_queue(queue_name)
        if delay_seconds:
            delay_seconds = int(delay_seconds)
        else:
            delay_seconds = queue.delay_seconds
        message_id = get_random_message_id()
        message = Message(message_id, message_body)
        if message_attributes:
            message.message_attributes = message_attributes
        message.mark_sent(
            delay_seconds=delay_seconds
        )
        queue.add_message(message)
        return message
    def receive_messages(self, queue_name, count, wait_seconds_timeout, visibility_timeout):
        """
        Attempt to retrieve visible messages from a queue.
        If a message was read by client and not deleted it is considered to be
        "inflight" and cannot be read. We make attempts to obtain ``count``
        messages but we may return less if messages are in-flight or there
        are simple not enough messages in the queue.
        :param string queue_name: The name of the queue to read from.
        :param int count: The maximum amount of messages to retrieve.
        :param int visibility_timeout: The number of seconds the message should remain invisible to other queue readers.
        """
        queue = self.get_queue(queue_name)
        result = []
        polling_end = time.time() + wait_seconds_timeout
        # queue.messages only contains visible messages
        # NOTE(review): this long-poll emulation busy-waits (no sleep)
        # until a message arrives or polling_end passes.
        while True:
            for message in queue.messages:
                if not message.visible:
                    # redundant with the queue.messages filter; kept as a
                    # guard against races within this loop
                    continue
                message.mark_received(
                    visibility_timeout=visibility_timeout
                )
                result.append(message)
                if len(result) >= count:
                    break
            if result or time.time() > polling_end:
                break
        return result
    def delete_message(self, queue_name, receipt_handle):
        queue = self.get_queue(queue_name)
        new_messages = []
        for message in queue._messages:
            # Only delete message if it is not visible and the reciept_handle
            # matches.
            # NOTE(review): despite the comment above, visibility is NOT
            # checked here; any message whose receipt_handle matches is
            # dropped — confirm whether that is intentional.
            if message.receipt_handle == receipt_handle:
                continue
            new_messages.append(message)
        queue._messages = new_messages
    def change_message_visibility(self, queue_name, receipt_handle, visibility_timeout):
        """Extend/shrink the visibility timeout of an in-flight message.

        Raises MessageNotInflight if the message is already visible, and
        ReceiptHandleIsInvalid when no message matches the handle.
        """
        queue = self.get_queue(queue_name)
        for message in queue._messages:
            if message.receipt_handle == receipt_handle:
                if message.visible:
                    raise MessageNotInflight
                message.change_visibility(visibility_timeout)
                return
        raise ReceiptHandleIsInvalid
    def purge_queue(self, queue_name):
        """Drop every message (including in-flight ones) from the queue."""
        queue = self.get_queue(queue_name)
        queue._messages = []
# One backend per known SQS region.
sqs_backends = {region.name: SQSBackend(region.name)
                for region in boto.sqs.regions()}
| |
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import contextlib
import itertools
import weakref
import eventlet
import eventlet.event
from oslo_log import log as logging
import six
from yaql.language import specs
from yaql.language import utils
from murano.common.i18n import _LW
from murano.dsl import attribute_store
from murano.dsl import constants
from murano.dsl import dsl
from murano.dsl import dsl_types
from murano.dsl import helpers
from murano.dsl import object_store
from murano.dsl.principal_objects import stack_trace
from murano.dsl import yaql_integration
LOG = logging.getLogger(__name__)
class MuranoDslExecutor(object):
    """Executes MuranoPL methods.

    Owns the object/attribute stores for a deployment session, builds the
    chained YAQL contexts (root -> package -> type -> object -> method),
    serializes concurrent calls to the same method on the same receiver,
    and garbage-collects objects removed from the model.
    """
    def __init__(self, package_loader, context_manager, session=None):
        self._package_loader = package_loader
        self._context_manager = context_manager
        self._session = session
        self._attribute_store = attribute_store.AttributeStore()
        self._object_store = object_store.ObjectStore(self)
        # (method_id, receiver_id) -> (eventlet Event, owner thread id);
        # see _acquire_method_lock for the reentrancy rules.
        self._locks = {}
        # runtime_version -> prepared root context (built lazily).
        self._root_context_cache = {}
    @property
    def object_store(self):
        return self._object_store
    @property
    def attribute_store(self):
        return self._attribute_store
    @property
    def package_loader(self):
        return self._package_loader
    @property
    def context_manager(self):
        return self._context_manager
    def invoke_method(self, method, this, context, args, kwargs,
                      skip_stub=False):
        """Invoke *method* on receiver *this* inside the execution session."""
        with helpers.execution_session(self._session):
            return self._invoke_method(
                method, this, context, args, kwargs, skip_stub=skip_stub)
    def _invoke_method(self, method, this, context, args, kwargs,
                       skip_stub=False):
        # Unwrap the public interface wrapper down to the underlying object.
        if isinstance(this, dsl.MuranoObjectInterface):
            this = this.object
        kwargs = utils.filter_parameters_dict(kwargs)
        runtime_version = method.declaring_type.package.runtime_version
        yaql_engine = yaql_integration.choose_yaql_engine(runtime_version)
        if context is None or not skip_stub:
            # No caller context means an external (top-level) invocation,
            # which is restricted to Action methods — except '.'-prefixed
            # system methods.
            actions_only = context is None and not method.name.startswith('.')
            method_context = self.create_method_context(
                self.create_object_context(this, context), method)
            method_context[constants.CTX_SKIP_FRAME] = True
            method_context[constants.CTX_ACTIONS_ONLY] = actions_only
            # Static calls go through the static stub, instance calls
            # through the instance stub; the stub re-enters this method
            # with skip_stub=True.
            stub = method.static_stub if isinstance(
                this, dsl_types.MuranoType) else method.instance_stub
            if stub is None:
                raise ValueError(
                    'Method {0} cannot be called on receiver {1}'.format(
                        method, this))
            return stub(yaql_engine, method_context, this.real_this)(
                *args, **kwargs)
        if (context[constants.CTX_ACTIONS_ONLY] and method.usage !=
                dsl_types.MethodUsages.Action):
            raise Exception('{0} is not an action'.format(method.name))
        if method.is_static:
            obj_context = self.create_object_context(
                method.declaring_type, context)
        else:
            obj_context = self.create_object_context(this, context)
        context = self.create_method_context(obj_context, method)
        if isinstance(this, dsl_types.MuranoObject):
            this = this.real_this
        if method.arguments_scheme is not None:
            args, kwargs = self._canonize_parameters(
                method.arguments_scheme, args, kwargs)
        with self._acquire_method_lock(method, this):
            # NOTE(review): positional args are published as context keys
            # '2', '3', ... — '1' appears reserved for the receiver; confirm
            # against the YAQL integration before changing.
            for i, arg in enumerate(args, 2):
                context[str(i)] = arg
            for key, value in six.iteritems(kwargs):
                context[key] = value
            def call():
                # Native (Python) method bodies are FunctionDefinitions;
                # MuranoPL bodies are executed via body.execute().
                if isinstance(method.body, specs.FunctionDefinition):
                    if isinstance(this, dsl_types.MuranoType):
                        native_this = this.get_reference()
                    else:
                        native_this = dsl.MuranoObjectInterface(this.cast(
                            method.declaring_type), self)
                    return method.body(
                        yaql_engine, context, native_this)(*args, **kwargs)
                else:
                    context[constants.CTX_NAMES_SCOPE] = \
                        method.declaring_type
                    return (None if method.body is None
                            else method.body.execute(context))
            # Trace entry/exit unless the native body opted out via the
            # META_NO_TRACE marker.
            if (not isinstance(method.body, specs.FunctionDefinition) or
                    not method.body.meta.get(constants.META_NO_TRACE)):
                with self._log_method(context, args, kwargs) as log:
                    result = call()
                    log(result)
                    return result
            else:
                return call()
    @contextlib.contextmanager
    def _acquire_method_lock(self, method, this):
        """Serialize calls to the same method on the same receiver across
        green threads; re-entrant for the thread that holds the lock."""
        method_id = id(method)
        if method.is_static:
            this_id = id(method.declaring_type)
        else:
            this_id = this.object_id
        thread_id = helpers.get_current_thread_id()
        while True:
            event, event_owner = self._locks.get(
                (method_id, this_id), (None, None))
            if event:
                if event_owner == thread_id:
                    # Re-entrant call from the owner: proceed without
                    # registering a new lock (event None skips cleanup).
                    event = None
                    break
                else:
                    # Another green thread holds it; wait and retry.
                    event.wait()
            else:
                event = eventlet.event.Event()
                self._locks[(method_id, this_id)] = (event, thread_id)
                break
        try:
            yield
        finally:
            if event is not None:
                del self._locks[(method_id, this_id)]
                event.send()
    @contextlib.contextmanager
    def _log_method(self, context, args, kwargs):
        """Trace method entry/exit; yields a callable for logging the
        result on the success path."""
        method = helpers.get_current_method(context)
        param_gen = itertools.chain(
            (six.text_type(arg) for arg in args),
            (u'{0} => {1}'.format(name, value)
             for name, value in six.iteritems(kwargs)))
        params_str = u', '.join(param_gen)
        method_name = '::'.join((method.declaring_type.name, method.name))
        thread_id = helpers.get_current_thread_id()
        caller_str = ''
        caller_ctx = helpers.get_caller_context(context)
        if caller_ctx is not None:
            frame = stack_trace.compose_stack_frame(caller_ctx)
            if frame['location']:
                caller_str = ' called from ' + stack_trace.format_frame(frame)
        LOG.trace(u'{thread}: Begin execution {method}({params}){caller}'
                  .format(thread=thread_id, method=method_name,
                          params=params_str, caller=caller_str))
        try:
            def log_result(result):
                LOG.trace(
                    u'{thread}: End execution {method} with result '
                    u'{result}'.format(
                        thread=thread_id, method=method_name, result=result))
            yield log_result
        except Exception as e:
            LOG.trace(
                u'{thread}: End execution {method} with exception '
                u'{exc}'.format(thread=thread_id, method=method_name, exc=e))
            raise
    @staticmethod
    def _canonize_parameters(arguments_scheme, args, kwargs):
        """Fold positional args into keyword form using the declared
        argument names; returns (empty tuple, merged kwargs)."""
        arg_names = list(arguments_scheme.keys())
        parameter_values = utils.filter_parameters_dict(kwargs)
        for i, arg in enumerate(args):
            name = arg_names[i]
            parameter_values[name] = arg
        return tuple(), parameter_values
    def load(self, data):
        """Deserialize an object model within the execution session."""
        with helpers.execution_session(self._session):
            return self._load(data)
    def _load(self, data):
        if not isinstance(data, dict):
            raise TypeError()
        self._attribute_store.load(data.get(constants.DM_ATTRIBUTES) or [])
        result = self._object_store.load(data.get(constants.DM_OBJECTS), None)
        if result is None:
            return None
        return dsl.MuranoObjectInterface(result, executor=self)
    def cleanup(self, data):
        """Run .destroy on objects removed from the model (GC pass)."""
        with helpers.execution_session(self._session):
            return self._cleanup(data)
    def _cleanup(self, data):
        objects_copy = data.get(constants.DM_OBJECTS_COPY)
        if not objects_copy:
            return
        # Load the pre-modification snapshot into a scratch store; anything
        # present there but absent from the live store was deleted.
        gc_object_store = object_store.ObjectStore(self)
        gc_object_store.load(objects_copy, None)
        objects_to_clean = []
        for object_id in self._list_potential_object_ids(objects_copy):
            if (gc_object_store.has(object_id) and
                    not self._object_store.has(object_id)):
                obj = gc_object_store.get(object_id)
                objects_to_clean.append(obj)
        if objects_to_clean:
            for obj in objects_to_clean:
                methods = obj.type.find_methods(lambda m: m.name == '.destroy')
                for method in methods:
                    try:
                        method.invoke(self, obj, (), {}, None)
                    except Exception as e:
                        # Best-effort cleanup: a failing destructor must not
                        # abort the GC pass.
                        LOG.warning(_LW(
                            'Muted exception during execution of .destroy '
                            'on {0}: {1}').format(obj, e), exc_info=True)
    def _list_potential_object_ids(self, data):
        """Recursively yield ids of '?'-headed object dicts found in *data*."""
        if isinstance(data, dict):
            sys_dict = data.get('?')
            if (isinstance(sys_dict, dict) and
                    sys_dict.get('id') and sys_dict.get('type')):
                yield sys_dict['id']
            for val in six.itervalues(data):
                for res in self._list_potential_object_ids(val):
                    yield res
        # NOTE(review): collections.Iterable was removed in Python 3.10;
        # collections.abc.Iterable is the modern spelling.
        elif isinstance(data, collections.Iterable) and not isinstance(
                data, six.string_types):
            for val in data:
                for res in self._list_potential_object_ids(val):
                    yield res
    def create_root_context(self, runtime_version):
        """Return (and cache) the root context for *runtime_version*."""
        context = self._root_context_cache.get(runtime_version)
        if not context:
            context = self.context_manager.create_root_context(runtime_version)
            context = context.create_child_context()
            # weakrefs avoid a reference cycle executor <-> context cache.
            context[constants.CTX_EXECUTOR] = weakref.ref(self)
            context[constants.CTX_PACKAGE_LOADER] = weakref.ref(
                self._package_loader)
            context[constants.CTX_EXECUTION_SESSION] = self._session
            context[constants.CTX_ATTRIBUTE_STORE] = weakref.ref(
                self._attribute_store)
            self._root_context_cache[runtime_version] = context
        return context
    def create_package_context(self, package):
        """Root context chained with *package*'s own context."""
        root_context = self.create_root_context(package.runtime_version)
        context = helpers.link_contexts(
            root_context,
            self.context_manager.create_package_context(package))
        return context
    def create_type_context(self, murano_type):
        """Package context chained with *murano_type*'s context."""
        package_context = self.create_package_context(
            murano_type.package)
        context = helpers.link_contexts(
            package_context,
            self.context_manager.create_type_context(
                murano_type)).create_child_context()
        context[constants.CTX_TYPE] = murano_type
        return context
    def create_object_context(self, obj, caller_context=None):
        """Type context specialized for an object (or a type, for static
        calls), optionally linked to the caller's context."""
        if isinstance(obj, dsl_types.MuranoClass):
            # Static invocation: receiver is the class itself.
            obj_type = obj
            obj = None
        else:
            obj_type = obj.type
        class_context = self.create_type_context(obj_type)
        if obj is not None:
            context = helpers.link_contexts(
                class_context, self.context_manager.create_object_context(
                    obj)).create_child_context()
            context[constants.CTX_THIS] = obj.real_this
            context['this'] = obj.real_this
            context[''] = obj.real_this
        else:
            context = class_context.create_child_context()
            type_ref = obj_type.get_reference()
            context[constants.CTX_THIS] = type_ref
            context['this'] = type_ref
            context[''] = type_ref
        if caller_context is not None:
            caller = caller_context
            # Skip synthetic stub frames so stack traces show real callers.
            while caller is not None and caller[constants.CTX_SKIP_FRAME]:
                caller = caller[constants.CTX_CALLER_CONTEXT]
            context[constants.CTX_NAMES_SCOPE] = caller_context[
                constants.CTX_NAMES_SCOPE]
            context[constants.CTX_CALLER_CONTEXT] = caller
            context[constants.CTX_ALLOW_PROPERTY_WRITES] = caller_context[
                constants.CTX_ALLOW_PROPERTY_WRITES]
        else:
            context[constants.CTX_NAMES_SCOPE] = obj_type
        return context
    @staticmethod
    def create_method_context(object_context, method):
        """Child of *object_context* carrying the current method marker."""
        context = object_context.create_child_context()
        context[constants.CTX_CURRENT_METHOD] = method
        return context
| |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2019 Edgewall Software
# Copyright (C) 2006-2011, Herbert Valerio Riedel <hvr@gnu.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from datetime import datetime
import itertools
import os
from trac.api import ISystemInfoProvider
from trac.cache import cached
from trac.config import BoolOption, IntOption, ListOption, PathOption, Option
from trac.core import Component, TracError, implements
from trac.util import shorten_line
from trac.util.datefmt import FixedOffset, to_timestamp, format_datetime
from trac.util.html import Markup, tag
from trac.util.text import to_unicode, exception_to_unicode
from trac.util.translation import _
from trac.versioncontrol.api import Changeset, Node, Repository, \
IRepositoryConnector, InvalidRepository,\
NoSuchChangeset, NoSuchNode, \
RepositoryManager, IRepositoryProvider
from trac.versioncontrol.cache import CACHE_YOUNGEST_REV, CachedRepository, \
CachedChangeset
from trac.versioncontrol.web_ui import IPropertyRenderer
from trac.web.chrome import Chrome
from trac.wiki import IWikiSyntaxProvider
from tracopt.versioncontrol.git import PyGIT
class GitCachedRepository(CachedRepository):
    """Git-specific cached repository.

    Wraps a `GitRepository` and mirrors its changesets into trac's
    `revision` cache table so timeline/search queries hit the database
    instead of running git.
    """
    def display_rev(self, rev):
        # Show abbreviated hashes in the UI.
        return self.short_rev(rev)
    def short_rev(self, path):
        return self.repos.short_rev(path)
    def normalize_rev(self, rev):
        # Resolve any ref-ish (branch, tag, abbreviated sha) to a full
        # sha1; empty input means "youngest revision".
        if not rev:
            return self.get_youngest_rev()
        normrev = self.repos.git.verifyrev(rev)
        if normrev is None:
            raise NoSuchChangeset(rev)
        return normrev
    def get_youngest_rev(self):
        # return None if repository is empty
        return CachedRepository.get_youngest_rev(self) or None
    def child_revs(self, rev):
        return self.repos.child_revs(rev)
    def get_changesets(self, start, stop):
        # The cache orders changesets by timestamp only; inside a group
        # sharing one timestamp, re-order so that no changeset is
        # yielded before its children (cache yields newest-first).
        for key, csets in itertools.groupby(
                CachedRepository.get_changesets(self, start, stop),
                key=lambda cset: cset.date):
            csets = list(csets)
            if len(csets) == 1:
                yield csets[0]
                continue
            rev_csets = {cset.rev: cset for cset in csets}
            while rev_csets:
                # Pick revisions with no children left in this group.
                revs = [rev for rev in rev_csets
                        if not any(r in rev_csets
                                   for r in self.repos.child_revs(rev))]
                for rev in sorted(revs):
                    yield rev_csets.pop(rev)
    def get_changeset(self, rev):
        return GitCachedChangeset(self, self.normalize_rev(rev), self.env)
    def sync(self, feedback=None, clean=False):
        """Synchronize the database cache with the git repository.

        *feedback*, if given, is called with each newly cached revision;
        *clean* drops the cache first and rebuilds it from scratch.
        """
        if clean:
            self.remove_cache()
        metadata = self.metadata
        self.save_metadata(metadata)
        meta_youngest = metadata.get(CACHE_YOUNGEST_REV, '')
        repos = self.repos
        def is_synced(rev):
            # True if *rev* is already present in the revision cache.
            for count, in self.env.db_query("""
                    SELECT COUNT(*) FROM revision WHERE repos=%s AND rev=%s
                    """, (self.id, rev)):
                return count > 0
            return False
        def needs_sync():
            # Check the refs' tips against the cache, batching the IN()
            # clause to stay under SQL placeholder limits.
            # NOTE(review): `xrange` is Python 2 only.
            max_holders = 999
            revs = sorted(set(rev for refname, rev in repos.git.get_refs()))
            step = max_holders - 1
            for idx in xrange(0, len(revs), step):
                revs_ = revs[idx:idx + step]
                holders = ','.join(('%s',) * len(revs_))
                args = [self.id]
                args.extend(revs_)
                query = """SELECT COUNT(*) FROM revision
                           WHERE repos=%%s AND rev IN (%s)""" % holders
                for count, in self.env.db_query(query, args):
                    if count < len(revs_):
                        return True
            return False
        def traverse(rev, seen):
            # Collect *rev* and its not-yet-cached ancestors, newest
            # first; ancestors reached through secondary merge parents
            # are spliced in at the position of their merge commit.
            revs = []
            merge_revs = []
            while True:
                if rev in seen:
                    break
                seen.add(rev)
                if is_synced(rev):
                    break
                revs.append(rev)
                parent_revs = repos.parent_revs(rev)
                if not parent_revs:  # root commit?
                    break
                rev = parent_revs[0]
                if len(parent_revs) > 1:
                    merge_revs.append((len(revs), parent_revs[1:]))
            for idx, parent_revs in reversed(merge_revs):
                for rev in parent_revs:
                    revs[idx:idx] = traverse(rev, seen)
            return revs
        def sync_revs():
            # Cache every missing revision, oldest first, starting the
            # walk from each head (revision without children).
            updated = False
            seen = set()
            for rev in repos.git.all_revs():
                if repos.child_revs(rev):
                    continue
                revs = traverse(rev, seen)  # topology ordered
                while revs:
                    # sync revision from older revision to newer revision
                    rev = revs.pop()
                    self.log.info("Trying to sync revision [%s]", rev)
                    cset = repos.get_changeset(rev)
                    try:
                        self.insert_changeset(rev, cset)
                        updated = True
                    except self.env.db_exc.IntegrityError as e:
                        # Another process cached this revision meanwhile.
                        self.log.info('Revision %s already cached: %r', rev, e)
                        continue
                    if feedback:
                        feedback(rev)
            return updated
        with self.env.db_query:
            while True:
                repos.sync()
                if needs_sync() and sync_revs():
                    continue  # sync again
                repos_youngest = repos.youngest_rev or ''
                if meta_youngest != repos_youngest:
                    with self.env.db_transaction as db:
                        db("""
                            UPDATE repository SET value=%s
                            WHERE id=%s AND name=%s
                            """, (repos_youngest, self.id, CACHE_YOUNGEST_REV))
                        del self.metadata
                return
class GitCachedChangeset(CachedChangeset):
    """Git-specific cached changeset."""

    def get_branches(self):
        """Return ``(branch, is_tip)`` pairs for every branch whose
        history contains this changeset."""
        rev = self.rev
        containing = self.repos.repos.git.get_branch_contains(rev,
                                                              resolve=True)
        return [(name, sha == rev) for name, sha in containing]

    def get_tags(self):
        """Return the tags pointing at this changeset."""
        return self.repos.repos.git.get_tags(self.rev)
def _last_iterable(iterable):
"""helper for detecting last iteration in for-loop"""
i = iter(iterable)
v = next(i)
for nextv in i:
yield False, v
v = nextv
yield True, v
def intersperse(sep, iterable):
    """Yield the items of *iterable* with *sep* inserted between each
    consecutive pair.

    inspired by Haskell's ``Data.List.intersperse``
    """
    first = True
    for item in iterable:
        if not first:
            yield sep
        yield item
        first = False
# helper
def _parse_user_time(s):
    """Parse author or committer attribute lines and return
    corresponding ``(user, timestamp)`` pair.

    *s* looks like ``"Jane Doe <jane@example.org> 1234567890 +0130"``:
    the utc offset is hours*100 + minutes.  The previous
    ``int(tz_str) * 6 / 10`` shortcut is only exact for whole-hour
    offsets (e.g. '+0530' gave 318 minutes instead of 330), so hours
    and minutes are split out explicitly here.
    """
    user, time, tz_str = s.rsplit(None, 2)
    tz_offset = int(tz_str)
    sign = -1 if tz_offset < 0 else 1
    tz_offset = abs(tz_offset)
    tz_minutes = sign * ((tz_offset // 100) * 60 + tz_offset % 100)
    tz = FixedOffset(tz_minutes, tz_str)
    time = datetime.fromtimestamp(float(time), tz)
    return user, time
_file_type_mask = 0170000
def _is_dir(mode):
if mode is None:
return False
return (mode & _file_type_mask) in (0040000, 0160000)
def _is_submodule(mode):
if mode is None:
return False
return (mode & _file_type_mask) == 0160000
class GitConnector(Component):
    """Trac component providing git repository support.

    Registers the ``git`` repository type, reports the detected git
    version in the system-info box, and renders sha1-like hex strings
    in wiki text as changeset links.
    """
    implements(IRepositoryConnector, ISystemInfoProvider, IWikiSyntaxProvider)
    def __init__(self):
        # Detect the installed git binary once at component creation;
        # get_repository() later refuses to work if it is missing or
        # too old.
        self._version = None
        try:
            self._version = PyGIT.Storage.git_version(git_bin=self.git_bin)
        except PyGIT.GitError as e:
            self.log.error("GitError: %s", e)
        if self._version:
            self.log.info("detected GIT version %s", self._version['v_str'])
            if not self._version['v_compatible']:
                # NOTE(review): the two adjacent literals concatenate to
                # "...not compatible(need..." -- missing space.
                self.log.error("GIT version %s installed not compatible"
                               "(need >= %s)", self._version['v_str'],
                               self._version['v_min_str'])
    # ISystemInfoProvider methods
    def get_system_info(self):
        # Contribute the git version to the "System Information" table.
        if self._version:
            yield 'GIT', self._version['v_str']
    # IWikiSyntaxProvider methods
    def _format_sha_link(self, formatter, sha, label):
        # FIXME: this function needs serious rethinking...
        reponame = ''
        # Walk up the rendering context to find which repository the
        # surrounding source/changeset page belongs to.
        context = formatter.context
        while context:
            if context.resource.realm in ('source', 'changeset'):
                reponame = context.resource.parent.id
                break
            context = context.parent
        try:
            repos = RepositoryManager(self.env).get_repository(reponame)
            if not repos:
                raise Exception("Repository '%s' not found" % reponame)
            sha = repos.normalize_rev(sha)  # in case it was abbreviated
            changeset = repos.get_changeset(sha)
            return tag.a(label, class_='changeset',
                         title=shorten_line(changeset.message),
                         href=formatter.href.changeset(sha, repos.reponame))
        except Exception as e:
            # Render a "missing changeset" link instead of failing the
            # whole wiki page.
            return tag.a(label, class_='missing changeset',
                         title=to_unicode(e), rel='nofollow')
    def get_wiki_syntax(self):
        # Link bare sha1-ish hex strings (optionally 'r'-prefixed) of at
        # least wiki_shortrev_len characters.
        yield (r'(?:\b|!)r?[0-9a-fA-F]{%d,40}\b' % self.wiki_shortrev_len,
               lambda fmt, sha, match:
               self._format_sha_link(fmt, sha.startswith('r')
                                     and sha[1:] or sha, sha))
    def get_link_resolvers(self):
        # Explicit sha:<hash> TracLinks.
        yield ('sha', lambda fmt, _, sha, label, match=None:
               self._format_sha_link(fmt, sha, label))
    # IRepositoryConnector methods
    persistent_cache = BoolOption('git', 'persistent_cache', 'false',
        """Enable persistent caching of commit tree.""")
    cached_repository = BoolOption('git', 'cached_repository', 'false',
        """Wrap `GitRepository` in `CachedRepository`.""")
    shortrev_len = IntOption('git', 'shortrev_len', 7,
        """The length at which a sha1 is abbreviated (must be >= 4
        and <= 40).
        """)
    wiki_shortrev_len = IntOption('git', 'wikishortrev_len', 40,
        """The minimum length at which a hex-string in wiki content
        is formatted as a changeset TracLink (must be >= 4 and <= 40).
        """)
    trac_user_rlookup = BoolOption('git', 'trac_user_rlookup', 'false',
        """Enable reverse mapping of git email addresses to trac user ids.
        Performance will be reduced if there are many users and the
        `cached_repository` option is `disabled`.
        A repository resync is required after changing the value of this
        option.
        """)
    use_committer_id = BoolOption('git', 'use_committer_id', 'true',
        """Use git-committer id instead of git-author id for the
        changeset ''Author'' field.
        """)
    use_committer_time = BoolOption('git', 'use_committer_time', 'true',
        """Use git-committer timestamp instead of git-author timestamp
        for the changeset ''Timestamp'' field.
        """)
    git_fs_encoding = Option('git', 'git_fs_encoding', 'utf-8',
        """Define charset encoding of paths within git repositories.""")
    git_bin = Option('git', 'git_bin', 'git',
        """Path to the git executable.""")
    def get_supported_types(self):
        # priority 8: preferred over lower-priority connectors
        yield ('git', 8)
    def get_repository(self, type, dir, params):
        """GitRepository factory method"""
        assert type == 'git'
        # Validate option ranges up-front with explicit errors.
        if not (4 <= self.shortrev_len <= 40):
            raise TracError(_("%(option)s must be in the range [4..40]",
                              option="[git] shortrev_len"))
        if not (4 <= self.wiki_shortrev_len <= 40):
            raise TracError(_("%(option)s must be in the range [4..40]",
                              option="[git] wikishortrev_len"))
        if not self._version:
            raise TracError(_("GIT backend not available"))
        elif not self._version['v_compatible']:
            raise TracError(_("GIT version %(hasver)s installed not "
                              "compatible (need >= %(needsver)s)",
                              hasver=self._version['v_str'],
                              needsver=self._version['v_min_str']))
        if self.trac_user_rlookup:
            def rlookup_uid(email):
                """Reverse map 'real name <user@domain.tld>' addresses to trac
                user ids.
                :return: `None` if lookup failed
                """
                try:
                    _, email = email.rsplit('<', 1)
                    email, _ = email.split('>', 1)
                    email = email.lower()
                except Exception:
                    return None
                # Linear scan over known users; falls through to None
                # when no e-mail matches.
                for _uid, _name, _email in self.env.get_known_users():
                    try:
                        if email == _email.lower():
                            return _uid
                    except Exception:
                        continue
        else:
            def rlookup_uid(_):
                return None
        repos = GitRepository(self.env, dir, params, self.log,
                              persistent_cache=self.persistent_cache,
                              git_bin=self.git_bin,
                              git_fs_encoding=self.git_fs_encoding,
                              shortrev_len=self.shortrev_len,
                              rlookup_uid=rlookup_uid,
                              use_committer_id=self.use_committer_id,
                              use_committer_time=self.use_committer_time,
                              )
        if self.cached_repository:
            repos = GitCachedRepository(self.env, repos, self.log)
            self.log.debug("enabled CachedRepository for '%s'", dir)
        else:
            self.log.debug("disabled CachedRepository for '%s'", dir)
        return repos
class CsetPropertyRenderer(Component):
    """Renders the git-specific changeset properties (parents, children,
    branches, author/committer identities) as HTML on changeset pages.
    """
    implements(IPropertyRenderer)
    # relied upon by GitChangeset
    def match_property(self, name, mode):
        # default renderer has priority 1
        return (name in ('Parents',
                         'Children',
                         'Branches',
                         'git-committer',
                         'git-author',
                         ) and mode == 'revprop') and 4 or 0
    def render_property(self, name, mode, context, props):
        def sha_link(sha, label=None):
            # sha is assumed to be a non-abbreviated 40-chars sha id
            try:
                reponame = context.resource.parent.id
                repos = RepositoryManager(self.env).get_repository(reponame)
                cset = repos.get_changeset(sha)
                if label is None:
                    label = repos.display_rev(sha)
                return tag.a(label, class_='changeset',
                             title=shorten_line(cset.message),
                             href=context.href.changeset(sha, repos.reponame))
            except Exception as e:
                # Degrade to a dead link rather than breaking the page.
                return tag.a(sha, class_='missing changeset',
                             title=to_unicode(e), rel='nofollow')
        if name == 'Branches':
            branches = props[name]
            # simple non-merge commit
            return tag(*intersperse(', ', (sha_link(rev, label)
                                           for label, rev in branches)))
        elif name in ('Parents', 'Children'):
            revs = props[name]  # list of commit ids
            if name == 'Parents' and len(revs) > 1:
                # we got a merge...
                current_sha = context.resource.id
                reponame = context.resource.parent.id
                # One "(diff)" link per parent, diffing the merge
                # against that parent.
                parent_links = intersperse(', ',
                    ((sha_link(rev),
                      ' (',
                      tag.a(_("diff"),
                            title=_("Diff against this parent (show the "
                                    "changes merged from the other parents)"),
                            href=context.href.changeset(current_sha, reponame,
                                                        old=rev)),
                      ')')
                     for rev in revs))
                return tag(list(parent_links),
                           tag.br(),
                           tag.span(Markup(_("Note: this is a <strong>merge"
                                             "</strong> changeset, the "
                                             "changes displayed below "
                                             "correspond to the merge "
                                             "itself.")),
                                    class_='hint'),
                           tag.br(),
                           tag.span(Markup(_("Use the <code>(diff)</code> "
                                             "links above to see all the "
                                             "changes relative to each "
                                             "parent.")),
                                    class_='hint'))
            # simple non-merge commit
            return tag(*intersperse(', ', map(sha_link, revs)))
        elif name in ('git-committer', 'git-author'):
            user_, time_ = props[name]
            _str = "%s (%s)" % (
                Chrome(self.env).format_author(context.req, user_),
                format_datetime(time_, tzinfo=context.req.tz))
            # NOTE(review): `unicode` is Python 2 only.
            return unicode(_str)
        raise TracError(_("Internal error"))
class GitRepository(Repository):
    """Git repository"""
    def __init__(self, env, path, params, log,
                 persistent_cache=False,
                 git_bin='git',
                 git_fs_encoding='utf-8',
                 shortrev_len=7,
                 rlookup_uid=lambda _: None,
                 use_committer_id=False,
                 use_committer_time=False,
                 ):
        """Open the git repository at *path* through PyGIT.

        Raises `InvalidRepository` when *path* is not a readable git
        repository.  *rlookup_uid* maps a git e-mail address to a trac
        user id (or None).
        """
        self.env = env
        self.logger = log
        self.gitrepo = path
        self.params = params
        self.persistent_cache = persistent_cache
        # clamp to the valid sha1 abbreviation range [4..40]
        self.shortrev_len = max(4, min(shortrev_len, 40))
        self.rlookup_uid = rlookup_uid
        self.use_committer_time = use_committer_time
        self.use_committer_id = use_committer_id
        try:
            factory = PyGIT.StorageFactory(path, log, not persistent_cache,
                                           git_bin=git_bin,
                                           git_fs_encoding=git_fs_encoding)
            self._git = factory.getInstance()
        except PyGIT.GitError as e:
            log.error(exception_to_unicode(e))
            raise InvalidRepository(
                _('"%(name)s" is not readable or not a Git repository.',
                  name=params.get('name') or '(default)'))
        Repository.__init__(self, 'git:' + path, self.params, log)
        # key used by the @cached decorator on _cached_git
        self._cached_git_id = str(self.id)
    def close(self):
        self._git = None
    @property
    def git(self):
        # Route through the trac-cached storage when persistent caching
        # is enabled.
        if self.persistent_cache:
            return self._cached_git
        else:
            return self._git
    @cached('_cached_git_id')
    def _cached_git(self):
        self._git.invalidate_rev_cache()
        return self._git
    def get_youngest_rev(self):
        return self.git.youngest_rev()
    def get_path_history(self, path, rev=None, limit=None):
        raise TracError(_("Unsupported \"Show only adds and deletes\""))
    def get_oldest_rev(self):
        return self.git.oldest_rev()
    def normalize_path(self, path):
        # '' and None normalize to the repository root '/'
        return path and path.strip('/') or '/'
    def normalize_rev(self, rev):
        # Resolve branch/tag/abbreviated sha to a full sha1.
        if not rev:
            return self.get_youngest_rev()
        normrev = self.git.verifyrev(rev)
        if normrev is None:
            raise NoSuchChangeset(rev)
        return normrev
    def display_rev(self, rev):
        return self.short_rev(rev)
    def short_rev(self, rev):
        return self.git.shortrev(self.normalize_rev(rev),
                                 min_len=self.shortrev_len)
    def get_node(self, path, rev=None):
        return self._get_node(path, rev)
    def _get_node(self, path, rev, ls_tree_info=None, historian=None):
        # Internal variant allowing pre-fetched ls-tree data/historian.
        return GitNode(self, path, rev, self.log, ls_tree_info, historian)
    def get_quickjump_entries(self, rev):
        # Branches and tags shown in the "View revision" dropdown.
        for bname, bsha in self.git.get_branches():
            yield 'branches', bname, '/', bsha
        for t in self.git.get_tags():
            yield 'tags', t, '/', t
    def get_path_url(self, path, rev):
        return self.params.get('url')
    def get_changesets(self, start, stop):
        for rev in self.git.history_timerange(to_timestamp(start),
                                              to_timestamp(stop)):
            yield self.get_changeset(rev)
    def get_changeset(self, rev):
        """GitChangeset factory method"""
        return GitChangeset(self, rev)
    def get_changeset_uid(self, rev):
        return self.normalize_rev(rev)
    def get_changes(self, old_path, old_rev, new_path, new_rev,
                    ignore_ancestry=0):
        """Yield ``(old_node, new_node, kind, change)`` tuples for the
        differences between two revisions of the same path.
        """
        # TODO: handle renames/copies, ignore_ancestry
        old_path = self.normalize_path(old_path)
        new_path = self.normalize_path(new_path)
        if old_path != new_path:
            raise TracError(_("Not supported in git_fs"))
        old_rev = self.normalize_rev(old_rev)
        new_rev = self.normalize_rev(new_rev)
        if old_rev == new_rev:
            return
        def get_tree(rev):
            # path -> ls-tree entry, for cheap node construction below
            results = self.git.ls_tree(rev, target_path, recursive=True)
            return {result[4]: result for result in results}
        target_path = old_path.strip('/')
        old_tree = get_tree(old_rev)
        new_tree = get_tree(new_rev)
        with self.git.get_historian(old_rev, target_path) as old_historian:
            with self.git.get_historian(new_rev, target_path) as new_historian:
                for chg in self.git.diff_tree(old_rev, new_rev, target_path):
                    mode1, mode2, obj1, obj2, action, path, path2 = chg
                    kind = Node.DIRECTORY \
                        if _is_dir(mode2) or _is_dir(mode1) \
                        else Node.FILE
                    change = GitChangeset.action_map[action]
                    # No old node for additions, no new node for
                    # deletions.
                    old_node = self._get_node(path, old_rev,
                                              old_tree.get(path, False),
                                              old_historian) \
                        if change != Changeset.ADD else None
                    new_node = self._get_node(path, new_rev,
                                              new_tree.get(path, False),
                                              new_historian) \
                        if change != Changeset.DELETE else None
                    yield old_node, new_node, kind, change
    def next_rev(self, rev, path=''):
        return self.git.hist_next_revision(rev)
    def previous_rev(self, rev, path=''):
        return self.git.hist_prev_revision(rev)
    def parent_revs(self, rev):
        return self.git.parents(rev)
    def child_revs(self, rev):
        return self.git.children(rev)
    def rev_older_than(self, rev1, rev2):
        return self.git.rev_is_anchestor_of(self.normalize_rev(rev1),
                                            self.normalize_rev(rev2))
    # def clear(self, youngest_rev=None):
    #     self.youngest = None
    #     if youngest_rev is not None:
    #         self.youngest = self.normalize_rev(youngest_rev)
    #     self.oldest = None
    def clear(self, youngest_rev=None):
        self.sync()
    def sync(self, rev_callback=None, clean=None):
        """Re-read repository state from disk; call *rev_callback* with
        each revision that appeared since the last sync.
        """
        if rev_callback:
            revs = set(self.git.all_revs())
        if self.persistent_cache:
            del self._cached_git  # invalidate persistent cache
        if not self.git.sync():
            return None  # nothing expected to change
        if rev_callback:
            revs = set(self.git.all_revs()) - revs
            for rev in revs:
                rev_callback(rev)
class GitNode(Node):
    """A file or directory node inside a git tree.

    *ls_tree_info* and *historian* let bulk callers (directory listings,
    diffs) pass pre-computed ``git ls-tree`` data and history helpers to
    avoid one git invocation per node.
    """
    def __init__(self, repos, path, rev, log, ls_tree_info=None,
                 historian=None):
        self.log = log
        self.repos = repos
        self.fs_sha = None  # points to either tree or blobs
        self.fs_perm = None
        self.fs_type = None
        self.fs_size = None
        if rev:
            rev = repos.normalize_rev(to_unicode(rev))
        else:
            rev = repos.youngest_rev
        created_rev = rev
        kind = Node.DIRECTORY
        p = path.strip('/')
        if p:  # ie. not the root-tree
            if not rev:
                raise NoSuchNode(path, rev)
            if ls_tree_info is None:
                ls_tree_info = repos.git.ls_tree(rev, p)
                if ls_tree_info:
                    ls_tree_info = ls_tree_info[0]
            if not ls_tree_info:
                raise NoSuchNode(path, rev)
            self.fs_perm, self.fs_type, self.fs_sha, self.fs_size, fname = \
                ls_tree_info
            # fix-up to the last commit-rev that touched this node
            created_rev = repos.git.last_change(rev, p, historian)
            if self.fs_type == 'tree':
                kind = Node.DIRECTORY
            elif self.fs_type == 'blob':
                kind = Node.FILE
            elif _is_submodule(self.fs_perm):
                # FIXME: this is a workaround for missing git submodule
                # support in the plugin
                kind = Node.DIRECTORY
            else:
                self.log.warning('Got unexpected object %r', ls_tree_info)
                raise TracError(_("Internal error (got unexpected object "
                                  "kind '%(kind)s')", kind=self.fs_type))
        self.created_path = path
        self.created_rev = created_rev
        Node.__init__(self, repos, path, rev, kind)
    def __git_path(self):
        """return path as expected by PyGIT"""
        p = self.path.strip('/')
        if self.isfile:
            assert p
            return p
        if self.isdir:
            # directories are addressed with a trailing slash
            return p and (p + '/')
        raise TracError(_("Internal error"))
    def get_content(self):
        # File-like object for blobs, None for directories.
        if not self.isfile:
            return None
        return self.repos.git.get_file(self.fs_sha)
    def get_properties(self):
        if self.fs_perm is None:
            return {}
        props = {'mode': '%06o' % self.fs_perm}
        if _is_submodule(self.fs_perm):
            # expose the commit the submodule is pinned to
            props['commit'] = self.fs_sha
        return props
    def get_annotations(self):
        # Blame information: one revision per line of the file.
        if not self.isfile:
            return
        return [rev for rev, lineno in
                self.repos.git.blame(self.rev, self.__git_path())]
    def get_entries(self):
        if not self.rev:  # if empty repository
            return
        if not self.isdir:
            return
        if _is_submodule(self.fs_perm):
            # cannot descend into a submodule from here
            return
        with self.repos.git.get_historian(self.rev,
                                          self.path.strip('/')) as historian:
            for ent in self.repos.git.ls_tree(self.rev, self.__git_path()):
                yield GitNode(self.repos, ent[-1], self.rev, self.log, ent,
                              historian)
    def get_content_type(self):
        if self.isdir:
            return None
        return ''
    def get_content_length(self):
        if not self.isfile:
            return None
        if self.fs_size is None:
            # lazily ask git for the blob size
            self.fs_size = self.repos.git.get_obj_size(self.fs_sha)
        return self.fs_size
    def get_history(self, limit=None):
        if not self.rev:  # if empty repository
            return
        # TODO: find a way to follow renames/copies
        for is_last, rev in _last_iterable(self.repos.git.history(
                self.rev, self.__git_path(), limit)):
            yield (self.path, rev, Changeset.EDIT if not is_last else
                   Changeset.ADD)
    def get_last_modified(self):
        if not self.isfile:
            return None
        try:
            msg, props = self.repos.git.read_commit(self.rev)
            user, ts = _parse_user_time(props['committer'][0])
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to real errors.
            self.log.error("internal error (could not get timestamp from "
                           "commit '%s')", self.rev)
            return None
        return ts
class GitChangeset(Changeset):
    """A Git changeset in the Git repository.
    Corresponds to a Git commit blob.
    """
    action_map = {  # see also git-diff-tree(1) --diff-filter
        'A': Changeset.ADD,
        'M': Changeset.EDIT,  # modified
        'T': Changeset.EDIT,  # file type (mode) change
        'D': Changeset.DELETE,
        'R': Changeset.MOVE,  # renamed
        'C': Changeset.COPY
    }  # TODO: U, X, B
    def __init__(self, repos, sha):
        if sha is None:
            raise NoSuchChangeset(sha)
        try:
            msg, props = repos.git.read_commit(sha)
        except PyGIT.GitErrorSha:
            raise NoSuchChangeset(sha)
        self.props = props
        # 'children' is computed here; it must not collide with a raw
        # commit header of the same name.
        assert 'children' not in props
        _children = list(repos.git.children(sha))
        if _children:
            props['children'] = _children
        committer, author = self._get_committer_and_author()
        # use 1st author/committer as changeset owner/timestamp
        c_user = a_user = c_time = a_time = None
        if committer:
            c_user, c_time = _parse_user_time(committer)
        if author:
            a_user, a_time = _parse_user_time(author)
        # Repository options pick which identity/timestamp wins; fall
        # back to the other when the preferred value is missing.
        if repos.use_committer_time:
            time = c_time or a_time
        else:
            time = a_time or c_time
        if repos.use_committer_id:
            user = c_user or a_user
        else:
            user = a_user or c_user
        # try to resolve email address to trac uid
        user = repos.rlookup_uid(user) or user
        Changeset.__init__(self, repos, rev=sha, message=msg, author=user,
                           date=time)
    def _get_committer_and_author(self):
        # Raw "committer"/"author" header lines, or None when absent.
        committer = author = None
        if 'committer' in self.props:
            committer = self.props['committer'][0]
        if 'author' in self.props:
            author = self.props['author'][0]
        return committer, author
    def get_properties(self):
        # Extra properties rendered by CsetPropertyRenderer.
        properties = {}
        if 'parent' in self.props:
            properties['Parents'] = self.props['parent']
        if 'children' in self.props:
            properties['Children'] = self.props['children']
        committer, author = self._get_committer_and_author()
        # Only expose both identities when they actually differ.
        if author != committer:
            properties['git-committer'] = _parse_user_time(committer)
            properties['git-author'] = _parse_user_time(author)
        branches = list(self.repos.git.get_branch_contains(self.rev,
                                                           resolve=True))
        if branches:
            properties['Branches'] = branches
        return properties
    def get_changes(self):
        # Returns the differences against the first parent
        parent = self.props.get('parent')
        parent = parent[0] if parent else None
        for mode1, mode2, obj1, obj2, action, path1, path2 in \
                self.repos.git.diff_tree(parent, self.rev, find_renames=True):
            path = path2 or path1
            p_path, p_rev = path1, parent
            kind = Node.DIRECTORY \
                if _is_dir(mode2) or _is_dir(mode1) else \
                Node.FILE
            action = GitChangeset.action_map[action]
            if action == Changeset.ADD:
                # additions have no previous path/revision
                p_path = p_rev = None
            yield path, kind, action, p_path, p_rev
    def get_branches(self):
        # (branch, is_tip) pairs for branches containing this commit.
        _rev = self.rev
        return [(k, v == _rev)
                for k, v in self.repos.git.get_branch_contains(_rev,
                                                               resolve=True)]
    def get_tags(self):
        return self.repos.git.get_tags(self.rev)
class GitwebProjectsRepositoryProvider(Component):
    """Expose the repositories listed in a gitweb ``projects.list`` file
    as trac repositories.
    """
    implements(IRepositoryProvider)
    projects_list = PathOption('gitweb-repositories', 'projects_list', doc=
        """Path to a gitweb-formatted projects.list""")
    projects_base = PathOption('gitweb-repositories', 'projects_base', doc=
        """Path to the base of your git projects""")
    projects_url = Option('gitweb-repositories', 'projects_url', doc=
        """Template for project URLs. `%s` will be replaced with the repo
        name""")
    sync_per_request = ListOption('gitweb-repositories',
        'sync_per_request', '', doc="""Repositories to sync on every request
        (not recommended).""")
    def get_repositories(self):
        """Retrieve repositories specified in a `projects_list` file."""
        if not self.projects_list:
            return
        if not os.path.exists(self.projects_list):
            self.log.warning("The [git] projects_list file was not found at "
                             "'%s'", self.projects_list)
            return
        with open(self.projects_list, 'r') as fp:
            for line in fp:
                entries = line.strip().split()
                if not entries:
                    continue
                name = entries[0]
                # Strip a literal '.git' suffix.  The previous
                # `name.rstrip('.git')` stripped any trailing run of the
                # characters '.', 'g', 'i', 't' (e.g. 'taggit.git' ->
                # 'ta'), mangling repository names.
                if name.endswith('.git'):
                    reponame = name[:-len('.git')]
                else:
                    reponame = name
                info = {
                    'dir': os.path.join(self.projects_base, name),
                    'sync_per_request': reponame in self.sync_per_request,
                    'type': 'git',
                }
                description_path = \
                    os.path.join(info['dir'], 'description')
                if os.path.exists(description_path):
                    with open(description_path, 'r') as fd:
                        info['description'] = fd.read().strip()
                if self.projects_url:
                    info['url'] = self.projects_url % reponame
                yield reponame, info
| |
'''
Unit tests for mir_eval.hierarchy
'''
from glob import glob
import re
import warnings
import json
import numpy as np
import scipy.sparse
import mir_eval
from nose.tools import raises
A_TOL = 1e-12
def test_tmeasure_pass():
    # Two-level reference; the estimate keeps only the coarse level, so
    # it captures none of the fine-grained structure.
    ref = [np.asarray(level) for level in
           [[[0, 30]], [[0, 15], [15, 30]]]]
    est = ref[:1]

    def __test(window, frame_size):
        # Estimate vs. reference: every score must be zero.
        scores = mir_eval.hierarchy.tmeasure(ref, est,
                                             window=window,
                                             frame_size=frame_size)
        for score in scores:
            assert score == 0.0
        # Reference vs. itself: every score must be perfect.
        scores = mir_eval.hierarchy.tmeasure(ref, ref,
                                             window=window,
                                             frame_size=frame_size)
        for score in scores:
            assert score == 1.0

    for window in [5, 10, 15, 30, 90, None]:
        for frame_size in [0.1, 0.5, 1.0]:
            yield __test, window, frame_size
def test_tmeasure_warning():
    # A boundary present at level 0 disappears at level 1, so the
    # hierarchy is inconsistent and must trigger a warning.
    ref = [np.asarray(level) for level in
           [[[0, 5], [5, 10]], [[0, 10]]]]
    warnings.resetwarnings()
    warnings.simplefilter('always')
    with warnings.catch_warnings(record=True) as out:
        mir_eval.hierarchy.tmeasure(ref, ref)
    assert len(out) > 0
    assert out[0].category is UserWarning
    assert ('Segment hierarchy is inconsistent at level 1'
            in str(out[0].message))
def test_tmeasure_fail_span():
    tmeasure_raises = raises(ValueError)(mir_eval.hierarchy.tmeasure)
    # Hierarchy that does not start at time 0
    ref = [np.asarray(level) for level in
           [[[1, 10]], [[1, 5], [5, 10]]]]
    yield tmeasure_raises, ref, ref
    # Levels that end at different times
    ref = [np.asarray(level) for level in
           [[[0, 5]], [[0, 5], [5, 6]]]]
    yield tmeasure_raises, ref, ref
    # Two annotations spanning different total intervals
    ref = [np.asarray(level) for level in
           [[[0, 10]], [[0, 5], [5, 10]]]]
    est = [np.asarray(level) for level in
           [[[0, 15]], [[0, 5], [5, 15]]]]
    yield tmeasure_raises, ref, est
def test_tmeasure_fail_frame_size():
    ref = [np.asarray(level) for level in
           [[[0, 60]], [[0, 30], [30, 60]]]]

    @raises(ValueError)
    def __test(window, frame_size):
        mir_eval.hierarchy.tmeasure(ref, ref,
                                    window=window,
                                    frame_size=frame_size)

    for window in [None, 15, 30]:
        # Non-positive frame sizes are always rejected
        for frame_size in [-1, 0]:
            yield __test, window, frame_size
        # A frame size exceeding the window is rejected too
        if window is not None:
            yield __test, window, 2 * window
def test_lmeasure_pass():
    # Two-level labelled reference; the estimate keeps only the coarse
    # level, so it captures none of the labelled structure.
    ref = [np.asarray(level) for level in
           [[[0, 30]], [[0, 15], [15, 30]]]]
    ref_lab = [['A'], ['a', 'b']]
    est = ref[:1]
    est_lab = ref_lab[:1]

    def __test(frame_size):
        # Estimate vs. reference: every score must be zero.
        scores = mir_eval.hierarchy.lmeasure(ref, ref_lab, est, est_lab,
                                             frame_size=frame_size)
        for score in scores:
            assert score == 0.0
        # Reference vs. itself: every score must be perfect.
        scores = mir_eval.hierarchy.lmeasure(ref, ref_lab, ref, ref_lab,
                                             frame_size=frame_size)
        for score in scores:
            assert score == 1.0

    for frame_size in [0.1, 0.5, 1.0]:
        yield __test, frame_size
def test_lmeasure_warning():
    # A boundary present at level 0 disappears at level 1, so the
    # hierarchy is inconsistent and must trigger a warning.
    ref = [np.asarray(level) for level in
           [[[0, 5], [5, 10]], [[0, 10]]]]
    ref_lab = [['a', 'b'], ['A']]
    warnings.resetwarnings()
    warnings.simplefilter('always')
    with warnings.catch_warnings(record=True) as out:
        mir_eval.hierarchy.lmeasure(ref, ref_lab, ref, ref_lab)
    assert len(out) > 0
    assert out[0].category is UserWarning
    assert ('Segment hierarchy is inconsistent at level 1'
            in str(out[0].message))
def test_lmeasure_fail_span():
    lmeasure_raises = raises(ValueError)(mir_eval.hierarchy.lmeasure)
    ref_lab = [['A'], ['a', 'b']]
    # Hierarchy that does not start at time 0
    ref = [np.asarray(level) for level in
           [[[1, 10]], [[1, 5], [5, 10]]]]
    yield lmeasure_raises, ref, ref_lab, ref, ref_lab
    # Levels that end at different times
    ref = [np.asarray(level) for level in
           [[[0, 5]], [[0, 5], [5, 6]]]]
    yield lmeasure_raises, ref, ref_lab, ref, ref_lab
    # Reference and estimate spanning different total intervals
    ref = [np.asarray(level) for level in
           [[[0, 10]], [[0, 5], [5, 10]]]]
    est = [np.asarray(level) for level in
           [[[0, 15]], [[0, 5], [5, 15]]]]
    yield lmeasure_raises, ref, ref_lab, est, ref_lab
def test_lmeasure_fail_frame_size():
    ref = [np.asarray(level) for level in
           [[[0, 60]], [[0, 30], [30, 60]]]]
    ref_lab = [['A'], ['a', 'b']]

    @raises(ValueError)
    def __test(frame_size):
        mir_eval.hierarchy.lmeasure(ref, ref_lab, ref, ref_lab,
                                    frame_size=frame_size)

    # Non-positive frame sizes must be rejected
    for frame_size in [-1, 0]:
        yield __test, frame_size
def test_hierarchy_regression():
    """Regression-test hierarchy.evaluate against stored fixture output."""
    ref_files = sorted(glob('data/hierarchy/ref*.lab'))
    est_files = sorted(glob('data/hierarchy/est*.lab'))
    out_files = sorted(glob('data/hierarchy/output*.json'))
    ref_hier = [mir_eval.io.load_labeled_intervals(_) for _ in ref_files]
    est_hier = [mir_eval.io.load_labeled_intervals(_) for _ in est_files]
    ref_ints = [seg[0] for seg in ref_hier]
    ref_labs = [seg[1] for seg in ref_hier]
    est_ints = [seg[0] for seg in est_hier]
    est_labs = [seg[1] for seg in est_hier]

    def __test(w, ref_i, ref_l, est_i, est_l, target):
        outputs = mir_eval.hierarchy.evaluate(ref_i, ref_l,
                                              est_i, est_l,
                                              window=w)
        for key in target:
            assert np.allclose(target[key], outputs[key], atol=A_TOL)

    for out in out_files:
        with open(out, 'r') as fdesc:
            target = json.load(fdesc)
        # Extract the window parameter from e.g. "output_w=30.json".
        # The pattern is a raw string with an escaped '.': the old
        # '.*output_w=(\d+).json$' was an invalid-escape (non-raw \d)
        # and let '.' match any character before "json".
        window = float(re.match(r'.*output_w=(\d+)\.json$', out).groups()[0])
        yield __test, window, ref_ints, ref_labs, est_ints, est_labs, target
def test_count_inversions():
    # _count_inversions(a, b) counts the pairs (i, j) with a[i] >= b[j].
    count = mir_eval.hierarchy._count_inversions
    a = [2, 4, 6]
    b = [1, 2, 3, 4]
    # 2 dominates {1, 2}; 4 dominates {1, 2, 3, 4}; 6 dominates all four.
    assert count(a, b) == 10
    # Reverse direction: 2 -> {2}; 3 -> {2}; 4 -> {2, 4}.
    assert count(b, a) == 4
    # Repeated values count once per occurrence on either side.
    a = [2, 2, 4]
    b = [1, 2, 4, 4]
    # each 2 -> {1, 2}; 4 -> {1, 2, 4, 4}
    assert count(a, b) == 8
    # 2 -> {2, 2}; each 4 -> {2, 2, 4}
    assert count(b, a) == 8
def test_meet():
    frame_size = 1
    int_hier = [np.array([[0, 10]]),
                np.array([[0, 6], [6, 10]]),
                np.array([[0, 2], [2, 4], [4, 6], [6, 8], [8, 10]])]
    lab_hier = [['X'],
                ['A', 'B'],
                ['a', 'b', 'a', 'c', 'b']]
    # Expected meet matrix: entry [i, j] is the deepest level whose
    # labelling still puts frames i and j in the same segment.
    meet_truth = np.asarray([
        [3, 3, 2, 2, 3, 3, 1, 1, 1, 1],   # (XAa)
        [3, 3, 2, 2, 3, 3, 1, 1, 1, 1],   # (XAa)
        [2, 2, 3, 3, 2, 2, 1, 1, 3, 3],   # (XAb)
        [2, 2, 3, 3, 2, 2, 1, 1, 3, 3],   # (XAb)
        [3, 3, 2, 2, 3, 3, 1, 1, 1, 1],   # (XAa)
        [3, 3, 2, 2, 3, 3, 1, 1, 1, 1],   # (XAa)
        [1, 1, 1, 1, 1, 1, 3, 3, 2, 2],   # (XBc)
        [1, 1, 1, 1, 1, 1, 3, 3, 2, 2],   # (XBc)
        [1, 1, 3, 3, 1, 1, 2, 2, 3, 3],   # (XBb)
        [1, 1, 3, 3, 1, 1, 2, 2, 3, 3],   # (XBb)
    ])
    meet = mir_eval.hierarchy._meet(int_hier, lab_hier, frame_size)
    # The result must be sparse...
    assert isinstance(meet, scipy.sparse.csr_matrix)
    dense = meet.todense()
    # ...with the expected shape and contents.
    assert dense.shape == (10, 10)
    assert np.all(dense == meet_truth)
def test_compare_frame_rankings():
    """Check inversion and normalizer counts from _compare_frame_rankings."""
    ref = np.asarray([1, 2, 3, 3])
    # Transitive comparable pairs: (1,2), (1,3)x2, (2,3)x2 -> norm 5
    # Non-transitive (adjacent levels only): (1,2), (2,3)x2 -> norm 3
    # Comparing a ranking against itself can never produce inversions.
    for transitive, expected_norm in ((True, 5.0), (False, 3.0)):
        inversions, normalizer = mir_eval.hierarchy._compare_frame_rankings(
            ref, ref, transitive=transitive)
        assert inversions == 0
        assert normalizer == expected_norm
    est = np.asarray([1, 2, 1, 3])
    # Transitive case: the pairs (1,3)->(1,1) and (2,3)->(2,1) are inverted.
    inversions, normalizer = mir_eval.hierarchy._compare_frame_rankings(
        ref, est, transitive=True)
    assert inversions == 2
    assert normalizer == 5.0
    # Non-transitive case: only (2,3)->(2,1) counts, since (1,3) is never
    # compared at adjacent levels.
    inversions, normalizer = mir_eval.hierarchy._compare_frame_rankings(
        ref, est, transitive=False)
    assert inversions == 1
    assert normalizer == 3.0
    # A constant ranking has no comparable pairs at all.
    ref = np.asarray([1, 1, 1, 1])
    inversions, normalizer = mir_eval.hierarchy._compare_frame_rankings(
        ref, ref, transitive=True)
    assert inversions == 0
    assert normalizer == 0.0
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model definitions for simple speech recognition.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
def prepare_model_settings(label_count, sample_rate, clip_duration_ms,
                           window_size_ms, window_stride_ms,
                           dct_coefficient_count):
  """Calculates common settings needed for all models.
  Args:
    label_count: How many classes are to be recognized.
    sample_rate: Number of audio samples per second.
    clip_duration_ms: Length of each audio clip to be analyzed.
    window_size_ms: Duration of frequency analysis window.
    window_stride_ms: How far to move in time between frequency windows.
    dct_coefficient_count: Number of frequency bins to use for analysis.
  Returns:
    Dictionary containing common settings.
  Raises:
    ValueError: If the window stride rounds down to zero samples, which
      would otherwise cause an opaque division-by-zero error below.
  """
  desired_samples = int(sample_rate * clip_duration_ms / 1000)
  window_size_samples = int(sample_rate * window_size_ms / 1000)
  window_stride_samples = int(sample_rate * window_stride_ms / 1000)
  length_minus_window = (desired_samples - window_size_samples)
  if length_minus_window < 0:
    # The clip is shorter than a single analysis window, so no spectrogram
    # frames can be produced.
    spectrogram_length = 0
  else:
    if window_stride_samples <= 0:
      raise ValueError(
          'window_stride_ms of %r yields a stride of %d samples; it must '
          'cover at least one sample at sample_rate %r' %
          (window_stride_ms, window_stride_samples, sample_rate))
    # One frame for the initial window, plus one per full stride after it.
    spectrogram_length = 1 + length_minus_window // window_stride_samples
  fingerprint_size = dct_coefficient_count * spectrogram_length
  return {
      'desired_samples': desired_samples,
      'window_size_samples': window_size_samples,
      'window_stride_samples': window_stride_samples,
      'spectrogram_length': spectrogram_length,
      'dct_coefficient_count': dct_coefficient_count,
      'fingerprint_size': fingerprint_size,
      'label_count': label_count,
      'sample_rate': sample_rate,
  }
def create_model(fingerprint_input, model_settings, model_architecture,
                 is_training):
  """Builds a model of the requested architecture compatible with the settings.
  This is an abstract factory over the model-building functions below: pass a
  TensorFlow node producing a batch of 1D audio feature vectors (typically
  MFCCs) of size model_settings['fingerprint_size'], and it returns the node
  producing the 'logits' fed into the softmax prediction process. When
  is_training is set, a dropout-control placeholder is also returned.
  See the implementations below for the possible model architectures that can
  be requested.
  Args:
    fingerprint_input: TensorFlow node that will output audio feature vectors.
    model_settings: Dictionary of information about the model.
    model_architecture: String specifying which kind of model to create.
    is_training: Whether the model is going to be used for training.
  Returns:
    TensorFlow node outputting logits results, and optionally a dropout
    placeholder.
  Raises:
    Exception: If the architecture type isn't recognized.
  """
  if model_architecture == 'single_fc':
    builder = create_single_fc_model
  elif model_architecture == 'conv':
    builder = create_conv_model
  elif model_architecture == 'low_latency_conv':
    builder = create_low_latency_conv_model
  else:
    raise Exception('model_architecture argument "' + model_architecture +
                    '" not recognized, should be one of "single_fc", "conv",' +
                    ' or "low_latency_conv"')
  return builder(fingerprint_input, model_settings, is_training)
def load_variables_from_checkpoint(sess, start_checkpoint):
  """Utility function to centralize checkpoint restoration.
  Restores every global variable in the current graph from the checkpoint.
  Args:
    sess: TensorFlow session.
    start_checkpoint: Path to saved checkpoint on disk.
  """
  # A Saver over all global variables restores the full model state.
  restorer = tf.train.Saver(tf.global_variables())
  restorer.restore(sess, start_checkpoint)
def create_single_fc_model(fingerprint_input, model_settings, is_training):
  """Builds a model with a single hidden fully-connected layer.
  This is a very simple model with just one matmul and bias layer. As you'd
  expect, it doesn't produce very accurate results, but it is very fast and
  simple, so it's useful for sanity testing.
  Here's the layout of the graph:
  (fingerprint_input)
          v
      [MatMul]<-(weights)
          v
      [BiasAdd]<-(bias)
          v
  Args:
    fingerprint_input: TensorFlow node that will output audio feature vectors.
    model_settings: Dictionary of information about the model.
    is_training: Whether the model is going to be used for training.
  Returns:
    TensorFlow node outputting logits results, and optionally a dropout
    placeholder.
  """
  if is_training:
    # Placeholder so the caller can control dropout strength at run time
    # (unused by this architecture's layers but part of the interface).
    dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
  fingerprint_size = model_settings['fingerprint_size']
  label_count = model_settings['label_count']
  fc_weights = tf.Variable(
      tf.truncated_normal([fingerprint_size, label_count], stddev=0.001))
  fc_bias = tf.Variable(tf.zeros([label_count]))
  logits = tf.matmul(fingerprint_input, fc_weights) + fc_bias
  if not is_training:
    return logits
  return logits, dropout_prob
def create_conv_model(fingerprint_input, model_settings, is_training):
  """Builds a standard convolutional model.
  This is roughly the network labeled as 'cnn-trad-fpool3' in the
  'Convolutional Neural Networks for Small-footprint Keyword Spotting' paper:
  http://www.isca-speech.org/archive/interspeech_2015/papers/i15_1478.pdf
  Here's the layout of the graph:
  (fingerprint_input)
          v
      [Conv2D]<-(weights)
          v
      [BiasAdd]<-(bias)
          v
        [Relu]
          v
      [MaxPool]
          v
      [Conv2D]<-(weights)
          v
      [BiasAdd]<-(bias)
          v
        [Relu]
          v
      [MaxPool]
          v
      [MatMul]<-(weights)
          v
      [BiasAdd]<-(bias)
          v
  This produces fairly good quality results, but can involve a large number of
  weight parameters and computations. For a cheaper alternative from the same
  paper with slightly less accuracy, see 'low_latency_conv' below.
  During training, dropout nodes are introduced after each relu, controlled by
  a placeholder.
  Args:
    fingerprint_input: TensorFlow node that will output audio feature vectors.
    model_settings: Dictionary of information about the model.
    is_training: Whether the model is going to be used for training.
  Returns:
    TensorFlow node outputting logits results, and optionally a dropout
    placeholder.
  """
  if is_training:
    dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
  input_frequency_size = model_settings['dct_coefficient_count']
  input_time_size = model_settings['spectrogram_length']
  # Conv layers expect a 4D [batch, height(time), width(frequency), channels]
  # input, so reshape the flat fingerprint accordingly.
  fingerprint_4d = tf.reshape(fingerprint_input,
                              [-1, input_time_size, input_frequency_size, 1])
  # First convolution: 20x8 filters, 64 output channels, stride 1, SAME pad.
  conv1_filter_width = 8
  conv1_filter_height = 20
  conv1_channels = 64
  conv1_weights = tf.Variable(
      tf.truncated_normal(
          [conv1_filter_height, conv1_filter_width, 1, conv1_channels],
          stddev=0.01))
  conv1_bias = tf.Variable(tf.zeros([conv1_channels]))
  conv1 = tf.nn.conv2d(fingerprint_4d, conv1_weights, [1, 1, 1, 1],
                       'SAME') + conv1_bias
  conv1_relu = tf.nn.relu(conv1)
  if is_training:
    conv1_output = tf.nn.dropout(conv1_relu, dropout_prob)
  else:
    conv1_output = conv1_relu
  # 2x2 max-pooling halves both time and frequency resolution.
  pooled = tf.nn.max_pool(conv1_output, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
  # Second convolution: 10x4 filters, 64 -> 64 channels.
  conv2_filter_width = 4
  conv2_filter_height = 10
  conv2_channels = 64
  conv2_weights = tf.Variable(
      tf.truncated_normal(
          [conv2_filter_height, conv2_filter_width, conv1_channels,
           conv2_channels],
          stddev=0.01))
  conv2_bias = tf.Variable(tf.zeros([conv2_channels]))
  conv2 = tf.nn.conv2d(pooled, conv2_weights, [1, 1, 1, 1],
                       'SAME') + conv2_bias
  conv2_relu = tf.nn.relu(conv2)
  if is_training:
    conv2_output = tf.nn.dropout(conv2_relu, dropout_prob)
  else:
    conv2_output = conv2_relu
  # Flatten [batch, h, w, c] into [batch, h*w*c] for the final dense layer.
  conv2_shape = conv2_output.get_shape()
  conv2_output_width = conv2_shape[2]
  conv2_output_height = conv2_shape[1]
  conv2_element_count = int(
      conv2_output_width * conv2_output_height * conv2_channels)
  flattened = tf.reshape(conv2_output, [-1, conv2_element_count])
  label_count = model_settings['label_count']
  final_fc_weights = tf.Variable(
      tf.truncated_normal(
          [conv2_element_count, label_count], stddev=0.01))
  final_fc_bias = tf.Variable(tf.zeros([label_count]))
  final_fc = tf.matmul(flattened, final_fc_weights) + final_fc_bias
  if is_training:
    return final_fc, dropout_prob
  return final_fc
def create_low_latency_conv_model(fingerprint_input, model_settings,
                                  is_training):
  """Builds a convolutional model with low compute requirements.
  This is roughly the network labeled as 'cnn-one-fstride4' in the
  'Convolutional Neural Networks for Small-footprint Keyword Spotting' paper:
  http://www.isca-speech.org/archive/interspeech_2015/papers/i15_1478.pdf
  Here's the layout of the graph:
  (fingerprint_input)
          v
      [Conv2D]<-(weights)
          v
      [BiasAdd]<-(bias)
          v
        [Relu]
          v
      [MatMul]<-(weights)
          v
      [BiasAdd]<-(bias)
          v
      [MatMul]<-(weights)
          v
      [BiasAdd]<-(bias)
          v
      [MatMul]<-(weights)
          v
      [BiasAdd]<-(bias)
          v
  This produces slightly lower quality results than the 'conv' model, but needs
  fewer weight parameters and computations.
  During training, dropout nodes are introduced after the relu, controlled by a
  placeholder.
  Args:
    fingerprint_input: TensorFlow node that will output audio feature vectors.
    model_settings: Dictionary of information about the model.
    is_training: Whether the model is going to be used for training.
  Returns:
    TensorFlow node outputting logits results, and optionally a dropout
    placeholder.
  """
  if is_training:
    # Runtime-controlled dropout strength, returned alongside the logits.
    dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
  input_frequency_size = model_settings['dct_coefficient_count']
  input_time_size = model_settings['spectrogram_length']
  # Conv layers expect 4D [batch, height(time), width(frequency), channels].
  fingerprint_4d = tf.reshape(fingerprint_input,
                              [-1, input_time_size, input_frequency_size, 1])
  # A single convolution whose filter spans the full time axis, striding by
  # 4 along frequency ('fstride4' in the paper's naming).
  first_filter_width = 8
  first_filter_height = input_time_size
  first_filter_count = 186
  first_filter_stride_x = 1
  first_filter_stride_y = 4
  first_weights = tf.Variable(
      tf.truncated_normal(
          [first_filter_height, first_filter_width, 1, first_filter_count],
          stddev=0.01))
  first_bias = tf.Variable(tf.zeros([first_filter_count]))
  first_conv = tf.nn.conv2d(fingerprint_4d, first_weights, [
      1, first_filter_stride_y, first_filter_stride_x, 1
  ], 'VALID') + first_bias
  first_relu = tf.nn.relu(first_conv)
  if is_training:
    first_dropout = tf.nn.dropout(first_relu, dropout_prob)
  else:
    first_dropout = first_relu
  # Output extents of a 'VALID' convolution: floor((in - filter + stride) /
  # stride).  NOTE(review): the width formula divides by stride_x but the
  # filter slides along width with stride_y in the conv2d call above (strides
  # are [1, y, x, 1] = [1, 4, 1, 1] over [batch, time, freq, chan]); with
  # x == 1 the numbers still agree — confirm intent if changing strides.
  first_conv_output_width = math.floor(
      (input_frequency_size - first_filter_width + first_filter_stride_x) /
      first_filter_stride_x)
  first_conv_output_height = math.floor(
      (input_time_size - first_filter_height + first_filter_stride_y) /
      first_filter_stride_y)
  first_conv_element_count = int(
      first_conv_output_width * first_conv_output_height * first_filter_count)
  # Flatten the conv activations for the stack of fully-connected layers.
  flattened_first_conv = tf.reshape(first_dropout,
                                    [-1, first_conv_element_count])
  # First fully-connected bottleneck layer (no nonlinearity, per the paper).
  first_fc_output_channels = 128
  first_fc_weights = tf.Variable(
      tf.truncated_normal(
          [first_conv_element_count, first_fc_output_channels], stddev=0.01))
  first_fc_bias = tf.Variable(tf.zeros([first_fc_output_channels]))
  first_fc = tf.matmul(flattened_first_conv, first_fc_weights) + first_fc_bias
  if is_training:
    second_fc_input = tf.nn.dropout(first_fc, dropout_prob)
  else:
    second_fc_input = first_fc
  # Second fully-connected layer, 128 -> 128.
  second_fc_output_channels = 128
  second_fc_weights = tf.Variable(
      tf.truncated_normal(
          [first_fc_output_channels, second_fc_output_channels], stddev=0.01))
  second_fc_bias = tf.Variable(tf.zeros([second_fc_output_channels]))
  second_fc = tf.matmul(second_fc_input, second_fc_weights) + second_fc_bias
  if is_training:
    final_fc_input = tf.nn.dropout(second_fc, dropout_prob)
  else:
    final_fc_input = second_fc
  # Final projection down to one logit per class label.
  label_count = model_settings['label_count']
  final_fc_weights = tf.Variable(
      tf.truncated_normal(
          [second_fc_output_channels, label_count], stddev=0.01))
  final_fc_bias = tf.Variable(tf.zeros([label_count]))
  final_fc = tf.matmul(final_fc_input, final_fc_weights) + final_fc_bias
  if is_training:
    return final_fc, dropout_prob
  else:
    return final_fc
| |
"""
Tests for offsets.CustomBusinessHour
"""
from datetime import datetime
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas._libs.tslibs.offsets import (
BusinessHour,
CustomBusinessHour,
Nano,
)
import pandas._testing as tm
from pandas.tests.tseries.offsets.common import (
Base,
assert_offset_equal,
)
from pandas.tseries.holiday import USFederalHolidayCalendar
class TestCustomBusinessHour(Base):
    """Tests for CustomBusinessHour: BusinessHour with custom weekmasks,
    holiday lists, and calendars."""
    _offset = CustomBusinessHour
    # Shared holiday fixture; deliberately mixes str, datetime and
    # np.datetime64 spellings to check all are accepted.
    holidays = ["2014-06-27", datetime(2014, 6, 30), np.datetime64("2014-07-02")]
    def setup_method(self, method):
        # 2014 Calendar to check custom holidays
        # Sun Mon Tue Wed Thu Fri Sat
        # 6/22 23 24 25 26 27 28
        # 29 30 7/1 2 3 4 5
        # 6 7 8 9 10 11 12
        self.d = datetime(2014, 7, 1, 10, 00)
        # offset1 skips Mondays entirely; offset2 skips the shared holidays.
        self.offset1 = CustomBusinessHour(weekmask="Tue Wed Thu Fri")
        self.offset2 = CustomBusinessHour(holidays=self.holidays)
    def test_constructor_errors(self):
        # Invalid start/end specs must raise with a clear message.
        from datetime import time as dt_time
        msg = "time data must be specified only with hour and minute"
        with pytest.raises(ValueError, match=msg):
            CustomBusinessHour(start=dt_time(11, 0, 5))
        msg = "time data must match '%H:%M' format"
        with pytest.raises(ValueError, match=msg):
            CustomBusinessHour(start="AAA")
        msg = "time data must match '%H:%M' format"
        with pytest.raises(ValueError, match=msg):
            CustomBusinessHour(start="14:00:05")
    def test_different_normalize_equals(self):
        # GH#21404 changed __eq__ to return False when `normalize` does not match
        offset = self._offset()
        offset2 = self._offset(normalize=True)
        assert offset != offset2
    def test_repr(self):
        # repr shows only the hours window, not weekmask/holidays.
        assert repr(self.offset1) == "<CustomBusinessHour: CBH=09:00-17:00>"
        assert repr(self.offset2) == "<CustomBusinessHour: CBH=09:00-17:00>"
    def test_with_offset(self):
        # Multiplying an offset must equal constructing with n directly.
        expected = Timestamp("2014-07-01 13:00")
        assert self.d + CustomBusinessHour() * 3 == expected
        assert self.d + CustomBusinessHour(n=3) == expected
    def test_eq(self):
        # Equality must account for n, start/end, weekmask and holidays.
        for offset in [self.offset1, self.offset2]:
            assert offset == offset
        assert CustomBusinessHour() != CustomBusinessHour(-1)
        assert CustomBusinessHour(start="09:00") == CustomBusinessHour()
        assert CustomBusinessHour(start="09:00") != CustomBusinessHour(start="09:01")
        assert CustomBusinessHour(start="09:00", end="17:00") != CustomBusinessHour(
            start="17:00", end="09:01"
        )
        assert CustomBusinessHour(weekmask="Tue Wed Thu Fri") != CustomBusinessHour(
            weekmask="Mon Tue Wed Thu Fri"
        )
        assert CustomBusinessHour(holidays=["2014-06-27"]) != CustomBusinessHour(
            holidays=["2014-06-28"]
        )
    def test_sub(self):
        # override the Base.test_sub implementation because self.offset2 is
        # defined differently in this class than the test expects
        pass
    def test_hash(self):
        # Hash must be stable for equal offsets.
        assert hash(self.offset1) == hash(self.offset1)
        assert hash(self.offset2) == hash(self.offset2)
    def test_call(self):
        with tm.assert_produces_warning(FutureWarning):
            # GH#34171 DateOffset.__call__ is deprecated
            assert self.offset1(self.d) == datetime(2014, 7, 1, 11)
            assert self.offset2(self.d) == datetime(2014, 7, 1, 11)
    def testRollback1(self):
        # Rolling back an on-offset timestamp is a no-op.
        assert self.offset1.rollback(self.d) == self.d
        assert self.offset2.rollback(self.d) == self.d
        d = datetime(2014, 7, 1, 0)
        # 2014/07/01 is Tuesday, 06/30 is Monday(holiday)
        assert self.offset1.rollback(d) == datetime(2014, 6, 27, 17)
        # 2014/6/30 and 2014/6/27 are holidays
        assert self.offset2.rollback(d) == datetime(2014, 6, 26, 17)
    def testRollback2(self):
        assert self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) == datetime(
            2014, 7, 4, 17, 0
        )
    def testRollforward1(self):
        # Rolling forward an on-offset timestamp is a no-op.
        assert self.offset1.rollforward(self.d) == self.d
        assert self.offset2.rollforward(self.d) == self.d
        d = datetime(2014, 7, 1, 0)
        assert self.offset1.rollforward(d) == datetime(2014, 7, 1, 9)
        assert self.offset2.rollforward(d) == datetime(2014, 7, 1, 9)
    def testRollforward2(self):
        assert self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) == datetime(
            2014, 7, 7, 9
        )
    def test_roll_date_object(self):
        # Plain BusinessHour sanity check on datetime (not Timestamp) input.
        offset = BusinessHour()
        dt = datetime(2014, 7, 6, 15, 0)
        result = offset.rollback(dt)
        assert result == datetime(2014, 7, 4, 17)
        result = offset.rollforward(dt)
        assert result == datetime(2014, 7, 7, 9)
    # Table of (offset, {input: expected}) pairs checking that
    # normalize=True truncates results to midnight.
    normalize_cases = [
        (
            CustomBusinessHour(normalize=True, holidays=holidays),
            {
                datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
                datetime(2014, 7, 1, 17): datetime(2014, 7, 3),
                datetime(2014, 7, 1, 16): datetime(2014, 7, 3),
                datetime(2014, 7, 1, 23): datetime(2014, 7, 3),
                datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
                datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
                datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
                datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
                datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
                datetime(2014, 7, 6, 10): datetime(2014, 7, 7),
            },
        ),
        (
            CustomBusinessHour(-1, normalize=True, holidays=holidays),
            {
                datetime(2014, 7, 1, 8): datetime(2014, 6, 26),
                datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
                datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
                datetime(2014, 7, 1, 10): datetime(2014, 6, 26),
                datetime(2014, 7, 1, 0): datetime(2014, 6, 26),
                datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
                datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
                datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
                datetime(2014, 7, 6, 10): datetime(2014, 7, 4),
            },
        ),
        (
            CustomBusinessHour(
                1, normalize=True, start="17:00", end="04:00", holidays=holidays
            ),
            {
                datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
                datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
                datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
                datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
                datetime(2014, 7, 2, 3): datetime(2014, 7, 3),
                datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
                datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
                datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
                datetime(2014, 7, 7, 17): datetime(2014, 7, 7),
            },
        ),
    ]
    @pytest.mark.parametrize("norm_cases", normalize_cases)
    def test_normalize(self, norm_cases):
        offset, cases = norm_cases
        for dt, expected in cases.items():
            assert offset.apply(dt) == expected
    def test_is_on_offset(self):
        # A timestamp is on-offset only inside business hours on working days.
        tests = [
            (
                CustomBusinessHour(start="10:00", end="15:00", holidays=self.holidays),
                {
                    datetime(2014, 7, 1, 9): False,
                    datetime(2014, 7, 1, 10): True,
                    datetime(2014, 7, 1, 15): True,
                    datetime(2014, 7, 1, 15, 1): False,
                    datetime(2014, 7, 5, 12): False,
                    datetime(2014, 7, 6, 12): False,
                },
            )
        ]
        for offset, cases in tests:
            for dt, expected in cases.items():
                assert offset.is_on_offset(dt) == expected
    # Table of (offset, {input: expected}) pairs for offset addition,
    # including out-of-hours, holiday and weekend inputs.
    apply_cases = [
        (
            CustomBusinessHour(holidays=holidays),
            {
                datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
                datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
                datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
                datetime(2014, 7, 1, 19): datetime(2014, 7, 3, 10),
                datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 9),
                datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 3, 9, 30, 15),
                datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 10),
                datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 10),
                # out of business hours
                datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 10),
                datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
                datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
                datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
                # saturday
                datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
                datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
                datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
                datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30),
            },
        ),
        (
            CustomBusinessHour(4, holidays=holidays),
            {
                datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
                datetime(2014, 7, 1, 13): datetime(2014, 7, 3, 9),
                datetime(2014, 7, 1, 15): datetime(2014, 7, 3, 11),
                datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 12),
                datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 13),
                datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 13),
                datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 13),
                datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
                datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
                datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
                datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
                datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
                datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
                datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30),
            },
        ),
    ]
    @pytest.mark.parametrize("apply_case", apply_cases)
    def test_apply(self, apply_case):
        offset, cases = apply_case
        for base, expected in cases.items():
            assert_offset_equal(offset, base, expected)
    # Nanosecond-resolution inputs must preserve their sub-second component.
    nano_cases = [
        (
            CustomBusinessHour(holidays=holidays),
            {
                Timestamp("2014-07-01 15:00")
                + Nano(5): Timestamp("2014-07-01 16:00")
                + Nano(5),
                Timestamp("2014-07-01 16:00")
                + Nano(5): Timestamp("2014-07-03 09:00")
                + Nano(5),
                Timestamp("2014-07-01 16:00")
                - Nano(5): Timestamp("2014-07-01 17:00")
                - Nano(5),
            },
        ),
        (
            CustomBusinessHour(-1, holidays=holidays),
            {
                Timestamp("2014-07-01 15:00")
                + Nano(5): Timestamp("2014-07-01 14:00")
                + Nano(5),
                Timestamp("2014-07-01 10:00")
                + Nano(5): Timestamp("2014-07-01 09:00")
                + Nano(5),
                Timestamp("2014-07-01 10:00")
                - Nano(5): Timestamp("2014-06-26 17:00")
                - Nano(5),
            },
        ),
    ]
    @pytest.mark.parametrize("nano_case", nano_cases)
    def test_apply_nanoseconds(self, nano_case):
        offset, cases = nano_case
        for base, expected in cases.items():
            assert_offset_equal(offset, base, expected)
    def test_us_federal_holiday_with_datetime(self):
        # GH 16867
        bhour_us = CustomBusinessHour(calendar=USFederalHolidayCalendar())
        t0 = datetime(2014, 1, 17, 15)
        result = t0 + bhour_us * 8
        expected = Timestamp("2014-01-21 15:00:00")
        assert result == expected
| |
'''
A silly demonstration of how to use the Apple remote.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import pyglet
from pyglet.gl import *
import sys
class MainWindow(pyglet.window.Window):
    # Window that drives a Carousel from Apple Remote button events.
    # NOTE: this file uses Python 2 print statements throughout.
    def __init__(self):
        # Start hidden; the main script makes the window visible after an
        # initial clear/flip.
        super(MainWindow, self).__init__(visible=False)
        self.set_caption('Apple Remote Example')
        # Look for the Apple Remote device.
        remote = pyglet.input.get_apple_remote()
        if not remote:
            print 'Apple IR Remote not available.'
            sys.exit(0)
        # Open the remote in exclusive mode so that pressing the remote
        # buttons does not activate Front Row, change volume, etc. while
        # the remote is being used by our program.
        remote.open(self, exclusive=True)
        # We push this class onto the remote's event handler stack so that
        # the on_button_press and on_button_release methods which we define
        # below will be called for the appropriate remote events.
        remote.push_handlers(self)
        self.carousel = Carousel()
        self.setup_opengl()
        # Drive the carousel animation at 60 updates per second.
        pyglet.clock.schedule_interval(self.update, 1/60.0)
    # Event handler for Apple Remote button press events.
    # The button parameter is a string specifying the button that was pressed.
    def on_button_press(self, button):
        print 'on_button_press', button
        if button == 'up':
            self.carousel.scroll_up()
        elif button == 'down':
            self.carousel.scroll_down()
        elif button == 'left':
            self.carousel.step_left()
        elif button == 'right':
            self.carousel.step_right()
        elif button == 'left_hold':
            self.carousel.rotate_left()
        elif button == 'right_hold':
            self.carousel.rotate_right()
        elif button == 'select' or button == 'select_hold':
            self.carousel.swap_left()
        elif button == 'menu' or button == 'menu_hold':
            self.carousel.swap_right()
    # Event handler for Apple Remote button release events.
    # The button parameter is a string specifying the button that was released.
    def on_button_release(self, button):
        print 'on_button_release', button
        # Releasing either hold button stops continuous rotation.
        if button == 'left_hold':
            self.carousel.stop_rotating()
        elif button == 'right_hold':
            self.carousel.stop_rotating()
    def on_draw(self):
        # Clear, position the camera slightly above and behind the carousel,
        # then let the carousel render itself.
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()
        gluLookAt(0,3,-12,0,3,0,0,1,0)
        self.carousel.draw()
    def on_resize(self, width, height):
        # Rebuild the projection matrix to keep the aspect ratio correct.
        glViewport(0, 0, width, height)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        aspect = width / float(height)
        glFrustum(-1,1,-1.8/aspect,0.2/aspect,1,100)
        glMatrixMode(GL_MODELVIEW)
        return pyglet.event.EVENT_HANDLED
    def setup_opengl(self):
        # White background, depth testing, and standard alpha blending.
        glClearColor(1,1,1,1)
        glEnable(GL_DEPTH_TEST)
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
    def update(self, dt):
        # Scheduled by pyglet.clock; advances the carousel animation.
        self.carousel.update(dt)
class Carousel:
    """A rotating collection of labeled tiles."""
    def __init__(self):
        self.num_tiles = 14
        self.index = 0
        self.float_index = 0.0
        self.float_increment = 1.0 / self.num_tiles
        self.angle = 0
        self.index_diff = 0
        self.is_rotating = False
        self.speed = 4 * self.num_tiles
        # Build the tiles, cycling through a fixed palette of colors.
        colors = [(255,0,0), (0,255,0), (0,0,255), (255,255,0), (0,205,205), (128,0,128), (255,165,0)]
        class Tile:
            value = 0
            color = [255,255,255]
        self.tiles = []
        for tile_number in range(self.num_tiles):
            tile = Tile()
            tile.value = tile_number % 26
            tile.color = colors[tile_number % len(colors)]
            self.tiles.append(tile)
        # Pre-render glyphs for every character a tile can display.
        font = pyglet.font.load('Courier', 64)
        self.glyphs = font.get_glyphs('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    def scroll_up(self):
        """Increment the character displayed on the main tile."""
        tile = self.tiles[self.index]
        tile.value = (tile.value + 1) % 26
    def scroll_down(self):
        """Decrement the character displayed on the main tile."""
        tile = self.tiles[self.index]
        tile.value = (tile.value - 1) % 26
    def swap_left(self):
        """Swap the main tile with its left neighbour."""
        left = (self.index - 1) % self.num_tiles
        self.tiles[self.index], self.tiles[left] = (
            self.tiles[left], self.tiles[self.index])
    def swap_right(self):
        """Swap the main tile with its right neighbour."""
        right = (self.index + 1) % self.num_tiles
        self.tiles[self.index], self.tiles[right] = (
            self.tiles[right], self.tiles[self.index])
    def step_left(self):
        """Rotate the carousel one tile to the left."""
        self.direction = -1
        self.index_diff += 1.0
    def step_right(self):
        """Rotate the carousel one tile to the right."""
        self.direction = 1
        self.index_diff += 1.0
    def rotate_left(self):
        """Start the carousel rotating continuously to the left."""
        self.is_rotating = True
        self.direction = -1
    def rotate_right(self):
        """Start the carousel rotating continuously to the right."""
        self.is_rotating = True
        self.direction = 1
    def stop_rotating(self):
        """Stop continuous rotation, settling on the nearest tile location."""
        offset = round(self.float_index) - self.float_index
        self.direction = -1 if offset < 0 else 1
        self.index_diff = abs(offset)
    def draw(self):
        """Render every tile, rotated to the carousel's current angle."""
        glPushMatrix()
        glRotatef(-self.angle, 0, 1, 0)
        for tile_number in range(self.num_tiles):
            self.draw_tile(tile_number)
        glPopMatrix()
    def draw_tile(self, index):
        """Render a single tile at its position on the carousel ring."""
        tile = self.tiles[index]
        tile_angle = index * (360.0 / self.num_tiles)
        glPushMatrix()
        # Move out to the ring, then counter-rotate so the tile always
        # faces outward relative to the carousel's own rotation.
        glRotatef(tile_angle,0,1,0)
        glTranslatef(0,0,-7.5)
        glRotatef(-tile_angle+self.angle,0,1,0)
        texture = self.glyphs[tile.value].texture
        quad = pyglet.graphics.vertex_list(4, 'v2f', ('t3f', texture.tex_coords))
        # Tile background in the tile's color.
        quad.vertices[:] = [-1, -1, 1, -1, 1, 1, -1, 1]
        glColor3ub(*tile.color)
        quad.draw(GL_QUADS)
        # Glyph label drawn slightly in front, in black.
        glBindTexture(texture.target, texture.id)
        glEnable(texture.target)
        glColor3ub(0,0,0)
        quad.vertices[:] = [.8, -.8, -.8, -.8, -.8, .8, .8, .8]
        glTranslatef(0,0,-.01)
        quad.draw(GL_QUADS)
        glDisable(texture.target)
        glPopMatrix()
    def update(self, dt):
        """Advance the rotation animation by dt seconds."""
        if not (self.is_rotating or self.index_diff):
            return
        increment = self.direction * self.speed * self.float_increment * dt
        self.float_index = (self.float_index + increment) % self.num_tiles
        if self.index_diff:
            self.index_diff -= abs(increment)
            if self.index_diff < 0:
                # Finished stepping: snap exactly onto the target tile.
                self.index_diff = 0
                self.float_index = round(self.float_index) % self.num_tiles
                self.index = int(self.float_index)
                self.is_rotating = False
        self.angle = (self.float_index / self.num_tiles) * 360
if __name__ == '__main__':
    window = MainWindow()
    # Present one blank frame before showing the window so it does not
    # appear with undefined framebuffer contents.
    window.clear()
    window.flip()
    window.set_visible(True)
    pyglet.app.run()
| |
import os
import subprocess
import sys
from six import print_
from ccmlib import common, repository
from ccmlib.cluster import Cluster
from ccmlib.cluster_factory import ClusterFactory
from ccmlib.cmds.command import Cmd
from ccmlib.common import ArgumentError
from ccmlib.dse_cluster import DseCluster
from ccmlib.dse_node import DseNode
from ccmlib.node import Node, NodeError
def cluster_cmds():
    """Return the names of all cluster-level subcommands ccm exposes."""
    return ("create add populate list switch status remove clear liveset "
            "start stop flush compact stress updateconf updatedseconf "
            "updatelog4j cli setdir bulkload setlog scrub verify "
            "invalidatecache checklogerror showlastlog jconsole").split()
def parse_populate_count(v):
    """Parse a ccm populate node-count spec.

    Returns None for None, an int for a plain count like "3", and a list of
    ints for a colon-separated multi-DC spec like "2:3".
    """
    if v is None:
        return None
    counts = [int(part) for part in v.split(':')]
    return counts[0] if len(counts) == 1 else counts
class ClusterCreateCmd(Cmd):
    """``ccm create``: create a new cluster, optionally populating and starting nodes.

    Fixes over the previous revision: corrected the "--require_client_auth"
    help text typo ("vaid" -> "valid") and dropped an unused local in run().
    """

    def description(self):
        return "Create a new cluster"

    def get_parser(self):
        usage = "usage: ccm create [options] cluster_name"
        parser = self._get_default_parser(usage, self.description())
        parser.add_option('--no-switch', action="store_true", dest="no_switch",
                          help="Don't switch to the newly created cluster", default=False)
        parser.add_option('-p', '--partitioner', type="string", dest="partitioner",
                          help="Set the cluster partitioner class")
        parser.add_option('-v', "--version", type="string", dest="version",
                          help="Download and use provided cassandra or dse version. If version is of the form 'git:<branch name>', then the specified cassandra branch will be downloaded from the git repo and compiled. (takes precedence over --install-dir)", default=None)
        parser.add_option('-o', "--opsc", type="string", dest="opscenter",
                          help="Download and use provided opscenter version to install with DSE. Will have no effect on cassandra installs)", default=None)
        parser.add_option("--dse", action="store_true", dest="dse",
                          help="Use with -v to indicate that the version being loaded is DSE")
        parser.add_option("--dse-username", type="string", dest="dse_username",
                          help="The username to use to download DSE with", default=None)
        parser.add_option("--dse-password", type="string", dest="dse_password",
                          help="The password to use to download DSE with", default=None)
        parser.add_option("--install-dir", type="string", dest="install_dir",
                          help="Path to the cassandra or dse directory to use [default %default]", default="./")
        parser.add_option('-n', '--nodes', type="string", dest="nodes",
                          help="Populate the new cluster with that number of nodes (a single int or a colon-separate list of ints for multi-dc setups)")
        parser.add_option('-i', '--ipprefix', type="string", dest="ipprefix",
                          help="Ipprefix to use to create the ip of a node while populating")
        parser.add_option('-I', '--ip-format', type="string", dest="ipformat",
                          help="Format to use when creating the ip of a node (supports enumerating ipv6-type addresses like fe80::%d%lo0)")
        parser.add_option('-s', "--start", action="store_true", dest="start_nodes",
                          help="Start nodes added through -s", default=False)
        parser.add_option('-d', "--debug", action="store_true", dest="debug",
                          help="If -s is used, show the standard output when starting the nodes", default=False)
        parser.add_option('-b', "--binary-protocol", action="store_true", dest="binary_protocol",
                          help="Enable the binary protocol (starting from C* 1.2.5 the binary protocol is started by default and this option is a no-op)", default=False)
        parser.add_option('-D', "--debug-log", action="store_true", dest="debug_log",
                          help="With -n, sets debug logging on the new nodes", default=False)
        parser.add_option('-T', "--trace-log", action="store_true", dest="trace_log",
                          help="With -n, sets trace logging on the new nodes", default=False)
        parser.add_option("--vnodes", action="store_true", dest="vnodes",
                          help="Use vnodes (256 tokens). Must be paired with -n.", default=False)
        parser.add_option('--jvm_arg', action="append", dest="jvm_args",
                          help="Specify a JVM argument", default=[])
        parser.add_option('--profile', action="store_true", dest="profile",
                          help="Start the nodes with yourkit agent (only valid with -s)", default=False)
        parser.add_option('--profile-opts', type="string", action="store", dest="profile_options",
                          help="Yourkit options when profiling", default=None)
        parser.add_option('--ssl', type="string", dest="ssl_path",
                          help="Path to keystore.jks and cassandra.crt files (and truststore.jks [not required])", default=None)
        parser.add_option('--require_client_auth', action="store_true", dest="require_client_auth",
                          help="Enable client authentication (only valid with --ssl)", default=False)
        parser.add_option('--node-ssl', type="string", dest="node_ssl_path",
                          help="Path to keystore.jks and truststore.jks for internode encryption", default=None)
        return parser

    def validate(self, parser, options, args):
        """Check option consistency and resolve the node count / install dir.

        Exits (via parser.error or exit(1)) on any invalid combination.
        """
        Cmd.validate(self, parser, options, args, cluster_name=True)
        if options.ipprefix and options.ipformat:
            parser.print_help()
            parser.error("%s and %s may not be used together" % (parser.get_option('-i'), parser.get_option('-I')))
        self.nodes = parse_populate_count(options.nodes)
        # --vnodes only makes sense when this command also populates nodes.
        if self.options.vnodes and self.nodes is None:
            print_("Can't set --vnodes if not populating cluster in this command.")
            parser.print_help()
            exit(1)
        if not options.version:
            # No version to download, so the install dir must point at a build.
            try:
                common.validate_install_dir(options.install_dir)
            except ArgumentError:
                parser.print_help()
                parser.error("%s is not a valid cassandra directory. You must define a cassandra dir or version." % options.install_dir)
            common.assert_jdk_valid_for_cassandra_version(common.get_version_from_build(options.install_dir))

    def run(self):
        """Create the cluster and optionally populate/start its nodes."""
        try:
            # DSE if explicitly requested, or if the install dir looks like DSE.
            if self.options.dse or (not self.options.version and common.isDse(self.options.install_dir)):
                cluster = DseCluster(self.path, self.name, install_dir=self.options.install_dir, version=self.options.version, dse_username=self.options.dse_username, dse_password=self.options.dse_password, opscenter=self.options.opscenter, verbose=True)
            else:
                cluster = Cluster(self.path, self.name, install_dir=self.options.install_dir, version=self.options.version, verbose=True)
        except OSError as e:
            import traceback
            print_('Cannot create cluster: %s\n%s' % (str(e), traceback.format_exc()), file=sys.stderr)
            exit(1)
        if self.options.partitioner:
            cluster.set_partitioner(self.options.partitioner)
        # From C* 1.2.5 on, the binary protocol is always enabled.
        if cluster.cassandra_version() >= "1.2.5":
            self.options.binary_protocol = True
        if self.options.binary_protocol:
            cluster.set_configuration_options({'start_native_transport': True})
        if cluster.cassandra_version() >= "1.2" and self.options.vnodes:
            cluster.set_configuration_options({'num_tokens': 256})
        if not self.options.no_switch:
            common.switch_cluster(self.path, self.name)
            print_('Current cluster is now: %s' % self.name)
        # Default node addressing scheme when none was given.
        if not (self.options.ipprefix or self.options.ipformat):
            self.options.ipformat = '127.0.0.%d'
        if self.options.ssl_path:
            cluster.enable_ssl(self.options.ssl_path, self.options.require_client_auth)
        if self.options.node_ssl_path:
            cluster.enable_internode_ssl(self.options.node_ssl_path)
        if self.nodes is not None:
            try:
                if self.options.debug_log:
                    cluster.set_log_level("DEBUG")
                if self.options.trace_log:
                    cluster.set_log_level("TRACE")
                cluster.populate(self.nodes, self.options.debug, use_vnodes=self.options.vnodes, ipprefix=self.options.ipprefix, ipformat=self.options.ipformat)
                if self.options.start_nodes:
                    profile_options = None
                    if self.options.profile:
                        profile_options = {}
                        if self.options.profile_options:
                            profile_options['options'] = self.options.profile_options
                    # cluster.start() returns None on failure to come up.
                    if cluster.start(verbose=self.options.debug_log, wait_for_binary_proto=self.options.binary_protocol, jvm_args=self.options.jvm_args, profile_options=profile_options) is None:
                        details = ""
                        if not self.options.debug_log:
                            details = " (you can use --debug-log for more information)"
                        print_("Error starting nodes, see above for details%s" % details, file=sys.stderr)
            except common.ArgumentError as e:
                print_(str(e), file=sys.stderr)
                exit(1)
class ClusterAddCmd(Cmd):
    """``ccm add``: add a single, fully configured node to the current cluster."""

    def description(self):
        return "Add a new node to the current cluster"

    def get_parser(self):
        usage = "usage: ccm add [options] node_name"
        parser = self._get_default_parser(usage, self.description())
        parser.add_option('-b', '--auto-bootstrap', action="store_true", dest="bootstrap",
                          help="Set auto bootstrap for the node", default=False)
        parser.add_option('-s', '--seeds', action="store_true", dest="is_seed",
                          help="Configure this node as a seed", default=False)
        parser.add_option('-i', '--itf', type="string", dest="itfs",
                          help="Set host and port for thrift, the binary protocol and storage (format: host[:port])")
        parser.add_option('-t', '--thrift-itf', type="string", dest="thrift_itf",
                          help="Set the thrift host and port for the node (format: host[:port])")
        parser.add_option('-l', '--storage-itf', type="string", dest="storage_itf",
                          help="Set the storage (cassandra internal) host and port for the node (format: host[:port])")
        parser.add_option('--binary-itf', type="string", dest="binary_itf",
                          help="Set the binary protocol host and port for the node (format: host[:port]).")
        parser.add_option('-j', '--jmx-port', type="string", dest="jmx_port",
                          help="JMX port for the node", default="7199")
        parser.add_option('-r', '--remote-debug-port', type="string", dest="remote_debug_port",
                          help="Remote Debugging Port for the node", default="2000")
        parser.add_option('-n', '--token', type="string", dest="initial_token",
                          help="Initial token for the node", default=None)
        parser.add_option('-d', '--data-center', type="string", dest="data_center",
                          help="Datacenter name this node is part of", default=None)
        parser.add_option('--dse', action="store_true", dest="dse_node",
                          help="Add node to DSE Cluster", default=False)
        return parser

    def validate(self, parser, options, args):
        """Resolve interfaces/ports and reject conflicts before creating the node."""
        Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True, load_node=False)
        # -i is a catch-all; every interface must be derivable from -i or its
        # specific option, otherwise we cannot configure the node.
        if options.itfs is None and (options.thrift_itf is None or options.storage_itf is None or options.binary_itf is None):
            print_('Missing thrift and/or storage and/or binary protocol interfaces or jmx port', file=sys.stderr)
            parser.print_help()
            exit(1)
        used_jmx_ports = [node.jmx_port for node in self.cluster.nodelist()]
        if options.jmx_port in used_jmx_ports:
            print_("This JMX port is already in use. Choose another.", file=sys.stderr)
            parser.print_help()
            exit(1)
        # Fall back to the generic -i interface for any unspecified one.
        if options.thrift_itf is None:
            options.thrift_itf = options.itfs
        if options.storage_itf is None:
            options.storage_itf = options.itfs
        if options.binary_itf is None:
            options.binary_itf = options.itfs
        # Default ports: 9160 (thrift), 7000 (storage), 9042 (binary).
        self.thrift = common.parse_interface(options.thrift_itf, 9160)
        self.storage = common.parse_interface(options.storage_itf, 7000)
        self.binary = common.parse_interface(options.binary_itf, 9042)
        if self.binary[0] != self.thrift[0]:
            print_('Cannot set a binary address different from the thrift one', file=sys.stderr)
            exit(1)
        self.jmx_port = options.jmx_port
        self.remote_debug_port = options.remote_debug_port
        self.initial_token = options.initial_token

    def run(self):
        """Instantiate the node (DSE or plain Cassandra) and register it."""
        try:
            # DseNode and Node take the same constructor arguments here.
            if self.options.dse_node:
                node = DseNode(self.name, self.cluster, self.options.bootstrap, self.thrift, self.storage, self.jmx_port, self.remote_debug_port, self.initial_token, binary_interface=self.binary)
            else:
                node = Node(self.name, self.cluster, self.options.bootstrap, self.thrift, self.storage, self.jmx_port, self.remote_debug_port, self.initial_token, binary_interface=self.binary)
            self.cluster.add(node, self.options.is_seed, self.options.data_center)
        except common.ArgumentError as e:
            print_(str(e), file=sys.stderr)
            exit(1)
class ClusterPopulateCmd(Cmd):
    """``ccm populate``: add a group of nodes with default options to the current cluster."""

    def description(self):
        return "Add a group of new nodes with default options"

    def get_parser(self):
        usage = "usage: ccm populate -n <node count> {-d}"
        parser = self._get_default_parser(usage, self.description())
        parser.add_option('-n', '--nodes', type="string", dest="nodes",
                          help="Number of nodes to populate with (a single int or a colon-separate list of ints for multi-dc setups)")
        parser.add_option('-d', '--debug', action="store_true", dest="debug",
                          help="Enable remote debugging options", default=False)
        parser.add_option('--vnodes', action="store_true", dest="vnodes",
                          help="Populate using vnodes", default=False)
        parser.add_option('-i', '--ipprefix', type="string", dest="ipprefix",
                          help="Ipprefix to use to create the ip of a node")
        parser.add_option('-I', '--ip-format', type="string", dest="ipformat",
                          help="Format to use when creating the ip of a node (supports enumerating ipv6-type addresses like fe80::%d%lo0)")
        return parser

    def validate(self, parser, options, args):
        """Check the node-count spec; -i and -I are mutually exclusive."""
        Cmd.validate(self, parser, options, args, load_cluster=True)
        if options.ipprefix and options.ipformat:
            parser.print_help()
            parser.error("%s and %s may not be used together" % (parser.get_option('-i'), parser.get_option('-I')))
        self.nodes = parse_populate_count(options.nodes)
        if self.nodes is None:
            parser.print_help()
            # parser.error() raises SystemExit itself; a trailing exit(1)
            # that followed it was unreachable and has been removed.
            parser.error("Not a valid number of nodes. Did you use -n?")

    def run(self):
        """Populate the cluster, defaulting the IP scheme to 127.0.0.%d."""
        try:
            if self.cluster.cassandra_version() >= "1.2" and self.options.vnodes:
                self.cluster.set_configuration_options({'num_tokens': 256})
            if not (self.options.ipprefix or self.options.ipformat):
                self.options.ipformat = '127.0.0.%d'
            self.cluster.populate(self.nodes, self.options.debug, use_vnodes=self.options.vnodes, ipprefix=self.options.ipprefix, ipformat=self.options.ipformat)
        except common.ArgumentError as e:
            print_(str(e), file=sys.stderr)
            exit(1)
class ClusterListCmd(Cmd):
    """``ccm list``: print every cluster under the ccm directory, marking the current one with '*'."""

    def description(self):
        return "List existing clusters"

    def get_parser(self):
        usage = "usage: ccm list [options]"
        return self._get_default_parser(usage, self.description())

    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args)

    def run(self):
        # No current cluster (or unreadable CURRENT file) just means no marker.
        try:
            current = common.current_cluster_name(self.path)
        except Exception:
            current = ''
        for entry in os.listdir(self.path):
            conf_file = os.path.join(self.path, entry, 'cluster.conf')
            if os.path.exists(conf_file):
                marker = '*' if current == entry else ' '
                print_(" %s%s" % (marker, entry))
class ClusterSwitchCmd(Cmd):
    """``ccm switch``: make another existing cluster the current (active) one."""

    def description(self):
        return "Switch of current (active) cluster"

    def get_parser(self):
        usage = "usage: ccm switch [options] cluster_name"
        return self._get_default_parser(usage, self.description())

    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, cluster_name=True)
        # A directory is only a cluster if it carries a cluster.conf file.
        conf_file = os.path.join(self.path, self.name, 'cluster.conf')
        if not os.path.exists(conf_file):
            print_("%s does not appear to be a valid cluster (use ccm list to view valid clusters)" % self.name, file=sys.stderr)
            exit(1)

    def run(self):
        common.switch_cluster(self.path, self.name)
class ClusterStatusCmd(Cmd):
    """``ccm status``: display the state of every node in the current cluster."""

    def description(self):
        return "Display status on the current cluster"

    def get_parser(self):
        parser = self._get_default_parser("usage: ccm status [options]", self.description())
        parser.add_option('-v', '--verbose', action="store_true", dest="verbose",
                          help="Print full information on all nodes", default=False)
        return parser

    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)

    def run(self):
        self.cluster.show(self.options.verbose)
class ClusterRemoveCmd(Cmd):
    """``ccm remove``: delete a cluster (and all its data) by name, or the current one."""

    def description(self):
        return "Remove the current or specified cluster (delete all data)"

    def get_parser(self):
        usage = "usage: ccm remove [options] [cluster_name]"
        parser = self._get_default_parser(usage, self.description())
        return parser

    def validate(self, parser, options, args):
        # self.other_cluster holds the explicit name argument when given;
        # None means "remove the currently active cluster".
        self.other_cluster = None
        if len(args) > 0:
            # Setup to remove the specified cluster:
            Cmd.validate(self, parser, options, args)
            self.other_cluster = args[0]
            if not os.path.exists(os.path.join(
                    self.path, self.other_cluster, 'cluster.conf')):
                print_("%s does not appear to be a valid cluster"
                       " (use ccm list to view valid clusters)"
                       % self.other_cluster, file=sys.stderr)
                exit(1)
        else:
            # Setup to remove the current cluster:
            Cmd.validate(self, parser, options, args, load_cluster=True)

    def run(self):
        if self.other_cluster:
            # Remove the specified cluster:
            cluster = ClusterFactory.load(self.path, self.other_cluster)
            cluster.remove()
            # Remove CURRENT flag if the specified cluster is the current cluster:
            if self.other_cluster == common.current_cluster_name(self.path):
                os.remove(os.path.join(self.path, 'CURRENT'))
        else:
            # Remove the current cluster:
            self.cluster.remove()
            os.remove(os.path.join(self.path, 'CURRENT'))
class ClusterClearCmd(Cmd):
    """``ccm clear``: stop all nodes and wipe the current cluster's data."""

    def description(self):
        return "Clear the current cluster data (and stop all nodes)"

    def get_parser(self):
        return self._get_default_parser("usage: ccm clear [options]", self.description())

    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)

    def run(self):
        self.cluster.clear()
class ClusterLivesetCmd(Cmd):
    """``ccm liveset``: print a comma-separated list of live nodes' storage addresses."""

    def description(self):
        return "Print a comma-separated list of addresses of running nodes (handful in scripts)"

    def get_parser(self):
        return self._get_default_parser("usage: ccm liveset [options]", self.description())

    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)

    def run(self):
        live_addresses = [node.network_interfaces['storage'][0]
                          for node in self.cluster.nodes.values()
                          if node.is_live()]
        print_(",".join(live_addresses))
class ClusterSetdirCmd(Cmd):
    """``ccm setdir``: point the cluster (or one node) at a cassandra/dse install."""

    def description(self):
        return "Set the install directory (cassandra or dse) to use"

    def get_parser(self):
        usage = "usage: ccm setdir [options]"
        parser = self._get_default_parser(usage, self.description())
        parser.add_option('-v', "--version", type="string", dest="version",
                          help="Download and use provided cassandra or dse version. If version is of the form 'git:<branch name>', then the specified cassandra branch will be downloaded from the git repo and compiled. (takes precedence over --install-dir)", default=None)
        parser.add_option("--install-dir", type="string", dest="install_dir",
                          help="Path to the cassandra or dse directory to use [default %default]", default="./")
        parser.add_option('-n', '--node', type="string", dest="node",
                          help="Set directory only for the specified node")
        return parser

    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)

    def run(self):
        try:
            # Target is the whole cluster unless -n narrows it to one node.
            target = self.cluster
            if self.options.node:
                target = self.cluster.nodes.get(self.options.node)
                if not target:
                    print_("Node not found: %s" % self.options.node)
                    return
            target.set_install_dir(install_dir=self.options.install_dir, version=self.options.version, verbose=True)
        except common.ArgumentError as e:
            print_(str(e), file=sys.stderr)
            exit(1)
class ClusterClearrepoCmd(Cmd):
    """``ccm clearrepo``: delete every downloaded cassandra source from the local repository."""

    def description(self):
        return "Cleanup downloaded cassandra sources"

    def get_parser(self):
        return self._get_default_parser("usage: ccm clearrepo [options]", self.description())

    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args)

    def run(self):
        repository.clean_all()
class ClusterStartCmd(Cmd):
    """``ccm start``: start every node of the current cluster that is not already running."""

    def description(self):
        return "Start all the non started nodes of the current cluster"

    def get_parser(self):
        usage = "usage: ccm cluster start [options]"
        parser = self._get_default_parser(usage, self.description())
        parser.add_option('-v', '--verbose', action="store_true", dest="verbose",
                          help="Print standard output of cassandra process", default=False)
        parser.add_option('--no-wait', action="store_true", dest="no_wait",
                          help="Do not wait for cassandra node to be ready", default=False)
        parser.add_option('--wait-other-notice', action="store_true", dest="wait_other_notice",
                          help="Wait until all other live nodes of the cluster have marked this node UP", default=False)
        parser.add_option('--wait-for-binary-proto', action="store_true", dest="wait_for_binary_proto",
                          help="Wait for the binary protocol to start", default=False)
        parser.add_option('--jvm_arg', action="append", dest="jvm_args",
                          help="Specify a JVM argument", default=[])
        parser.add_option('--profile', action="store_true", dest="profile",
                          help="Start the nodes with yourkit agent (only valid with -s)", default=False)
        parser.add_option('--profile-opts', type="string", action="store", dest="profile_options",
                          help="Yourkit options when profiling", default=None)
        parser.add_option('--quiet-windows', action="store_true", dest="quiet_start", help="Pass -q on Windows 2.2.4+ and 3.0+ startup. Ignored on linux.", default=False)
        return parser

    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)

    def run(self):
        try:
            # profile_options stays None unless --profile was given; an empty
            # dict means "profile with default options".
            profile_options = None
            if self.options.profile:
                profile_options = {}
                if self.options.profile_options:
                    profile_options['options'] = self.options.profile_options
            if len(self.cluster.nodes) == 0:
                print_("No node in this cluster yet. Use the populate command before starting.")
                exit(1)
            # cluster.start() returns None when the nodes failed to come up.
            if self.cluster.start(no_wait=self.options.no_wait,
                                  wait_other_notice=self.options.wait_other_notice,
                                  wait_for_binary_proto=self.options.wait_for_binary_proto,
                                  verbose=self.options.verbose,
                                  jvm_args=self.options.jvm_args,
                                  profile_options=profile_options,
                                  quiet_start=self.options.quiet_start) is None:
                details = ""
                if not self.options.verbose:
                    details = " (you can use --verbose for more information)"
                print_("Error starting nodes, see above for details%s" % details, file=sys.stderr)
                exit(1)
        except NodeError as e:
            # Surface the failing node's stderr to help diagnose the startup error.
            print_(str(e), file=sys.stderr)
            print_("Standard error output is:", file=sys.stderr)
            for line in e.process.stderr:
                print_(line.rstrip('\n'), file=sys.stderr)
            exit(1)
class ClusterStopCmd(Cmd):
    """``ccm stop``: stop every node of the current cluster."""

    def description(self):
        return "Stop all the nodes of the cluster"

    def get_parser(self):
        usage = "usage: ccm cluster stop [options] name"
        parser = self._get_default_parser(usage, self.description())
        parser.add_option('-v', '--verbose', action="store_true", dest="verbose",
                          help="Print nodes that were not running", default=False)
        parser.add_option('--no-wait', action="store_true", dest="no_wait",
                          help="Do not wait for the node to be stopped", default=False)
        # -g and --not-gently share dest="gently": the last one given wins.
        parser.add_option('-g', '--gently', action="store_true", dest="gently",
                          help="Shut down gently (default)", default=True)
        parser.add_option('--not-gently', action="store_false", dest="gently",
                          help="Shut down immediately (kill -9)", default=True)
        return parser

    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)

    def run(self):
        try:
            # cluster.stop() returns the nodes that were not running.
            not_running = self.cluster.stop(not self.options.no_wait, gently=self.options.gently)
            if self.options.verbose and len(not_running) > 0:
                sys.stdout.write("The following nodes were not running: ")
                for node in not_running:
                    sys.stdout.write(node.name + " ")
                print_("")
        except NodeError as e:
            print_(str(e), file=sys.stderr)
            exit(1)
class _ClusterNodetoolCmd(Cmd):
    """Shared base for commands that run one nodetool command on the whole cluster.

    Subclasses only set the ``usage``, ``nodetool_cmd`` and ``descr_text``
    class attributes.
    """

    def description(self):
        return self.descr_text

    def get_parser(self):
        return self._get_default_parser(self.usage, self.description())

    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)

    def run(self):
        self.cluster.nodetool(self.nodetool_cmd)
class ClusterFlushCmd(_ClusterNodetoolCmd):
    """``ccm flush``: run "nodetool flush" on every running node."""
    usage = "usage: ccm cluster flush [options] name"
    nodetool_cmd = 'flush'
    descr_text = "Flush all (running) nodes of the cluster"
class ClusterCompactCmd(_ClusterNodetoolCmd):
    """``ccm compact``: run "nodetool compact" on every running node."""
    usage = "usage: ccm cluster compact [options] name"
    nodetool_cmd = 'compact'
    descr_text = "Compact all (running) node of the cluster"
class ClusterDrainCmd(_ClusterNodetoolCmd):
    """``ccm drain``: run "nodetool drain" on every running node."""
    usage = "usage: ccm cluster drain [options] name"
    nodetool_cmd = 'drain'
    descr_text = "Drain all (running) node of the cluster"
class ClusterStressCmd(Cmd):
    """``ccm stress``: run the stress tool against all live nodes."""

    def description(self):
        return "Run stress using all live nodes"

    def get_parser(self):
        usage = "usage: ccm stress [options] [stress_options]"
        parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
        return parser

    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)
        # Everything unknown to our parser is forwarded verbatim to stress.
        self.stress_options = args + parser.get_ignored()

    def run(self):
        try:
            self.cluster.stress(self.stress_options)
        except Exception as e:
            # Best effort: report the failure but don't change the exit code.
            print_(e, file=sys.stderr)
class ClusterUpdateconfCmd(Cmd):
    """``ccm updateconf``: push cassandra.yaml setting overrides to every node."""

    def description(self):
        return "Update the cassandra config files for all nodes"

    def get_parser(self):
        usage = "usage: ccm updateconf [options] [ new_setting | ... ], where new_setting should be a string of the form 'compaction_throughput_mb_per_sec: 32'; nested options can be separated with a period like 'client_encryption_options.enabled: false'"
        parser = self._get_default_parser(usage, self.description())
        parser.add_option('--no-hh', '--no-hinted-handoff', action="store_false",
                          dest="hinted_handoff", default=True, help="Disable hinted handoff")
        parser.add_option('--batch-cl', '--batch-commit-log', action="store_true",
                          dest="cl_batch", default=False, help="Set commit log to batch mode")
        parser.add_option('--rt', '--rpc-timeout', action="store", type='int',
                          dest="rpc_timeout", help="Set rpc timeout")
        return parser

    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)
        try:
            # Positional args are "key: value" strings parsed into a dict.
            self.setting = common.parse_settings(args)
        except common.ArgumentError as e:
            print_(str(e), file=sys.stderr)
            exit(1)

    def run(self):
        self.setting['hinted_handoff_enabled'] = self.options.hinted_handoff
        if self.options.rpc_timeout is not None:
            # Before C* 1.2 there was a single rpc timeout; from 1.2 on it is
            # split into per-operation timeouts, so set them all.
            if self.cluster.cassandra_version() < "1.2":
                self.setting['rpc_timeout_in_ms'] = self.options.rpc_timeout
            else:
                self.setting['read_request_timeout_in_ms'] = self.options.rpc_timeout
                self.setting['range_request_timeout_in_ms'] = self.options.rpc_timeout
                self.setting['write_request_timeout_in_ms'] = self.options.rpc_timeout
                self.setting['truncate_request_timeout_in_ms'] = self.options.rpc_timeout
                self.setting['request_timeout_in_ms'] = self.options.rpc_timeout
        self.cluster.set_configuration_options(values=self.setting, batch_commitlog=self.options.cl_batch)
class ClusterUpdatedseconfCmd(Cmd):
    """``ccm updatedseconf``: push dse.yaml setting overrides to every node."""

    def description(self):
        return "Update the dse config files for all nodes"

    def get_parser(self):
        usage = "usage: ccm updatedseconf [options] [ new_setting | ... ], where new_setting should be a string of the form 'max_solr_concurrency_per_core: 2'; nested options can be separated with a period like 'cql_slow_log_options.enabled: true'"
        return self._get_default_parser(usage, self.description())

    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)
        # Positional args are "key: value" strings parsed into a dict.
        try:
            self.setting = common.parse_settings(args)
        except common.ArgumentError as e:
            print_(str(e), file=sys.stderr)
            exit(1)

    def run(self):
        self.cluster.set_dse_configuration_options(values=self.setting)
#
# Implements updating of log4j-server.properties on ALL nodes by copying
# the given config file into
# ~/.ccm/<cluster_name>/nodeX/conf/log4j-server.properties
#
class ClusterUpdatelog4jCmd(Cmd):
    """``ccm updatelog4j``: replace log4j-server.properties on every node.

    Fixes a typo in the user-facing error message ("congiguration" ->
    "configuration").
    """

    def description(self):
        return "Update the Cassandra log4j-server.properties configuration file on all nodes"

    def get_parser(self):
        usage = "usage: ccm updatelog4j -p <log4j config>"
        parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
        parser.add_option('-p', '--path', type="string", dest="log4jpath",
                          help="Path to new Cassandra log4j configuration file")
        return parser

    def validate(self, parser, options, args):
        """Require the -p/--path option; report and exit if it is missing."""
        Cmd.validate(self, parser, options, args, load_cluster=True)
        try:
            self.log4jpath = options.log4jpath
            if self.log4jpath is None:
                raise KeyError("[Errno] -p or --path <path of new log4j configuration file> is not provided")
        except common.ArgumentError as e:
            print_(str(e), file=sys.stderr)
            exit(1)
        except KeyError as e:
            print_(str(e), file=sys.stderr)
            exit(1)

    def run(self):
        try:
            self.cluster.update_log4j(self.log4jpath)
        except common.ArgumentError as e:
            print_(str(e), file=sys.stderr)
            exit(1)
class ClusterCliCmd(Cmd):
    """``ccm cli``: launch cassandra-cli against a live node of the cluster."""

    def description(self):
        return "Launch cassandra cli connected to some live node (if any)"

    def get_parser(self):
        usage = "usage: ccm cli [options] [cli_options]"
        parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
        parser.add_option('-x', '--exec', type="string", dest="cmds", default=None,
                          help="Execute the specified commands and exit")
        parser.add_option('-v', '--verbose', action="store_true", dest="verbose",
                          help="With --exec, show cli output after completion", default=False)
        return parser

    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)
        # Unknown options plus args[1:] are forwarded to the cli.
        # NOTE(review): args[0] is skipped here, unlike bulkload/scrub which
        # forward all args — presumably it is consumed elsewhere; confirm.
        self.cli_options = parser.get_ignored() + args[1:]

    def run(self):
        self.cluster.run_cli(self.options.cmds, self.options.verbose, self.cli_options)
class ClusterBulkloadCmd(Cmd):
    """``ccm bulkload``: stream sstable files into the cluster through a live node."""

    def description(self):
        return "Bulkload files into the cluster by connecting to some live node (if any)"

    def get_parser(self):
        usage = "usage: ccm bulkload [options] [sstable_dir]"
        return self._get_default_parser(usage, self.description(), ignore_unknown_options=True)

    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)
        # Unrecognised options and positional args go straight to the loader.
        self.loader_options = parser.get_ignored() + args

    def run(self):
        self.cluster.bulkload(self.loader_options)
class ClusterScrubCmd(Cmd):
    """``ccm scrub``: run a scrub of the given keyspace/column family on the cluster."""

    def description(self):
        return "Scrub files"

    def get_parser(self):
        usage = "usage: ccm scrub [options] <keyspace> <cf>"
        return self._get_default_parser(usage, self.description(), ignore_unknown_options=True)

    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)
        # Unrecognised options and positional args go straight to scrub.
        self.scrub_options = parser.get_ignored() + args

    def run(self):
        self.cluster.scrub(self.scrub_options)
class ClusterVerifyCmd(Cmd):
    """``ccm verify``: verify the given keyspace/column family on the cluster."""

    def description(self):
        return "Verify files"

    def get_parser(self):
        usage = "usage: ccm verify [options] <keyspace> <cf>"
        return self._get_default_parser(usage, self.description(), ignore_unknown_options=True)

    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)
        # Unrecognised options and positional args go straight to verify.
        self.verify_options = parser.get_ignored() + args

    def run(self):
        self.cluster.verify(self.verify_options)
class ClusterSetlogCmd(Cmd):
    """``ccm setlog``: set the log level (optionally per Java class) on all nodes."""

    def description(self):
        return "Set log level (INFO, DEBUG, ...) with/without Java class for all node of the cluster - require a node restart"

    def get_parser(self):
        usage = "usage: ccm setlog [options] level"
        parser = self._get_default_parser(usage, self.description())
        parser.add_option('-c', '--class', type="string", dest="class_name", default=None,
                          help="Optional java class/package. Logging will be set for only this class/package if set")
        return parser

    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)
        # The log level is the single required positional argument.
        if len(args) == 0:
            print_('Missing log level', file=sys.stderr)
            parser.print_help()
            exit(1)
        self.level = args[0]

    def run(self):
        try:
            self.cluster.set_log_level(self.level, self.options.class_name)
        except common.ArgumentError as e:
            print_(str(e), file=sys.stderr)
            exit(1)
class ClusterInvalidatecacheCmd(Cmd):
    """``ccm invalidatecache``: delete ccm's local download/git cache."""

    def description(self):
        return "Destroys ccm's local git cache."

    def get_parser(self):
        return self._get_default_parser("usage: ccm invalidatecache", self.description())

    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args)

    def run(self):
        try:
            common.invalidate_cache()
        except Exception as e:
            # Deletion can fail (permissions, files in use); tell the user to
            # clean up by hand and exit with an error status.
            print_(str(e), file=sys.stderr)
            print_("Error while deleting cache. Please attempt manually.")
            exit(1)
class ClusterChecklogerrorCmd(Cmd):
    """``ccm checklogerror``: print error lines found in every node's log file."""

    def description(self):
        return "Check for errors in log file of each node."

    def get_parser(self):
        return self._get_default_parser("usage: ccm checklogerror", self.description())

    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)

    def run(self):
        for node in self.cluster.nodelist():
            # grep_log_for_errors() yields groups of matching lines.
            for match_group in node.grep_log_for_errors():
                for line in match_group:
                    print_(line)
class ClusterShowlastlogCmd(Cmd):
    """``ccm showlastlog``: open the most recent build's last.log in $PAGER."""

    def description(self):
        return "Show the last.log for the most recent build through your $PAGER"

    def get_parser(self):
        usage = "usage: ccm showlastlog"
        return self._get_default_parser(usage, self.description())

    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)

    def run(self):
        log = repository.lastlogfilename()
        # Fall back to a platform default pager when $PAGER is unset.
        pager = os.environ.get('PAGER', common.platform_pager())
        # execvp replaces this process with the pager; nothing runs after it.
        os.execvp(pager, (pager, log))
class ClusterJconsoleCmd(Cmd):
    """``ccm jconsole``: launch jconsole attached to every node's JMX port."""

    def description(self):
        return "Opens jconsole client and connects to all running nodes"

    def get_parser(self):
        return self._get_default_parser("usage: ccm jconsole", self.description())

    def validate(self, parser, options, args):
        Cmd.validate(self, parser, options, args, load_cluster=True)

    def run(self):
        targets = ["localhost:%s" % node.jmx_port for node in self.cluster.nodes.values()]
        try:
            subprocess.call(["jconsole"] + targets, stderr=sys.stderr)
        except OSError:
            # jconsole binary missing from PATH.
            print_("Could not start jconsole. Please make sure jconsole can be found in your $PATH.")
            exit(1)
| |
# -*- coding: utf-8 -*-
from __future__ import print_function
from copy import deepcopy
from bg.breakpoint_graph import BreakpointGraph
from bg.edge import BGEdge
from bg.genome import BGGenome
from bg.multicolor import Multicolor
from bg.utils import add_to_dict_with_path
from bg.vertices import BlockVertex, TaggedVertex, TaggedBlockVertex, TaggedInfinityVertex, BGVertex
__author__ = "Sergey Aganezov"
__email__ = "aganezov(at)cs.jhu.edu"
__status__ = "production"
class GRIMMReader(object):
    """ Staticmethod-based reader of GRIMM formatted data that builds a
    :class:`bg.breakpoint_graph.BreakpointGraph` instance from a file-like object.

    There are no private implementations behind the public methods, so inherit with caution.
    The GRIMM format supported here is simplified from http://grimm.ucsd.edu/GRIMM/grimm_instr.html :

    #) all strings are stripped on both sides; below, "string" means the stripped string
    #) a ``genome declaration`` is a string starting with ``>``; the ``genome name`` is everything after ``>``
    #) all input data before the next genome declaration (or EOF) is attributed to that genome
    #) a data string (gene order information) is any string that is not a genome declaration, comment, or empty
    #) every genomic fragment (chromosome/scaffold/contig/...) must be on its own string
    #) every data string must contain a ``$`` (linear) or ``@`` (circular) gene order terminator;
       everything after the terminator is ignored, and an empty gene order before it raises an error
    #) gene order is a sequence of space-separated block names with optional orientation:
       a leading ``+``/``-`` must be followed by a nonempty name; with no sign, ``+`` is assumed
    #) a string starting with ``#`` is a comment and is skipped, except special
       ``# data :: ...`` strings which carry fragment metadata

    Main operations:

    * :meth:`GRIMMReader.is_genome_declaration_string` / :meth:`GRIMMReader.is_comment_string`:
      classify input strings
    * :meth:`GRIMMReader.parse_genome_declaration_string` / :meth:`GRIMMReader.parse_data_string`:
      extract the genome and the (fragment type, oriented blocks) information
    * :meth:`GRIMMReader.get_edges_from_parsed_data`: translate block adjacencies into graph edges
    * :meth:`GRIMMReader.get_breakpoint_graph`: drive the whole stream-to-graph conversion
    """
    # separates the "data" marker from the payload in special comment strings
    COMMENT_DATA_STRING_SEPARATOR = "::"
    # separates path entries inside a comment-data payload
    PATH_SEPARATOR_STRING = ":"

    @staticmethod
    def is_genome_declaration_string(data_string):
        """ Return True if the stripped string declares a genome (``>`` plus a nonempty name).

        :param data_string: a string to check genome name declaration in
        :type data_string: ``str``
        :rtype: ``bool``
        """
        data_string = data_string.strip()
        return data_string.startswith(">") and len(data_string) > 1

    @staticmethod
    def is_comment_string(data_string):
        """ Return True if the stripped string is a comment (starts with ``#``).

        :param data_string: a string to check
        :type data_string: ``str``
        :rtype: ``bool``
        """
        return data_string.strip().startswith("#")

    @staticmethod
    def parse_genome_declaration_string(data_string):
        """ Parse a genome declaration string into a :class:`bg.genome.BGGenome`.

        :param data_string: a genome declaration string (``>`` + name)
        :type data_string: ``str``
        :rtype: :class:`bg.genome.BGGenome`
        """
        data_string = data_string.strip()
        return BGGenome(data_string[1:])

    @staticmethod
    def parse_data_string(data_string):
        """ Parse a gene-order data string into fragment type and oriented blocks.

        The earliest of ``$``/``@`` is taken as the terminator; everything after it is ignored.
        NOTE: comment signs do not work inside data strings.

        :param data_string: a string to retrieve gene order information from
        :type data_string: ``str``
        :return: (``$`` | ``@``, [(``+`` | ``-``, block_name), ...])
        :rtype: ``tuple(str, list((str, str), ...))``
        :raises ValueError: if no terminator is found, or no data precedes it, or a block name is empty
        """
        data_string = data_string.strip()
        linear_terminator_index = data_string.index("$") if "$" in data_string else -1
        circular_terminator_index = data_string.index("@") if "@" in data_string else -1
        if linear_terminator_index < 0 and circular_terminator_index < 0:
            raise ValueError("Invalid data string. No chromosome termination sign ($|@) found.")
        if linear_terminator_index == 0 or circular_terminator_index == 0:
            raise ValueError("Invalid data string. No data found before chromosome was terminated.")
        if linear_terminator_index < 0 or 0 < circular_terminator_index < linear_terminator_index:
            # either only a circular terminator is present,
            # or it occurs before the linear one
            chr_type = "@"
            terminator_index = circular_terminator_index
        else:
            chr_type = "$"
            terminator_index = linear_terminator_index
        # everything after the first fragment termination sign is omitted
        data = data_string[:terminator_index].strip()
        # genomic blocks are separated by whitespace
        split_data = data.split()
        blocks = []
        for block in split_data:
            # "+block" and "block" both mean positive orientation; locate where the name starts
            cut_index = 1 if block.startswith("-") or block.startswith("+") else 0
            if cut_index == 1 and len(block) == 1:
                # a bare sign has an empty name, so "+" and "-" are forbidden block names
                raise ValueError("Empty block name definition")
            blocks.append(("-" if block.startswith("-") else "+", block[cut_index:]))
        return chr_type, blocks

    @staticmethod
    def __assign_vertex_pair(block):
        """ Create the (tail, head) :class:`TaggedBlockVertex` pair for an oriented block.

        Vertices are named ``<root>t`` / ``<root>h``. Name entries after
        :attr:`BlockVertex.NAME_SEPARATOR` become (tag, value) pairs added to both vertices.

        :param block: (``+`` | ``-``, block_name)
        :type block: ``(str, str)``
        :return: (tail, head) for ``+`` orientation, (head, tail) for ``-``
        """
        sign, name = block
        data = name.split(BlockVertex.NAME_SEPARATOR)
        root_name, data = data[0], data[1:]
        tags = [entry.split(TaggedVertex.TAG_SEPARATOR) for entry in data]
        for tag_entry in tags:
            if len(tag_entry) == 1:
                # a tag without a value
                tag_entry.append(None)
            elif len(tag_entry) > 2:
                # rejoin values that themselves contained the tag separator
                tag_entry[1:] = [TaggedVertex.TAG_SEPARATOR.join(tag_entry[1:])]
        tail, head = root_name + "t", root_name + "h"
        tail, head = TaggedBlockVertex(tail), TaggedBlockVertex(head)
        tail.mate_vertex = head
        head.mate_vertex = tail
        for tag, value in tags:
            head.add_tag(tag, value)
            tail.add_tag(tag, value)
        return (tail, head) if sign == "+" else (head, tail)

    @staticmethod
    def get_edges_from_parsed_data(parsed_data):
        """ Translate parsed fragment data into vertex pairs (edges) for a BreakpointGraph.

        For linear fragments (``$``) artificial infinity vertices mark the fragment
        extremities; for circular fragments (``@``) the outermost vertices are joined.

        :param parsed_data: (``$`` | ``@``, [(``+`` | ``-``, block_name), ...])
        :type parsed_data: ``tuple(str, list((str, str), ...))``
        :return: a list of vertex pairs corresponding to edges
        :rtype: ``list((str, str), ...)``
        """
        chr_type, blocks = parsed_data
        vertices = []
        for block in blocks:
            # each block contributes a pair of extremity vertices
            v1, v2 = GRIMMReader.__assign_vertex_pair(block)
            vertices.append(v1)
            vertices.append(v2)
        if chr_type == "@":
            # circular fragment: rotate the list so the final pairing connects the
            # two outermost vertices, closing the cycle
            vertex = vertices.pop()
            vertices.insert(0, vertex)
        elif chr_type == "$":
            # linear fragment: add infinity vertices at both extremities; when an
            # extremity is a repeat vertex, the outermost pair is discarded and its
            # info is folded into the infinity vertex built for that extremity
            if vertices[0].is_repeat_vertex:
                left_iv_tags = sorted([(tag, value) if tag != "repeat" else (tag, BGVertex.get_vertex_name_root(vertices[1].name))
                                       for tag, value in vertices[1].tags])
                left_iv_root_name = BGVertex.get_vertex_name_root(vertices[2].name)
                vertices = vertices[2:]
            else:
                left_iv_tags = []
                left_iv_root_name = vertices[0].name
            if vertices[-1].is_repeat_vertex:
                right_iv_tags = sorted(
                    [(tag, value) if tag != "repeat" else (tag, BGVertex.get_vertex_name_root(vertices[-2].name))
                     for tag, value in vertices[-2].tags])
                right_iv_root_name = BGVertex.get_vertex_name_root(vertices[-3].name)
                vertices = vertices[:-2]
            else:
                right_iv_tags = []
                right_iv_root_name = BGVertex.get_vertex_name_root(vertices[-1].name)
            left_iv, right_iv = TaggedInfinityVertex(left_iv_root_name), TaggedInfinityVertex(right_iv_root_name)
            left_iv.tags = left_iv_tags
            right_iv.tags = right_iv_tags
            vertices.insert(0, left_iv)
            vertices.append(right_iv)
        return [(v1, v2) for v1, v2 in zip(vertices[::2], vertices[1::2])]

    @staticmethod
    def get_breakpoint_graph(stream, merge_edges=True):
        """ Build a :class:`bg.breakpoint_graph.BreakpointGraph` from GRIMM formatted input.

        :param stream: any iterable object where each iteration produces a ``str`` object
        :type stream: ``iterable`` over ``str``
        :param merge_edges: whether parallel edges in the produced graph shall be merged
        :type merge_edges: ``bool``
        :rtype: :class:`bg.breakpoint_graph.BreakpointGraph`
        """
        result = BreakpointGraph()
        current_genome = None
        fragment_data = {}
        for line in stream:
            line = line.strip()
            if len(line) == 0:
                # empty lines are skipped
                continue
            if GRIMMReader.is_genome_declaration_string(data_string=line):
                # all following gene order data (until EOF or the next declaration)
                # is attributed to this genome
                current_genome = GRIMMReader.parse_genome_declaration_string(data_string=line)
                fragment_data = {}
            elif GRIMMReader.is_comment_string(data_string=line):
                # special "# data :: fragment : ..." comments contribute per-fragment metadata
                if GRIMMReader.is_comment_data_string(string=line):
                    path, (key, value) = GRIMMReader.parse_comment_data_string(comment_data_string=line)
                    if len(path) > 0 and path[0] == "fragment":
                        add_to_dict_with_path(destination_dict=fragment_data, key=key, value=value, path=path)
                else:
                    continue
            elif current_genome is not None:
                # gene order data before the first genome declaration cannot be
                # attributed to anything and is thus omitted
                parsed_data = GRIMMReader.parse_data_string(data_string=line)
                edges = GRIMMReader.get_edges_from_parsed_data(parsed_data=parsed_data)
                for v1, v2 in edges:
                    edge_specific_data = {
                        "fragment": {
                            "forward_orientation": (v1, v2)
                        }
                    }
                    edge = BGEdge(vertex1=v1, vertex2=v2, multicolor=Multicolor(current_genome), data=deepcopy(fragment_data))
                    edge.update_data(source=edge_specific_data)
                    result.add_bgedge(bgedge=edge,
                                      merge=merge_edges)
        return result

    @classmethod
    def is_comment_data_string(cls, string):
        """ Return True for special comment strings of the form ``# data :: ...``. """
        s = string.strip()
        comment_string = cls.is_comment_string(data_string=s)
        s = s[1:]  # removing # from beginning
        split_result = s.split(cls.COMMENT_DATA_STRING_SEPARATOR)
        if len(split_result) < 2:
            return False
        specification = split_result[0]
        # NOTE: bitwise & on two bools is equivalent to logical `and` here
        return comment_string & ("data" == specification.strip())

    @classmethod
    def parse_comment_data_string(cls, comment_data_string):
        """ Parse ``# data :: p1 : p2 : key = value`` into (path, (key, value)).

        A payload without an ``=`` sign yields empty key and value strings.
        NOTE(review): a payload with more than one ``=`` will raise on unpacking — confirm inputs.
        """
        entries = list(map(lambda string: string.strip(), comment_data_string.split(cls.COMMENT_DATA_STRING_SEPARATOR)))[1:]
        data = list(map(lambda string: string.strip(), entries[0].split(cls.PATH_SEPARATOR_STRING)))
        path, key_value_entry = data[:-1], data[-1]
        key_value_entry_split = list(map(lambda string: string.strip(), key_value_entry.split("=")))
        if len(key_value_entry_split) < 2:
            key = ""
            value = ""
        else:
            key, value = key_value_entry_split
        return path, (key, value)
class GRIMMWriter(object):
    """Serializes genomes stored in a BreakpointGraph back into GRIMM formatted strings."""

    @staticmethod
    def get_blocks_in_grimm_from_breakpoint_graph(bg):
        """ Render each genome's block orders as GRIMM formatted strings.

        :param bg: a breakpoint graph that contains all the information
        :type bg: ``bg.breakpoint_graph.BreakpointGraph``
        :return: list of strings, which represent genomes present in the breakpoint graph
            as orders of blocks, compatible with the GRIMM format
        """
        grimm_lines = []
        for genome in bg.get_overall_set_of_colors():
            genome_graph = bg.get_genome_graph(color=genome)
            blocks_orders = genome_graph.get_blocks_order()[genome]
            if len(blocks_orders) > 0:
                grimm_lines.append(">{genome_name}".format(genome_name=genome.name))
                for chr_type, blocks_order in blocks_orders:
                    body = " ".join(value if sign == "+" else sign + value for sign, value in blocks_order)
                    grimm_lines.append(body + " " + chr_type)
        return grimm_lines

    @classmethod
    def print_genomes_as_grimm_blocks_orders(cls, bg, file_name):
        """Write the GRIMM block-order representation of ``bg`` to ``file_name``."""
        grimm_lines = cls.get_blocks_in_grimm_from_breakpoint_graph(bg=bg)
        with open(file_name, "wt") as destination:
            for grimm_string in grimm_lines:
                print(grimm_string, file=destination)

    @staticmethod
    def get_fragments_in_grimm_from_breakpoint_graph(bg):
        """Render each genome's fragment orders as GRIMM formatted strings."""
        grimm_lines = []
        for genome in bg.get_overall_set_of_colors():
            genome_graph = bg.get_genome_graph(color=genome)
            fragments_orders = genome_graph.get_fragments_orders()[genome]
            # genomes whose fragment orders are all empty are skipped entirely
            if len(fragments_orders) > 0 and any(len(order) > 0 for _, order in fragments_orders):
                grimm_lines.append(">{genome_name}".format(genome_name=genome.name))
                for chr_type, fragments_order in fragments_orders:
                    body = " ".join(value if sign == "+" else (sign + value) for sign, value in fragments_order)
                    grimm_lines.append(body + " " + chr_type)
        return grimm_lines
| |
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import re
import math
import random
# Header prepended to every generated file; the trailing [1:] drops the leading
# newline of the triple-quoted literal.
PREAMBLE = """
# WARNING: This file is auto-generated. Do NOT modify it manually, but rather
# modify the generating script file. Otherwise changes will be lost!
"""[1:]
class CaseGroup(object):
    """A named group of test cases; children may be cases or nested groups."""

    def __init__(self, name, description, children):
        # Store the group's identity and its nested content.
        self.name, self.description, self.children = name, description, children
class ShaderCase(object):
    """Base class for generated shader cases; subclasses supply the payload."""

    def __init__(self):
        # Intentionally empty: state is defined by subclasses.
        pass
# Registry of full case paths already written; writeCase asserts against it to
# catch duplicate case names.
g_processedCases = {}
def indentTextBlock(text, indent):
    """Indent every non-blank line of `text` by `indent` tab characters.

    Lines that are blank (or whitespace-only) stay empty so no trailing
    tabs are emitted.
    """
    prefix = indent * "\t"
    out = []
    for line in text.split("\n"):
        candidate = prefix + line
        out.append(candidate if candidate.strip() != "" else "")
    return "\n".join(out)
def writeCase(f, case, indent, prefix):
    """Recursively serialize a case tree into file `f`.

    Groups emit a group/end wrapper and recurse with a dotted prefix; leaf
    cases emit str(case). Duplicate full case paths trip an assert via the
    global g_processedCases registry.
    """
    fullPath = prefix + case.name
    print(" %s" % fullPath)
    if not isinstance(case, CaseGroup):
        # Leaf case: record the path so a duplicate is caught immediately.
        # \todo [petri] Fix hack.
        assert (fullPath not in g_processedCases)
        g_processedCases[fullPath] = None
        f.write(indentTextBlock(str(case) + "\n", indent))
        return
    f.write(indentTextBlock('group %s "%s"\n\n' % (case.name, case.description), indent))
    for child in case.children:
        writeCase(f, child, indent + 1, fullPath + ".")
    f.write(indentTextBlock("\nend # %s\n" % case.name, indent))
def writeAllCases(fileName, caseList):
    """Write PREAMBLE followed by every case in `caseList` to `fileName`.

    Fixes: the original used the Python-2-only `file()` builtin (removed in
    Python 3) and never closed the handle if writeCase raised; `open()` with a
    context manager guarantees the file is closed on all paths.
    """
    print(" %s.." % fileName)
    with open(fileName, "wb") as f:
        f.write(PREAMBLE + "\n")
        for case in caseList:
            writeCase(f, case, 0, "")
    print("done! (%d cases written)" % len(g_processedCases))
# Template operations.
def genValues(inputs, outputs):
res = []
for (name, values) in inputs:
res.append("input %s = [ %s ];" % (name, " | ".join([str(v) for v in values]).lower()))
for (name, values) in outputs:
res.append("output %s = [ %s ];" % (name, " | ".join([str(v) for v in values]).lower()))
return ("\n".join(res))
def fillTemplate(template, params):
    """Substitute ${{key}} placeholders in `template`.

    A placeholder alone on its line (optionally preceded by whitespace) is
    replaced by the value re-indented to that line's leading whitespace; a
    value of None drops the whole line. Inline placeholders are replaced
    verbatim.
    """
    result = template
    for key, value in params.items():
        match = re.search(r"^(\s*)\$\{\{%s\}\}$" % key, result, re.M)
        if match is None:
            # Inline occurrence: plain textual substitution.
            result = result.replace("${{%s}}" % key, value)
            continue
        begin, finish = match.start(0), match.end(0)
        indentation = match.group(1)
        if value is None:
            # Drop the placeholder line entirely (including its newline).
            result = result[:begin] + result[finish + 1:]
        else:
            block = "\n".join(["%s%s" % (indentation, ln) for ln in value.split("\n")])
            result = result[:begin] + block + result[finish:]
    return result
def shuffled(lst):
    """Return a copy of `lst` with its elements randomly permuted; the input
    list is left untouched."""
    permuted = lst[:]
    random.shuffle(permuted)
    return permuted
def repeatToLength(lst, toLength):
    """Cycle `lst` until exactly `toLength` elements and return the new list.

    Fixes: the original used `toLength / len(lst)`, which under Python 3 is
    float division and makes the list multiplication a TypeError; floor
    division (`//`) is correct on both Python 2 and 3.
    """
    return (toLength // len(lst)) * lst + lst[:toLength % len(lst)]
# Helpers to convert a list of Scalar/Vec values into another type.
# Each helper maps the matching element-wise conversion over the whole list;
# the to*Vec*/to*Mat* variants delegate to the per-value conversion methods.
def toFloat(lst): return [Scalar(float(v.x)) for v in lst]
def toInt(lst): return [Scalar(int(v.x)) for v in lst]
def toUint(lst): return [Uint(int(v.x)) for v in lst]
def toBool(lst): return [Scalar(bool(v.x)) for v in lst]
def toVec4(lst): return [v.toFloat().toVec4() for v in lst]
def toVec3(lst): return [v.toFloat().toVec3() for v in lst]
def toVec2(lst): return [v.toFloat().toVec2() for v in lst]
def toIVec4(lst): return [v.toInt().toVec4() for v in lst]
def toIVec3(lst): return [v.toInt().toVec3() for v in lst]
def toIVec2(lst): return [v.toInt().toVec2() for v in lst]
def toBVec4(lst): return [v.toBool().toVec4() for v in lst]
def toBVec3(lst): return [v.toBool().toVec3() for v in lst]
def toBVec2(lst): return [v.toBool().toVec2() for v in lst]
def toUVec4(lst): return [v.toUint().toUVec4() for v in lst]
def toUVec3(lst): return [v.toUint().toUVec3() for v in lst]
def toUVec2(lst): return [v.toUint().toUVec2() for v in lst]
def toMat2(lst): return [v.toMat2() for v in lst]
def toMat2x3(lst): return [v.toMat2x3() for v in lst]
def toMat2x4(lst): return [v.toMat2x4() for v in lst]
def toMat3x2(lst): return [v.toMat3x2() for v in lst]
def toMat3(lst): return [v.toMat3() for v in lst]
def toMat3x4(lst): return [v.toMat3x4() for v in lst]
def toMat4x2(lst): return [v.toMat4x2() for v in lst]
def toMat4x3(lst): return [v.toMat4x3() for v in lst]
def toMat4(lst): return [v.toMat4() for v in lst]
# Random value generation.
class GenRandom(object):
    """Generators of random Vec4 test inputs.

    NOTE(review): uses ``xrange`` — Python 2 only.
    """
    def __init__(self):
        pass

    def uniformVec4(self, count, mn, mx):
        # Uniform floats in [mn, mx]; the x components of the first three
        # results are pinned to the lower bound, upper bound and midpoint so
        # boundary values are always present in the sample.
        ret = [Vec4(random.uniform(mn, mx), random.uniform(mn, mx), random.uniform(mn, mx), random.uniform(mn, mx)) for x in xrange(count)]
        ret[0].x = mn
        ret[1].x = mx
        ret[2].x = (mn + mx) * 0.5
        return ret

    def uniformBVec4(self, count):
        # Random booleans; the x components of the first two results are
        # pinned to True and False so both values always appear.
        ret = [Vec4(random.random() >= 0.5, random.random() >= 0.5, random.random() >= 0.5, random.random() >= 0.5) for x in xrange(count)]
        ret[0].x = True
        ret[1].x = False
        return ret

    # def uniform(self,
# Math operating on Scalar/Vector types.
def glslSign(a):
    """GLSL sign(): 0.0 for zero, +1.0 for positive, -1.0 for negative."""
    if a == 0:
        return 0.0
    return +1.0 if a > 0.0 else -1.0

def glslMod(x, y):
    """GLSL mod(): x - y*floor(x/y)."""
    return x - y * math.floor(x / y)

def glslClamp(x, mn, mx):
    """GLSL clamp(): x limited to the range [mn, mx]."""
    if x < mn:
        return mn
    return mx if x > mx else x
class GenMath(object):
    """GLSL-style math operations lifted onto Scalar/Vec values."""

    # adapters turning plain float functions into component-wise operations
    @staticmethod
    def unary(func): return lambda val: val.applyUnary(func)
    @staticmethod
    def binary(func): return lambda a, b: (b.expandVec(a)).applyBinary(func, a.expandVec(b))

    # GLSL builtins expressed via component-wise application
    @staticmethod
    def frac(val): return val.applyUnary(lambda x: x - math.floor(x))
    @staticmethod
    def exp2(val): return val.applyUnary(lambda x: math.pow(2.0, x))
    @staticmethod
    def log2(val): return val.applyUnary(lambda x: math.log(x, 2.0))
    @staticmethod
    def rsq(val): return val.applyUnary(lambda x: 1.0 / math.sqrt(x))
    @staticmethod
    def sign(val): return val.applyUnary(glslSign)
    @staticmethod
    def isEqual(a, b): return Scalar(a.isEqual(b))
    @staticmethod
    def isNotEqual(a, b): return Scalar(not a.isEqual(b))
    @staticmethod
    def step(a, b): return (b.expandVec(a)).applyBinary(lambda edge, x: [1.0, 0.0][x < edge], a.expandVec(b))

    # geometric and boolean operations delegate to the value's own methods
    @staticmethod
    def length(a): return a.length()
    @staticmethod
    def distance(a, b): return a.distance(b)
    @staticmethod
    def dot(a, b): return a.dot(b)
    @staticmethod
    def cross(a, b): return a.cross(b)
    @staticmethod
    def normalize(a): return a.normalize()
    @staticmethod
    def boolAny(a): return a.boolAny()
    @staticmethod
    def boolAll(a): return a.boolAll()
    @staticmethod
    def boolNot(a): return a.boolNot()
    @staticmethod
    def abs(a): return a.abs()
# ..
class Scalar(object):
    """A single bool/int/float value exposing the same protocol as the Vec types."""

    def __init__(self, x):
        self.x = x

    # element-wise application helpers (a Scalar is its own single element)
    def applyUnary(self, func): return Scalar(func(self.x))
    def applyBinary(self, func, other): return Scalar(func(self.x, other.x))
    def isEqual(self, other): assert isinstance(other, Scalar); return (self.x == other.x)
    def expandVec(self, val): return val
    def toScalar(self): return Scalar(self.x)

    # widening conversions replicate x into every component
    def toVec2(self): return Vec2(self.x, self.x)
    def toVec3(self): return Vec3(self.x, self.x, self.x)
    def toVec4(self): return Vec4(self.x, self.x, self.x, self.x)
    def toUVec2(self): return UVec2(self.x, self.x)
    def toUVec3(self): return UVec3(self.x, self.x, self.x)
    def toUVec4(self): return UVec4(self.x, self.x, self.x, self.x)

    # matrix conversions delegate to Mat.fromScalar (defined elsewhere in this file)
    def toMat2(self): return Mat.fromScalar(2, 2, float(self.x))
    def toMat2x3(self): return Mat.fromScalar(2, 3, float(self.x))
    def toMat2x4(self): return Mat.fromScalar(2, 4, float(self.x))
    def toMat3x2(self): return Mat.fromScalar(3, 2, float(self.x))
    def toMat3(self): return Mat.fromScalar(3, 3, float(self.x))
    def toMat3x4(self): return Mat.fromScalar(3, 4, float(self.x))
    def toMat4x2(self): return Mat.fromScalar(4, 2, float(self.x))
    def toMat4x3(self): return Mat.fromScalar(4, 3, float(self.x))
    def toMat4(self): return Mat.fromScalar(4, 4, float(self.x))

    # payload type conversions
    def toFloat(self): return Scalar(float(self.x))
    def toInt(self): return Scalar(int(self.x))
    def toUint(self): return Uint(int(self.x))
    def toBool(self): return Scalar(bool(self.x))

    def getNumScalars(self): return 1
    def getScalars(self): return [self.x]

    def typeString(self):
        # GLSL type name inferred from the payload's Python type
        if isinstance(self.x, bool):
            return "bool"
        elif isinstance(self.x, int):
            return "int"
        elif isinstance(self.x, float):
            return "float"
        else:
            assert False

    def vec4Swizzle(self):
        return ""

    def __str__(self):
        # lower() makes booleans print as GLSL true/false
        return str(self.x).lower()

    def __float__(self):
        return float(self.x)

    def length(self):
        return Scalar(abs(self.x))

    def distance(self, v):
        assert isinstance(v, Scalar)
        return Scalar(abs(self.x - v.x))

    def dot(self, v):
        assert isinstance(v, Scalar)
        return Scalar(self.x * v.x)

    def normalize(self):
        return Scalar(glslSign(self.x))

    def abs(self):
        # abs() of a bool is the identity
        if isinstance(self.x, bool):
            return Scalar(self.x)
        else:
            return Scalar(abs(self.x))

    def __neg__(self):
        return Scalar(-self.x)

    def __add__(self, val):
        assert isinstance(val, Scalar)
        return Scalar(self.x + val.x)

    def __sub__(self, val):
        return self + (-val)

    def __mul__(self, val):
        # scalar * vector broadcasts over the vector's components
        if isinstance(val, Scalar):
            return Scalar(self.x * val.x)
        elif isinstance(val, Vec2):
            return Vec2(self.x * val.x, self.x * val.y)
        elif isinstance(val, Vec3):
            return Vec3(self.x * val.x, self.x * val.y, self.x * val.z)
        elif isinstance(val, Vec4):
            return Vec4(self.x * val.x, self.x * val.y, self.x * val.z, self.x * val.w)
        else:
            assert False

    def __div__(self, val):
        # Python 2 division operator (no __truediv__ defined)
        if isinstance(val, Scalar):
            return Scalar(self.x / val.x)
        elif isinstance(val, Vec2):
            return Vec2(self.x / val.x, self.x / val.y)
        elif isinstance(val, Vec3):
            return Vec3(self.x / val.x, self.x / val.y, self.x / val.z)
        elif isinstance(val, Vec4):
            return Vec4(self.x / val.x, self.x / val.y, self.x / val.z, self.x / val.w)
        else:
            assert False
class Uint(Scalar):
    """A non-negative scalar that renders as GLSL 'uint'.

    Arithmetic delegates to Scalar and re-wraps the result as unsigned.
    """

    def __init__(self, x):
        assert x >= 0
        self.x = x

    def typeString(self):
        return "uint"

    def abs(self):
        return super(Uint, self).abs().toUint()

    def __neg__(self):
        return super(Uint, self).__neg__().toUint()

    def __add__(self, val):
        return super(Uint, self).__add__(val).toUint()

    def __sub__(self, val):
        return self + (-val)

    def __mul__(self, val):
        return super(Uint, self).__mul__(val).toUint()

    def __div__(self, val):
        return super(Uint, self).__div__(val).toUint()
class Vec(object):
    """Abstract base for vector values; subclasses define the components."""

    @staticmethod
    def fromScalarList(lst):
        """Wrap 1-4 raw scalars in the narrowest Scalar/VecN type."""
        assert 1 <= len(lst) <= 4
        if len(lst) == 1:
            return Scalar(lst[0])
        if len(lst) == 2:
            return Vec2(lst[0], lst[1])
        if len(lst) == 3:
            return Vec3(lst[0], lst[1], lst[2])
        return Vec4(lst[0], lst[1], lst[2], lst[3])

    def isEqual(self, other):
        assert isinstance(other, Vec)
        return self.getScalars() == other.getScalars()

    def length(self):
        # Euclidean norm via the dot product.
        return Scalar(math.sqrt(self.dot(self).x))

    def normalize(self):
        return self * Scalar(1.0 / self.length().x)

    def swizzle(self, indexList):
        # NOTE: map() kept deliberately — under Python 2 it yields the list
        # that fromScalarList expects.
        inScalars = self.getScalars()
        outScalars = map(lambda ndx: inScalars[ndx], indexList)
        return Vec.fromScalarList(outScalars)

    def __init__(self):
        pass

    def __eq__(self, other):
        return self.isEqual(other)

    def __ne__(self, other):
        return not self.isEqual(other)
class Vec2(Vec):
    """A two-component vector; both components must share one type (bool/int/float)."""

    def __init__(self, x, y):
        # components must be of the same Python type
        assert(x.__class__ == y.__class__)
        self.x = x
        self.y = y

    # element-wise application helpers
    def applyUnary(self, func): return Vec2(func(self.x), func(self.y))
    def applyBinary(self, func, other): return Vec2(func(self.x, other.x), func(self.y, other.y))
    def expandVec(self, val): return val.toVec2()

    # conversions; widening pads new components with 0.0
    def toScalar(self): return Scalar(self.x)
    def toVec2(self): return Vec2(self.x, self.y)
    def toVec3(self): return Vec3(self.x, self.y, 0.0)
    def toVec4(self): return Vec4(self.x, self.y, 0.0, 0.0)
    def toUVec2(self): return UVec2(self.x, self.y)
    def toUVec3(self): return UVec3(self.x, self.y, 0.0)
    def toUVec4(self): return UVec4(self.x, self.y, 0.0, 0.0)
    def toMat2(self): return Mat2(float(self.x), 0.0, 0.0, float(self.y));
    def toFloat(self): return Vec2(float(self.x), float(self.y))
    def toInt(self): return Vec2(int(self.x), int(self.y))
    def toUint(self): return UVec2(int(self.x), int(self.y))
    def toBool(self): return Vec2(bool(self.x), bool(self.y))

    def getNumScalars(self): return 2
    def getScalars(self): return [self.x, self.y]

    def typeString(self):
        # GLSL type name inferred from the component type
        if isinstance(self.x, bool):
            return "bvec2"
        elif isinstance(self.x, int):
            return "ivec2"
        elif isinstance(self.x, float):
            return "vec2"
        else:
            assert False

    def vec4Swizzle(self):
        return ".xyxy"

    def __str__(self):
        # GLSL constructor literal for this value
        if isinstance(self.x, bool):
            return "bvec2(%s, %s)" % (str(self.x).lower(), str(self.y).lower())
        elif isinstance(self.x, int):
            return "ivec2(%i, %i)" % (self.x, self.y)
        elif isinstance(self.x, float):
            return "vec2(%s, %s)" % (self.x, self.y)
        else:
            assert False

    def distance(self, v):
        assert isinstance(v, Vec2)
        return (self - v).length()

    def dot(self, v):
        assert isinstance(v, Vec2)
        return Scalar(self.x*v.x + self.y*v.y)

    def abs(self):
        # abs() of a bool vector is the identity
        if isinstance(self.x, bool):
            return Vec2(self.x, self.y)
        else:
            return Vec2(abs(self.x), abs(self.y))

    def __neg__(self):
        return Vec2(-self.x, -self.y)

    def __add__(self, val):
        # NOTE(review): the Scalar branch adds the Scalar *object* to raw
        # components (no .x access), unlike __mul__/__div__ — confirm whether
        # Vec2 + Scalar is ever exercised before relying on it.
        if isinstance(val, Scalar):
            return Vec2(self.x + val, self.y + val)
        elif isinstance(val, Vec2):
            return Vec2(self.x + val.x, self.y + val.y)
        else:
            assert False

    def __sub__(self, val):
        return self + (-val)

    def __mul__(self, val):
        # a Scalar multiplier is broadcast to both components
        if isinstance(val, Scalar):
            val = val.toVec2()
        assert isinstance(val, Vec2)
        return Vec2(self.x * val.x, self.y * val.y)

    def __div__(self, val):
        # Python 2 division operator (no __truediv__ defined)
        if isinstance(val, Scalar):
            return Vec2(self.x / val.x, self.y / val.x)
        else:
            assert isinstance(val, Vec2)
            return Vec2(self.x / val.x, self.y / val.y)

    def boolAny(self): return Scalar(self.x or self.y)
    def boolAll(self): return Scalar(self.x and self.y)
    def boolNot(self): return Vec2(not self.x, not self.y)
class UVec2(Vec2):
    """A two-component vector of non-negative ints, rendered as GLSL 'uvec2'."""

    def __init__(self, x, y):
        # Both components must be non-negative integers.
        for component in (x, y):
            assert isinstance(component, int)
            assert component >= 0
        super(UVec2, self).__init__(x, y)

    def typeString(self):
        return "uvec2"

    def __str__(self):
        return "uvec2(%i, %i)" % (self.x, self.y)

    def abs(self):
        return super(UVec2, self).abs().toUint()
class Vec3(Vec):
    """3-component vector; all components share one scalar type (bool/int/float)."""
    def __init__(self, x, y, z):
        assert((x.__class__ == y.__class__) and (x.__class__ == z.__class__))
        self.x = x
        self.y = y
        self.z = z
    def applyUnary(self, func): return Vec3(func(self.x), func(self.y), func(self.z))
    def applyBinary(self, func, other): return Vec3(func(self.x, other.x), func(self.y, other.y), func(self.z, other.z))
    def expandVec(self, val): return val.toVec3()
    def toScalar(self): return Scalar(self.x)
    def toVec2(self): return Vec2(self.x, self.y)
    def toVec3(self): return Vec3(self.x, self.y, self.z)
    # NOTE(review): widening conversions pad with float 0.0 regardless of the
    # component type; confirm they are only invoked on float vectors.
    def toVec4(self): return Vec4(self.x, self.y, self.z, 0.0)
    def toUVec2(self): return UVec2(self.x, self.y)
    def toUVec3(self): return UVec3(self.x, self.y, self.z)
    def toUVec4(self): return UVec4(self.x, self.y, self.z, 0.0)
    def toMat3(self): return Mat3(float(self.x), 0.0, 0.0, 0.0, float(self.y), 0.0, 0.0, 0.0, float(self.z))
    def toFloat(self): return Vec3(float(self.x), float(self.y), float(self.z))
    def toInt(self): return Vec3(int(self.x), int(self.y), int(self.z))
    def toUint(self): return UVec3(int(self.x), int(self.y), int(self.z))
    def toBool(self): return Vec3(bool(self.x), bool(self.y), bool(self.z))
    def getNumScalars(self): return 3
    def getScalars(self): return [self.x, self.y, self.z]
    def typeString(self):
        # bool must be tested before int: bool is a subclass of int.
        if isinstance(self.x, bool):
            return "bvec3"
        elif isinstance(self.x, int):
            return "ivec3"
        elif isinstance(self.x, float):
            return "vec3"
        else:
            assert False
    def vec4Swizzle(self):
        # Swizzle suffix used to widen a vec3 expression to four components.
        return ".xyzx"
    def __str__(self):
        """Render as a GLSL constructor literal for the component type."""
        if isinstance(self.x, bool):
            return "bvec3(%s, %s, %s)" % (str(self.x).lower(), str(self.y).lower(), str(self.z).lower())
        elif isinstance(self.x, int):
            return "ivec3(%i, %i, %i)" % (self.x, self.y, self.z)
        elif isinstance(self.x, float):
            return "vec3(%s, %s, %s)" % (self.x, self.y, self.z)
        else:
            assert False
    def distance(self, v):
        """Euclidean distance between this vector and v."""
        assert isinstance(v, Vec3)
        return (self - v).length()
    def dot(self, v):
        """3-D dot product with v, returned as a Scalar."""
        assert isinstance(v, Vec3)
        return Scalar(self.x*v.x + self.y*v.y + self.z*v.z)
    def cross(self, v):
        """3-D cross product with v."""
        assert isinstance(v, Vec3)
        return Vec3(self.y*v.z - v.y*self.z,
                    self.z*v.x - v.z*self.x,
                    self.x*v.y - v.x*self.y)
    def abs(self):
        # abs() is the identity for boolean vectors.
        if isinstance(self.x, bool):
            return Vec3(self.x, self.y, self.z)
        else:
            return Vec3(abs(self.x), abs(self.y), abs(self.z))
    def __neg__(self):
        return Vec3(-self.x, -self.y, -self.z)
    def __add__(self, val):
        if isinstance(val, Scalar):
            # Bug fix: broadcast the scalar over all three components — the z
            # term was previously dropped, making the Vec3 constructor fail.
            return Vec3(self.x + val, self.y + val, self.z + val)
        elif isinstance(val, Vec3):
            return Vec3(self.x + val.x, self.y + val.y, self.z + val.z)
        else:
            assert False
    def __sub__(self, val):
        # Implemented via addition with the negated operand.
        return self + (-val)
    def __mul__(self, val):
        # A Scalar operand is broadcast to a Vec3 before the component product.
        if isinstance(val, Scalar):
            val = val.toVec3()
        assert isinstance(val, Vec3)
        return Vec3(self.x * val.x, self.y * val.y, self.z * val.z)
    def __div__(self, val):
        # Python-2 division operator.
        if isinstance(val, Scalar):
            # Scalar divisor's value is read through .x.
            return Vec3(self.x / val.x, self.y / val.x, self.z / val.x)
        elif isinstance(val, Vec3):
            return Vec3(self.x / val.x, self.y / val.y, self.z / val.z)
        else:
            assert False
    # Logical reductions / negation mirroring GLSL any(), all(), not().
    def boolAny(self): return Scalar(self.x or self.y or self.z)
    def boolAll(self): return Scalar(self.x and self.y and self.z)
    def boolNot(self): return Vec3(not self.x, not self.y, not self.z)
class UVec3(Vec3):
    """3-component unsigned integer vector ("uvec3" in GLSL)."""
    def __init__(self, x, y, z):
        # Unsigned vectors only accept non-negative Python ints.
        for comp in (x, y, z):
            assert isinstance(comp, int)
            assert comp >= 0
        Vec3.__init__(self, x, y, z)
    def typeString(self):
        return "uvec3"
    def __str__(self):
        return "uvec3(%i, %i, %i)" % (self.x, self.y, self.z)
    def abs(self):
        # Components are already non-negative; delegate and re-tag as unsigned.
        return Vec3.abs(self).toUint()
class Vec4(Vec):
    """4-component vector; all components share one scalar type (bool/int/float)."""
    def __init__(self, x, y, z, w):
        assert((x.__class__ == y.__class__) and (x.__class__ == z.__class__) and (x.__class__ == w.__class__))
        self.x = x
        self.y = y
        self.z = z
        self.w = w
    def applyUnary(self, func): return Vec4(func(self.x), func(self.y), func(self.z), func(self.w))
    def applyBinary(self, func, other): return Vec4(func(self.x, other.x), func(self.y, other.y), func(self.z, other.z), func(self.w, other.w))
    def expandVec(self, val): return val.toVec4()
    def toScalar(self): return Scalar(self.x)
    def toVec2(self): return Vec2(self.x, self.y)
    def toVec3(self): return Vec3(self.x, self.y, self.z)
    def toVec4(self): return Vec4(self.x, self.y, self.z, self.w)
    def toUVec2(self): return UVec2(self.x, self.y)
    def toUVec3(self): return UVec3(self.x, self.y, self.z)
    def toUVec4(self): return UVec4(self.x, self.y, self.z, self.w)
    def toMat2(self): return Mat2(float(self.x), float(self.y), float(self.z), float(self.w))
    def toMat4(self): return Mat4(float(self.x), 0.0, 0.0, 0.0, 0.0, float(self.y), 0.0, 0.0, 0.0, 0.0, float(self.z), 0.0, 0.0, 0.0, 0.0, float(self.w))
    def toFloat(self): return Vec4(float(self.x), float(self.y), float(self.z), float(self.w))
    def toInt(self): return Vec4(int(self.x), int(self.y), int(self.z), int(self.w))
    def toUint(self): return UVec4(int(self.x), int(self.y), int(self.z), int(self.w))
    def toBool(self): return Vec4(bool(self.x), bool(self.y), bool(self.z), bool(self.w))
    def getNumScalars(self): return 4
    def getScalars(self): return [self.x, self.y, self.z, self.w]
    def typeString(self):
        # bool must be tested before int: bool is a subclass of int.
        if isinstance(self.x, bool):
            return "bvec4"
        elif isinstance(self.x, int):
            return "ivec4"
        elif isinstance(self.x, float):
            return "vec4"
        else:
            assert False
    def vec4Swizzle(self):
        # Already four components — no swizzle needed.
        return ""
    def __str__(self):
        """Render as a GLSL constructor literal for the component type."""
        if isinstance(self.x, bool):
            return "bvec4(%s, %s, %s, %s)" % (str(self.x).lower(), str(self.y).lower(), str(self.z).lower(), str(self.w).lower())
        elif isinstance(self.x, int):
            return "ivec4(%i, %i, %i, %i)" % (self.x, self.y, self.z, self.w)
        elif isinstance(self.x, float):
            return "vec4(%s, %s, %s, %s)" % (self.x, self.y, self.z, self.w)
        else:
            assert False
    def distance(self, v):
        """Euclidean distance between this vector and v."""
        assert isinstance(v, Vec4)
        return (self - v).length()
    def dot(self, v):
        """4-D dot product with v, returned as a Scalar."""
        assert isinstance(v, Vec4)
        return Scalar(self.x*v.x + self.y*v.y + self.z*v.z + self.w*v.w)
    def abs(self):
        # abs() is the identity for boolean vectors.
        if isinstance(self.x, bool):
            return Vec4(self.x, self.y, self.z, self.w)
        else:
            return Vec4(abs(self.x), abs(self.y), abs(self.z), abs(self.w))
    def __neg__(self):
        return Vec4(-self.x, -self.y, -self.z, -self.w)
    def __add__(self, val):
        if isinstance(val, Scalar):
            # Bug fix: broadcast the scalar over all four components — this
            # previously built a two-argument Vec3, which cannot succeed.
            return Vec4(self.x + val, self.y + val, self.z + val, self.w + val)
        elif isinstance(val, Vec4):
            return Vec4(self.x + val.x, self.y + val.y, self.z + val.z, self.w + val.w)
        else:
            assert False
    def __sub__(self, val):
        # Implemented via addition with the negated operand.
        return self + (-val)
    def __mul__(self, val):
        # A Scalar operand is broadcast to a Vec4 before the component product.
        if isinstance(val, Scalar):
            val = val.toVec4()
        assert isinstance(val, Vec4)
        return Vec4(self.x * val.x, self.y * val.y, self.z * val.z, self.w * val.w)
    def __div__(self, val):
        # Python-2 division operator.
        if isinstance(val, Scalar):
            # Scalar divisor's value is read through .x.
            return Vec4(self.x / val.x, self.y / val.x, self.z / val.x, self.w / val.x)
        elif isinstance(val, Vec4):
            return Vec4(self.x / val.x, self.y / val.y, self.z / val.z, self.w / val.w)
        else:
            assert False
    # Logical reductions / negation mirroring GLSL any(), all(), not().
    def boolAny(self): return Scalar(self.x or self.y or self.z or self.w)
    def boolAll(self): return Scalar(self.x and self.y and self.z and self.w)
    def boolNot(self): return Vec4(not self.x, not self.y, not self.z, not self.w)
class UVec4(Vec4):
    """4-component unsigned integer vector ("uvec4" in GLSL)."""
    def __init__(self, x, y, z, w):
        # Unsigned vectors only accept non-negative Python ints.
        for comp in (x, y, z, w):
            assert isinstance(comp, int)
            assert comp >= 0
        Vec4.__init__(self, x, y, z, w)
    def typeString(self):
        return "uvec4"
    def __str__(self):
        return "uvec4(%i, %i, %i, %i)" % (self.x, self.y, self.z, self.w)
    def abs(self):
        # Components are already non-negative; delegate and re-tag as unsigned.
        return Vec4.abs(self).toUint()
# \note Column-major storage.
class Mat(object):
    """Dense numCols x numRows matrix with column-major scalar storage."""
    def __init__(self, numCols, numRows, scalars):
        assert len(scalars) == numRows*numCols
        self.numCols = numCols
        self.numRows = numRows
        self.scalars = scalars
    @staticmethod
    def fromScalar(numCols, numRows, scalar):
        """Build a matrix with `scalar` on the diagonal and 0.0 elsewhere."""
        scalars = []
        for col in range(0, numCols):
            for row in range(0, numRows):
                scalars.append(scalar if col == row else 0.0)
        return Mat(numCols, numRows, scalars)
    @staticmethod
    def identity(numCols, numRows):
        return Mat.fromScalar(numCols, numRows, 1.0)
    def get(self, colNdx, rowNdx):
        assert 0 <= colNdx and colNdx < self.numCols
        assert 0 <= rowNdx and rowNdx < self.numRows
        # Column-major: a column's rows are contiguous.
        return self.scalars[colNdx*self.numRows + rowNdx]
    def set(self, colNdx, rowNdx, scalar):
        assert 0 <= colNdx and colNdx < self.numCols
        assert 0 <= rowNdx and rowNdx < self.numRows
        self.scalars[colNdx*self.numRows + rowNdx] = scalar
    def toMatrix(self, numCols, numRows):
        """Resize: copy the overlapping region onto a fresh identity matrix."""
        res = Mat.identity(numCols, numRows)
        for col in range(0, min(self.numCols, numCols)):
            for row in range(0, min(self.numRows, numRows)):
                res.set(col, row, self.get(col, row))
        return res
    def toMat2(self): return self.toMatrix(2, 2)
    def toMat2x3(self): return self.toMatrix(2, 3)
    def toMat2x4(self): return self.toMatrix(2, 4)
    def toMat3x2(self): return self.toMatrix(3, 2)
    def toMat3(self): return self.toMatrix(3, 3)
    def toMat3x4(self): return self.toMatrix(3, 4)
    def toMat4x2(self): return self.toMatrix(4, 2)
    def toMat4x3(self): return self.toMatrix(4, 3)
    def toMat4(self): return self.toMatrix(4, 4)
    def typeString(self):
        # GLSL naming: square matrices are "matN", others "matCxR".
        if self.numRows == self.numCols:
            return "mat%d" % self.numRows
        else:
            return "mat%dx%d" % (self.numCols, self.numRows)
    def __str__(self):
        return "%s(%s)" % (self.typeString(), ", ".join(["%s" % s for s in self.scalars]))
    def isTypeEqual(self, other):
        return isinstance(other, Mat) and self.numRows == other.numRows and self.numCols == other.numCols
    def isEqual(self, other):
        assert self.isTypeEqual(other)
        return (self.scalars == other.scalars)
    def compMul(self, val):
        """Component-wise (Hadamard) product with a same-shaped matrix."""
        assert self.isTypeEqual(val)
        # Bug fixes: index the scalar lists (they were being *called*), and
        # pass (numCols, numRows) in the order Mat.__init__ expects — the
        # previous swapped order produced a wrong shape for non-square mats.
        return Mat(self.numCols, self.numRows,
                   [self.scalars[i] * val.scalars[i]
                    for i in range(self.numRows*self.numCols)])
class Mat2(Mat):
    # 2x2 matrix; constructor takes row-major arguments (m[row][col] order)
    # and reorders them into the column-major storage Mat expects.
    def __init__(self, m00, m01, m10, m11):
        Mat.__init__(self, 2, 2, [m00, m10, m01, m11])
class Mat3(Mat):
    # 3x3 matrix; constructor takes row-major arguments and reorders them
    # into the column-major storage Mat expects (each bracketed row below
    # is one stored column).
    def __init__(self, m00, m01, m02, m10, m11, m12, m20, m21, m22):
        Mat.__init__(self, 3, 3, [m00, m10, m20,
                                  m01, m11, m21,
                                  m02, m12, m22])
class Mat4(Mat):
    # 4x4 matrix; constructor takes row-major arguments and reorders them
    # into the column-major storage Mat expects (each bracketed row below
    # is one stored column).
    def __init__(self, m00, m01, m02, m03, m10, m11, m12, m13, m20, m21, m22, m23, m30, m31, m32, m33):
        Mat.__init__(self, 4, 4, [m00, m10, m20, m30,
                                  m01, m11, m21, m31,
                                  m02, m12, m22, m32,
                                  m03, m13, m23, m33])
| |
"""seahub/api2/views.py::Repo api tests.
"""
import json
from django.urls import reverse
from seahub.test_utils import BaseTestCase
from seahub.base.accounts import User
from seahub.share.models import FileShare, UploadLinkShare
from tests.common.utils import randstring
from seaserv import seafile_api, ccnet_api
class RepoOwnerTest(BaseTestCase):
    """Tests for the ``api2-repo-owner`` endpoint: reading a repo's owner
    (GET) and transferring ownership (PUT), including how user shares,
    group shares and share/upload links behave after a transfer.
    """

    def setUp(self):
        # Cache identifiers used across the tests, and create one download
        # link and one upload link on the repo so the link-transfer tests
        # have fixtures to inspect.
        self.user_name = self.user.username
        self.user_repo_id = self.repo.id
        self.group_id = self.group.id
        self.fs_share = FileShare.objects.create_dir_link(self.user.username,
                self.user_repo_id, self.folder, None, None)
        self.fs_upload = UploadLinkShare.objects.create_upload_link_share(self.user.username,
                self.user_repo_id, self.folder, None, None)

    def tearDown(self):
        self.remove_repo()

    def test_can_get_owner(self):
        # Owner can read their own repo's owner field.
        self.login_as(self.user)
        resp = self.client.get(reverse("api2-repo-owner", args=[self.user_repo_id]))
        json_resp = json.loads(resp.content)
        assert json_resp['owner'] == self.user.email

    def test_can_not_get_if_not_repo_owner(self):
        # A non-owner (admin here) is rejected with 403.
        self.login_as(self.admin)
        resp = self.client.get(reverse("api2-repo-owner", args=[self.user_repo_id]))
        self.assertEqual(403, resp.status_code)

    def test_can_not_transfer_repo_to_owner(self):
        # Transferring a repo to its current owner is a 400 bad request.
        self.login_as(self.user)
        url = reverse("api2-repo-owner", args=[self.user_repo_id])
        data = 'owner=%s' % self.user.email
        resp = self.client.put(url, data, 'application/x-www-form-urlencoded')
        self.assertEqual(400, resp.status_code)

    def test_can_transfer_repo(self):
        # Happy path: owner transfers the repo to admin.
        self.login_as(self.user)
        url = reverse("api2-repo-owner", args=[self.user_repo_id])
        data = 'owner=%s' % self.admin.email
        resp = self.client.put(url, data, 'application/x-www-form-urlencoded')
        self.assertEqual(200, resp.status_code)

    def test_reshare_to_user_after_transfer_repo(self):
        # A user-to-user share survives a transfer to a third party.
        tmp_user = 'tmp_user@email.com'
        User.objects.create_user(tmp_user)

        # share user's repo to tmp_user with 'rw' permission
        seafile_api.share_repo(self.user_repo_id, self.user.username,
                tmp_user, 'rw')
        assert seafile_api.check_permission_by_path(self.user_repo_id,
                '/', tmp_user) == 'rw'

        self.login_as(self.user)
        url = reverse("api2-repo-owner", args=[self.user_repo_id])
        data = 'owner=%s' % self.admin.email
        resp = self.client.put(url, data, 'application/x-www-form-urlencoded')
        self.assertEqual(200, resp.status_code)

        # The tmp_user share must still grant 'rw' after the transfer.
        assert seafile_api.check_permission_by_path(self.user_repo_id,
                '/', tmp_user) == 'rw'

    def test_not_reshare_to_user_after_transfer_repo(self):
        # A share *to the new owner* must be dropped by the transfer
        # (the new owner owns the repo outright, no share needed).

        # remove all share
        shared_repos = seafile_api.get_share_in_repo_list(self.admin.username, -1, -1)
        for repo in shared_repos:
            seafile_api.remove_share(repo.repo_id, self.admin.username,
                    self.user.username)
            seafile_api.remove_share(repo.repo_id, self.user.username,
                    self.admin.username)

        # share user's repo to admin with 'rw' permission
        seafile_api.share_repo(self.user_repo_id, self.user.username,
                self.admin.username, 'rw')

        # assert repo in admin's be shared repo list
        shared_repos = seafile_api.get_share_in_repo_list(self.admin.username, -1, -1)
        assert shared_repos[0].repo_name == self.repo.repo_name

        self.login_as(self.user)
        url = reverse("api2-repo-owner", args=[self.user_repo_id])
        data = 'owner=%s' % self.admin.email
        resp = self.client.put(url, data, 'application/x-www-form-urlencoded')
        self.assertEqual(200, resp.status_code)

        # assert repo NOT in admin's be shared repo list
        shared_repos = seafile_api.get_share_in_repo_list(self.admin.username, -1, -1)
        assert len(shared_repos) == 0

    def test_reshare_to_group_after_transfer_repo(self):
        # If new owner in group repo shared to, reshare to group

        # share user's repo to group with 'r' permission
        seafile_api.set_group_repo(self.user_repo_id, self.group_id,
                self.user_name, 'r')
        group_repos = seafile_api.get_repos_by_group(self.group_id)
        assert group_repos[0].permission == 'r'

        # add admin user to group
        ccnet_api.group_add_member(self.group_id, self.user_name, self.admin.username)

        self.login_as(self.user)
        url = reverse("api2-repo-owner", args=[self.user_repo_id])
        data = 'owner=%s' % self.admin.email

        # transfer repo to admin
        resp = self.client.put(url, data, 'application/x-www-form-urlencoded')
        self.assertEqual(200, resp.status_code)

        # Group share kept because the new owner is a group member.
        group_repos = seafile_api.get_repos_by_group(self.group_id)
        assert group_repos[0].permission == 'r'

    def test_not_reshare_to_group_after_transfer_repo(self):
        # If new owner NOT in group repo shared to, NOT reshare to group

        # share user's repo to group with 'r' permission
        seafile_api.set_group_repo(self.user_repo_id, self.group_id,
                self.user_name, 'r')
        group_repos = seafile_api.get_repos_by_group(self.group_id)
        assert group_repos[0].permission == 'r'

        self.login_as(self.user)
        url = reverse("api2-repo-owner", args=[self.user_repo_id])
        data = 'owner=%s' % self.admin.email

        # transfer repo to admin
        resp = self.client.put(url, data, 'application/x-www-form-urlencoded')
        self.assertEqual(200, resp.status_code)

        # Group share dropped because the new owner is not a member.
        group_repos = seafile_api.get_repos_by_group(self.group_id)
        assert len(group_repos) == 0

    def test_reshare_to_user_group_after_transfer_repo(self):
        # Combined case: both a user share and a group share survive a
        # transfer when the new owner belongs to the group.
        tmp_user = 'tmp_user@email.com'
        User.objects.create_user(tmp_user)

        # add admin user to group
        ccnet_api.group_add_member(self.group_id, self.user_name, self.admin.username)

        # share user's repo to tmp_user with 'rw' permission
        seafile_api.share_repo(self.user_repo_id, self.user.username,
                tmp_user, 'rw')

        # share user's repo to group with 'r' permission
        seafile_api.set_group_repo(self.user_repo_id, self.group_id,
                self.user_name, 'r')

        group_repos = seafile_api.get_repos_by_group(self.group_id)
        assert group_repos[0].permission == 'r'
        assert seafile_api.check_permission_by_path(self.user_repo_id,
                '/', tmp_user) == 'rw'

        self.login_as(self.user)
        url = reverse("api2-repo-owner", args=[self.user_repo_id])
        data = 'owner=%s' % self.admin.email

        # transfer repo to admin
        resp = self.client.put(url, data, 'application/x-www-form-urlencoded')
        self.assertEqual(200, resp.status_code)

        group_repos = seafile_api.get_repos_by_group(self.group_id)
        assert group_repos[0].permission == 'r'
        assert seafile_api.check_permission_by_path(self.user_repo_id,
                '/', tmp_user) == 'rw'

    def test_can_not_transfer_if_not_repo_owner(self):
        # Only the current owner may transfer; others get 403.
        self.login_as(self.admin)
        url = reverse("api2-repo-owner", args=[self.user_repo_id])
        data = 'owner=%s' % self.admin.email
        resp = self.client.put(url, data, 'application/x-www-form-urlencoded')
        self.assertEqual(403, resp.status_code)

    def test_can_not_transfer_repo_to_unregistered_user(self):
        # Transferring to an unknown account yields 404.
        self.login_as(self.user)
        url = reverse("api2-repo-owner", args=[self.user_repo_id])
        unregistered_user = '%s@%s.com' % (randstring(6), randstring(6))
        data = 'owner=%s' % unregistered_user
        resp = self.client.put(url, data, 'application/x-www-form-urlencoded')
        self.assertEqual(404, resp.status_code)

    def test_reshare_to_share_links_after_transfer_repo(self):
        # Download link created in setUp must belong to the new owner after
        # the transfer.
        self.login_as(self.user)

        # NOTE(review): this counts UploadLinkShare although the test is
        # about FileShare download links — possibly meant FileShare; confirm.
        assert len(UploadLinkShare.objects.all()) == 1

        url = reverse("api2-repo-owner", args=[self.user_repo_id])
        data = 'owner=%s' % self.admin.email
        resp = self.client.put(url, data, 'application/x-www-form-urlencoded')

        fs = FileShare.objects.get(repo_id=self.user_repo_id)
        assert fs.username == self.admin.email

    def test_reshare_to_upload_links_after_transfer_repo(self):
        # Upload link created in setUp must belong to the new owner after
        # the transfer.
        self.login_as(self.user)

        assert len(UploadLinkShare.objects.all()) == 1

        url = reverse("api2-repo-owner", args=[self.user_repo_id])
        data = 'owner=%s' % self.admin.email
        self.client.put(url, data, 'application/x-www-form-urlencoded')

        fs = UploadLinkShare.objects.get(repo_id=self.user_repo_id)
        assert fs.username == self.admin.email
| |
# Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
import logging
import os
import socket
import sys
import tornado.gen
import tornado.iostream
from .. import errors
from .. import frame
from .. import glossary
from .. import messages
from ..errors import ConnectionClosedError
from ..errors import InvalidErrorCodeError
from ..errors import TChannelError
from ..event import EventType
from ..io import BytesIO
from ..messages.common import PROTOCOL_VERSION
from ..messages.common import FlagsType
from ..messages.common import StreamState
from ..messages.error import ErrorMessage
from ..messages.types import Types
from .message_factory import MessageFactory
from .message_factory import build_protocol_exception
from .util import chain
try:
import tornado.queues as queues # included in 4.2
QueueEmpty = queues.QueueEmpty
except ImportError:
import toro as queues
from Queue import Empty as QueueEmpty
log = logging.getLogger('tchannel')
class TornadoConnection(object):
    """Manages a bi-directional TChannel conversation between two machines.

    The following primary bi-directional operations are provided:

    ``write(message)``
        Send the message up the wire.
    ``await(message)``
        Receive a message.
    ``send(message)``
        Send a message and receive its response.

    In addition to those, the following operations are provided and should be
    used depending on the direction of the connection.

    ``initiate_handshake``
        Perform a handshake with the remote host.
    ``expect_handshake``
        Expect a handshake request from the remote host.

    NOTE(review): this module targets Python 2 (``dict.iteritems``, the
    ``Queue`` fallback import, and a method literally named ``await`` which
    is a reserved keyword from Python 3.7 onward).
    """

    # Frame types that constitute inbound calls vs. responses to our calls.
    CALL_REQ_TYPES = frozenset([Types.CALL_REQ, Types.CALL_REQ_CONTINUE])
    CALL_RES_TYPES = frozenset([Types.CALL_RES, Types.CALL_RES_CONTINUE])

    def __init__(self, connection, tchannel=None):
        """
        :param connection: underlying tornado stream (required).
        :param tchannel: owning TChannel instance, used for event emission.
        """
        assert connection, "connection is required"

        self.closed = False
        self.connection = connection

        # getsockname() yields (host, port) for IPv4; degrade gracefully for
        # other address families.
        sockname = connection.socket.getsockname()
        if len(sockname) == 2:
            (self.remote_host,
             self.remote_host_port) = sockname
        elif len(sockname) == 1:
            self.remote_host = sockname[0]
            self.remote_host_port = 0
        else:
            self.remote_host = "0.0.0.0"
            self.remote_host_port = 0

        self.remote_host_port = int(self.remote_host_port)
        self.remote_process_name = None
        # Until the handshake tells us otherwise, assume our own version.
        self.requested_version = PROTOCOL_VERSION

        # Tracks message IDs for this connection.
        self._id_sequence = 0

        # We need to use two separate message factories to avoid message ID
        # collision while assembling fragmented messages.
        self.request_message_factory = MessageFactory(self.remote_host,
                                                      self.remote_host_port)
        self.response_message_factory = MessageFactory(self.remote_host,
                                                       self.remote_host_port)

        # Queue of unprocessed incoming calls.
        self._messages = queues.Queue()

        # Map from message ID to futures for responses of outgoing calls.
        self._outstanding = {}

        # Whether _loop is running. The loop doesn't run until after the
        # handshake has been performed.
        self._loop_running = False

        self.tchannel = tchannel

        connection.set_close_callback(self._on_close)

    def next_message_id(self):
        # Wrap around instead of growing without bound.
        self._id_sequence = (self._id_sequence + 1) % glossary.MAX_MESSAGE_ID
        return self._id_sequence

    def _on_close(self):
        """Fail all outstanding requests and drain queued inbound messages."""
        self.closed = True

        # Python-2 iteritems; every pending caller gets a closed-connection
        # error rather than hanging forever.
        for message_id, future in self._outstanding.iteritems():
            future.set_exception(
                ConnectionClosedError(
                    "canceling outstanding request %d" % message_id
                )
            )
        self._outstanding = {}

        try:
            while True:
                message = self._messages.get_nowait()
                log.warn("Unconsumed message %s", message)
        except QueueEmpty:
            pass

    def await(self):
        """Get the next call to this TChannel."""
        if self._loop_running:
            # _loop consumes the wire; calls are handed over via the queue.
            return self._messages.get()
        else:
            return self._recv()

    def _recv(self):
        """Receive the next message off the wire.

        :returns:
            A Future that produces a Context object containing the next
            message off the wire.
        """

        # This is the message_future we'll return for any inbound messages.
        message_future = tornado.gen.Future()

        # Stage 2: full frame body available — parse frame, then payload,
        # and resolve the returned future.
        def on_body(read_body_future, size):
            if read_body_future.exception():
                return on_error(read_body_future)

            body = read_body_future.result()
            f = frame.frame_rw.read(BytesIO(body), size=size)
            message_rw = messages.RW[f.header.message_type]
            message = message_rw.read(BytesIO(f.payload))
            message.id = f.header.message_id
            message_future.set_result(message)

        # Stage 1: size prefix available — read the rest of the frame. The
        # on-wire size includes the prefix itself, hence "- size_width".
        def on_read_size(read_size_future):
            if read_size_future.exception():
                return on_error(read_size_future)

            size_bytes = read_size_future.result()
            size = frame.frame_rw.size_rw.read(BytesIO(size_bytes))
            read_body_future = self.connection.read_bytes(size - size_width)
            read_body_future.add_done_callback(
                lambda future: on_body(future, size)
            )
            return read_body_future

        def on_error(future):
            # Only a closed stream is handled here; other errors are dropped
            # silently (the message_future is simply never resolved).
            exception = future.exception()
            if isinstance(exception, tornado.iostream.StreamClosedError):
                self.close()

        # size_width is captured by closure in on_read_size above.
        size_width = frame.frame_rw.size_rw.width()
        self.connection.read_bytes(size_width).add_done_callback(on_read_size)
        return message_future

    @tornado.gen.coroutine
    def _loop(self):
        # Receive messages off the wire. All messages are either responses to
        # outstanding requests or calls.
        #
        # Must be started only after the handshake has been performed.
        self._loop_running = True
        while not self.closed:
            message = yield self._recv()

            # TODO: There should probably be a try-catch on the yield.
            if message.message_type in self.CALL_REQ_TYPES:
                # Inbound call: hand over to await()/serve() via the queue.
                self._messages.put(message)
                continue
            elif message.id in self._outstanding:
                # set exception if receive error message
                if message.message_type == Types.ERROR:
                    future = self._outstanding.pop(message.id)
                    if future.running():
                        protocol_exception = build_protocol_exception(message)
                        future.set_exception(protocol_exception)
                    else:
                        protocol_exception = (
                            self.response_message_factory.build(message)
                        )
                        if protocol_exception:
                            # NOTE(review): this class defines no
                            # ``event_emitter`` attribute; other code paths
                            # fire events via ``self.tchannel.event_emitter``
                            # — confirm this branch is correct/exercised.
                            self.event_emitter.fire(
                                EventType.after_receive_error,
                                protocol_exception,
                            )
                    continue

                response = self.response_message_factory.build(message)

                # keep continue message in the list
                # pop all other type messages including error message
                if (message.message_type in self.CALL_RES_TYPES and
                        message.flags == FlagsType.fragment):
                    # still streaming, keep it for record
                    future = self._outstanding.get(message.id)
                else:
                    future = self._outstanding.pop(message.id)

                if response and future.running():
                    future.set_result(response)
                continue

            log.warn('Unconsumed message %s', message)

    # Basically, the only difference between send and write is that send
    # sets up a Future to get the response. That's ideal for peers making
    # calls. Peers responding to calls must use write.

    def send(self, message):
        """Send the given message up the wire.
        Use this for messages which have a response message.

        :param message:
            Message to send
        :returns:
            A Future containing the response for the message
        """
        assert not self.closed
        assert self._loop_running, "Perform a handshake first."
        assert message.message_type in self.CALL_REQ_TYPES, (
            "Message '%s' can't use send" % repr(message)
        )

        message.id = message.id or self.next_message_id()
        assert message.id not in self._outstanding, (
            "Message ID '%d' already being used" % message.id
        )

        future = tornado.gen.Future()
        self._outstanding[message.id] = future
        self.write(message)
        return future

    def write(self, message):
        """Writes the given message up the wire.

        Does not expect a response back for the message.

        :param message:
            Message to write.
        """
        assert not self.closed

        message.id = message.id or self.next_message_id()

        # Direction of the message decides which factory fragments it.
        if message.message_type in self.CALL_REQ_TYPES:
            message_factory = self.request_message_factory
        else:
            message_factory = self.response_message_factory

        # Split into frame-sized fragments and write them sequentially.
        fragments = message_factory.fragment(message)
        return chain(fragments, self._write)

    def _write(self, message):
        """Writes the given message up the wire.

        The message must be small enough to fit in a single frame.
        """
        message.id = message.id or self.next_message_id()

        payload = messages.RW[message.message_type].write(
            message, BytesIO()
        ).getvalue()

        f = frame.Frame(
            header=frame.FrameHeader(
                message_type=message.message_type,
                message_id=message.id,
            ),
            payload=payload
        )
        body = frame.frame_rw.write(f, BytesIO()).getvalue()

        return self.connection.write(body)

    def close(self):
        self.connection.close()

    @tornado.gen.coroutine
    def initiate_handshake(self, headers):
        """Initiate a handshake with the remote host.

        :param headers:
            A dictionary of headers to send.
        :returns:
            A future that resolves (with a value of None) when the handshake
            is complete.
        """
        self._write(messages.InitRequestMessage(
            version=PROTOCOL_VERSION,
            headers=headers
        ))
        init_res = yield self._recv()
        if init_res.message_type != Types.INIT_RES:
            raise errors.InvalidMessageError(
                "Expected handshake response, got %s" % repr(init_res)
            )
        self._extract_handshake_headers(init_res)

        # The receive loop is started only after the handshake has been
        # completed.
        self._loop()

    @tornado.gen.coroutine
    def expect_handshake(self, headers):
        """Expect a handshake from the remote host.

        :param headers:
            Headers to respond with
        :returns:
            A future that resolves (with a value of None) when the handshake
            is complete.
        """
        init_req = yield self._recv()
        if init_req.message_type != Types.INIT_REQ:
            raise errors.InvalidMessageError(
                "You need to shake my hand first. Got %s" % repr(init_req)
            )
        self._extract_handshake_headers(init_req)

        self._write(
            messages.InitResponseMessage(
                PROTOCOL_VERSION, headers, init_req.id),
        )

        # The receive loop is started only after the handshake has been
        # completed.
        self._loop()

    def _extract_handshake_headers(self, message):
        """Validate and record the peer's identity from a handshake message.

        :raises InvalidMessageError: if host_port or process_name is missing.
        """
        if not message.host_port:
            raise errors.InvalidMessageError(
                'Missing required header: host_port'
            )

        if not message.process_name:
            raise errors.InvalidMessageError(
                'Missing required header: process_name'
            )

        # rsplit so IPv6-style hosts containing ':' keep their full host part.
        (self.remote_host,
         self.remote_host_port) = message.host_port.rsplit(':', 1)
        self.remote_host_port = int(self.remote_host_port)
        self.remote_process_name = message.process_name
        self.requested_version = message.version

    @classmethod
    @tornado.gen.coroutine
    def outgoing(cls, hostport, process_name=None, serve_hostport=None,
                 handler=None, tchannel=None):
        """Initiate a new connection to the given host.

        :param hostport:
            String in the form ``$host:$port`` specifying the target host
        :param process_name:
            Process name of the entity making the connection.
        :param serve_hostport:
            String in the form ``$host:$port`` specifying an address at which
            the caller can be reached. If omitted, ``0.0.0.0:0`` is used.
        :param handler:
            If given, any calls received from this connection will be sent to
            this RequestHandler.
        """
        host, port = hostport.rsplit(":", 1)
        process_name = process_name or "%s[%s]" % (sys.argv[0], os.getpid())
        serve_hostport = serve_hostport or "0.0.0.0:0"

        # TODO: change this to tornado.tcpclient.TCPClient to do async DNS
        # lookups.
        stream = tornado.iostream.IOStream(
            socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        )

        log.debug("Connecting to %s", hostport)
        try:
            yield stream.connect((host, int(port)))
        except socket.error as e:
            log.exception("Couldn't connect to %s", hostport)
            raise ConnectionClosedError(
                "Couldn't connect to %s" % hostport, e
            )

        connection = cls(stream, tchannel)
        log.debug("Performing handshake with %s", hostport)
        yield connection.initiate_handshake(headers={
            'host_port': serve_hostport,
            'process_name': process_name,
        })

        if handler:
            connection.serve(handler)

        raise tornado.gen.Return(connection)

    @tornado.gen.coroutine
    def serve(self, handler):
        """Serve calls over this connection using the given RequestHandler.

        :param handler:
            RequestHandler to process the requests through
        :return:
            A Future that resolves (to None) once the loop is done running --
            which happens once this connection is closed.
        """
        assert handler, "handler is required"
        assert self._loop_running, "Finish the handshake first"

        while not self.closed:
            message = yield self.await()

            try:
                handler(message, self)
            except Exception:
                # TODO Send error frame back
                logging.exception("Failed to process %s", repr(message))

    def send_error(self, code, description, message_id):
        """Convenience method for writing Error frames up the wire.

        :param code:
            Error code
        :param description:
            Error description
        :param message_id:
            Message in response to which this error is being sent
        :raises InvalidErrorCodeError: if ``code`` is not a known error code.
        """
        if code not in ErrorMessage.ERROR_CODES.keys():
            raise InvalidErrorCodeError(code)

        return self._write(
            ErrorMessage(
                code=code,
                description=description,
                id=message_id,
            ),
        )

    def ping(self):
        return self._write(messages.PingRequestMessage())

    def pong(self):
        return self._write(messages.PingResponseMessage())
class StreamConnection(TornadoConnection):
    """Streaming request/response into protocol messages and sent by tornado
    connection

    Here are public apis provided by StreamConnection:

    "post_response(response)"
        stream response object into wire

    "stream_request(request)"
        stream request object into wire without waiting for a response

    "send_request(request)"
        stream request object into wire and wait for a response
    """

    @tornado.gen.coroutine
    def _stream(self, context, message_factory):
        """write request/response into frames

        Transform request/response into protocol level message objects based on
        types and argstreams.

        Assumption: the chunk data read from stream can fit into memory.

        If arg stream is at init or streaming state, build the message based on
        current chunk. If arg stream is at completed state, put current chunk
        into args[] array, and continue to read next arg stream in order to
        compose a larger message instead of sending multi small messages.

        Note: the message built at this stage is not guaranteed the size is
        less then 64KB.

        Possible messages created sequence:

        Take request as an example::
            CallRequestMessage(flags=fragment)
                --> CallRequestContinueMessage(flags=fragment)
                ....
                --> CallRequestContinueMessage(flags=fragment)
                    --> CallRequestMessage(flags=none)

        :param context: Request or Response object
        """
        args = []
        try:
            for argstream in context.argstreams:
                chunk = yield argstream.read()
                # First chunk of each arg always joins the pending batch.
                args.append(chunk)
                chunk = yield argstream.read()
                while chunk:
                    # More data on this arg: flush what we have as a fragment
                    # and start a new batch with the fresh chunk.
                    message = (message_factory.
                               build_raw_message(context, args))
                    yield self.write(message)
                    args = [chunk]
                    chunk = yield argstream.read()

            # last piece of request/response.
            message = (message_factory.
                       build_raw_message(context, args, is_completed=True))
            yield self.write(message)
            context.state = StreamState.completed
        # Stop streamming immediately if exception occurs on the handler side
        except TChannelError as e:
            # raise by tchannel intentionally
            # NOTE(review): ``e.message`` is Python-2-only behavior.
            log.info("Stop Outgoing Streams because of error: %s", e.message)

    @tornado.gen.coroutine
    def post_response(self, response):
        """Stream a response onto the wire and fire the after-send event."""
        try:
            # TODO: before_send_response
            yield self._stream(response, self.response_message_factory)

            # event: send_response
            self.tchannel.event_emitter.fire(
                EventType.after_send_response,
                response,
            )
        finally:
            # Always release the arg streams, even on failure.
            response.close_argstreams(force=True)

    def stream_request(self, request):
        """send the given request and response is not required"""
        request.close_argstreams()

        stream_future = self._stream(request, self.request_message_factory)

        # Force-close arg streams once streaming finishes (or fails).
        stream_future.add_done_callback(
            lambda f: request.close_argstreams(force=True),
        )

        return stream_future

    def send_request(self, request):
        """Send the given request and response is required.

        Use this for messages which have a response message.

        :param request:
            request to send
        :returns:
            A Future containing the response for the request
        """
        assert not self.closed
        assert self._loop_running, "Perform a handshake first."

        assert request.id not in self._outstanding, (
            "Message ID '%d' already being used" % request.id
        )

        future = tornado.gen.Future()
        self._outstanding[request.id] = future
        self.stream_request(request)

        # the actual future that caller will yield
        response_future = tornado.gen.Future()
        # TODO: fire before_receive_response

        # NOTE(review): relies on ``tornado.ioloop`` being importable as an
        # attribute of the already-imported tornado package — confirm; the
        # module imports only tornado.gen and tornado.iostream directly.
        tornado.ioloop.IOLoop.current().add_future(
            future,
            lambda f: self.adapt_result(f, request, response_future),
        )
        return response_future

    def adapt_result(self, f, request, response_future):
        """Copy the raw response/exception onto the caller's future, tagging
        it with the originating request's tracing information."""
        if not response_future.running():
            return

        if f.exception():
            protocol_exception = f.exception()
            protocol_exception.tracing = request.tracing
            response_future.set_exception(protocol_exception)
        else:
            response = f.result()
            response.tracing = request.tracing
            response_future.set_result(response)

    def remove_outstanding_request(self, request):
        """Remove request from pending request list"""
        # pop with default: a no-op if the request was already resolved.
        self._outstanding.pop(request.id, None)
| |
"""
@package mi.instrument.teledyne.workhorse.driver
@file marine-integrations/mi/instrument/teledyne/workhorse/driver.py
@author Sung Ahn
@brief Driver for the Teledyne Workhorse class instruments
Release notes:
Generic Driver for ADCPS-K, ADCPS-I, ADCPT-B and ADCPT-DE
"""
import time
import struct
import re
from contextlib import contextmanager
from mi.core.log import get_logger
from mi.core.common import Units, Prefixes
from mi.core.instrument.protocol_param_dict import ParameterDictVisibility
from mi.core.instrument.protocol_param_dict import ParameterDictType
from mi.core.instrument.chunker import StringChunker
from mi.core.common import BaseEnum
from mi.core.time_tools import get_timestamp_delayed
from mi.core.exceptions import InstrumentParameterException, InstrumentTimeoutException, InstrumentException, \
SampleException
from mi.core.exceptions import InstrumentProtocolException
from mi.core.instrument.instrument_fsm import ThreadSafeFSM
from mi.core.instrument.instrument_protocol import CommandResponseInstrumentProtocol, InitializationType
from mi.core.instrument.instrument_driver import DriverEvent
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.instrument_driver import DriverParameter
from mi.core.instrument.instrument_driver import DriverConfigKey
from mi.core.driver_scheduler import DriverSchedulerConfigKey
from mi.core.driver_scheduler import TriggerType
from mi.core.instrument.driver_dict import DriverDictKey
from mi.core.util import dict_equal
from mi.instrument.teledyne.workhorse.pd0_parser import AdcpPd0Record
from mi.instrument.teledyne.workhorse.particles import \
AdcpCompassCalibrationDataParticle, AdcpSystemConfigurationDataParticle, AdcpAncillarySystemDataParticle, \
AdcpTransmitPathParticle, AdcpPd0ConfigParticle, AdcpPd0EngineeringParticle, \
Pd0BeamParticle, Pd0CoordinateTransformType, Pd0EarthParticle, WorkhorseDataParticleType
__author__ = 'Sung Ahn'
__license__ = 'Apache 2.0'

# Module-level logger for the driver.
log = get_logger()

# Default timeout (seconds) for instrument interactions.
TIMEOUT = 20
# Line terminator used when talking to the instrument.
NEWLINE = '\n'

# Default timeout (seconds) and inter-character write delay for commands.
DEFAULT_CMD_TIMEOUT = 20
DEFAULT_WRITE_DELAY = 0

# An interval of 00:00:00 disables a scheduled job.
ZERO_TIME_INTERVAL = '00:00:00'
# Instrument reports 2-digit years; add this base to get the full year.
BASE_YEAR = 2000
class WorkhorsePrompt(BaseEnum):
    """
    Device I/O prompts recognized by the protocol.
    """
    COMMAND = '\r\n>'    # command-mode prompt
    BREAK = 'BREAK'      # response to a break condition
    SAMPLING = 'CS\r\n'  # echoed start-sampling command
class WorkhorseEngineeringParameter(BaseEnum):
    """Driver-side (engineering) parameters governing scheduled commands.

    These are not instrument commands; they configure the driver scheduler.
    """
    CLOCK_SYNCH_INTERVAL = 'clockSynchInterval'
    GET_STATUS_INTERVAL = 'getStatusInterval'
class WorkhorseParameter(DriverParameter):
    """
    Device parameters.

    Values are the two/three character Workhorse command mnemonics used to
    get/set each parameter on the instrument, except the engineering
    parameters at the bottom, which are driver-side scheduler settings.
    """
    #
    # set-able parameters
    #
    SERIAL_DATA_OUT = 'CD'  # 000 000 000 Serial Data Out (Vel;Cor;Amp PG;St;P0 P1;P2;P3)
    SERIAL_FLOW_CONTROL = 'CF'  # Flow Control
    BANNER = 'CH'  # Banner
    INSTRUMENT_ID = 'CI'  # Int 0-255
    SLEEP_ENABLE = 'CL'  # SLEEP Enable
    SAVE_NVRAM_TO_RECORDER = 'CN'  # Save NVRAM to RECORD
    POLLED_MODE = 'CP'  # Polled Mode
    XMIT_POWER = 'CQ'  # 0=Low, 255=High
    LATENCY_TRIGGER = 'CX'  # Latency Trigger
    HEADING_ALIGNMENT = 'EA'  # Heading Alignment
    HEADING_BIAS = 'EB'  # Heading Bias
    SPEED_OF_SOUND = 'EC'  # 1500 Speed Of Sound (m/s)
    TRANSDUCER_DEPTH = 'ED'  # Transducer Depth
    PITCH = 'EP'  # Pitch
    ROLL = 'ER'  # Roll
    SALINITY = 'ES'  # 35 (0-40 pp thousand)
    COORDINATE_TRANSFORMATION = 'EX'
    SENSOR_SOURCE = 'EZ'  # Sensor Source (C;D;H;P;R;S;T)
    DATA_STREAM_SELECTION = 'PD'  # Data Stream selection
    # VADCP parameters
    SYNC_PING_ENSEMBLE = 'SA'
    RDS3_MODE_SEL = 'SM'  # 0=off, 1=master, 2=slave
    SLAVE_TIMEOUT = 'ST'
    SYNCH_DELAY = 'SW'
    ENSEMBLE_PER_BURST = 'TC'  # Ensemble per Burst
    TIME_PER_ENSEMBLE = 'TE'  # 01:00:00.00 (hrs:min:sec.sec/100)
    TIME_OF_FIRST_PING = 'TG'  # ****/**/**,**:**:** (CCYY/MM/DD,hh:mm:ss)
    TIME_PER_PING = 'TP'  # 00:00.20 (min:sec.sec/100)
    TIME = 'TT'  # 2013/02/26,05:28:23 (CCYY/MM/DD,hh:mm:ss)
    BUFFERED_OUTPUT_PERIOD = 'TX'  # Buffered Output Period
    FALSE_TARGET_THRESHOLD = 'WA'  # 255,001 (Max)(0-255),Start Bin # <--------- TRICKY.... COMPLEX TYPE
    BANDWIDTH_CONTROL = 'WB'  # Bandwidth Control (0=Wid,1=Nar)
    CORRELATION_THRESHOLD = 'WC'  # 064 Correlation Threshold
    SERIAL_OUT_FW_SWITCHES = 'WD'  # 111100000 Data Out (Vel;Cor;Amp PG;St;P0 P1;P2;P3)
    ERROR_VELOCITY_THRESHOLD = 'WE'  # 5000 Error Velocity Threshold (0-5000 mm/s)
    BLANK_AFTER_TRANSMIT = 'WF'  # 0088 Blank After Transmit (cm)
    CLIP_DATA_PAST_BOTTOM = 'WI'  # 0 Clip Data Past Bottom (0=OFF,1=ON)
    RECEIVER_GAIN_SELECT = 'WJ'  # 1 Rcvr Gain Select (0=Low,1=High)
    NUMBER_OF_DEPTH_CELLS = 'WN'  # Number of depth cells (1-255)
    PINGS_PER_ENSEMBLE = 'WP'  # Pings per Ensemble (0-16384)
    SAMPLE_AMBIENT_SOUND = 'WQ'  # Sample Ambient sound
    DEPTH_CELL_SIZE = 'WS'  # 0800 Depth Cell Size (cm)
    TRANSMIT_LENGTH = 'WT'  # 0000 Transmit Length 0 to 3200(cm) 0 = Bin Length
    PING_WEIGHT = 'WU'  # 0 Ping Weighting (0=Box,1=Triangle)
    AMBIGUITY_VELOCITY = 'WV'  # 175 Mode 1 Ambiguity Vel (cm/s radial)

    # Engineering parameters for the scheduled commands (driver-side only)
    CLOCK_SYNCH_INTERVAL = WorkhorseEngineeringParameter.CLOCK_SYNCH_INTERVAL
    GET_STATUS_INTERVAL = WorkhorseEngineeringParameter.GET_STATUS_INTERVAL
class WorkhorseInstrumentCmds(BaseEnum):
    """
    Device specific commands.

    Represents the commands the driver implements and the string that
    must be sent to the instrument to execute the command.
    """
    # Instrument Commands
    OUTPUT_CALIBRATION_DATA = 'AC'      # output compass calibration data
    START_LOGGING = 'CS'                # start pinging/deployment
    GET_SYSTEM_CONFIGURATION = 'PS0'    # display system configuration
    RUN_TEST_200 = 'PT200'              # built-in test
    OUTPUT_PT2 = 'PT2'                  # ancillary system data
    OUTPUT_PT4 = 'PT4'                  # transmit path test
    # Engineering commands (driver-side, never sent to the instrument)
    SET = 'set'
    GET = 'get'
class WorkhorseProtocolState(BaseEnum):
    """
    Instrument protocol states for the protocol FSM.
    """
    UNKNOWN = DriverProtocolState.UNKNOWN
    COMMAND = DriverProtocolState.COMMAND
    AUTOSAMPLE = DriverProtocolState.AUTOSAMPLE
    DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS
class WorkhorseProtocolEvent(BaseEnum):
    """
    Protocol events accepted by the protocol FSM.
    """
    # Scheduled events
    SCHEDULED_CLOCK_SYNC = 'PROTOCOL_EVENT_SCHEDULED_CLOCK_SYNC'
    SCHEDULED_GET_STATUS = 'PROTOCOL_EVENT_SCHEDULED_GET_STATUS'
    # Recovery
    RECOVER_AUTOSAMPLE = 'PROTOCOL_EVENT_RECOVER_AUTOSAMPLE'
    # Base events
    START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE
    STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE
    CLOCK_SYNC = DriverEvent.CLOCK_SYNC
    ACQUIRE_STATUS = DriverEvent.ACQUIRE_STATUS
    RUN_TEST = DriverEvent.RUN_TEST
    GET = DriverEvent.GET
    SET = DriverEvent.SET
    START_DIRECT = DriverEvent.START_DIRECT
    STOP_DIRECT = DriverEvent.STOP_DIRECT
    DISCOVER = DriverEvent.DISCOVER
    EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT
    ENTER = DriverEvent.ENTER
    EXIT = DriverEvent.EXIT
class WorkhorseCapability(BaseEnum):
    """
    Protocol events that should be exposed to users (subset of
    WorkhorseProtocolEvent).
    """
    START_AUTOSAMPLE = WorkhorseProtocolEvent.START_AUTOSAMPLE
    STOP_AUTOSAMPLE = WorkhorseProtocolEvent.STOP_AUTOSAMPLE
    CLOCK_SYNC = WorkhorseProtocolEvent.CLOCK_SYNC
    RUN_TEST = WorkhorseProtocolEvent.RUN_TEST
    ACQUIRE_STATUS = WorkhorseProtocolEvent.ACQUIRE_STATUS
    GET = WorkhorseProtocolEvent.GET
    SET = WorkhorseProtocolEvent.SET
    START_DIRECT = WorkhorseProtocolEvent.START_DIRECT
    STOP_DIRECT = WorkhorseProtocolEvent.STOP_DIRECT
    DISCOVER = WorkhorseProtocolEvent.DISCOVER
class WorkhorseScheduledJob(BaseEnum):
    """Names of jobs registered with the driver scheduler."""
    CLOCK_SYNC = 'clock_sync'
    GET_CONFIGURATION = 'acquire_configuration'
class WorkhorseADCPUnits(Units):
    """Unit labels used by this driver in addition to the common Units set."""
    PPTHOUSAND = 'ppt'  # parts per thousand (salinity)
# Regexes used to extract each parameter's value from the instrument's
# configuration output. The capture group holds the raw value text.
# NOTE(review): the TE/TP patterns use an unescaped '.' for the decimal
# point (matches any character); benign for well-formed responses.
parameter_regexes = {
    WorkhorseParameter.SERIAL_DATA_OUT: r'CD = (\d\d\d \d\d\d \d\d\d) \-+ Serial Data Out ',
    WorkhorseParameter.SERIAL_FLOW_CONTROL: r'CF = (\d+) \-+ Flow Ctrl ',
    WorkhorseParameter.BANNER: r'CH = (\d) \-+ Suppress Banner',
    WorkhorseParameter.INSTRUMENT_ID: r'CI = (\d+) \-+ Instrument ID ',
    WorkhorseParameter.SLEEP_ENABLE: r'CL = (\d) \-+ Sleep Enable',
    WorkhorseParameter.SAVE_NVRAM_TO_RECORDER: r'CN = (\d) \-+ Save NVRAM to recorder',
    WorkhorseParameter.POLLED_MODE: r'CP = (\d) \-+ PolledMode ',
    WorkhorseParameter.XMIT_POWER: r'CQ = (\d+) \-+ Xmt Power ',
    WorkhorseParameter.LATENCY_TRIGGER: r'CX = (\d) \-+ Trigger Enable ',
    WorkhorseParameter.HEADING_ALIGNMENT: r'EA = ([+\-]\d+) \-+ Heading Alignment',
    WorkhorseParameter.HEADING_BIAS: r'EB = ([+\-]\d+) \-+ Heading Bias',
    WorkhorseParameter.SPEED_OF_SOUND: r'EC = (\d+) \-+ Speed Of Sound',
    WorkhorseParameter.TRANSDUCER_DEPTH: r'ED = (\d+) \-+ Transducer Depth ',
    WorkhorseParameter.PITCH: r'EP = ([+\-\d]+) \-+ Tilt 1 Sensor ',
    WorkhorseParameter.ROLL: r'ER = ([+\-\d]+) \-+ Tilt 2 Sensor ',
    WorkhorseParameter.SALINITY: r'ES = (\d+) \-+ Salinity ',
    WorkhorseParameter.COORDINATE_TRANSFORMATION: r'EX = (\d+) \-+ Coord Transform ',
    WorkhorseParameter.SENSOR_SOURCE: r'EZ = (\d+) \-+ Sensor Source ',
    WorkhorseParameter.DATA_STREAM_SELECTION: r'PD = (\d+) \-+ Data Stream Select',
    WorkhorseParameter.ENSEMBLE_PER_BURST: r'TC (\d+) \-+ Ensembles Per Burst',
    WorkhorseParameter.TIME_PER_ENSEMBLE: r'TE (\d\d:\d\d:\d\d.\d\d) \-+ Time per Ensemble ',
    WorkhorseParameter.TIME_OF_FIRST_PING: r'TG (..../../..,..:..:..) - Time of First Ping ',
    WorkhorseParameter.TIME_PER_PING: r'TP (\d\d:\d\d.\d\d) \-+ Time per Ping',
    WorkhorseParameter.TIME: r'TT (\d\d\d\d/\d\d/\d\d,\d\d:\d\d:\d\d) \- Time Set ',
    WorkhorseParameter.BUFFERED_OUTPUT_PERIOD: r'TX (\d\d:\d\d:\d\d) \-+ Buffer Output Period:',
    WorkhorseParameter.FALSE_TARGET_THRESHOLD: r'WA (\d+,\d+) \-+ False Target Threshold ',
    WorkhorseParameter.BANDWIDTH_CONTROL: r'WB (\d) \-+ Bandwidth Control ',
    WorkhorseParameter.CORRELATION_THRESHOLD: r'WC (\d+) \-+ Correlation Threshold',
    WorkhorseParameter.SERIAL_OUT_FW_SWITCHES: r'WD ([\d ]+) \-+ Data Out ',
    WorkhorseParameter.ERROR_VELOCITY_THRESHOLD: r'WE (\d+) \-+ Error Velocity Threshold',
    WorkhorseParameter.BLANK_AFTER_TRANSMIT: r'WF (\d+) \-+ Blank After Transmit',
    WorkhorseParameter.CLIP_DATA_PAST_BOTTOM: r'WI (\d) \-+ Clip Data Past Bottom',
    WorkhorseParameter.RECEIVER_GAIN_SELECT: r'WJ (\d) \-+ Rcvr Gain Select',
    WorkhorseParameter.NUMBER_OF_DEPTH_CELLS: r'WN (\d+) \-+ Number of depth cells',
    WorkhorseParameter.PINGS_PER_ENSEMBLE: r'WP (\d+) \-+ Pings per Ensemble ',
    WorkhorseParameter.SAMPLE_AMBIENT_SOUND: r'WQ (\d) \-+ Sample Ambient Sound',
    WorkhorseParameter.DEPTH_CELL_SIZE: r'WS (\d+) \-+ Depth Cell Size',
    WorkhorseParameter.TRANSMIT_LENGTH: r'WT (\d+) \-+ Transmit Length ',
    WorkhorseParameter.PING_WEIGHT: r'WU (\d) \-+ Ping Weighting ',
    WorkhorseParameter.AMBIGUITY_VELOCITY: r'WV (\d+) \-+ Mode 1 Ambiguity Vel ',
    # Placeholders: engineering parameters are driver-side; presumably these
    # patterns are never expected to match instrument output — TODO confirm.
    WorkhorseParameter.CLOCK_SYNCH_INTERVAL: r'BOGUS',
    WorkhorseParameter.GET_STATUS_INTERVAL: r'BOGUS',
    # VADCP parameters
    WorkhorseParameter.SYNC_PING_ENSEMBLE: r'SA = (\d+) \-+ Synch Before',
    WorkhorseParameter.RDS3_MODE_SEL: r'SM = (\d+) \-+ Mode Select',
    WorkhorseParameter.SLAVE_TIMEOUT: r'ST = (\d+) \-+ Slave Timeout',
    WorkhorseParameter.SYNCH_DELAY: r'SW = (\d+) \-+ Synch Delay',
}
# Converters applied to the regex match from parameter_regexes to turn the
# captured text into the parameter's Python value.
parameter_extractors = {
    WorkhorseParameter.SERIAL_DATA_OUT: lambda match: match.group(1),
    WorkhorseParameter.SERIAL_FLOW_CONTROL: lambda match: match.group(1),
    WorkhorseParameter.BANNER: lambda match: bool(int(match.group(1))),
    WorkhorseParameter.INSTRUMENT_ID: lambda match: int(match.group(1)),
    WorkhorseParameter.SLEEP_ENABLE: lambda match: int(match.group(1)),
    WorkhorseParameter.SAVE_NVRAM_TO_RECORDER: lambda match: bool(int(match.group(1))),
    WorkhorseParameter.POLLED_MODE: lambda match: bool(int(match.group(1))),
    WorkhorseParameter.XMIT_POWER: lambda match: int(match.group(1)),
    WorkhorseParameter.LATENCY_TRIGGER: lambda match: bool(int(match.group(1))),
    WorkhorseParameter.HEADING_ALIGNMENT: lambda match: int(match.group(1)),
    WorkhorseParameter.HEADING_BIAS: lambda match: int(match.group(1)),
    WorkhorseParameter.SPEED_OF_SOUND: lambda match: int(match.group(1)),
    WorkhorseParameter.TRANSDUCER_DEPTH: lambda match: int(match.group(1)),
    WorkhorseParameter.PITCH: lambda match: int(match.group(1)),
    WorkhorseParameter.ROLL: lambda match: int(match.group(1)),
    WorkhorseParameter.SALINITY: lambda match: int(match.group(1)),
    WorkhorseParameter.COORDINATE_TRANSFORMATION: lambda match: match.group(1),
    WorkhorseParameter.SENSOR_SOURCE: lambda match: match.group(1),
    WorkhorseParameter.DATA_STREAM_SELECTION: lambda match: int(match.group(1)),
    WorkhorseParameter.ENSEMBLE_PER_BURST: lambda match: int(match.group(1)),
    WorkhorseParameter.TIME_PER_ENSEMBLE: lambda match: match.group(1),
    WorkhorseParameter.TIME_OF_FIRST_PING: lambda match: match.group(1),
    WorkhorseParameter.TIME_PER_PING: lambda match: match.group(1),
    # Instrument clock is assumed to be UTC; the zone tag is appended here.
    WorkhorseParameter.TIME: lambda match: match.group(1) + " UTC",
    WorkhorseParameter.BUFFERED_OUTPUT_PERIOD: lambda match: match.group(1),
    WorkhorseParameter.FALSE_TARGET_THRESHOLD: lambda match: match.group(1),
    WorkhorseParameter.BANDWIDTH_CONTROL: lambda match: int(match.group(1)),
    WorkhorseParameter.CORRELATION_THRESHOLD: lambda match: int(match.group(1)),
    WorkhorseParameter.SERIAL_OUT_FW_SWITCHES: lambda match: match.group(1),
    WorkhorseParameter.ERROR_VELOCITY_THRESHOLD: lambda match: int(match.group(1)),
    WorkhorseParameter.BLANK_AFTER_TRANSMIT: lambda match: int(match.group(1)),
    WorkhorseParameter.CLIP_DATA_PAST_BOTTOM: lambda match: bool(int(match.group(1))),
    WorkhorseParameter.RECEIVER_GAIN_SELECT: lambda match: int(match.group(1)),
    WorkhorseParameter.NUMBER_OF_DEPTH_CELLS: lambda match: int(match.group(1)),
    WorkhorseParameter.PINGS_PER_ENSEMBLE: lambda match: int(match.group(1)),
    WorkhorseParameter.SAMPLE_AMBIENT_SOUND: lambda match: bool(int(match.group(1))),
    WorkhorseParameter.DEPTH_CELL_SIZE: lambda match: int(match.group(1)),
    WorkhorseParameter.TRANSMIT_LENGTH: lambda match: int(match.group(1)),
    WorkhorseParameter.PING_WEIGHT: lambda match: int(match.group(1)),
    WorkhorseParameter.AMBIGUITY_VELOCITY: lambda match: int(match.group(1)),
    # NOTE(review): str() is redundant here; match.group(1) is already a str.
    WorkhorseParameter.SYNC_PING_ENSEMBLE: lambda match: str(match.group(1)),
    WorkhorseParameter.RDS3_MODE_SEL: lambda match: int(match.group(1)),
    WorkhorseParameter.SLAVE_TIMEOUT: lambda match: int(match.group(1)),
    WorkhorseParameter.SYNCH_DELAY: lambda match: int(match.group(1)),
}
# Callables used to render a parameter value as text; presumably applied
# when composing instrument set commands — confirm against the protocol's
# set logic. Note the mix of str/int is intentional-looking (booleans are
# sent as ints) but is not verifiable from this file alone.
parameter_formatters = {
    WorkhorseParameter.SERIAL_DATA_OUT: str,
    WorkhorseParameter.SERIAL_FLOW_CONTROL: str,
    WorkhorseParameter.BANNER: int,
    WorkhorseParameter.INSTRUMENT_ID: str,
    WorkhorseParameter.SLEEP_ENABLE: int,
    WorkhorseParameter.SAVE_NVRAM_TO_RECORDER: int,
    WorkhorseParameter.POLLED_MODE: int,
    WorkhorseParameter.XMIT_POWER: str,
    WorkhorseParameter.LATENCY_TRIGGER: int,
    WorkhorseParameter.HEADING_ALIGNMENT: str,
    WorkhorseParameter.HEADING_BIAS: str,
    WorkhorseParameter.SPEED_OF_SOUND: str,
    WorkhorseParameter.TRANSDUCER_DEPTH: str,
    WorkhorseParameter.PITCH: str,
    WorkhorseParameter.ROLL: str,
    WorkhorseParameter.SALINITY: str,
    WorkhorseParameter.COORDINATE_TRANSFORMATION: str,
    WorkhorseParameter.SENSOR_SOURCE: str,
    WorkhorseParameter.DATA_STREAM_SELECTION: str,
    WorkhorseParameter.ENSEMBLE_PER_BURST: str,
    WorkhorseParameter.TIME_PER_ENSEMBLE: str,
    WorkhorseParameter.TIME_OF_FIRST_PING: str,
    WorkhorseParameter.TIME_PER_PING: str,
    WorkhorseParameter.TIME: str,
    WorkhorseParameter.BUFFERED_OUTPUT_PERIOD: str,
    WorkhorseParameter.FALSE_TARGET_THRESHOLD: str,
    WorkhorseParameter.BANDWIDTH_CONTROL: str,
    WorkhorseParameter.CORRELATION_THRESHOLD: str,
    WorkhorseParameter.SERIAL_OUT_FW_SWITCHES: str,
    WorkhorseParameter.ERROR_VELOCITY_THRESHOLD: str,
    WorkhorseParameter.BLANK_AFTER_TRANSMIT: str,
    WorkhorseParameter.CLIP_DATA_PAST_BOTTOM: int,
    WorkhorseParameter.RECEIVER_GAIN_SELECT: str,
    WorkhorseParameter.NUMBER_OF_DEPTH_CELLS: str,
    WorkhorseParameter.PINGS_PER_ENSEMBLE: str,
    WorkhorseParameter.SAMPLE_AMBIENT_SOUND: int,
    WorkhorseParameter.DEPTH_CELL_SIZE: str,
    WorkhorseParameter.TRANSMIT_LENGTH: str,
    WorkhorseParameter.PING_WEIGHT: str,
    WorkhorseParameter.AMBIGUITY_VELOCITY: str,
    WorkhorseParameter.CLOCK_SYNCH_INTERVAL: str,
    WorkhorseParameter.GET_STATUS_INTERVAL: str,
    WorkhorseParameter.SYNC_PING_ENSEMBLE: str,
    WorkhorseParameter.RDS3_MODE_SEL: str,
    WorkhorseParameter.SLAVE_TIMEOUT: str,
    WorkhorseParameter.SYNCH_DELAY: str,
}
# ParameterDictType declared for each parameter in the protocol parameter
# dictionary (drives validation/display of get/set values).
parameter_types = {
    WorkhorseParameter.SERIAL_DATA_OUT: ParameterDictType.STRING,
    WorkhorseParameter.SERIAL_FLOW_CONTROL: ParameterDictType.STRING,
    WorkhorseParameter.BANNER: ParameterDictType.BOOL,
    WorkhorseParameter.INSTRUMENT_ID: ParameterDictType.INT,
    WorkhorseParameter.SLEEP_ENABLE: ParameterDictType.INT,
    WorkhorseParameter.SAVE_NVRAM_TO_RECORDER: ParameterDictType.BOOL,
    WorkhorseParameter.POLLED_MODE: ParameterDictType.BOOL,
    WorkhorseParameter.XMIT_POWER: ParameterDictType.INT,
    WorkhorseParameter.LATENCY_TRIGGER: ParameterDictType.BOOL,
    WorkhorseParameter.HEADING_ALIGNMENT: ParameterDictType.INT,
    WorkhorseParameter.HEADING_BIAS: ParameterDictType.INT,
    WorkhorseParameter.SPEED_OF_SOUND: ParameterDictType.INT,
    WorkhorseParameter.TRANSDUCER_DEPTH: ParameterDictType.INT,
    WorkhorseParameter.PITCH: ParameterDictType.INT,
    WorkhorseParameter.ROLL: ParameterDictType.INT,
    WorkhorseParameter.SALINITY: ParameterDictType.INT,
    WorkhorseParameter.COORDINATE_TRANSFORMATION: ParameterDictType.STRING,
    WorkhorseParameter.SENSOR_SOURCE: ParameterDictType.STRING,
    WorkhorseParameter.DATA_STREAM_SELECTION: ParameterDictType.INT,
    WorkhorseParameter.ENSEMBLE_PER_BURST: ParameterDictType.INT,
    WorkhorseParameter.TIME_PER_ENSEMBLE: ParameterDictType.STRING,
    WorkhorseParameter.TIME_OF_FIRST_PING: ParameterDictType.STRING,
    WorkhorseParameter.TIME_PER_PING: ParameterDictType.STRING,
    WorkhorseParameter.TIME: ParameterDictType.STRING,
    WorkhorseParameter.BUFFERED_OUTPUT_PERIOD: ParameterDictType.STRING,
    WorkhorseParameter.FALSE_TARGET_THRESHOLD: ParameterDictType.STRING,
    WorkhorseParameter.BANDWIDTH_CONTROL: ParameterDictType.INT,
    WorkhorseParameter.CORRELATION_THRESHOLD: ParameterDictType.INT,
    WorkhorseParameter.SERIAL_OUT_FW_SWITCHES: ParameterDictType.STRING,
    WorkhorseParameter.ERROR_VELOCITY_THRESHOLD: ParameterDictType.INT,
    WorkhorseParameter.BLANK_AFTER_TRANSMIT: ParameterDictType.INT,
    WorkhorseParameter.CLIP_DATA_PAST_BOTTOM: ParameterDictType.BOOL,
    WorkhorseParameter.RECEIVER_GAIN_SELECT: ParameterDictType.INT,
    WorkhorseParameter.NUMBER_OF_DEPTH_CELLS: ParameterDictType.INT,
    WorkhorseParameter.PINGS_PER_ENSEMBLE: ParameterDictType.INT,
    WorkhorseParameter.SAMPLE_AMBIENT_SOUND: ParameterDictType.BOOL,
    WorkhorseParameter.DEPTH_CELL_SIZE: ParameterDictType.INT,
    WorkhorseParameter.TRANSMIT_LENGTH: ParameterDictType.INT,
    WorkhorseParameter.PING_WEIGHT: ParameterDictType.INT,
    WorkhorseParameter.AMBIGUITY_VELOCITY: ParameterDictType.INT,
    WorkhorseParameter.CLOCK_SYNCH_INTERVAL: ParameterDictType.STRING,
    WorkhorseParameter.GET_STATUS_INTERVAL: ParameterDictType.STRING,
    WorkhorseParameter.SYNC_PING_ENSEMBLE: ParameterDictType.STRING,
    WorkhorseParameter.RDS3_MODE_SEL: ParameterDictType.INT,
    WorkhorseParameter.SLAVE_TIMEOUT: ParameterDictType.INT,
    WorkhorseParameter.SYNCH_DELAY: ParameterDictType.INT,
}
# Human-readable display names for each parameter.
parameter_names = {
    WorkhorseParameter.SERIAL_DATA_OUT: "Serial Data Out",
    WorkhorseParameter.SERIAL_FLOW_CONTROL: "Serial Flow Control",
    WorkhorseParameter.BANNER: "Banner",
    WorkhorseParameter.INSTRUMENT_ID: "Instrument ID",
    WorkhorseParameter.SLEEP_ENABLE: "Sleep Enable",
    WorkhorseParameter.SAVE_NVRAM_TO_RECORDER: "Save NVRAM to Recorder",
    WorkhorseParameter.POLLED_MODE: "Polled Mode",
    WorkhorseParameter.XMIT_POWER: "Transmit Power",
    WorkhorseParameter.LATENCY_TRIGGER: "Latency trigger",
    WorkhorseParameter.HEADING_ALIGNMENT: "Heading Alignment",
    WorkhorseParameter.HEADING_BIAS: "Heading Bias",
    WorkhorseParameter.SPEED_OF_SOUND: 'Speed of Sound',
    WorkhorseParameter.TRANSDUCER_DEPTH: 'Transducer Depth',
    WorkhorseParameter.PITCH: 'Pitch',
    WorkhorseParameter.ROLL: 'Roll',
    WorkhorseParameter.SALINITY: 'Salinity',
    WorkhorseParameter.COORDINATE_TRANSFORMATION: 'Coordinate Transformation',
    WorkhorseParameter.SENSOR_SOURCE: 'Sensor Source',
    WorkhorseParameter.DATA_STREAM_SELECTION: 'Data Stream Selection',
    WorkhorseParameter.ENSEMBLE_PER_BURST: 'Ensemble per Burst',
    WorkhorseParameter.TIME_PER_ENSEMBLE: 'Time per Ensemble',
    WorkhorseParameter.TIME_OF_FIRST_PING: 'Time of First Ping',
    WorkhorseParameter.TIME_PER_PING: 'Time per Ping',
    WorkhorseParameter.TIME: 'Time',
    WorkhorseParameter.BUFFERED_OUTPUT_PERIOD: 'Buffered Output Period',
    WorkhorseParameter.FALSE_TARGET_THRESHOLD: 'False Target Threshold',
    WorkhorseParameter.BANDWIDTH_CONTROL: 'Bandwidth Control',
    WorkhorseParameter.CORRELATION_THRESHOLD: 'Correlation Threshold',
    WorkhorseParameter.SERIAL_OUT_FW_SWITCHES: 'Serial Out FW Switches',
    WorkhorseParameter.ERROR_VELOCITY_THRESHOLD: 'Error Velocity Threshold',
    WorkhorseParameter.BLANK_AFTER_TRANSMIT: 'Blank After Transmit',
    WorkhorseParameter.CLIP_DATA_PAST_BOTTOM: 'Clip Data Past Bottom',
    WorkhorseParameter.RECEIVER_GAIN_SELECT: 'Receiver Gain Select',
    WorkhorseParameter.NUMBER_OF_DEPTH_CELLS: 'Number of Depth Cells',
    WorkhorseParameter.PINGS_PER_ENSEMBLE: 'Pings Per Ensemble',
    WorkhorseParameter.SAMPLE_AMBIENT_SOUND: 'Sample Ambient Sound',
    WorkhorseParameter.DEPTH_CELL_SIZE: 'Depth Cell Size',
    WorkhorseParameter.TRANSMIT_LENGTH: 'Transmit Length',
    WorkhorseParameter.PING_WEIGHT: 'Ping Weight',
    WorkhorseParameter.AMBIGUITY_VELOCITY: 'Ambiguity Velocity',
    WorkhorseParameter.CLOCK_SYNCH_INTERVAL: 'Clock Sync Interval',
    WorkhorseParameter.GET_STATUS_INTERVAL: 'Acquire Status Interval',
    WorkhorseParameter.SYNC_PING_ENSEMBLE: 'Sync Ping Ensemble',
    WorkhorseParameter.RDS3_MODE_SEL: 'RDS3 Mode Selection',
    WorkhorseParameter.SLAVE_TIMEOUT: 'Slave Timeout',
    WorkhorseParameter.SYNCH_DELAY: 'Sync Delay'
}
# Human-readable descriptions for each parameter, shown to operators.
# Fix: "ADCP dta flow-control" -> "ADCP data flow-control" (typo in a
# user-facing description string).
parameter_descriptions = {
    WorkhorseParameter.SERIAL_DATA_OUT: 'Firmware switches for serial data types collected by the ADCP. See manual for usage.',
    WorkhorseParameter.SERIAL_FLOW_CONTROL: 'Sets various ADCP data flow-control parameters. See manual for firmware switches.',
    WorkhorseParameter.BANNER: 'Enable suppressing the banner: (true | false)',
    WorkhorseParameter.SLEEP_ENABLE: 'Enable sleeping between pings: (true | false)',
    WorkhorseParameter.SAVE_NVRAM_TO_RECORDER: 'Disable saving NVRAM to recorder at the end of a deployment: (true | false)',
    WorkhorseParameter.POLLED_MODE: 'Enable ADCP to be polled for data: (true | false)',
    WorkhorseParameter.XMIT_POWER: 'Allow transmit power to be set high or low: (0 - 255)',
    WorkhorseParameter.LATENCY_TRIGGER: 'Enable the low latency trigger input: (true | false)',
    WorkhorseParameter.TIME_PER_ENSEMBLE: 'Minimum interval between data collection cycles.',
    WorkhorseParameter.TIME_OF_FIRST_PING: 'Time ADCP wakes up to start data collection.',
    WorkhorseParameter.TIME_PER_PING: 'Minimum time between pings.',
    WorkhorseParameter.TIME: 'Time of internal real-time clock from last clock sync.',
    WorkhorseParameter.BUFFERED_OUTPUT_PERIOD: 'Minimum interval between buffered data outputs.',
    WorkhorseParameter.BANDWIDTH_CONTROL: 'Profiling mode 1 bandwidth: (0:Wide | 1:Narrow)',
    WorkhorseParameter.CLIP_DATA_PAST_BOTTOM: 'Enable flagging of velocity data as bad (true | false)',
    WorkhorseParameter.RECEIVER_GAIN_SELECT: 'Receiver gain: (0:reduce receiver gain by 40 dB | 1:normal receiver gain)',
    WorkhorseParameter.SAMPLE_AMBIENT_SOUND: 'Enable ambient sound samples (true | false)',
    WorkhorseParameter.PING_WEIGHT: 'Ensemble weighting method: (0:Box | 1:Triangle)',
    WorkhorseParameter.CLOCK_SYNCH_INTERVAL: 'Interval to schedule clock synchronization.',
    WorkhorseParameter.GET_STATUS_INTERVAL: 'Interval to schedule acquire status.',
    WorkhorseParameter.INSTRUMENT_ID: "Identification of the ADCP: (0 - 255)",
    WorkhorseParameter.HEADING_ALIGNMENT: "Correction for physical misalignment between Beam 3 and the heading reference: (-17999 to 18000)",
    WorkhorseParameter.HEADING_BIAS: "Correction for electrical/magnetic bias between heading value and heading reference: (-17999 to 18000)",
    WorkhorseParameter.SPEED_OF_SOUND: 'Speed of sound value used for ADCP data processing: (1400 - 1600)',
    WorkhorseParameter.TRANSDUCER_DEPTH: 'Measurement from sea level to transducer faces: (0 - 65535)',
    WorkhorseParameter.PITCH: 'Pitch/tilt 1 angle: (-6000 - 6000)',
    WorkhorseParameter.ROLL: 'Roll/tilt 2 angle: (-6000 - 6000)',
    WorkhorseParameter.SALINITY: 'Salinity of the water: (0 - 40)',
    WorkhorseParameter.COORDINATE_TRANSFORMATION: 'Firmware switches for velocity and percent-good data. See manual for usage.',
    WorkhorseParameter.SENSOR_SOURCE: 'Firmware switches to use data from manual settings or from an associated sensor. See manual for usage.',
    WorkhorseParameter.DATA_STREAM_SELECTION: 'Type of ensemble output data structure: (0 - 18)',
    WorkhorseParameter.ENSEMBLE_PER_BURST: 'Number of ensembles per burst: (0 - 65535)',
    WorkhorseParameter.FALSE_TARGET_THRESHOLD:
        'False target threshold and starting bin (000-255,000-255)',
    WorkhorseParameter.CORRELATION_THRESHOLD: 'Minimum threshold of water-track data that must meet correlation criteria: (0 - 255)',
    WorkhorseParameter.SERIAL_OUT_FW_SWITCHES: 'Firmware switches for data types collected by the ADCP. See manual for usage.',
    WorkhorseParameter.ERROR_VELOCITY_THRESHOLD: 'Maximum error velocity for good water-current data: (0 - 5000)',
    WorkhorseParameter.BLANK_AFTER_TRANSMIT: 'Moves location of first depth cell away from transducer head: (0 - 9999)',
    WorkhorseParameter.NUMBER_OF_DEPTH_CELLS: 'Number of depth cells over which the ADCP collects data: (1 - 255)',
    WorkhorseParameter.PINGS_PER_ENSEMBLE: 'Number of pings to average in each data ensemble: (0 - 16384)',
    WorkhorseParameter.DEPTH_CELL_SIZE: 'Volume of water for one measurement cell: (40 - 3200)',
    WorkhorseParameter.TRANSMIT_LENGTH: 'Transmit length different from the depth cell length: (0 - 3200)',
    # NOTE(review): this range text (2 - 700) disagrees with parameter_ranges,
    # which enforces (2, 480) — confirm against the WV command documentation.
    WorkhorseParameter.AMBIGUITY_VELOCITY: 'Radial ambiguity velocity: (2 - 700)',
    # VADCP Params
    WorkhorseParameter.SYNC_PING_ENSEMBLE: 'Firmware switches for synchronization pulse. See manual for usage.',
    WorkhorseParameter.RDS3_MODE_SEL: 'RDS3 Mode: (0:Off | 1:RDS3 master | 2:RDS3 slave | 3: NEMO)',
    WorkhorseParameter.SLAVE_TIMEOUT: 'Wait time to hear a synch pulse before slave proceeds: (0 - 10800)',
    WorkhorseParameter.SYNCH_DELAY: 'Wait time after sending a pulse: (0 - 65535)'
}
# Valid ranges per parameter: a tuple is an inclusive (min, max) numeric
# range; a dict maps each legal value to its display label.
parameter_ranges = {
    WorkhorseParameter.BANNER: {True: 'true', False: 'false'},
    # NOTE(review): int keys here (1/0) vs bool keys elsewhere — presumably
    # equivalent since bool is an int subclass, but confirm consumer behavior.
    WorkhorseParameter.SLEEP_ENABLE: {1: 'true', 0: 'false'},
    WorkhorseParameter.SAVE_NVRAM_TO_RECORDER: {True: 'true', False: 'false'},
    WorkhorseParameter.POLLED_MODE: {True: 'true', False: 'false'},
    WorkhorseParameter.XMIT_POWER: (0, 255),
    WorkhorseParameter.LATENCY_TRIGGER: {True: 'true', False: 'false'},
    WorkhorseParameter.BANDWIDTH_CONTROL: {0: 'Wide', 1: 'Narrow'},
    WorkhorseParameter.CLIP_DATA_PAST_BOTTOM: {True: 'true', False: 'false'},
    WorkhorseParameter.RECEIVER_GAIN_SELECT: {0: 'reduce receiver gain by 40 dB', 1: 'normal receiver gain'},
    WorkhorseParameter.SAMPLE_AMBIENT_SOUND: {True: 'true', False: 'false'},
    WorkhorseParameter.PING_WEIGHT: {0: 'Box', 1: 'Triangle'},
    WorkhorseParameter.INSTRUMENT_ID: (0, 255),
    WorkhorseParameter.HEADING_ALIGNMENT: (-17999, 18000),
    WorkhorseParameter.HEADING_BIAS: (-17999, 18000),
    WorkhorseParameter.SPEED_OF_SOUND: (1400, 1600),
    WorkhorseParameter.TRANSDUCER_DEPTH: (0, 65535),
    WorkhorseParameter.PITCH: (-6000, 6000),
    WorkhorseParameter.ROLL: (-6000, 6000),
    WorkhorseParameter.SALINITY: (0, 40),
    WorkhorseParameter.DATA_STREAM_SELECTION: (0, 18),
    WorkhorseParameter.ENSEMBLE_PER_BURST: (0, 65535),
    WorkhorseParameter.CORRELATION_THRESHOLD: (0, 255),
    WorkhorseParameter.ERROR_VELOCITY_THRESHOLD: (0, 5000),
    WorkhorseParameter.BLANK_AFTER_TRANSMIT: (0, 9999),
    WorkhorseParameter.NUMBER_OF_DEPTH_CELLS: (1, 255),
    WorkhorseParameter.PINGS_PER_ENSEMBLE: (0, 16384),
    WorkhorseParameter.DEPTH_CELL_SIZE: (40, 3200),
    WorkhorseParameter.TRANSMIT_LENGTH: (0, 3200),
    # NOTE(review): parameter_descriptions says (2 - 700) for this parameter;
    # confirm which bound is correct for the deployed instrument family.
    WorkhorseParameter.AMBIGUITY_VELOCITY: (2, 480),
    # VADCP Params
    WorkhorseParameter.RDS3_MODE_SEL: {0: 'Off', 1: 'RDS3 master', 2: 'RDS3 slave', 3: 'NEMO'},
    WorkhorseParameter.SLAVE_TIMEOUT: (0, 10800),
    WorkhorseParameter.SYNCH_DELAY: (0, 65535)
}
# Display units per parameter. Entries without a Units constant use a
# literal format string (e.g. 'hh:mm:ss').
# Consistency fix: use WorkhorseADCPUnits.PPTHOUSAND (== 'ppt', declared
# above) for SALINITY instead of duplicating the raw literal.
parameter_units = {
    WorkhorseParameter.HEADING_ALIGNMENT: Prefixes.CENTI + Units.DEGREE_PLANE_ANGLE,
    WorkhorseParameter.HEADING_BIAS: Prefixes.CENTI + Units.DEGREE_PLANE_ANGLE,
    WorkhorseParameter.SPEED_OF_SOUND: Units.METER + '/' + Units.SECOND,
    WorkhorseParameter.TRANSDUCER_DEPTH: Prefixes.DECI + Units.METER,
    WorkhorseParameter.PITCH: Prefixes.CENTI + Units.DEGREE_PLANE_ANGLE,
    WorkhorseParameter.ROLL: Prefixes.CENTI + Units.DEGREE_PLANE_ANGLE,
    WorkhorseParameter.SALINITY: WorkhorseADCPUnits.PPTHOUSAND,
    WorkhorseParameter.BLANK_AFTER_TRANSMIT: Units.CENTIMETER,
    WorkhorseParameter.DEPTH_CELL_SIZE: Units.CENTIMETER,
    WorkhorseParameter.TRANSMIT_LENGTH: Units.CENTIMETER,
    WorkhorseParameter.AMBIGUITY_VELOCITY: Units.CENTIMETER + '/' + Units.SECOND,
    WorkhorseParameter.TIME_PER_ENSEMBLE: 'hh:mm:ss:ff',
    WorkhorseParameter.TIME_OF_FIRST_PING: 'yy/mm/dd,hh:mm:ss',
    WorkhorseParameter.TIME_PER_PING: 'mm:ss:ff',
    WorkhorseParameter.BUFFERED_OUTPUT_PERIOD: 'hh:mm:ss',
    WorkhorseParameter.FALSE_TARGET_THRESHOLD: 'nnn,bbb',
    WorkhorseParameter.CORRELATION_THRESHOLD: Units.COUNTS,
    WorkhorseParameter.ERROR_VELOCITY_THRESHOLD: Units.MILLIMETER + '/' + Units.SECOND,
    WorkhorseParameter.CLOCK_SYNCH_INTERVAL: 'hh:mm:ss',
    WorkhorseParameter.GET_STATUS_INTERVAL: 'hh:mm:ss',
    WorkhorseParameter.TIME: 'yyyy/mm/dd,hh:mm:ss',
    # VADCP Params
    WorkhorseParameter.SYNCH_DELAY: '0.1 ' + Units.MILLISECOND,
    WorkhorseParameter.SLAVE_TIMEOUT: Units.SECOND,
}
# Whether each parameter is a startup parameter (the `startup_param` flag in
# _build_param_dict).  Parameters missing from this table default to False
# via parameter_startup.get(param, False).
parameter_startup = {
    WorkhorseParameter.SERIAL_DATA_OUT: True,
    WorkhorseParameter.SERIAL_FLOW_CONTROL: True,
    WorkhorseParameter.BANNER: True,
    WorkhorseParameter.INSTRUMENT_ID: True,
    WorkhorseParameter.SLEEP_ENABLE: True,
    WorkhorseParameter.SAVE_NVRAM_TO_RECORDER: True,
    WorkhorseParameter.POLLED_MODE: True,
    WorkhorseParameter.XMIT_POWER: True,
    WorkhorseParameter.LATENCY_TRIGGER: True,
    WorkhorseParameter.HEADING_ALIGNMENT: True,
    WorkhorseParameter.HEADING_BIAS: True,
    WorkhorseParameter.SPEED_OF_SOUND: True,
    WorkhorseParameter.TRANSDUCER_DEPTH: True,
    WorkhorseParameter.PITCH: True,
    WorkhorseParameter.ROLL: True,
    WorkhorseParameter.SALINITY: True,
    WorkhorseParameter.COORDINATE_TRANSFORMATION: True,
    WorkhorseParameter.SENSOR_SOURCE: True,
    WorkhorseParameter.DATA_STREAM_SELECTION: True,
    WorkhorseParameter.ENSEMBLE_PER_BURST: True,
    WorkhorseParameter.TIME_PER_ENSEMBLE: True,
    # TIME_OF_FIRST_PING is instrument-driven, never set at startup.
    WorkhorseParameter.TIME_OF_FIRST_PING: False,
    WorkhorseParameter.TIME_PER_PING: True,
    WorkhorseParameter.BUFFERED_OUTPUT_PERIOD: True,
    WorkhorseParameter.FALSE_TARGET_THRESHOLD: True,
    WorkhorseParameter.BANDWIDTH_CONTROL: True,
    WorkhorseParameter.CORRELATION_THRESHOLD: True,
    WorkhorseParameter.SERIAL_OUT_FW_SWITCHES: True,
    WorkhorseParameter.ERROR_VELOCITY_THRESHOLD: True,
    WorkhorseParameter.BLANK_AFTER_TRANSMIT: True,
    WorkhorseParameter.CLIP_DATA_PAST_BOTTOM: True,
    WorkhorseParameter.RECEIVER_GAIN_SELECT: True,
    WorkhorseParameter.NUMBER_OF_DEPTH_CELLS: True,
    WorkhorseParameter.PINGS_PER_ENSEMBLE: True,
    WorkhorseParameter.SAMPLE_AMBIENT_SOUND: True,
    WorkhorseParameter.DEPTH_CELL_SIZE: True,
    WorkhorseParameter.TRANSMIT_LENGTH: True,
    WorkhorseParameter.PING_WEIGHT: True,
    WorkhorseParameter.AMBIGUITY_VELOCITY: True,
    WorkhorseParameter.CLOCK_SYNCH_INTERVAL: True,
    WorkhorseParameter.GET_STATUS_INTERVAL: True,
    WorkhorseParameter.SYNC_PING_ENSEMBLE: True,
    WorkhorseParameter.RDS3_MODE_SEL: True,
    WorkhorseParameter.SLAVE_TIMEOUT: True,
    WorkhorseParameter.SYNCH_DELAY: True,
}
# Value of the `direct_access` flag passed to the parameter dictionary in
# _build_param_dict.  Parameters missing from this table default to False.
# Note: the scheduler-interval parameters are engineering-side only, hence
# False here despite being startup parameters.
parameter_direct = {
    WorkhorseParameter.SERIAL_DATA_OUT: True,
    WorkhorseParameter.SERIAL_FLOW_CONTROL: True,
    WorkhorseParameter.BANNER: True,
    WorkhorseParameter.INSTRUMENT_ID: True,
    WorkhorseParameter.SLEEP_ENABLE: True,
    WorkhorseParameter.SAVE_NVRAM_TO_RECORDER: True,
    WorkhorseParameter.POLLED_MODE: True,
    WorkhorseParameter.XMIT_POWER: True,
    WorkhorseParameter.LATENCY_TRIGGER: True,
    WorkhorseParameter.HEADING_ALIGNMENT: True,
    WorkhorseParameter.HEADING_BIAS: True,
    WorkhorseParameter.SPEED_OF_SOUND: True,
    WorkhorseParameter.TRANSDUCER_DEPTH: True,
    WorkhorseParameter.PITCH: True,
    WorkhorseParameter.ROLL: True,
    WorkhorseParameter.SALINITY: True,
    WorkhorseParameter.COORDINATE_TRANSFORMATION: True,
    WorkhorseParameter.SENSOR_SOURCE: True,
    WorkhorseParameter.DATA_STREAM_SELECTION: True,
    WorkhorseParameter.ENSEMBLE_PER_BURST: True,
    WorkhorseParameter.TIME_PER_ENSEMBLE: True,
    WorkhorseParameter.TIME_OF_FIRST_PING: False,
    WorkhorseParameter.TIME_PER_PING: True,
    WorkhorseParameter.BUFFERED_OUTPUT_PERIOD: True,
    WorkhorseParameter.FALSE_TARGET_THRESHOLD: True,
    WorkhorseParameter.BANDWIDTH_CONTROL: True,
    WorkhorseParameter.CORRELATION_THRESHOLD: True,
    WorkhorseParameter.SERIAL_OUT_FW_SWITCHES: True,
    WorkhorseParameter.ERROR_VELOCITY_THRESHOLD: True,
    WorkhorseParameter.BLANK_AFTER_TRANSMIT: True,
    WorkhorseParameter.CLIP_DATA_PAST_BOTTOM: True,
    WorkhorseParameter.RECEIVER_GAIN_SELECT: True,
    WorkhorseParameter.NUMBER_OF_DEPTH_CELLS: True,
    WorkhorseParameter.PINGS_PER_ENSEMBLE: True,
    WorkhorseParameter.SAMPLE_AMBIENT_SOUND: True,
    WorkhorseParameter.DEPTH_CELL_SIZE: True,
    WorkhorseParameter.TRANSMIT_LENGTH: True,
    WorkhorseParameter.PING_WEIGHT: True,
    WorkhorseParameter.AMBIGUITY_VELOCITY: True,
    WorkhorseParameter.CLOCK_SYNCH_INTERVAL: False,
    WorkhorseParameter.GET_STATUS_INTERVAL: False,
    WorkhorseParameter.SYNC_PING_ENSEMBLE: True,
    WorkhorseParameter.RDS3_MODE_SEL: True,
    WorkhorseParameter.SLAVE_TIMEOUT: True,
    WorkhorseParameter.SYNCH_DELAY: True,
}
# Visibility assigned to each parameter in the parameter dictionary.
# Parameters not listed here default to READ_WRITE (see _build_param_dict,
# which uses parameter_visibility.get(param, ParameterDictVisibility.READ_WRITE)).
parameter_visibility = {
    WorkhorseParameter.SERIAL_DATA_OUT: ParameterDictVisibility.IMMUTABLE,
    WorkhorseParameter.SERIAL_FLOW_CONTROL: ParameterDictVisibility.IMMUTABLE,
    WorkhorseParameter.BANNER: ParameterDictVisibility.IMMUTABLE,
    WorkhorseParameter.INSTRUMENT_ID: ParameterDictVisibility.IMMUTABLE,
    WorkhorseParameter.SLEEP_ENABLE: ParameterDictVisibility.IMMUTABLE,
    WorkhorseParameter.SAVE_NVRAM_TO_RECORDER: ParameterDictVisibility.IMMUTABLE,
    WorkhorseParameter.POLLED_MODE: ParameterDictVisibility.IMMUTABLE,
    WorkhorseParameter.XMIT_POWER: ParameterDictVisibility.READ_WRITE,
    WorkhorseParameter.LATENCY_TRIGGER: ParameterDictVisibility.IMMUTABLE,
    WorkhorseParameter.HEADING_ALIGNMENT: ParameterDictVisibility.IMMUTABLE,
    WorkhorseParameter.HEADING_BIAS: ParameterDictVisibility.IMMUTABLE,
    WorkhorseParameter.COORDINATE_TRANSFORMATION: ParameterDictVisibility.IMMUTABLE,
    WorkhorseParameter.DATA_STREAM_SELECTION: ParameterDictVisibility.IMMUTABLE,
    WorkhorseParameter.ENSEMBLE_PER_BURST: ParameterDictVisibility.IMMUTABLE,
    WorkhorseParameter.TIME_OF_FIRST_PING: ParameterDictVisibility.READ_ONLY,
    WorkhorseParameter.BUFFERED_OUTPUT_PERIOD: ParameterDictVisibility.IMMUTABLE,
    WorkhorseParameter.SERIAL_OUT_FW_SWITCHES: ParameterDictVisibility.IMMUTABLE,
    WorkhorseParameter.SAMPLE_AMBIENT_SOUND: ParameterDictVisibility.IMMUTABLE,
    WorkhorseParameter.SYNC_PING_ENSEMBLE: ParameterDictVisibility.IMMUTABLE,
    WorkhorseParameter.RDS3_MODE_SEL: ParameterDictVisibility.IMMUTABLE,
    WorkhorseParameter.SLAVE_TIMEOUT: ParameterDictVisibility.IMMUTABLE,
    WorkhorseParameter.SYNCH_DELAY: ParameterDictVisibility.IMMUTABLE,
}
# Default values supplied to the parameter dictionary (default_value in
# _build_param_dict).  Parameters not listed here get default_value=None.
# TIME and TIME_OF_FIRST_PING intentionally have no defaults.
parameter_defaults = {
    WorkhorseParameter.SERIAL_DATA_OUT: '000 000 000',
    WorkhorseParameter.SERIAL_FLOW_CONTROL: '11110',
    WorkhorseParameter.BANNER: False,
    WorkhorseParameter.INSTRUMENT_ID: 0,
    WorkhorseParameter.SLEEP_ENABLE: 0,
    WorkhorseParameter.SAVE_NVRAM_TO_RECORDER: True,
    WorkhorseParameter.POLLED_MODE: False,
    WorkhorseParameter.XMIT_POWER: 255,
    WorkhorseParameter.LATENCY_TRIGGER: False,
    WorkhorseParameter.HEADING_ALIGNMENT: 0,
    WorkhorseParameter.HEADING_BIAS: 0,
    WorkhorseParameter.SPEED_OF_SOUND: 1485,
    WorkhorseParameter.TRANSDUCER_DEPTH: 8000,
    WorkhorseParameter.PITCH: 0,
    WorkhorseParameter.ROLL: 0,
    WorkhorseParameter.SALINITY: 35,
    WorkhorseParameter.COORDINATE_TRANSFORMATION: '00111',
    WorkhorseParameter.SENSOR_SOURCE: '1111101',
    WorkhorseParameter.DATA_STREAM_SELECTION: 0,
    WorkhorseParameter.ENSEMBLE_PER_BURST: 0,
    WorkhorseParameter.TIME_PER_ENSEMBLE: '00:00:00.00',
    WorkhorseParameter.TIME_PER_PING: '00:01.00',
    WorkhorseParameter.BUFFERED_OUTPUT_PERIOD: '00:00:00',
    WorkhorseParameter.FALSE_TARGET_THRESHOLD: '050,001',
    WorkhorseParameter.BANDWIDTH_CONTROL: 0,
    WorkhorseParameter.CORRELATION_THRESHOLD: 64,
    WorkhorseParameter.SERIAL_OUT_FW_SWITCHES: '111100000',
    WorkhorseParameter.ERROR_VELOCITY_THRESHOLD: 2000,
    WorkhorseParameter.BLANK_AFTER_TRANSMIT: 704,
    WorkhorseParameter.CLIP_DATA_PAST_BOTTOM: False,
    WorkhorseParameter.RECEIVER_GAIN_SELECT: 1,
    WorkhorseParameter.NUMBER_OF_DEPTH_CELLS: 100,
    WorkhorseParameter.PINGS_PER_ENSEMBLE: 1,
    WorkhorseParameter.SAMPLE_AMBIENT_SOUND: False,
    WorkhorseParameter.DEPTH_CELL_SIZE: 800,
    WorkhorseParameter.TRANSMIT_LENGTH: 0,
    WorkhorseParameter.PING_WEIGHT: 0,
    WorkhorseParameter.AMBIGUITY_VELOCITY: 175,
    # All-zero intervals disable the corresponding scheduled jobs
    # (see start_scheduled_job).
    WorkhorseParameter.CLOCK_SYNCH_INTERVAL: '00:00:00',
    WorkhorseParameter.GET_STATUS_INTERVAL: '00:00:00',
    WorkhorseParameter.SYNC_PING_ENSEMBLE: '001',
    WorkhorseParameter.RDS3_MODE_SEL: 0,
    WorkhorseParameter.SLAVE_TIMEOUT: 0,
    WorkhorseParameter.SYNCH_DELAY: 0,
}
#
# Particle regexes
#
# PD0 ensembles are binary records starting with the two-byte header
# 0x7f 0x7f; the two captured bytes are interpreted as the little-endian
# record length and validated by checksum in WorkhorseProtocol.sieve_function.
ADCP_PD0_PARSED_REGEX = r'\x7f\x7f(..)'
ADCP_PD0_PARSED_REGEX_MATCHER = re.compile(ADCP_PD0_PARSED_REGEX, re.DOTALL)
# The remaining status outputs are ASCII blocks terminated by the '>' prompt.
ADCP_SYSTEM_CONFIGURATION_REGEX = r'Instrument S/N.*?>'
ADCP_SYSTEM_CONFIGURATION_REGEX_MATCHER = re.compile(ADCP_SYSTEM_CONFIGURATION_REGEX, re.DOTALL)
ADCP_COMPASS_CALIBRATION_REGEX = r'ACTIVE FLUXGATE CALIBRATION MATRICES in NVRAM.*?>'
ADCP_COMPASS_CALIBRATION_REGEX_MATCHER = re.compile(ADCP_COMPASS_CALIBRATION_REGEX, re.DOTALL)
ADCP_ANCILLARY_SYSTEM_DATA_REGEX = r'Ambient +Temperature.*?>'
ADCP_ANCILLARY_SYSTEM_DATA_REGEX_MATCHER = re.compile(ADCP_ANCILLARY_SYSTEM_DATA_REGEX, re.DOTALL)
ADCP_TRANSMIT_PATH_REGEX = r'IXMT +=.*?>'
ADCP_TRANSMIT_PATH_REGEX_MATCHER = re.compile(ADCP_TRANSMIT_PATH_REGEX, re.DOTALL)
# noinspection PyUnusedLocal
class WorkhorseProtocol(CommandResponseInstrumentProtocol):
"""
Specialization for this version of the workhorse driver
"""
    def __init__(self, prompts, newline, driver_event):
        """
        Protocol constructor.  Builds the protocol state machine, the
        parameter/command/driver dictionaries, the command build/response
        handlers, the chunker and the scheduler.
        @param prompts A BaseEnum class containing instrument prompts.
        @param newline The newline.
        @param driver_event Driver process event callback.
        """
        # Construct protocol superclass.
        CommandResponseInstrumentProtocol.__init__(self, prompts, newline, driver_event)
        # Build Workhorse protocol state machine.
        self._protocol_fsm = ThreadSafeFSM(WorkhorseProtocolState, WorkhorseProtocolEvent,
                                           DriverEvent.ENTER, DriverEvent.EXIT)
        # (state -> ((event, handler), ...)) table; registered in bulk below.
        handlers = {
            WorkhorseProtocolState.UNKNOWN: (
                (WorkhorseProtocolEvent.ENTER, self._handler_unknown_enter),
                (WorkhorseProtocolEvent.EXIT, self._handler_unknown_exit),
                (WorkhorseProtocolEvent.DISCOVER, self._handler_unknown_discover),
            ),
            WorkhorseProtocolState.COMMAND: (
                (WorkhorseProtocolEvent.ENTER, self._handler_command_enter),
                (WorkhorseProtocolEvent.EXIT, self._handler_command_exit),
                (WorkhorseProtocolEvent.GET, self._handler_get),
                (WorkhorseProtocolEvent.START_AUTOSAMPLE, self._handler_command_start_autosample),
                (WorkhorseProtocolEvent.SET, self._handler_command_set),
                (WorkhorseProtocolEvent.CLOCK_SYNC, self._handler_command_clock_sync),
                (WorkhorseProtocolEvent.SCHEDULED_CLOCK_SYNC, self._handler_command_clock_sync),
                (WorkhorseProtocolEvent.SCHEDULED_GET_STATUS, self._handler_command_acquire_status),
                (WorkhorseProtocolEvent.START_DIRECT, self._handler_command_start_direct),
                (WorkhorseProtocolEvent.RUN_TEST, self._handler_command_run_test_200),
                (WorkhorseProtocolEvent.ACQUIRE_STATUS, self._handler_command_acquire_status),
                (WorkhorseProtocolEvent.RECOVER_AUTOSAMPLE, self._handler_command_recover_autosample),
            ),
            WorkhorseProtocolState.AUTOSAMPLE: (
                (WorkhorseProtocolEvent.ENTER, self._handler_autosample_enter),
                (WorkhorseProtocolEvent.EXIT, self._handler_autosample_exit),
                (WorkhorseProtocolEvent.STOP_AUTOSAMPLE, self._handler_autosample_stop_autosample),
                (WorkhorseProtocolEvent.GET, self._handler_get),
                (WorkhorseProtocolEvent.SCHEDULED_CLOCK_SYNC, self._handler_autosample_clock_sync),
                (WorkhorseProtocolEvent.SCHEDULED_GET_STATUS, self._handler_autosample_acquire_status),
            ),
            WorkhorseProtocolState.DIRECT_ACCESS: (
                (WorkhorseProtocolEvent.ENTER, self._handler_direct_access_enter),
                (WorkhorseProtocolEvent.EXIT, self._handler_direct_access_exit),
                (WorkhorseProtocolEvent.EXECUTE_DIRECT, self._handler_direct_access_execute_direct),
                (WorkhorseProtocolEvent.STOP_DIRECT, self._handler_direct_access_stop_direct),
            )
        }
        for state in handlers:
            for event, handler in handlers[state]:
                self._protocol_fsm.add_handler(state, event, handler)
        # Build dictionaries for driver schema
        self._build_param_dict()
        self._build_command_dict()
        self._build_driver_dict()
        # Add build handlers for device commands.
        self._add_build_handler(WorkhorseInstrumentCmds.OUTPUT_CALIBRATION_DATA, self._build_simple_command)
        self._add_build_handler(WorkhorseInstrumentCmds.START_LOGGING, self._build_simple_command)
        self._add_build_handler(WorkhorseInstrumentCmds.GET_SYSTEM_CONFIGURATION, self._build_simple_command)
        self._add_build_handler(WorkhorseInstrumentCmds.RUN_TEST_200, self._build_simple_command)
        self._add_build_handler(WorkhorseInstrumentCmds.SET, self._build_set_command)
        self._add_build_handler(WorkhorseInstrumentCmds.GET, self._build_get_command)
        self._add_build_handler(WorkhorseInstrumentCmds.OUTPUT_PT2, self._build_simple_command)
        self._add_build_handler(WorkhorseInstrumentCmds.OUTPUT_PT4, self._build_simple_command)
        # Add response handlers
        self._add_response_handler(WorkhorseInstrumentCmds.OUTPUT_CALIBRATION_DATA, self._response_passthrough)
        self._add_response_handler(WorkhorseInstrumentCmds.GET_SYSTEM_CONFIGURATION, self._response_passthrough)
        self._add_response_handler(WorkhorseInstrumentCmds.RUN_TEST_200, self._response_passthrough)
        self._add_response_handler(WorkhorseInstrumentCmds.SET, self._parse_set_response)
        self._add_response_handler(WorkhorseInstrumentCmds.GET, self._parse_get_response)
        self._add_response_handler(WorkhorseInstrumentCmds.OUTPUT_PT2, self._response_passthrough)
        self._add_response_handler(WorkhorseInstrumentCmds.OUTPUT_PT4, self._response_passthrough)
        # Start state machine in UNKNOWN state.
        self._protocol_fsm.start(WorkhorseProtocolState.UNKNOWN)
        # commands sent to device to be
        # filtered in responses for telnet DA
        self._sent_cmds = []
        self.disable_autosample_recover = False
        self._chunker = StringChunker(self.sieve_function)
        self.initialize_scheduler()
        # dictionary to store last transmitted metadata particle values
        # so we do not send updates when nothing changed (see _changed)
        self._last_values = {}
def _build_param_dict(self):
for param in parameter_regexes:
self._param_dict.add(param,
parameter_regexes.get(param),
parameter_extractors.get(param),
parameter_formatters.get(param),
type=parameter_types.get(param),
display_name=parameter_names.get(param),
description=parameter_descriptions.get(param),
range=parameter_ranges.get(param),
startup_param=parameter_startup.get(param, False),
direct_access=parameter_direct.get(param, False),
visibility=parameter_visibility.get(param, ParameterDictVisibility.READ_WRITE),
default_value=parameter_defaults.get(param),
units=parameter_units.get(param))
self._param_dict.set_default(WorkhorseParameter.CLOCK_SYNCH_INTERVAL)
self._param_dict.set_default(WorkhorseParameter.GET_STATUS_INTERVAL)
@staticmethod
def sieve_function(raw_data):
"""
Chunker sieve method to help the chunker identify chunks.
@returns a list of chunks identified, if any.
The chunks are all the same type.
"""
sieve_matchers = [ADCP_SYSTEM_CONFIGURATION_REGEX_MATCHER,
ADCP_COMPASS_CALIBRATION_REGEX_MATCHER,
ADCP_ANCILLARY_SYSTEM_DATA_REGEX_MATCHER,
ADCP_TRANSMIT_PATH_REGEX_MATCHER,
ADCP_PD0_PARSED_REGEX_MATCHER]
return_list = []
for matcher in sieve_matchers:
if matcher == ADCP_PD0_PARSED_REGEX_MATCHER:
#
# Have to cope with variable length binary records...
# lets grab the length, then write a proper query to
# snag it.
#
matcher2 = re.compile(r'\x7f\x7f(..)', re.DOTALL)
for match in matcher2.finditer(raw_data):
length = struct.unpack('<H', match.group(1))[0]
end_index = match.start() + length
# read the checksum and compute our own
# if they match we have a PD0 record
if len(raw_data) > end_index + 1:
checksum = struct.unpack_from('<H', raw_data, end_index)[0]
calculated = sum(bytearray(raw_data[match.start():end_index])) & 0xffff
if checksum == calculated:
# include the checksum in our match... (2 bytes)
return_list.append((match.start(), end_index + 2))
else:
for match in matcher.finditer(raw_data):
return_list.append((match.start(), match.end()))
return return_list
def _build_command_dict(self):
"""
Build command dictionary
"""
self._cmd_dict.add(WorkhorseCapability.START_AUTOSAMPLE,
display_name="Start Autosample",
description="Place the instrument into autosample mode")
self._cmd_dict.add(WorkhorseCapability.STOP_AUTOSAMPLE,
display_name="Stop Autosample",
description="Exit autosample mode and return to command mode")
self._cmd_dict.add(WorkhorseCapability.CLOCK_SYNC,
display_name="Synchronize Clock")
self._cmd_dict.add(WorkhorseCapability.RUN_TEST,
display_name="Run Test 200")
self._cmd_dict.add(WorkhorseCapability.ACQUIRE_STATUS, timeout=30,
display_name="Acquire Status")
self._cmd_dict.add(WorkhorseCapability.DISCOVER,
display_name="Discover State")
self._cmd_dict.add(WorkhorseCapability.DISCOVER, display_name='Discover')
# #######################################################################
# Private helpers.
# #######################################################################
def _changed(self, particle):
stream = particle.get('stream_name')
values = particle.get('values')
last_values = self._last_values.get(stream)
if values == last_values:
return False
self._last_values[stream] = values
return True
    def _got_chunk(self, chunk, timestamp):
        """
        The base class got_data has gotten a chunk from the chunker.
        Pass it to extract_sample with the appropriate particle
        objects and REGEXes.  PD0 binary ensembles are handled specially:
        a science particle (beam or earth coordinates) is always published,
        while config/engineering particles are only published when their
        values changed since the last publication.
        @param chunk one chunk as identified by sieve_function
        @param timestamp port timestamp associated with the chunk
        @raises SampleException on an unrecognized PD0 coordinate transform
        """
        if ADCP_PD0_PARSED_REGEX_MATCHER.match(chunk):
            pd0 = AdcpPd0Record(chunk)
            # pick the science particle class based on the record's own
            # coordinate-transform setting
            transform = pd0.coord_transform.coord_transform
            if transform == Pd0CoordinateTransformType.BEAM:
                science = Pd0BeamParticle(pd0, port_timestamp=timestamp).generate()
            elif transform == Pd0CoordinateTransformType.EARTH:
                science = Pd0EarthParticle(pd0, port_timestamp=timestamp).generate()
            else:
                raise SampleException('Received unknown coordinate transform type: %s' % transform)
            # generate the metadata particles
            config = AdcpPd0ConfigParticle(pd0, port_timestamp=timestamp).generate()
            engineering = AdcpPd0EngineeringParticle(pd0, port_timestamp=timestamp).generate()
            out_particles = [science]
            # only publish config/engineering when their values changed
            for particle in [config, engineering]:
                if self._changed(particle):
                    out_particles.append(particle)
            for particle in out_particles:
                self._driver_event(DriverAsyncEvent.SAMPLE, particle)
            # receiving a PD0 sample while in COMMAND means the instrument
            # is actually streaming; raise the recovery event
            if self.get_current_state() == WorkhorseProtocolState.COMMAND:
                self._async_raise_fsm_event(WorkhorseProtocolEvent.RECOVER_AUTOSAMPLE)
            log.debug("_got_chunk - successful match for AdcpPd0ParsedDataParticle")
        elif self._extract_sample(AdcpCompassCalibrationDataParticle,
                                  ADCP_COMPASS_CALIBRATION_REGEX_MATCHER,
                                  chunk,
                                  timestamp):
            log.debug("_got_chunk - successful match for AdcpCompassCalibrationDataParticle")
        elif self._extract_sample(AdcpSystemConfigurationDataParticle,
                                  ADCP_SYSTEM_CONFIGURATION_REGEX_MATCHER,
                                  chunk,
                                  timestamp):
            log.debug("_got_chunk - successful match for AdcpSystemConfigurationDataParticle")
        elif self._extract_sample(AdcpAncillarySystemDataParticle,
                                  ADCP_ANCILLARY_SYSTEM_DATA_REGEX_MATCHER,
                                  chunk,
                                  timestamp):
            log.trace("_got_chunk - successful match for AdcpAncillarySystemDataParticle")
        elif self._extract_sample(AdcpTransmitPathParticle,
                                  ADCP_TRANSMIT_PATH_REGEX_MATCHER,
                                  chunk,
                                  timestamp):
            log.trace("_got_chunk - successful match for AdcpTransmitPathParticle")
    def _send_break_cmd(self, delay):
        """
        Send a BREAK to attempt to wake the device.
        @param delay break duration, passed through to the connection's
            send_break (units defined by the port agent - TODO confirm ms)
        """
        self._connection.send_break(delay)
def stop_scheduled_job(self, schedule_job):
"""
Remove the scheduled job
@param schedule_job scheduling job.
"""
if self._scheduler is not None:
try:
self._remove_scheduler(schedule_job)
except KeyError:
log.warn("_remove_scheduler could not find %s", schedule_job)
def start_scheduled_job(self, param, schedule_job, protocol_event):
"""
Add a scheduled job
"""
self.stop_scheduled_job(schedule_job)
val = self._param_dict.get(param)
try:
hours, minutes, seconds = [int(x) for x in val.split(':', 2)]
except ValueError:
raise InstrumentParameterException('Bad schedule string! Expected format HH:MM:SS, received %r' % val)
if sum((hours, minutes, seconds)) > 0:
config = {
DriverConfigKey.SCHEDULER: {
schedule_job: {
DriverSchedulerConfigKey.TRIGGER: {
DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.INTERVAL,
DriverSchedulerConfigKey.HOURS: int(hours),
DriverSchedulerConfigKey.MINUTES: int(minutes),
DriverSchedulerConfigKey.SECONDS: int(seconds)
}
}
}
}
self.set_init_params(config)
self._add_scheduler_event(schedule_job, protocol_event)
    def _build_driver_dict(self):
        """
        Populate the driver dictionary with options.
        Currently only advertises vendor-software compatibility.
        """
        self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, True)
def _filter_capabilities(self, events):
"""
Return a list of currently available capabilities.
"""
return [x for x in events if WorkhorseCapability.has(x)]
def _sync_clock(self, command, date_time_param, timeout=TIMEOUT, delay=1, time_format="%Y/%m/%d,%H:%M:%S"):
"""
Send the command to the instrument to synchronize the clock
@param command set command
@param date_time_param: date time parameter that we want to set
@param timeout: command timeout
@param delay: wakeup delay
@param time_format: time format string for set command
"""
str_val = get_timestamp_delayed(time_format)
self._do_cmd_direct(date_time_param + str_val + NEWLINE)
time.sleep(1)
self._get_response(TIMEOUT)
# #######################################################################
# Startup parameter handlers
########################################################################
    def _update_params(self, *args, **kwargs):
        """
        Query the instrument and refresh the parameter dictionary.

        Accepts an optional 'params' kwarg naming the parameters to query;
        when absent (or containing ALL) the full parameter list is used.
        Fires a CONFIG_CHANGE event when any value other than TT changed,
        or unconditionally when the 'force' kwarg is truthy.
        """
        # see if we passed in a list of parameters to query
        # if not, use the whole parameter list
        parameters = kwargs.get('params')
        if parameters is None or WorkhorseParameter.ALL in parameters:
            parameters = WorkhorseParameter.list()
        # filter out the engineering parameters and ALL
        parameters = [p for p in parameters if not WorkhorseEngineeringParameter.has(p) and p != WorkhorseParameter.ALL]
        # Get old param dict config.
        old_config = self._param_dict.get_config()
        if parameters:
            # Clear out the line buffer / prompt buffer
            # Send ALL get commands sequentially, then grab them all at once
            self._linebuf = ''
            self._promptbuf = ''
            command = ''.join(['%s?%s' % (p, NEWLINE) for p in parameters])
            self._do_cmd_direct(command)
            # a single regex spanning the first queried parameter through the
            # last one (up to the trailing prompt) captures the whole batch
            regex = re.compile(r'(%s.*?%s.*?>)' % (parameters[0], parameters[-1]), re.DOTALL)
            resp = self._get_response(response_regex=regex)
            self._param_dict.update_many(resp)
        new_config = self._param_dict.get_config()
        # Check whether anything changed.  Ignore TT (presumably the clock
        # parameter, which always differs - confirm against WorkhorseParameter)
        if not dict_equal(new_config, old_config, ['TT']) or kwargs.get('force'):
            self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
    def _set_params(self, *args, **kwargs):
        """
        Issue commands to the instrument to set various parameters.

        Instrument parameters whose value differs from the cached config are
        batched into a single write; engineering parameters (the two
        scheduler intervals) are handled locally by (re)starting the
        corresponding scheduled jobs.  Finishes by re-querying the changed
        parameters.
        @param args[0] dict of parameter -> value
        @throws InstrumentParameterException via _verify_not_readonly
        """
        self._verify_not_readonly(*args, **kwargs)
        params = args[0]
        changed = []
        old_config = self._param_dict.get_config()
        commands = []
        # build one set command per genuinely-changed instrument parameter
        for key, val in params.iteritems():
            if WorkhorseEngineeringParameter.has(key):
                continue
            if val != old_config.get(key):
                changed.append(key)
                commands.append(self._build_set_command(WorkhorseInstrumentCmds.SET, key, val))
        if commands:
            # we are going to send the concatenation of all our set commands
            self._linebuf = ''
            self._do_cmd_direct(''.join(commands))
            # we'll need to build a regular expression to retrieve all of the responses
            # including any possible errors
            if len(commands) == 1:
                regex = re.compile(r'(%s.*?)\r\n>' % commands[-1].strip(), re.DOTALL)
            else:
                regex = re.compile(r'(%s.*?%s.*?)\r\n>' % (commands[0].strip(), commands[-1].strip()), re.DOTALL)
            response = self._get_response(response_regex=regex)
            # raises on any ERR in the batched response
            self._parse_set_response(response[0], None)
        # Handle engineering parameters: a changed interval restarts the
        # matching scheduled job and forces a CONFIG_CHANGE event.
        force = False
        if WorkhorseParameter.CLOCK_SYNCH_INTERVAL in params:
            if (params[WorkhorseParameter.CLOCK_SYNCH_INTERVAL] != self._param_dict.get(
                    WorkhorseParameter.CLOCK_SYNCH_INTERVAL)):
                self._param_dict.set_value(WorkhorseParameter.CLOCK_SYNCH_INTERVAL,
                                           params[WorkhorseParameter.CLOCK_SYNCH_INTERVAL])
                self.start_scheduled_job(WorkhorseParameter.CLOCK_SYNCH_INTERVAL, WorkhorseScheduledJob.CLOCK_SYNC,
                                         WorkhorseProtocolEvent.SCHEDULED_CLOCK_SYNC)
                force = True
        if WorkhorseParameter.GET_STATUS_INTERVAL in params:
            if (params[WorkhorseParameter.GET_STATUS_INTERVAL] != self._param_dict.get(
                    WorkhorseParameter.GET_STATUS_INTERVAL)):
                self._param_dict.set_value(WorkhorseParameter.GET_STATUS_INTERVAL,
                                           params[WorkhorseParameter.GET_STATUS_INTERVAL])
                self.start_scheduled_job(WorkhorseParameter.GET_STATUS_INTERVAL,
                                         WorkhorseScheduledJob.GET_CONFIGURATION,
                                         WorkhorseProtocolEvent.SCHEDULED_GET_STATUS)
                force = True
        self._update_params(params=changed, force=force)
        return None
def _send_break(self, duration=1000):
"""
Send a BREAK to attempt to wake the device.
"""
self._linebuf = ''
self._promptbuf = ''
self._send_break_cmd(duration)
self._get_response(expected_prompt=WorkhorsePrompt.BREAK)
    def _send_wakeup(self):
        """
        Send a newline to attempt to wake the device.
        """
        # a bare newline is expected to elicit a prompt from an awake instrument
        self._connection.send(NEWLINE)
    def _start_logging(self, timeout=TIMEOUT):
        """
        Command the instrument to start logging (autosample).
        @param timeout: how long to wait for a prompt
        @throws: InstrumentProtocolException if failed to start logging
        """
        self._do_cmd_resp(WorkhorseInstrumentCmds.START_LOGGING, timeout=timeout)
    def _stop_logging(self):
        """
        Stop logging by sending a BREAK to the instrument.
        """
        self._send_break()
def _discover(self):
"""
Discover current state; can be COMMAND or AUTOSAMPLE or UNKNOWN.
@throws InstrumentTimeoutException if the device cannot be woken.
@throws InstrumentStateException if the device response does not correspond to
an expected state.
"""
next_state = None
try:
self._wakeup(3)
next_state = WorkhorseProtocolState.COMMAND
except InstrumentTimeoutException:
# TODO - should verify that a particle is being received (e.g. wait_for_particle for 1 sec, otherwise throw exception
next_state = WorkhorseProtocolState.AUTOSAMPLE
return next_state
def _run_test(self, *args, **kwargs):
kwargs['timeout'] = 30
kwargs['expected_prompt'] = WorkhorsePrompt.COMMAND
return self._do_cmd_resp(WorkhorseInstrumentCmds.RUN_TEST_200, *args, **kwargs)
    @contextmanager
    def _pause_logging(self):
        """
        Context manager: break out of autosample on entry and restart
        logging on exit, even if the body raises.
        """
        self._send_break()
        try:
            yield
        finally:
            self._start_logging()
########################################################################
# UNKNOWN handlers.
########################################################################
    def _handler_unknown_enter(self, *args, **kwargs):
        """
        Enter unknown state and notify the driver of the state change.
        """
        # Tell driver superclass to send a state change event.
        # Superclass will query the state.
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
    def _handler_unknown_exit(self, *args, **kwargs):
        """
        Exit unknown state.  No action required.
        """
def _handler_unknown_discover(self, *args, **kwargs):
"""
Discover current state; can be COMMAND or AUTOSAMPLE.
"""
next_state = self._discover()
result = []
return next_state, (next_state, result)
########################################################################
# COMMAND handlers.
########################################################################
def _handler_command_run_test_200(self, *args, **kwargs):
next_state = None
result = self._run_test(*args, **kwargs)
return next_state, (next_state, [result])
    def _handler_command_enter(self, *args, **kwargs):
        """
        Enter command state.  Applies startup parameters first if
        initialization is pending, then notifies the driver.
        @throws InstrumentTimeoutException if the device cannot be woken.
        @throws InstrumentProtocolException if the update commands are not recognized.
        """
        log.debug('_handler_command_enter: init_type: %r', self._init_type)
        if self._init_type != InitializationType.NONE:
            # refresh cached values, then push the init/startup config
            self._update_params()
            self._init_params()
        # Tell driver superclass to send a state change event.
        # Superclass will query the state.
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
    def _handler_command_exit(self, *args, **kwargs):
        """
        Exit command state.  No action required.
        """
def _handler_command_start_autosample(self, *args, **kwargs):
"""
Switch into autosample mode.
"""
next_state = WorkhorseProtocolState.AUTOSAMPLE
result = []
# Issue start command and switch to autosample if successful.
try:
self._sync_clock(WorkhorseInstrumentCmds.SET, WorkhorseParameter.TIME)
self._start_logging()
except InstrumentException:
self._stop_logging()
raise
return next_state, (next_state, result)
def _handler_command_set(self, *args, **kwargs):
"""
Perform a set command.
@param args[0] parameter : params dict.
@throws InstrumentParameterException if missing set parameters, if set parameters not ALL and
not a dict, or if parameter can't be properly formatted.
@throws InstrumentTimeoutException if device cannot be woken for set command.
@throws InstrumentProtocolException if set command could not be built or misunderstood.
"""
next_state = None
result = []
startup = False
try:
params = args[0]
except IndexError:
raise InstrumentParameterException('_handler_command_set Set command requires a parameter dict.')
try:
startup = args[1]
except IndexError:
pass
if not isinstance(params, dict):
raise InstrumentParameterException('Set parameters not a dict.')
self._set_params(params, startup)
return next_state, result
def _handler_command_clock_sync(self, *args, **kwargs):
"""
execute a clock sync on the leading edge of a second change
"""
next_state = None
result = []
timeout = kwargs.get('timeout', TIMEOUT)
self._sync_clock(WorkhorseInstrumentCmds.SET, WorkhorseParameter.TIME, timeout)
return next_state, (next_state, result)
def _do_acquire_status(self, *args, **kwargs):
self._do_cmd_resp(WorkhorseInstrumentCmds.GET_SYSTEM_CONFIGURATION, *args, **kwargs),
self._do_cmd_resp(WorkhorseInstrumentCmds.OUTPUT_CALIBRATION_DATA, *args, **kwargs),
self._do_cmd_resp(WorkhorseInstrumentCmds.OUTPUT_PT2, *args, **kwargs),
self._do_cmd_resp(WorkhorseInstrumentCmds.OUTPUT_PT4, *args, **kwargs)
def _handler_command_acquire_status(self, *args, **kwargs):
"""
execute a get status
@return next_state, next_state, result) if successful.
@throws InstrumentProtocolException from _do_cmd_resp.
"""
next_state = None
self._do_acquire_status(*args, **kwargs)
result = self.wait_for_particles([WorkhorseDataParticleType.ADCP_SYSTEM_CONFIGURATION,
WorkhorseDataParticleType.ADCP_COMPASS_CALIBRATION,
WorkhorseDataParticleType.ADCP_ANCILLARY_SYSTEM_DATA,
WorkhorseDataParticleType.ADCP_TRANSMIT_PATH])
return next_state, (next_state, result)
def _handler_command_start_direct(self, *args, **kwargs):
next_state = WorkhorseProtocolState.DIRECT_ACCESS
result = []
return next_state, (next_state, result)
def _handler_command_recover_autosample(self):
next_state = WorkhorseProtocolState.AUTOSAMPLE
result = []
return next_state, (next_state, result)
######################################################
# AUTOSAMPLE handlers
######################################################
    def _handler_autosample_enter(self, *args, **kwargs):
        """
        Enter autosample state.  If initialization is pending, logging is
        paused while startup parameters are applied, then resumed.
        """
        if self._init_type != InitializationType.NONE:
            # _pause_logging restarts logging on exit, even on error
            with self._pause_logging():
                self._update_params()
                self._init_params()
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
    def _handler_autosample_exit(self, *args, **kwargs):
        """
        Exit autosample state.  No action required.
        """
def _handler_autosample_stop_autosample(self, *args, **kwargs):
"""
Stop autosample and switch back to command mode.
incorrect prompt received.
"""
next_state = WorkhorseProtocolState.COMMAND
result = []
self._stop_logging()
return next_state, (next_state, result)
def _handler_autosample_clock_sync(self, *args, **kwargs):
"""
execute a clock sync on the leading edge of a second change from
autosample mode. For this command we have to move the instrument
into command mode, do the clock sync, then switch back. If an
exception is thrown we will try to get ourselves back into
streaming and then raise that exception.
@throws InstrumentTimeoutException if device cannot be woken for command.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
next_state = None
result = []
with self._pause_logging():
self._handler_command_clock_sync()
return next_state, (next_state, result)
def _handler_autosample_acquire_status(self, *args, **kwargs):
"""
execute a get status on the leading edge of a second change
@throws InstrumentProtocolException from _do_cmd_resp
"""
next_state = None
result = []
with self._pause_logging():
self._handler_command_acquire_status(*args, **kwargs)
return next_state, (next_state, result)
######################################################
# DIRECT_ACCESS handlers
######################################################
def _handler_direct_access_enter(self, *args, **kwargs):
"""
Enter direct access state.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
self._sent_cmds = []
    def _handler_direct_access_exit(self, *args, **kwargs):
        """
        Exit direct access state.
        """
        # break out of any logging the direct-access user may have started
        self._stop_logging()
def _handler_direct_access_execute_direct(self, data):
next_state = None
result = []
self._do_cmd_direct(data)
# add sent command to list for 'echo' filtering in callback
self._sent_cmds.append(data)
return next_state, (next_state, result)
def _handler_direct_access_stop_direct(self):
next_state = WorkhorseProtocolState.COMMAND
result = []
return next_state, (next_state, result)
########################################################################
# build handlers.
########################################################################
    def _build_set_command(self, cmd, param, val):
        """
        Build handler for set commands. The command is the parameter name
        immediately followed by the formatted value and a newline
        (e.g. param + value, with no '=' separator).
        String val constructed by param dict formatting function.
        @param param the parameter key to set.
        @param val the parameter value to set.
        @return The set command to be sent to the device.
        @throws InstrumentParameterException if the parameter is not valid or
            if the formatting function could not accept the value passed.
        """
        try:
            str_val = self._param_dict.format(param, val)
            set_cmd = '%s%s%s' % (param, str_val, NEWLINE)
        except KeyError:
            raise InstrumentParameterException('Unknown driver parameter. %s' % param)
        except ValueError:
            raise InstrumentParameterException('Cannot format parameter value: %s %s' % (param, val))
        return set_cmd
def _build_get_command(self, cmd, param, **kwargs):
"""
param? followed by newline.
@param cmd get command
@param param the parameter key to get.
@return The get command to be sent to the device.
"""
self.get_param = param
get_cmd = param + '?' + NEWLINE
return get_cmd
########################################################################
# response handlers.
########################################################################
def _response_passthrough(self, response, prompt):
"""
Return the output from the calibration request base 64 encoded
"""
return response
def _parse_get_response(self, response, prompt):
if 'ERR' in response:
raise InstrumentProtocolException(
'Protocol._parse_get_response : Get command not recognized: %s' % response)
self._param_dict.update(response)
return response
def _parse_set_response(self, response, prompt):
"""
Parse handler for set command.
@param response command response string.
@param prompt prompt following command response.
@throws InstrumentProtocolException if set command misunderstood.
"""
if 'ERR' in response:
raise InstrumentParameterException('Error setting parameter: %s' % response)
return response
def create_playback_protocol(callback):
    # Factory for playback tooling: builds a WorkhorseProtocol with no
    # prompts/newline specification, wired to the given event callback.
    return WorkhorseProtocol(None, None, callback)
| |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from oslo_config import cfg
import testtools
import webob
from jacket.api.compute.openstack.compute import evacuate as evacuate_v21
from jacket.api.compute.openstack.compute.legacy_v2.contrib import evacuate \
as evacuate_v2
from jacket.api.compute.openstack import extensions
from jacket.compute.cloud import api as compute_api
from jacket.compute.cloud import vm_states
from jacket.compute import exception
from jacket.compute import test
from jacket.tests.compute.unit.api.openstack import fakes
from jacket.tests.compute.unit import fake_instance
# Global oslo.config handle; password_length (asserted on by the tests
# below) is registered in compute.utils, so import it explicitly here.
CONF = cfg.CONF
CONF.import_opt('password_length', 'compute.utils')
def fake_compute_api(*args, **kwargs):
    """Stub for compute API methods; always reports success."""
    return True
def fake_compute_api_get(self, context, instance_id, want_objects=False,
                         **kwargs):
    """Stub for compute_api.API.get.

    Raises InstanceNotFound for the sentinel 'BAD_UUID'; otherwise returns
    an ACTIVE fake instance object hosted on 'host1'.
    """
    if instance_id == 'BAD_UUID':
        # BAD_UUID is something that does not exist
        raise exception.InstanceNotFound(instance_id=instance_id)
    return fake_instance.fake_instance_obj(context, id=1, uuid=instance_id,
                                           task_state=None, host='host1',
                                           vm_state=vm_states.ACTIVE)
def fake_service_get_by_compute_host(self, context, host):
    """Stub for HostAPI.service_get_by_compute_host.

    Raises ComputeHostNotFound for the sentinel 'bad-host'; otherwise
    returns a minimal service record for the host.
    """
    if host == 'bad-host':
        raise exception.ComputeHostNotFound(host=host)
    return {
        'host_name': host,
        'service': 'compute',
        'zone': 'compute',
    }
class EvacuateTestV21(test.NoDBTestCase):
    """Tests for the v2.1 evacuate server action.

    The compute API and host lookups are stubbed out so only the API
    layer (schema validation, policy, response shape) is exercised.
    """

    # Exception type raised for schema/validation failures in this version.
    validation_error = exception.ValidationError
    # Compute API methods replaced with the always-succeeding stub.
    _methods = ('resize', 'evacuate')

    def setUp(self):
        super(EvacuateTestV21, self).setUp()
        self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
        self.stubs.Set(compute_api.HostAPI, 'service_get_by_compute_host',
                       fake_service_get_by_compute_host)
        self.UUID = uuid.uuid4()
        for _method in self._methods:
            self.stubs.Set(compute_api.API, _method, fake_compute_api)
        self._set_up_controller()
        self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True)
        self.req = fakes.HTTPRequest.blank('')

    def _set_up_controller(self):
        # Hook overridden by subclasses to build version-specific controllers.
        self.controller = evacuate_v21.EvacuateController()
        self.controller_no_ext = self.controller

    def _get_evacuate_response(self, json_load, uuid=None):
        """Issue an evacuate request as admin and return the response body."""
        base_json_load = {'evacuate': json_load}
        response = self.controller._evacuate(self.admin_req, uuid or self.UUID,
                                             body=base_json_load)
        return response

    def _check_evacuate_failure(self, exception, body, uuid=None,
                                controller=None):
        """Assert that an evacuate request raises the given exception."""
        controller = controller or self.controller
        body = {'evacuate': body}
        self.assertRaises(exception,
                          controller._evacuate,
                          self.admin_req, uuid or self.UUID, body=body)

    def test_evacuate_with_valid_instance(self):
        admin_pass = 'MyNewPass'
        res = self._get_evacuate_response({'host': 'my-host',
                                           'onSharedStorage': 'False',
                                           'adminPass': admin_pass})
        self.assertEqual(admin_pass, res['adminPass'])

    def test_evacuate_with_invalid_instance(self):
        self._check_evacuate_failure(webob.exc.HTTPNotFound,
                                     {'host': 'my-host',
                                      'onSharedStorage': 'False',
                                      'adminPass': 'MyNewPass'},
                                     uuid='BAD_UUID')

    def test_evacuate_with_active_service(self):
        def fake_evacuate(*args, **kwargs):
            raise exception.ComputeServiceInUse("Service still in use")

        self.stubs.Set(compute_api.API, 'evacuate', fake_evacuate)
        self._check_evacuate_failure(webob.exc.HTTPBadRequest,
                                     {'host': 'my-host',
                                      'onSharedStorage': 'False',
                                      'adminPass': 'MyNewPass'})

    def test_evacuate_instance_with_no_target(self):
        admin_pass = 'MyNewPass'
        res = self._get_evacuate_response({'onSharedStorage': 'False',
                                           'adminPass': admin_pass})
        self.assertEqual(admin_pass, res['adminPass'])

    def test_evacuate_instance_without_on_shared_storage(self):
        self._check_evacuate_failure(self.validation_error,
                                     {'host': 'my-host',
                                      'adminPass': 'MyNewPass'})

    def test_evacuate_instance_with_invalid_characters_host(self):
        host = 'abc!#'
        self._check_evacuate_failure(self.validation_error,
                                     {'host': host,
                                      'onSharedStorage': 'False',
                                      'adminPass': 'MyNewPass'})

    def test_evacuate_instance_with_too_long_host(self):
        host = 'a' * 256
        self._check_evacuate_failure(self.validation_error,
                                     {'host': host,
                                      'onSharedStorage': 'False',
                                      'adminPass': 'MyNewPass'})

    def test_evacuate_instance_with_invalid_on_shared_storage(self):
        self._check_evacuate_failure(self.validation_error,
                                     {'host': 'my-host',
                                      'onSharedStorage': 'foo',
                                      'adminPass': 'MyNewPass'})

    def test_evacuate_instance_with_bad_target(self):
        self._check_evacuate_failure(webob.exc.HTTPNotFound,
                                     {'host': 'bad-host',
                                      'onSharedStorage': 'False',
                                      'adminPass': 'MyNewPass'})

    def test_evacuate_instance_with_target(self):
        admin_pass = 'MyNewPass'
        res = self._get_evacuate_response({'host': 'my-host',
                                           'onSharedStorage': 'False',
                                           'adminPass': admin_pass})
        self.assertEqual(admin_pass, res['adminPass'])

    @mock.patch('compute.objects.Instance.save')
    def test_evacuate_shared_and_pass(self, mock_save):
        # Supplying an admin password is invalid with shared storage.
        self._check_evacuate_failure(webob.exc.HTTPBadRequest,
                                     {'host': 'bad-host',
                                      'onSharedStorage': 'True',
                                      'adminPass': 'MyNewPass'})

    @mock.patch('compute.objects.Instance.save')
    def test_evacuate_not_shared_pass_generated(self, mock_save):
        res = self._get_evacuate_response({'host': 'my-host',
                                           'onSharedStorage': 'False'})
        self.assertEqual(CONF.password_length, len(res['adminPass']))

    @mock.patch('compute.objects.Instance.save')
    def test_evacuate_shared(self, mock_save):
        self._get_evacuate_response({'host': 'my-host',
                                     'onSharedStorage': 'True'})

    def test_not_admin(self):
        body = {'evacuate': {'host': 'my-host',
                             'onSharedStorage': 'False'}}
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller._evacuate,
                          self.req, self.UUID, body=body)

    def test_evacuate_to_same_host(self):
        self._check_evacuate_failure(webob.exc.HTTPBadRequest,
                                     {'host': 'host1',
                                      'onSharedStorage': 'False',
                                      'adminPass': 'MyNewPass'})

    def test_evacuate_instance_with_empty_host(self):
        self._check_evacuate_failure(self.validation_error,
                                     {'host': '',
                                      'onSharedStorage': 'False',
                                      'adminPass': 'MyNewPass'},
                                     controller=self.controller_no_ext)

    @mock.patch('compute.objects.Instance.save')
    def test_evacuate_instance_with_underscore_in_hostname(self, mock_save):
        admin_pass = 'MyNewPass'
        # NOTE: The hostname grammar in RFC952 does not allow for
        # underscores in hostnames. However, we should test that it
        # is supported because it sometimes occurs in real systems.
        res = self._get_evacuate_response({'host': 'underscore_hostname',
                                           'onSharedStorage': 'False',
                                           'adminPass': admin_pass})
        self.assertEqual(admin_pass, res['adminPass'])

    def test_evacuate_disable_password_return(self):
        self._test_evacuate_enable_instance_password_conf(False)

    def test_evacuate_enable_password_return(self):
        self._test_evacuate_enable_instance_password_conf(True)

    @mock.patch('compute.objects.Instance.save')
    def _test_evacuate_enable_instance_password_conf(self, enable_pass,
                                                     mock_save):
        # BUG FIX: mock.patch appends its mock AFTER the caller-supplied
        # positional arguments, so the explicit `enable_pass` parameter must
        # come before `mock_save`. With the previous (mock_save, enable_pass)
        # order, `enable_pass` was bound to the Mock object (always truthy)
        # and the disabled-password branch was never exercised.
        self.flags(enable_instance_password=enable_pass)
        res = self._get_evacuate_response({'host': 'underscore_hostname',
                                           'onSharedStorage': 'False'})
        if enable_pass:
            self.assertIn('adminPass', res)
        else:
            self.assertIsNone(res.get('adminPass'))
class EvacuateTestV2(EvacuateTestV21):
    """Run the evacuate tests against the legacy v2 contrib extension."""

    # Legacy v2 reports schema problems as HTTPBadRequest rather than
    # using the v2.1 ValidationError machinery.
    validation_error = webob.exc.HTTPBadRequest

    def _set_up_controller(self):
        # One controller with the find-host extension enabled, one without.
        ext_mgr = extensions.ExtensionManager()
        ext_mgr.extensions = {'os-extended-evacuate-find-host': 'fake'}
        self.controller = evacuate_v2.Controller(ext_mgr)
        ext_mgr_no_ext = extensions.ExtensionManager()
        ext_mgr_no_ext.extensions = {}
        self.controller_no_ext = evacuate_v2.Controller(ext_mgr_no_ext)

    def test_no_target_fails_if_extension_not_loaded(self):
        self._check_evacuate_failure(webob.exc.HTTPBadRequest,
                                     {'onSharedStorage': 'False',
                                      'adminPass': 'MyNewPass'},
                                     controller=self.controller_no_ext)

    # The overrides below disable parent tests that exercise v2.1-only
    # validation and response behaviour.
    def test_evacuate_instance_with_too_long_host(self):
        pass

    def test_evacuate_instance_with_invalid_characters_host(self):
        pass

    def test_evacuate_instance_with_invalid_on_shared_storage(self):
        pass

    def test_evacuate_disable_password_return(self):
        pass

    def test_evacuate_enable_password_return(self):
        pass

    def tet_evacuate_with_non_admin(self):
        # NOTE(review): "tet_" looks like a typo for "test_", which means this
        # case is never collected or run. Before renaming it, confirm the
        # legacy controller actually exposes a public `evacuate` method (the
        # handler used elsewhere in this class is `_evacuate`) -- enabling it
        # as-is may fail for the wrong reason.
        self.assertRaises(exception.AdminRequired, self.controller.evacuate,
                          self.req, fakes.FAKE_UUID, {})
class EvacuatePolicyEnforcementv21(test.NoDBTestCase):
    """Verify that the v2.1 evacuate API enforces its policy rule."""

    def setUp(self):
        super(EvacuatePolicyEnforcementv21, self).setUp()
        self.controller = evacuate_v21.EvacuateController()

    def test_evacuate_policy_failed(self):
        rule_name = "os_compute_api:os-evacuate"
        # Restrict the rule so the fake request's project cannot satisfy it.
        self.policy.set_rules({rule_name: "project:non_fake"})
        request = fakes.HTTPRequest.blank('')
        payload = {'evacuate': {'host': 'my-host',
                                'onSharedStorage': 'False',
                                'adminPass': 'MyNewPass'}}
        exc = self.assertRaises(
            exception.PolicyNotAuthorized,
            self.controller._evacuate, request, fakes.FAKE_UUID,
            body=payload)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())
class EvacuateTestV214(EvacuateTestV21):
    """Re-run the evacuate tests against microversion 2.14.

    From 2.14 the request may no longer contain onSharedStorage and the
    response no longer returns the admin password.
    """

    def setUp(self):
        super(EvacuateTestV214, self).setUp()
        self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True,
                                                 version='2.14')
        self.req = fakes.HTTPRequest.blank('', version='2.14')

    def _get_evacuate_response(self, json_load, uuid=None):
        # onSharedStorage is not a valid field from 2.14 on, so strip it
        # from request bodies inherited from the base class.
        json_load.pop('onSharedStorage', None)
        base_json_load = {'evacuate': json_load}
        response = self.controller._evacuate(self.admin_req, uuid or self.UUID,
                                             body=base_json_load)
        return response

    def _check_evacuate_failure(self, exception, body, uuid=None,
                                controller=None):
        controller = controller or self.controller
        body.pop('onSharedStorage', None)
        body = {'evacuate': body}
        self.assertRaises(exception,
                          controller._evacuate,
                          self.admin_req, uuid or self.UUID, body=body)

    @mock.patch.object(compute_api.API, 'evacuate')
    def test_evacuate_instance(self, mock_evacuate):
        self._get_evacuate_response({})
        # call_args_list[0][0] are the positional args of the stubbed
        # API.evacuate call; indices 3 and 4 carry on_shared_storage and
        # the admin password respectively.
        admin_pass = mock_evacuate.call_args_list[0][0][4]
        on_shared_storage = mock_evacuate.call_args_list[0][0][3]
        self.assertEqual(CONF.password_length, len(admin_pass))
        self.assertIsNone(on_shared_storage)

    def test_evacuate_with_valid_instance(self):
        admin_pass = 'MyNewPass'
        res = self._get_evacuate_response({'host': 'my-host',
                                           'adminPass': admin_pass})
        # From 2.14 the response body is empty: no adminPass is returned.
        self.assertIsNone(res)

    @testtools.skip('Password is not returned from Microversion 2.14')
    def test_evacuate_disable_password_return(self):
        pass

    @testtools.skip('Password is not returned from Microversion 2.14')
    def test_evacuate_enable_password_return(self):
        pass

    @testtools.skip('onSharedStorage was removed from Microversion 2.14')
    def test_evacuate_instance_with_invalid_on_shared_storage(self):
        pass

    @testtools.skip('onSharedStorage was removed from Microversion 2.14')
    @mock.patch('compute.objects.Instance.save')
    def test_evacuate_not_shared_pass_generated(self, mock_save):
        pass

    @mock.patch.object(compute_api.API, 'evacuate')
    @mock.patch('compute.objects.Instance.save')
    def test_evacuate_pass_generated(self, mock_save, mock_evacuate):
        self._get_evacuate_response({'host': 'my-host'})
        self.assertEqual(CONF.password_length,
                         len(mock_evacuate.call_args_list[0][0][4]))

    def test_evacuate_instance_without_on_shared_storage(self):
        self._get_evacuate_response({'host': 'my-host',
                                     'adminPass': 'MyNewPass'})

    def test_evacuate_instance_with_no_target(self):
        admin_pass = 'MyNewPass'
        with mock.patch.object(compute_api.API, 'evacuate') as mock_evacuate:
            self._get_evacuate_response({'adminPass': admin_pass})
            self.assertEqual(admin_pass,
                             mock_evacuate.call_args_list[0][0][4])

    def test_not_admin(self):
        body = {'evacuate': {'host': 'my-host'}}
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller._evacuate,
                          self.req, self.UUID, body=body)

    @testtools.skip('onSharedStorage was removed from Microversion 2.14')
    @mock.patch('compute.objects.Instance.save')
    def test_evacuate_shared_and_pass(self, mock_save):
        pass

    @testtools.skip('from Microversion 2.14 it is covered with '
                    'test_evacuate_pass_generated')
    def test_evacuate_instance_with_target(self):
        pass

    @mock.patch('compute.objects.Instance.save')
    def test_evacuate_instance_with_underscore_in_hostname(self, mock_save):
        # NOTE: The hostname grammar in RFC952 does not allow for
        # underscores in hostnames. However, we should test that it
        # is supported because it sometimes occurs in real systems.
        self._get_evacuate_response({'host': 'underscore_hostname'})
| |
"""Test functions for the sparse.linalg.interface module
"""
from __future__ import division, print_function, absolute_import
from functools import partial
from itertools import product
import operator
import pytest
from pytest import raises as assert_raises, warns
from numpy.testing import assert_, assert_equal
import numpy as np
import scipy.sparse as sparse
from scipy.sparse.linalg import interface
from scipy.sparse.sputils import matrix
# Only test the matmul operator (A @ B) when the interpreter supports it
# (operator.matmul exists from Python 3.5 on).
TEST_MATMUL = hasattr(operator, 'matmul')
class TestLinearOperator(object):
    """Exercise LinearOperator construction and the composite operators
    produced by scaling, addition, multiplication, powers and adjoints."""

    def setup_method(self):
        # A is 2x3, B is 3x2, C is 2x2.
        self.A = np.array([[1,2,3],
                           [4,5,6]])
        self.B = np.array([[1,2],
                           [3,4],
                           [5,6]])
        self.C = np.array([[1,2],
                           [3,4]])

    def test_matvec(self):
        def get_matvecs(A):
            # Two equivalent specifications of the same operator: a minimal
            # matvec/rmatvec pair, and one that also supplies matmat/rmatmat.
            return [{
                        'shape': A.shape,
                        'matvec': lambda x: np.dot(A, x).reshape(A.shape[0]),
                        'rmatvec': lambda x: np.dot(A.T.conj(),
                                                    x).reshape(A.shape[1])
                    },
                    {
                        'shape': A.shape,
                        'matvec': lambda x: np.dot(A, x),
                        'rmatvec': lambda x: np.dot(A.T.conj(), x),
                        'rmatmat': lambda x: np.dot(A.T.conj(), x),
                        'matmat': lambda x: np.dot(A, x)
                    }]

        for matvecs in get_matvecs(self.A):
            A = interface.LinearOperator(**matvecs)

            assert_(A.args == ())

            assert_equal(A.matvec(np.array([1,2,3])), [14,32])
            assert_equal(A.matvec(np.array([[1],[2],[3]])), [[14],[32]])
            assert_equal(A * np.array([1,2,3]), [14,32])
            assert_equal(A * np.array([[1],[2],[3]]), [[14],[32]])
            assert_equal(A.dot(np.array([1,2,3])), [14,32])
            assert_equal(A.dot(np.array([[1],[2],[3]])), [[14],[32]])

            assert_equal(A.matvec(matrix([[1],[2],[3]])), [[14],[32]])
            assert_equal(A * matrix([[1],[2],[3]]), [[14],[32]])
            assert_equal(A.dot(matrix([[1],[2],[3]])), [[14],[32]])

            # Scaled, summed and negated operators must apply consistently
            # via *, matvec/matmat, rmatvec and the adjoint .H.
            assert_equal((2*A)*[1,1,1], [12,30])
            assert_equal((2 * A).rmatvec([1, 1]), [10, 14, 18])
            assert_equal((2*A).H.matvec([1,1]), [10, 14, 18])
            assert_equal((2*A)*[[1],[1],[1]], [[12],[30]])
            assert_equal((2 * A).matmat([[1], [1], [1]]), [[12], [30]])
            assert_equal((A*2)*[1,1,1], [12,30])
            assert_equal((A*2)*[[1],[1],[1]], [[12],[30]])
            assert_equal((2j*A)*[1,1,1], [12j,30j])
            assert_equal((A+A)*[1,1,1], [12, 30])
            assert_equal((A + A).rmatvec([1, 1]), [10, 14, 18])
            assert_equal((A+A).H.matvec([1,1]), [10, 14, 18])
            assert_equal((A+A)*[[1],[1],[1]], [[12], [30]])
            assert_equal((A+A).matmat([[1],[1],[1]]), [[12], [30]])
            assert_equal((-A)*[1,1,1], [-6,-15])
            assert_equal((-A)*[[1],[1],[1]], [[-6],[-15]])
            assert_equal((A-A)*[1,1,1], [0,0])
            assert_equal((A - A) * [[1], [1], [1]], [[0], [0]])

            X = np.array([[1, 2], [3, 4]])
            # A_asarray = np.array([[1, 2, 3], [4, 5, 6]])
            assert_equal((2 * A).rmatmat(X), np.dot((2 * self.A).T, X))
            assert_equal((A * 2).rmatmat(X), np.dot((self.A * 2).T, X))
            assert_equal((2j * A).rmatmat(X),
                         np.dot((2j * self.A).T.conj(), X))
            assert_equal((A * 2j).rmatmat(X),
                         np.dot((self.A * 2j).T.conj(), X))
            assert_equal((A + A).rmatmat(X),
                         np.dot((self.A + self.A).T, X))
            assert_equal((A + 2j * A).rmatmat(X),
                         np.dot((self.A + 2j * self.A).T.conj(), X))
            assert_equal((-A).rmatmat(X), np.dot((-self.A).T, X))
            assert_equal((A - A).rmatmat(X),
                         np.dot((self.A - self.A).T, X))
            assert_equal((2j * A).rmatmat(2j * X),
                         np.dot((2j * self.A).T.conj(), 2j * X))

            # Composite operators record their operands in .args.
            z = A+A
            assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is A)
            z = 2*A
            assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] == 2)

            # Results are plain ndarrays even for matrix inputs.
            assert_(isinstance(A.matvec([1, 2, 3]), np.ndarray))
            assert_(isinstance(A.matvec(np.array([[1],[2],[3]])), np.ndarray))
            assert_(isinstance(A * np.array([1,2,3]), np.ndarray))
            assert_(isinstance(A * np.array([[1],[2],[3]]), np.ndarray))
            assert_(isinstance(A.dot(np.array([1,2,3])), np.ndarray))
            assert_(isinstance(A.dot(np.array([[1],[2],[3]])), np.ndarray))

            assert_(isinstance(A.matvec(matrix([[1],[2],[3]])), np.ndarray))
            assert_(isinstance(A * matrix([[1],[2],[3]]), np.ndarray))
            assert_(isinstance(A.dot(matrix([[1],[2],[3]])), np.ndarray))

            assert_(isinstance(2*A, interface._ScaledLinearOperator))
            assert_(isinstance(2j*A, interface._ScaledLinearOperator))
            assert_(isinstance(A+A, interface._SumLinearOperator))
            assert_(isinstance(-A, interface._ScaledLinearOperator))
            assert_(isinstance(A-A, interface._SumLinearOperator))

            assert_((2j*A).dtype == np.complex_)

            # Shape mismatches and invalid compositions must raise.
            assert_raises(ValueError, A.matvec, np.array([1,2]))
            assert_raises(ValueError, A.matvec, np.array([1,2,3,4]))
            assert_raises(ValueError, A.matvec, np.array([[1],[2]]))
            assert_raises(ValueError, A.matvec, np.array([[1],[2],[3],[4]]))

            assert_raises(ValueError, lambda: A*A)
            assert_raises(ValueError, lambda: A**2)

        for matvecsA, matvecsB in product(get_matvecs(self.A),
                                          get_matvecs(self.B)):
            A = interface.LinearOperator(**matvecsA)
            B = interface.LinearOperator(**matvecsB)
            # AtimesB = np.array([[22, 28], [49, 64]])
            AtimesB = self.A.dot(self.B)
            X = np.array([[1, 2], [3, 4]])

            assert_equal((A * B).rmatmat(X), np.dot((AtimesB).T, X))
            assert_equal((2j * A * B).rmatmat(X),
                         np.dot((2j * AtimesB).T.conj(), X))

            assert_equal((A*B)*[1,1], [50,113])
            assert_equal((A*B)*[[1],[1]], [[50],[113]])
            assert_equal((A*B).matmat([[1],[1]]), [[50],[113]])

            assert_equal((A * B).rmatvec([1, 1]), [71, 92])
            assert_equal((A * B).H.matvec([1, 1]), [71, 92])

            assert_(isinstance(A*B, interface._ProductLinearOperator))

            assert_raises(ValueError, lambda: A+B)
            assert_raises(ValueError, lambda: A**2)

            z = A*B
            assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is B)

        for matvecsC in get_matvecs(self.C):
            C = interface.LinearOperator(**matvecsC)
            X = np.array([[1, 2], [3, 4]])

            assert_equal(C.rmatmat(X), np.dot((self.C).T, X))
            assert_equal((C**2).rmatmat(X),
                         np.dot((np.dot(self.C, self.C)).T, X))

            assert_equal((C**2)*[1,1], [17,37])
            assert_equal((C**2).rmatvec([1, 1]), [22, 32])
            assert_equal((C**2).H.matvec([1, 1]), [22, 32])
            assert_equal((C**2).matmat([[1],[1]]), [[17],[37]])

            assert_(isinstance(C**2, interface._PowerLinearOperator))

    def test_matmul(self):
        if not TEST_MATMUL:
            pytest.skip("matmul is only tested in Python 3.5+")

        D = {'shape': self.A.shape,
             'matvec': lambda x: np.dot(self.A, x).reshape(self.A.shape[0]),
             'rmatvec': lambda x: np.dot(self.A.T.conj(),
                                         x).reshape(self.A.shape[1]),
             'rmatmat': lambda x: np.dot(self.A.T.conj(), x),
             'matmat': lambda x: np.dot(self.A, x)}
        A = interface.LinearOperator(**D)
        B = np.array([[1, 2, 3],
                      [4, 5, 6],
                      [7, 8, 9]])
        b = B[0]

        assert_equal(operator.matmul(A, b), A * b)
        assert_equal(operator.matmul(A, B), A * B)
        # @ only accepts operator/array operands, never scalars.
        assert_raises(ValueError, operator.matmul, A, 2)
        assert_raises(ValueError, operator.matmul, 2, A)
class TestAsLinearOperator(object):
    """Check aslinearoperator() against matrix, ndarray, sparse and
    custom LinearOperator-subclass inputs."""

    def setup_method(self):
        self.cases = []

        def make_cases(original, dtype):
            # Each case pairs an input accepted by aslinearoperator with the
            # dense array it should behave like.
            cases = []

            cases.append((matrix(original, dtype=dtype), original))
            cases.append((np.array(original, dtype=dtype), original))
            cases.append((sparse.csr_matrix(original, dtype=dtype), original))

            # Test default implementations of _adjoint and _rmatvec, which
            # refer to each other.
            def mv(x, dtype):
                y = original.dot(x)
                if len(x.shape) == 2:
                    y = y.reshape(-1, 1)
                return y

            def rmv(x, dtype):
                return original.T.conj().dot(x)

            class BaseMatlike(interface.LinearOperator):
                args = ()

                def __init__(self, dtype):
                    self.dtype = np.dtype(dtype)
                    self.shape = original.shape

                def _matvec(self, x):
                    return mv(x, self.dtype)

            class HasRmatvec(BaseMatlike):
                args = ()

                def _rmatvec(self,x):
                    return rmv(x, self.dtype)

            class HasAdjoint(BaseMatlike):
                args = ()

                def _adjoint(self):
                    shape = self.shape[1], self.shape[0]
                    matvec = partial(rmv, dtype=self.dtype)
                    rmatvec = partial(mv, dtype=self.dtype)
                    return interface.LinearOperator(matvec=matvec,
                                                    rmatvec=rmatvec,
                                                    dtype=self.dtype,
                                                    shape=shape)

            class HasRmatmat(HasRmatvec):
                def _matmat(self, x):
                    return original.dot(x)

                def _rmatmat(self, x):
                    return original.T.conj().dot(x)

            cases.append((HasRmatvec(dtype), original))
            cases.append((HasAdjoint(dtype), original))
            cases.append((HasRmatmat(dtype), original))
            return cases

        original = np.array([[1,2,3], [4,5,6]])
        self.cases += make_cases(original, np.int32)
        self.cases += make_cases(original, np.float32)
        self.cases += make_cases(original, np.float64)
        # Transposed/adjoint variants of the float cases.
        self.cases += [(interface.aslinearoperator(M).T, A.T)
                       for M, A in make_cases(original.T, np.float64)]
        self.cases += [(interface.aslinearoperator(M).H, A.T.conj())
                       for M, A in make_cases(original.T, np.float64)]

        original = np.array([[1, 2j, 3j], [4j, 5j, 6]])
        self.cases += make_cases(original, np.complex_)
        self.cases += [(interface.aslinearoperator(M).T, A.T)
                       for M, A in make_cases(original.T, np.complex_)]
        self.cases += [(interface.aslinearoperator(M).H, A.T.conj())
                       for M, A in make_cases(original.T, np.complex_)]

    def test_basic(self):
        for M, A_array in self.cases:
            A = interface.aslinearoperator(M)
            M,N = A.shape

            xs = [np.array([1, 2, 3]),
                  np.array([[1], [2], [3]])]
            ys = [np.array([1, 2]), np.array([[1], [2]])]

            if A.dtype == np.complex_:
                xs += [np.array([1, 2j, 3j]),
                       np.array([[1], [2j], [3j]])]
                ys += [np.array([1, 2j]), np.array([[1], [2j]])]

            x2 = np.array([[1, 4], [2, 5], [3, 6]])

            for x in xs:
                assert_equal(A.matvec(x), A_array.dot(x))
                assert_equal(A * x, A_array.dot(x))

            assert_equal(A.matmat(x2), A_array.dot(x2))
            assert_equal(A * x2, A_array.dot(x2))

            for y in ys:
                assert_equal(A.rmatvec(y), A_array.T.conj().dot(y))
                assert_equal(A.T.matvec(y), A_array.T.dot(y))
                assert_equal(A.H.matvec(y), A_array.T.conj().dot(y))

            for y in ys:
                # rmatmat only applies to 2-d right-hand sides.
                if y.ndim < 2:
                    continue
                assert_equal(A.rmatmat(y), A_array.T.conj().dot(y))
                assert_equal(A.T.matmat(y), A_array.T.dot(y))
                assert_equal(A.H.matmat(y), A_array.T.conj().dot(y))

            if hasattr(M,'dtype'):
                assert_equal(A.dtype, M.dtype)

            assert_(hasattr(A, 'args'))

    def test_dot(self):
        for M, A_array in self.cases:
            A = interface.aslinearoperator(M)
            M,N = A.shape

            x0 = np.array([1, 2, 3])
            x1 = np.array([[1], [2], [3]])
            x2 = np.array([[1, 4], [2, 5], [3, 6]])

            assert_equal(A.dot(x0), A_array.dot(x0))
            assert_equal(A.dot(x1), A_array.dot(x1))
            assert_equal(A.dot(x2), A_array.dot(x2))
def test_repr():
    """An operator built without a dtype must not advertise one in repr()."""
    op = interface.LinearOperator(shape=(1, 1), matvec=lambda x: 1)
    text = repr(op)
    assert_('unspecified dtype' not in text, text)
def test_identity():
    """IdentityOperator acts as the identity and validates input length."""
    ident = interface.IdentityOperator((3, 3))
    assert_equal(ident * [1, 2, 3], [1, 2, 3])
    grid = np.arange(9).reshape(3, 3)
    assert_equal(ident.dot(grid).ravel(), np.arange(9))
    assert_raises(ValueError, ident.matvec, [1, 2, 3, 4])
def test_attributes():
    """Every composite operator must expose dtype, shape and _matvec."""
    A = interface.aslinearoperator(np.arange(16).reshape(4, 4))

    def always_four_ones(x):
        # Maps R^3 -> R^4 regardless of the input values.
        x = np.asarray(x)
        assert_(x.shape == (3,) or x.shape == (3, 1))
        return np.ones(4)

    B = interface.LinearOperator(shape=(4, 3), matvec=always_four_ones)

    for composite in [A, B, A * B, A.H, A + A, B + B, A**4]:
        assert_(hasattr(composite, "dtype"))
        assert_(hasattr(composite, "shape"))
        assert_(hasattr(composite, "_matvec"))
def matvec(x):
    """Module-level matvec stub; test_pickle needs a pickleable (non-local) function."""
    return np.zeros(3)
def test_pickle():
    """LinearOperator instances must survive a pickle round-trip at every protocol."""
    import pickle
    for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
        original = interface.LinearOperator((3, 3), matvec)
        restored = pickle.loads(pickle.dumps(original, protocol=protocol))
        for attr in original.__dict__:
            assert_equal(getattr(original, attr), getattr(restored, attr))
def test_inheritance():
    """Subclassing requires _matvec or _matmat; minimal subclasses must work."""

    class Empty(interface.LinearOperator):
        pass

    # A do-nothing subclass warns, then fails to instantiate.
    with warns(RuntimeWarning, match="should implement at least"):
        assert_raises(TypeError, Empty)

    class Identity(interface.LinearOperator):
        def __init__(self, n):
            super(Identity, self).__init__(dtype=None, shape=(n, n))

        def _matvec(self, x):
            return x

    ident = Identity(3)
    assert_equal(ident.matvec([1, 2, 3]), [1, 2, 3])
    # rmatvec cannot be derived from _matvec alone.
    assert_raises(NotImplementedError, ident.rmatvec, [4, 5, 6])

    class MatmatOnly(interface.LinearOperator):
        def __init__(self, A):
            super(MatmatOnly, self).__init__(A.dtype, A.shape)
            self.A = A

        def _matmat(self, x):
            return self.A.dot(x)

    mat_only = MatmatOnly(np.random.randn(5, 3))
    assert_equal(mat_only.matvec(np.random.randn(3)).shape, (5,))
def test_dtypes_of_operator_sum():
    # Regression test for gh-6078: summing operators must keep the dtype.
    mat_complex = np.random.rand(2, 2) + 1j * np.random.rand(2, 2)
    mat_real = np.random.rand(2, 2)

    complex_operator = interface.aslinearoperator(mat_complex)
    real_operator = interface.aslinearoperator(mat_real)

    assert_equal((real_operator + real_operator).dtype, np.float64)
    assert_equal((complex_operator + complex_operator).dtype, np.complex128)
def test_no_double_init():
    """Constructing without a dtype probes matvec exactly once (dtype detection)."""
    call_count = [0]

    def matvec(v):
        call_count[0] += 1
        return v

    interface.LinearOperator((2, 2), matvec=matvec)
    assert_equal(call_count[0], 1)
def test_adjoint_conjugate():
    """The adjoint (.H) of a scaled operator must conjugate the scalar factor."""
    mat = np.array([[1j]])
    scaled_op = 1j * interface.aslinearoperator(mat)
    scaled_mat = 1j * mat

    vec = np.array([1])
    assert_equal(scaled_op.dot(vec), scaled_mat.dot(vec))
    assert_equal(scaled_op.H.dot(vec), scaled_mat.T.conj().dot(vec))
def test_transpose_noconjugate():
    """The transpose (.T) of a scaled operator must NOT conjugate the scalar."""
    mat = np.array([[1j]])
    scaled_op = 1j * interface.aslinearoperator(mat)
    scaled_mat = 1j * mat

    vec = np.array([1])
    assert_equal(scaled_op.dot(vec), scaled_mat.dot(vec))
    assert_equal(scaled_op.T.dot(vec), scaled_mat.T.dot(vec))
| |
"""Deal with representations of Markov Models.
"""
# standard modules
import copy
import math
import random
# biopython
from Bio.Seq import MutableSeq
class MarkovModelBuilder:
"""Interface to build up a Markov Model.
This class is designed to try to separate the task of specifying the
Markov Model from the actual model itself. This is in hopes of making
the actual Markov Model classes smaller.
So, this builder class should be used to create Markov models instead
of trying to initiate a Markov Model directly.
"""
# the default pseudo counts to use
DEFAULT_PSEUDO = 1
def __init__(self, state_alphabet, emission_alphabet):
"""Initialize a builder to create Markov Models.
Arguments:
o state_alphabet -- An alphabet containing all of the letters that
can appear in the states
o emission_alphabet -- An alphabet containing all of the letters for
states that can be emitted by the HMM.
"""
self._state_alphabet = state_alphabet
self._emission_alphabet = emission_alphabet
# the probabilities for transitions and emissions
# by default we have no transitions and all possible emissions
self.transition_prob = {}
self.emission_prob = self._all_blank(state_alphabet,
emission_alphabet)
# the default pseudocounts for transition and emission counting
self.transition_pseudo = {}
self.emission_pseudo = self._all_pseudo(state_alphabet,
emission_alphabet)
def _all_blank(self, first_alphabet, second_alphabet):
"""Return a dictionary with all counts set to zero.
This uses the letters in the first and second alphabet to create
a dictionary with keys of two tuples organized as
(letter of first alphabet, letter of second alphabet). The values
are all set to 0.
"""
all_blank = {}
for first_state in first_alphabet.letters:
for second_state in second_alphabet.letters:
all_blank[(first_state, second_state)] = 0
return all_blank
def _all_pseudo(self, first_alphabet, second_alphabet):
"""Return a dictionary with all counts set to a default value.
This takes the letters in first alphabet and second alphabet and
creates a dictionary with keys of two tuples organized as:
(letter of first alphabet, letter of second alphabet). The values
are all set to the value of the class attribute DEFAULT_PSEUDO.
"""
all_counts = {}
for first_state in first_alphabet.letters:
for second_state in second_alphabet.letters:
all_counts[(first_state, second_state)] = self.DEFAULT_PSEUDO
return all_counts
def get_markov_model(self):
"""Return the markov model corresponding with the current parameters.
Each markov model returned by a call to this function is unique
(ie. they don't influence each other).
"""
transition_prob = copy.deepcopy(self.transition_prob)
emission_prob = copy.deepcopy(self.emission_prob)
transition_pseudo = copy.deepcopy(self.transition_pseudo)
emission_pseudo = copy.deepcopy(self.emission_pseudo)
return HiddenMarkovModel(transition_prob, emission_prob,
transition_pseudo, emission_pseudo)
def set_equal_probabilities(self):
"""Reset all probabilities to be an average value.
This resets the values of all allowed transitions and all allowed
emissions to be equal to 1 divided by the number of possible elements.
This is useful if you just want to initialize a Markov Model to
starting values (ie. if you have no prior notions of what the
probabilities should be -- or if you are just feeling too lazy
to calculate them :-).
Warning 1 -- this will reset all currently set probabilities.
Warning 2 -- This just sets all probabilities for transitions and
emissions to total up to 1, so it doesn't ensure that the sum of
each set of transitions adds up to 1.
"""
# first set the transitions
new_trans_prob = float(1) / float(len(self.transition_prob.keys()))
for key in self.transition_prob.keys():
self.transition_prob[key] = new_trans_prob
# now set the emissions
new_emission_prob = float(1) / float(len(self.emission_prob.keys()))
for key in self.emission_prob.keys():
self.emission_prob[key] = new_emission_prob
def set_random_probabilities(self):
"""Set all probabilities to randomly generated numbers.
This will reset the value of all allowed transitions and emissions
to random values.
Warning 1 -- This will reset any currently set probabibilities.
Warning 2 -- This does not check to ensure that the sum of
all of the probabilities is less then 1. It just randomly assigns
a probability to each
"""
for key in self.transition_prob.keys():
self.transition_prob[key] = random.random()
for key in self.emission_prob.keys():
self.emission_prob[key] = random.random()
# --- functions to deal with the transitions in the sequence
def allow_all_transitions(self):
    """Allow transitions between every pair of states in the alphabet.

    By default all transitions within the alphabet are disallowed; this
    convenience method builds default probabilities and pseudo counts
    for every state pair and then re-applies any values that were
    previously set, so existing settings survive.
    """
    letters = self._state_alphabet
    # default probabilities and pseudo counts for every pair
    fresh_probs = self._all_blank(letters, letters)
    fresh_pseudo = self._all_pseudo(letters, letters)
    # overlay anything that was already configured
    fresh_probs.update(self.transition_prob)
    fresh_pseudo.update(self.transition_pseudo)
    # swap in the completed tables
    self.transition_prob = fresh_probs
    self.transition_pseudo = fresh_pseudo
def allow_transition(self, from_state, to_state, probability=None,
                     pseudocount=None):
    """Set a transition as being possible between the two states.

    Arguments:

    o from_state, to_state -- the states of the transition; both must
    be members of the state alphabet.

    o probability -- optional initial probability (defaults to 0).

    o pseudocount -- optional pseudo count for the transition
    (defaults to self.DEFAULT_PSEUDO).

    Raises:
    KeyError -- if the two states already have an allowed transition.
    """
    # check the sanity of adding these states
    for state in [from_state, to_state]:
        assert state in self._state_alphabet, \
            "State %s was not found in the sequence alphabet" % state
    # ensure that the states are not already set
    if ((from_state, to_state) not in self.transition_prob and
            (from_state, to_state) not in self.transition_pseudo):
        # set the initial probability
        if probability is None:
            probability = 0
        self.transition_prob[(from_state, to_state)] = probability
        # set the initial pseudocounts
        if pseudocount is None:
            # BUG FIX: the original assigned to a misspelled local
            # ('pseudcount'), so the default pseudo count was never
            # applied and None was stored instead.
            pseudocount = self.DEFAULT_PSEUDO
        self.transition_pseudo[(from_state, to_state)] = pseudocount
    else:
        # also fixes the 'Transtion' typo in the error message
        raise KeyError("Transition from %s to %s is already allowed."
                       % (from_state, to_state))
def destroy_transition(self, from_state, to_state):
    """Restrict transitions between the two states.

    Removes both the probability and the pseudo count entries for the
    (from_state, to_state) pair.

    Raises:
    KeyError if the transition is not currently allowed.
    """
    trans_key = (from_state, to_state)
    try:
        del self.transition_prob[trans_key]
        del self.transition_pseudo[trans_key]
    except KeyError:
        raise KeyError("Transition from %s to %s is already disallowed."
                       % (from_state, to_state))
def set_transition_score(self, from_state, to_state, probability):
    """Set the probability of a transition between two states.

    Raises:
    KeyError if the transition is not allowed.
    """
    # 'in' replaces dict.has_key(), which was removed in Python 3;
    # the membership test is equivalent in Python 2.
    if (from_state, to_state) in self.transition_prob:
        self.transition_prob[(from_state, to_state)] = probability
    else:
        raise KeyError("Transition from %s to %s is not allowed."
                       % (from_state, to_state))
def set_transition_pseudocount(self, from_state, to_state, count):
    """Set the default pseudocount for a transition.

    To avoid computational problems, it is helpful to be able to
    set a 'default' pseudocount to start with for estimating
    transition and emission probabilities (see p62 in Durbin et al
    for more discussion on this). By default, all transitions have
    a pseudocount of 1.

    Raises:
    KeyError if the transition is not allowed.
    """
    # 'in' replaces dict.has_key(), which was removed in Python 3;
    # the membership test is equivalent in Python 2.
    if (from_state, to_state) in self.transition_pseudo:
        self.transition_pseudo[(from_state, to_state)] = count
    else:
        raise KeyError("Transition from %s to %s is not allowed."
                       % (from_state, to_state))
# --- functions to deal with emissions from the sequence
def set_emission_score(self, seq_state, emission_state, probability):
    """Set the probability of a emission from a particular state.

    Raises:
    KeyError if the emission from the given state is not allowed.
    """
    # 'in' replaces dict.has_key(), which was removed in Python 3;
    # the membership test is equivalent in Python 2.
    if (seq_state, emission_state) in self.emission_prob:
        self.emission_prob[(seq_state, emission_state)] = probability
    else:
        raise KeyError("Emission of %s from %s is not allowed."
                       % (emission_state, seq_state))
def set_emission_pseudocount(self, seq_state, emission_state, count):
    """Set the default pseudocount for an emission.

    To avoid computational problems, it is helpful to be able to
    set a 'default' pseudocount to start with for estimating
    transition and emission probabilities (see p62 in Durbin et al
    for more discussion on this). By default, all emissions have
    a pseudocount of 1.

    Raises:
    KeyError if the emission from the given state is not allowed.
    """
    # 'in' replaces dict.has_key(), which was removed in Python 3;
    # the membership test is equivalent in Python 2.
    if (seq_state, emission_state) in self.emission_pseudo:
        self.emission_pseudo[(seq_state, emission_state)] = count
    else:
        raise KeyError("Emission of %s from %s is not allowed."
                       % (emission_state, seq_state))
class HiddenMarkovModel:
    """Represent a hidden markov model that can be used for state estimation.
    """
    def __init__(self, transition_prob, emission_prob, transition_pseudo,
                 emission_pseudo):
        """Initialize a Markov Model.

        Note: You should use the MarkovModelBuilder class instead of
        initiating this class directly.

        Arguments:

        o transition_prob -- A dictionary of transition probabilities for all
        possible transitions in the sequence, keyed by (from_state, to_state)
        tuples.

        o emission_prob -- A dictionary of emission probabilities for all
        possible emissions from the sequence states, keyed by
        (state, emission) tuples.

        o transition_pseudo -- Pseudo-counts to be used for the transitions,
        when counting for purposes of estimating transition probabilities.

        o emission_pseudo -- Pseudo-counts to be used for the emissions,
        when counting for purposes of estimating emission probabilities.
        """
        self._transition_pseudo = transition_pseudo
        self._emission_pseudo = emission_pseudo
        self.transition_prob = transition_prob
        self.emission_prob = emission_prob
        # a dictionary of the possible transitions from one letter to the next
        # the keys are the letter, and the values are lists of letters that
        # can be transitioned to
        self._transitions_from = \
            self._calculate_from_transitions(self.transition_prob)

    def _calculate_from_transitions(self, trans_probs):
        """Calculate which 'from transitions' are allowed for each letter.

        This looks through all of the trans_probs, and uses this dictionary
        to determine allowed transitions. It converts this information into
        a dictionary, whose keys are the transition letters and whose
        values are a list of allowed letters to transition to.
        """
        from_transitions = {}
        # loop over all of the different transitions
        for trans_key in trans_probs.keys():
            # if the letter to 'transition from' already exists, add the
            # new letter which can be 'transitioned to' to the list
            try:
                from_transitions[trans_key[0]].append(trans_key[1])
            # otherwise create the list and add the letter
            except KeyError:
                from_transitions[trans_key[0]] = []
                from_transitions[trans_key[0]].append(trans_key[1])
        return from_transitions

    def get_blank_transitions(self):
        """Get the default transitions for the model.

        Returns a dictionary of all of the default transitions between any
        two letters in the sequence alphabet. The dictionary is structured
        with keys as (letter1, letter2) and values as the starting number
        of transitions.
        """
        return self._transition_pseudo

    def get_blank_emissions(self):
        """Get the starting default emmissions for each sequence.

        This returns a dictionary of the default emmissions for each
        letter. The dictionary is structured with keys as
        (seq_letter, emmission_letter) and values as the starting number
        of emmissions.
        """
        return self._emission_pseudo

    def transitions_from(self, state_letter):
        """Get all transitions which can happen from the given state.

        This returns all letters which the given state_letter is allowed
        to transition to. An empty list is returned if no letters are
        possible.
        """
        try:
            return self._transitions_from[state_letter]
        except KeyError:
            return []

    def viterbi(self, sequence, state_alphabet):
        """Calculate the most probable state path using the Viterbi algorithm.

        This implements the Viterbi algorithm (see pgs 55-57 in Durbin et
        al for a full explanation -- this is where I took my implementation
        ideas from), to allow decoding of the state path, given a sequence
        of emissions.

        Arguments:

        o sequence -- A Seq object with the emission sequence that we
        want to decode.

        o state_alphabet -- The alphabet of the possible state sequences
        that can be generated.

        Returns a (traceback_sequence, state_path_probability) tuple.
        """
        # calculate logarithms of the transition and emission probs
        log_trans = self._log_transform(self.transition_prob)
        log_emission = self._log_transform(self.emission_prob)
        viterbi_probs = {}
        pred_state_seq = {}
        # assumes state_alphabet is a Biopython-style Alphabet object with
        # a .letters attribute -- TODO confirm
        state_letters = state_alphabet.letters
        # --- initialization
        #
        # NOTE: My index numbers are one less than what is given in Durbin
        # et al, since we are indexing the sequence going from 0 to
        # (Length - 1) not 1 to Length, like in Durbin et al.
        #
        # NOTE(review): these initial values are raw probabilities (1 and 0)
        # while the recursion below adds log-transformed values -- confirm
        # this mix is intentional (log(1)=0 and log(0)=-inf would be the
        # strict log-space equivalents).
        # v_{0}(0) = 1
        viterbi_probs[(state_letters[0], -1)] = 1
        # v_{k}(0) = 0 for k > 0
        for state_letter in state_letters[1:]:
            viterbi_probs[(state_letter, -1)] = 0
        # --- recursion
        # loop over the training squence (i = 1 .. L)
        for i in range(0, len(sequence)):
            # now loop over all of the letters in the state path
            for main_state in state_letters:
                # e_{l}(x_{i})
                emission_part = log_emission[(main_state, sequence[i])]
                # loop over all possible states
                possible_state_probs = {}
                for cur_state in self.transitions_from(main_state):
                    # a_{kl}
                    trans_part = log_trans[(cur_state, main_state)]
                    # v_{k}(i - 1)
                    viterbi_part = viterbi_probs[(cur_state, i - 1)]
                    cur_prob = viterbi_part + trans_part
                    possible_state_probs[cur_state] = cur_prob
                # finally calculate the viterbi probability using the max
                max_prob = max(possible_state_probs.values())
                viterbi_probs[(main_state, i)] = (emission_part + max_prob)
                # now get the most likely state
                for state in possible_state_probs.keys():
                    if possible_state_probs[state] == max_prob:
                        pred_state_seq[(i - 1, main_state)] = state
                        break
        # --- termination
        # calculate the probability of the state path
        # loop over all letters
        all_probs = {}
        for state in state_letters:
            # v_{k}(L)
            viterbi_part = viterbi_probs[(state, len(sequence) - 1)]
            # a_{k0}
            transition_part = log_trans[(state, state_letters[0])]
            # NOTE(review): the surrounding computation is in log space, so
            # this product of two log values looks like it should be a sum
            # ('+') -- verify against Durbin et al before relying on the
            # returned probability.
            all_probs[state] = viterbi_part * transition_part
        state_path_prob = max(all_probs.values())
        # find the last pointer we need to trace back from
        last_state = ''
        for state in all_probs.keys():
            if all_probs[state] == state_path_prob:
                last_state = state
        assert last_state != '', "Didn't find the last state to trace from!"
        # --- traceback
        # MutableSeq must be provided by an import outside this view
        # (presumably Bio.Seq) -- TODO confirm
        traceback_seq = MutableSeq('', state_alphabet)
        # NOTE(review): Python 2 only -- range() returns a list here; under
        # Python 3 this needs list(range(...)) before calling .reverse().
        loop_seq = range(0, len(sequence))
        loop_seq.reverse()
        cur_state = last_state
        for i in loop_seq:
            traceback_seq.append(cur_state)
            cur_state = pred_state_seq[(i - 1, cur_state)]
        # put the traceback sequence in the proper orientation
        traceback_seq.reverse()
        return traceback_seq.toseq(), state_path_prob

    def _log_transform(self, probability):
        """Return log transform of the given probability dictionary.

        When calculating the Viterbi equation, we need to deal with things
        as sums of logs instead of products of probabilities, so that we
        don't get underflow errors. This copies the given probability
        dictionary and returns the same dictionary with everything
        transformed with a log.
        """
        # a shallow copy is sufficient: values are replaced, keys shared
        log_prob = copy.copy(probability)
        for key in log_prob.keys():
            log_prob[key] = math.log(log_prob[key])
        return log_prob
| |
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.db import transaction
from django.db.models import Q
from django.utils import timezone
from django.views.decorators.http import require_http_methods
from django.template import RequestContext

from passlib.apps import django_context as pwd_context

from beetle.models import Principal
from access.models import (Rule, RuleException, DynamicAuth, PasscodeAuth,
                           PasscodeAuthInstance)
def __is_rule_exempt_helper(rule, principal):
    """Return True when *principal* (or the wildcard principal "*") has
    an exception recorded for *rule*."""
    exceptions = RuleException.objects.filter(
        Q(to_principal=principal) | Q(to_principal__name="*"),
        rule=rule)
    return bool(exceptions)
@require_http_methods(["GET", "POST"])
@transaction.atomic
def view_form_passcode(request, rule, principal):
    """Serve the password form and receive responses.

    GET: render the passcode form, or grant a session immediately when
    the rule has an empty passcode.
    POST: verify the submitted code against the stored hash and grant
    or deny a session.

    NOTE(review): PasscodeAuthInstance is referenced below but was not
    imported by the original module -- the grant paths raised NameError
    until it is added to the access.models import.
    """
    context = RequestContext(request)
    try:
        rule = Rule.objects.get(name=rule)
        principal = Principal.objects.get(name=principal)
        passcode_auth = DynamicAuth.objects.instance_of(PasscodeAuth).get(
            rule=rule)
    except DynamicAuth.DoesNotExist:
        context_dict = {
            "title" : "An error occurred...",
            "message" : "%s does not require passcode auth." % (rule.name,),
        }
        return render_to_response('state/empty_response.html',
            context_dict, context)
    except Exception as err:
        # BUG FIX: 'except Exception, err' is Python 2-only syntax;
        # 'as err' works on Python 2.6+ and Python 3.
        context_dict = {
            "title" : "An error occurred...",
            "message" : str(err),
        }
        return render_to_response('state/empty_response.html',
            context_dict, context)

    now = timezone.now()
    context_dict = {
        "rule" : rule,
        "principal" : principal,
        "auth" : passcode_auth
    }

    if (rule.to_principal.name != "*" and rule.to_principal != principal) or \
            __is_rule_exempt_helper(rule, principal):
        #######################
        # Rule does not apply #
        #######################
        context_dict.update({
            "status" : {
                "a" : "Does Not Apply",
                "b" : "denied",
            }
        })
        return render_to_response('state/passcode_form_submit.html',
            context_dict, context)

    if request.method == "GET":
        #######################
        # Requesting the form #
        #######################
        if passcode_auth.code == "":
            # No passcode configured: grant (or refresh) a session
            # immediately.
            auth_instance, _ = PasscodeAuthInstance.objects.get_or_create(
                principal=principal, rule=rule)
            auth_instance.timestamp = now
            auth_instance.expire = now + passcode_auth.session_length
            auth_instance.save()
            context_dict.update({
                "status" : {
                    "a" : "Success",
                    "b" : "approved",
                }
            })
            return render_to_response('state/passcode_form_submit.html',
                context_dict, context)
        else:
            # Render the password form
            return render_to_response('state/passcode_form.html',
                context_dict, context)
    elif request.method == "POST":
        #######################
        # Submitting the form #
        #######################
        code = request.POST["code"]
        allowed = False
        try:
            allowed = pwd_context.verify(code, passcode_auth.chash)
        except Exception:
            # Any verification failure (malformed hash, bad input) is
            # treated as a denied attempt rather than a server error.
            pass
        if not allowed:
            context_dict.update({
                "status" : {
                    "a" : "Invalid code",
                    "b" : "denied",
                }
            })
        else:
            auth_instance, _ = PasscodeAuthInstance.objects.get_or_create(
                principal=principal, rule=rule)
            auth_instance.timestamp = now
            auth_instance.expire = now + passcode_auth.session_length
            auth_instance.save()
            context_dict.update({
                "status" : {
                    "a" : "Success",
                    "b" : "approved",
                }
            })
        return render_to_response('state/passcode_form_submit.html',
            context_dict, context)
    else:
        # require_http_methods already limits to GET/POST; defensive
        # fallback only.
        return HttpResponse(status=403)
@require_http_methods(["GET","POST"])
@transaction.atomic
def view_form_passcode_generic(request, rule):
    """Serve the password form and receive responses.

    Generic variant of view_form_passcode: the principal is chosen on
    the form (GET lists the eligible principals, POST names one).

    NOTE(review): PasscodeAuthInstance is referenced below but was not
    imported by the original module -- the grant path raised NameError
    until it is added to the access.models import.
    """
    context = RequestContext(request)
    try:
        rule = Rule.objects.get(name=rule)
        passcode_auth = DynamicAuth.objects.instance_of(PasscodeAuth).get(
            rule=rule)
    except DynamicAuth.DoesNotExist:
        context_dict = {
            "title" : "An error occurred...",
            "message" : "%s does not require passcode auth." % (rule.name,),
        }
        return render_to_response('state/empty_response.html',
            context_dict, context)
    except Exception as err:
        # BUG FIX: 'except Exception, err' is Python 2-only syntax;
        # 'as err' works on Python 2.6+ and Python 3.
        context_dict = {
            "title" : "An error occurred...",
            "message" : str(err),
        }
        return render_to_response('state/empty_response.html',
            context_dict, context)

    now = timezone.now()
    context_dict = {
        "rule" : rule,
        "auth" : passcode_auth
    }

    if request.method == "GET":
        #######################
        # Requesting the form #
        #######################
        # get list of principals that the rule can be applied to
        if rule.to_principal.name == "*":
            principals = Principal.objects.all().exclude(name="*")
        else:
            principals = [rule.to_principal]
        principals = [x for x in principals if not __is_rule_exempt_helper(
            rule, x)]
        context_dict["principals"] = principals
        return render_to_response('state/passcode_form_generic.html',
            context_dict, context)
    elif request.method == "POST":
        #######################
        # Submitting the form #
        #######################
        code = request.POST["code"]
        principal = request.POST["principal"]
        principal = Principal.objects.get(name=principal)
        if (rule.to_principal.name != "*" and rule.to_principal != principal) \
                or __is_rule_exempt_helper(rule, principal):
            context_dict = {
                "title" : "An error occurred...",
                "message" : "%s does not apply for %s." % (rule.name,
                    principal.name),
            }
            return render_to_response('state/empty_response.html',
                context_dict, context)
        context_dict["principal"] = principal

        allowed = False
        try:
            if passcode_auth.code != "":
                allowed = pwd_context.verify(code, passcode_auth.chash)
            elif code == "":
                # no passcode configured: an empty submission is accepted
                allowed = True
        except Exception:
            # verification errors count as a denied attempt
            pass
        if not allowed:
            context_dict.update({
                "status" : {
                    "a" : "Invalid code",
                    "b" : "denied",
                }
            })
        else:
            auth_instance, _ = PasscodeAuthInstance.objects.get_or_create(
                principal=principal, rule=rule)
            auth_instance.timestamp = now
            auth_instance.expire = now + passcode_auth.session_length
            auth_instance.save()
            context_dict.update({
                "status" : {
                    "a" : "Success",
                    "b" : "approved",
                }
            })
        return render_to_response('state/passcode_form_submit.html',
            context_dict, context)
    else:
        # require_http_methods already limits to GET/POST; defensive
        # fallback only.
        return HttpResponse(status=403)
| |
from subprocess import call
import os
import shutil
import subprocess
import json
import codecs
from collections import OrderedDict
# All paths are resolved relative to this builder script so the build can
# be started from any working directory.
BUILDER_PATH = os.path.dirname(os.path.abspath(__file__))
ROOT_PATH = os.path.join(BUILDER_PATH, '..')  # repository root
DIST_PATH = os.path.join(ROOT_PATH, 'dist')  # build output root
INPUT_SVG_DIR = os.path.join(ROOT_PATH, 'src')  # source SVG icons
OUTPUT_SVG_DIR = os.path.join(DIST_PATH, 'svg')  # optimized SVG output
DATA_PATH = os.path.join(DIST_PATH, 'data')  # generated JSON data files
FONTS_FOLDER_PATH = os.path.join(DIST_PATH, 'fonts')  # generated font files
CSS_FOLDER_PATH = os.path.join(DIST_PATH, 'css')  # compiled CSS output
SCSS_FOLDER_PATH = os.path.join(DIST_PATH, 'scss')  # SCSS partials
def main():
    """Run the full icon build pipeline.

    Builds the fonts, loads the build metadata, writes the JSON data
    files, rewrites glyph names in the generated SVG font, generates
    SCSS/CSS, optimizes the source SVGs, and renders the HTML
    cheatsheets plus the icon comparison page.

    NOTE(review): the order appears significant -- e.g.
    rename_svg_glyph_names() presumably needs the font files produced
    by generate_font_files() -- so reorder with care.
    """
    generate_font_files()
    data = get_build_data()
    generate_data_files(data)
    rename_svg_glyph_names(data)
    generate_scss(data)
    generate_svg_files()
    generate_cheatsheet(data)
    generate_mode_cheatsheet(data)
    generate_icon_comparison(data)
def generate_font_files():
    """Run the fontforge build script that produces the font files."""
    # Parenthesized print is valid (and identical) on Python 2 and 3;
    # the original bare print statement is Python 2 only.
    print("Generate Fonts")
    cmd = "fontforge -script %s/scripts/generate_font.py" % (BUILDER_PATH)
    call(cmd, shell=True)
def generate_data_files(data):
    """Write the mode/logo/all-icon JSON data files into DATA_PATH.

    Icons are grouped by base name (the 'ios-'/'md-'/'logo-' prefix
    stripped): names with both an iOS and a Material Design source SVG
    go into mode-icons.json, logo icons into logo-icons.json, and all
    grouped icons into dmicons.json with their codes and tags.
    """
    print("Generate Data Files")

    icon_names = []
    mode_icons = []
    logo_icons = []
    all_icons = {}
    tag_data = get_tag_data()

    def get_code_by_name(icon_name):
        # Look up the font code point for a full icon name.
        for ionicon in data['icons']:
            if ionicon['name'] == icon_name:
                return ionicon['code']
        return ''

    # collect the unique base names (prefix stripped)
    for ionicon in data['icons']:
        name = ""
        if ionicon['name'].startswith('ios-'):
            name = ionicon['name'][4:]
        elif ionicon['name'].startswith('md-'):
            name = ionicon['name'][3:]
        elif ionicon['name'].startswith('logo-'):
            name = ionicon['name'][5:]
        if name not in icon_names:
            icon_names.append(name)

    for icon_name in icon_names:
        ios_svg = os.path.join(INPUT_SVG_DIR, 'ios-%s.svg' % (icon_name))
        md_svg = os.path.join(INPUT_SVG_DIR, 'md-%s.svg' % (icon_name))
        logo_svg = os.path.join(INPUT_SVG_DIR, 'logo-%s.svg' % (icon_name))

        if os.path.isfile(ios_svg) and os.path.isfile(md_svg):
            mode_icons.append('"%s":1' % icon_name)
            all_icons[icon_name] = {
                'icons': [
                    {
                        'code': get_code_by_name('ios-%s' % (icon_name)),
                        'name': 'ios-%s' % (icon_name)
                    },
                    {
                        'code': get_code_by_name('ios-%s-outline' % (icon_name)),
                        'name': 'ios-%s-outline' % (icon_name)
                    },
                    {
                        'code': get_code_by_name('md-%s' % (icon_name)),
                        'name': 'md-%s' % (icon_name)
                    }
                ],
                'tags': tag_data.get(icon_name) or icon_name.split('-')
            }
        elif os.path.isfile(logo_svg):
            logo_icons.append('"%s":1' % icon_name)
            all_icons[icon_name] = {
                'icons': [
                    {
                        'code': get_code_by_name('logo-%s' % (icon_name)),
                        # FIX: the original appended "or icon_name.split('-')"
                        # here (a stray copy-paste from the 'tags' line);
                        # the formatted string is always truthy so the value
                        # is unchanged -- the dead clause is removed.
                        'name': 'logo-%s' % (icon_name)
                    }
                ],
                'tags': tag_data.get(icon_name) or icon_name.split('-')
            }
        elif '-outline' in icon_name:
            continue
        else:
            print('wtf %s' % icon_name)

    output = '{\n' + ',\n'.join(mode_icons) + '\n}'
    with codecs.open(os.path.join(DATA_PATH, 'mode-icons.json'), 'w', 'utf-8') as f:
        f.write(output)

    output = '{\n' + ',\n'.join(logo_icons) + '\n}'
    with codecs.open(os.path.join(DATA_PATH, 'logo-icons.json'), 'w', 'utf-8') as f:
        f.write(output)

    all_icons = OrderedDict(sorted(all_icons.items(), key=lambda t: t[0]))
    with codecs.open(os.path.join(DATA_PATH, 'dmicons.json'), 'w', 'utf-8') as f:
        f.write(json.dumps(all_icons, indent=2, separators=(',', ': ')))
def generate_svg_files():
    """Optimize all source SVGs into OUTPUT_SVG_DIR using svgo."""
    print("Generate SVG Files")
    # BUG FIX: the original unconditional rmtree raised OSError on the
    # first build when the output directory did not exist yet.
    shutil.rmtree(OUTPUT_SVG_DIR, ignore_errors=True)
    if not os.path.exists(OUTPUT_SVG_DIR):
        os.makedirs(OUTPUT_SVG_DIR)
    cmd = 'svgo -f %s -o %s' % (INPUT_SVG_DIR, OUTPUT_SVG_DIR)
    subprocess.call([cmd], shell=True)
def rename_svg_glyph_names(data):
    """Rewrite glyph-name attributes in the generated SVG font.

    Hacky and slow (but safe) way to rename glyph-name attributes:
    every 'uniXXXX' codepoint name is replaced by the human-readable
    'ion-<name>' form.
    """
    svg_path = os.path.join(FONTS_FOLDER_PATH, 'dmicons.svg')
    with codecs.open(svg_path, 'r+', 'utf-8') as svg_file:
        svg_text = svg_file.read()
        for ionicon in data['icons']:
            # e.g. code '0xF2CA' -> glyph name 'uniF2CA'
            org_name = 'uni%s' % (ionicon['code'].replace('0x', '').upper())
            ion_name = 'ion-%s' % (ionicon['name'])
            svg_text = svg_text.replace(org_name, ion_name)
        svg_file.seek(0)
        svg_file.write(svg_text)
        # BUG FIX: truncate after rewriting -- without this, stale bytes
        # from the old contents survive whenever the replacement text is
        # shorter than the original.
        svg_file.truncate()
def generate_scss(data):
    """Write the SCSS variables/common/icons partials, then compile CSS."""
    print("Generate SCSS")

    font_name = data['name']
    font_version = data['version']
    css_prefix = data['prefix']
    variables_file_path = os.path.join(SCSS_FOLDER_PATH, 'dmicons-variables.scss')
    common_file_path = os.path.join(SCSS_FOLDER_PATH, 'dmicons-common.scss')
    icons_file_path = os.path.join(SCSS_FOLDER_PATH, 'dmicons-icons.scss')

    # dmicons-variables.scss: font path / family / version variables
    d = []
    d.append('@charset "UTF-8";')
    d.append('// DMicons Variables')
    d.append('// --------------------------\n')
    d.append('$dmicons-font-path: "../fonts" !default;')
    d.append('$dmicons-font-family: "%s" !default;' % (font_name))
    d.append('$dmicons-version: "%s" !default;' % (font_version))
    with codecs.open(variables_file_path, 'w', 'utf-8') as f:
        f.write(u'\n'.join(d))

    # dmicons-common.scss: every icon selector @extends the base class
    d = []
    d.append('@charset "UTF-8";')
    d.append('// DMicons Common CSS')
    d.append('// --------------------------\n')
    group = ['.%s' % (data['name'].lower())]
    for ionicon in data['icons']:
        group.append('.%s%s:before' % (css_prefix, ionicon['name']))
    d.append(',\n'.join(group))
    d.append('{')
    d.append(' @extend .ion;')
    d.append('}')
    with codecs.open(common_file_path, 'w', 'utf-8') as f:
        f.write('\n'.join(d))

    # dmicons-icons.scss: one content rule per icon code point
    d = []
    d.append('@charset "UTF-8";')
    d.append('// DMicons Icon Font CSS')
    d.append('// --------------------------\n')
    for ionicon in data['icons']:
        chr_code = ionicon['code'].replace('0x', '\\')
        d.append('.%s%s:before { content: "%s"; }' % (css_prefix, ionicon['name'], chr_code))
    with codecs.open(icons_file_path, 'w', 'utf-8') as f:
        f.write('\n'.join(d))

    generate_css_from_scss(data)
def generate_css_from_scss(data):
    """Compile both top-level SCSS entry points into CSS."""
    for entry_point in ('dmicons', 'dmicons-core'):
        compile_scss_to_css(entry_point, data)
def compile_scss_to_css(filename, data):
    """Compile one SCSS entry point into normal and minified CSS.

    Arguments:
    o filename -- the SCSS file's base name (without extension).
    o data -- build metadata (unused here; kept for interface parity).
    """
    scss_file_path = os.path.join(SCSS_FOLDER_PATH, '%s.scss' % filename)
    css_file_path = os.path.join(CSS_FOLDER_PATH, '%s.css' % filename)
    css_min_file_path = os.path.join(CSS_FOLDER_PATH, '%s.min.css' % filename)

    # Parenthesized print works identically on Python 2 and 3.
    print("Generate CSS From %s" % filename)
    cmd = "sass %s %s --style compact" % (scss_file_path, css_file_path)
    call(cmd, shell=True)

    cmd = "sass %s %s --style compressed" % (scss_file_path, css_min_file_path)
    call(cmd, shell=True)
def generate_cheatsheet(data):
    """Render cheatsheet.html listing every icon with its CSS/HTML codes."""
    print("Generate Cheatsheet")

    cheatsheet_file_path = os.path.join(ROOT_PATH, 'cheatsheet.html')
    template_path = os.path.join(BUILDER_PATH, 'cheatsheet', 'template.html')
    icon_row_path = os.path.join(BUILDER_PATH, 'cheatsheet', 'icon-row.html')

    with codecs.open(template_path, 'r', 'utf-8') as f:
        template_html = f.read()
    with codecs.open(icon_row_path, 'r', 'utf-8') as f:
        icon_row_template = f.read()

    content = []
    for ionicon in data['icons']:
        css_code = ionicon['code'].replace('0x', '\\')
        # NOTE(review): escaped_html_code and html_code are currently
        # identical; the 'escaped' variant presumably should escape the
        # leading ampersand (e.g. '&amp;#x') so the entity displays
        # literally -- confirm against icon-row.html before changing.
        escaped_html_code = ionicon['code'].replace('0x', '&#x') + ';'
        html_code = ionicon['code'].replace('0x', '&#x') + ';'

        item_row = icon_row_template
        item_row = item_row.replace('{{name}}', ionicon['name'])
        item_row = item_row.replace('{{prefix}}', data['prefix'])
        item_row = item_row.replace('{{css_code}}', css_code)
        item_row = item_row.replace('{{escaped_html_code}}', escaped_html_code)
        item_row = item_row.replace('{{html_code}}', html_code)
        content.append(item_row)

    template_html = template_html.replace("{{title}}", 'Cheatsheet')
    template_html = template_html.replace("{{font_name}}", data["name"])
    template_html = template_html.replace("{{font_version}}", data["version"])
    template_html = template_html.replace("{{icon_count}}", str(len(data["icons"])))
    template_html = template_html.replace("{{content}}", '\n'.join(content))

    with codecs.open(cheatsheet_file_path, 'w', 'utf-8') as f:
        f.write(template_html)
def generate_mode_cheatsheet(data):
    """Render mode-cheatsheet.html comparing iOS and Material variants."""
    print("Generate Mode Cheatsheet")

    cheatsheet_file_path = os.path.join(ROOT_PATH, 'mode-cheatsheet.html')
    template_path = os.path.join(BUILDER_PATH, 'cheatsheet', 'template.html')
    icon_row_path = os.path.join(BUILDER_PATH, 'cheatsheet', 'mode-icon-row.html')

    with codecs.open(template_path, 'r', 'utf-8') as f:
        template_html = f.read()
    with codecs.open(icon_row_path, 'r', 'utf-8') as f:
        icon_row_template = f.read()

    content = []
    icon_names = []

    # header row for the comparison table
    content.append('''
  <div class="mode-row">
    <div class="mode-col">
      <strong>Icon Name</strong>
    </div>
    <div class="mode-col align-center">
      <strong>iOS</strong>
    </div>
    <div class="mode-col align-center">
      <strong>Material Design</strong>
    </div>
  </div>
''')

    # collect unique base names that have mode variants
    for ionicon in data['icons']:
        name = ""
        if ionicon['name'].startswith('ios-'):
            name = ionicon['name'][4:]
        elif ionicon['name'].startswith('md-'):
            name = ionicon['name'][3:]
        if name not in icon_names and not name.endswith('-outline'):
            icon_names.append(name)

    for icon_name in icon_names:
        item_row = icon_row_template.replace('{{name}}', icon_name)
        item_row = item_row.replace('{{prefix}}', data['prefix'])
        content.append(item_row)

    template_html = template_html.replace("{{title}}", 'Mode Cheatsheet')
    template_html = template_html.replace("{{font_name}}", data["name"])
    template_html = template_html.replace("{{font_version}}", data["version"])
    template_html = template_html.replace("{{icon_count}}", str(len(icon_names)))
    template_html = template_html.replace("{{content}}", '\n'.join(content))

    with codecs.open(cheatsheet_file_path, 'w', 'utf-8') as f:
        f.write(template_html)
def generate_icon_comparison(data):
    """Render icon-comparison.html: source SVG vs optimized SVG vs font."""
    print("Generate Icon Comparison")

    comparison_file_path = os.path.join(ROOT_PATH, 'icon-comparison.html')
    template_path = os.path.join(BUILDER_PATH, 'cheatsheet', 'template.html')
    icon_row_path = os.path.join(BUILDER_PATH, 'cheatsheet', 'icon-comparison-row.html')

    with codecs.open(template_path, 'r', 'utf-8') as f:
        template_html = f.read()
    with codecs.open(icon_row_path, 'r', 'utf-8') as f:
        icon_row_template = f.read()

    content = []
    icon_count = 0

    # header row for the comparison table
    content.append('''
  <div class="comparison-row">
    <div class="comparison-col">
      <h2>Source SVG</h2>
    </div>
    <div class="comparison-col">
      <h2>Optimized SVG</h2>
    </div>
    <div class="comparison-col">
      <h2>Icon Font</h2>
    </div>
  </div>
''')

    for ionicon in data['icons']:
        src_svg_file = os.path.join(INPUT_SVG_DIR, '%s.svg' % (ionicon['name']))
        if not os.path.isfile(src_svg_file):
            # skip icons without a source SVG (e.g. generated variants)
            continue
        icon_count += 1

        item_row = icon_row_template.replace('{{name}}', ionicon['name'])
        item_row = item_row.replace('{{prefix}}', data['prefix'])

        src_svg = 'src/%s.svg' % (ionicon['name'])
        item_row = item_row.replace('{{src_svg}}', src_svg)
        src_svg_size = os.path.getsize(src_svg_file)
        item_row = item_row.replace('{{src_svg_size}}', str(src_svg_size))

        optimized_svg = 'dist/svg/%s.svg' % (ionicon['name'])
        item_row = item_row.replace('{{optimized_svg}}', optimized_svg)
        optimized_svg_file = os.path.join(OUTPUT_SVG_DIR, '%s.svg' % (ionicon['name']))
        optimized_svg_size = os.path.getsize(optimized_svg_file)
        item_row = item_row.replace('{{optimized_svg_size}}', str(optimized_svg_size))

        content.append(item_row)

    template_html = template_html.replace("{{title}}", 'Icon Format Comparison')
    template_html = template_html.replace("{{font_name}}", data["name"])
    template_html = template_html.replace("{{font_version}}", data["version"])
    template_html = template_html.replace("{{icon_count}}", str(icon_count))
    template_html = template_html.replace("{{content}}", '\n'.join(content))

    with codecs.open(comparison_file_path, 'w', 'utf-8') as f:
        f.write(template_html)
def get_build_data():
    """Load build_data.json and stamp it with the version from package.json.

    Returns the build metadata dict with its 'version' key overwritten
    by the npm package version, so the two never drift apart.
    """
    build_data_path = os.path.join(BUILDER_PATH, 'build_data.json')
    # 'with' guarantees the handles are closed even if parsing fails
    with codecs.open(build_data_path, 'r', 'utf-8') as f:
        data = json.loads(f.read())

    package_json_path = os.path.join(ROOT_PATH, 'package.json')
    with codecs.open(package_json_path, 'r', 'utf-8') as f:
        package_data = json.loads(f.read())

    data['version'] = package_data['version']
    return data
def get_tag_data():
    """Return the icon-name -> tag list mapping loaded from tags.json."""
    tag_data_path = os.path.join(BUILDER_PATH, 'tags.json')
    # 'with' guarantees the handle is closed even if parsing fails
    with codecs.open(tag_data_path, 'r', 'utf-8') as f:
        return json.loads(f.read())
if __name__ == "__main__":
main()
| |
# -*- coding: utf-8 -*-
"""Functions for prompting the user for project info."""
from collections import OrderedDict
import json
import click
import six
from jinja2.exceptions import UndefinedError
from cookiecutter.exceptions import UndefinedVariableInTemplate
from cookiecutter.environment import StrictEnvironment
def read_user_variable(var_name, default_value):
    """Prompt user for variable and return the entered value or given default.

    :param str var_name: Variable of the context to query the user
    :param default_value: Value that will be returned if no input happens
    """
    # See http://click.pocoo.org/4/api/#click.prompt
    entered = click.prompt(var_name, default=default_value)
    return entered
def read_user_yes_no(question, default_value):
    """Prompt the user to reply with 'yes' or 'no' (or equivalent values).

    Note:
        Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n'

    :param str question: Question to the user
    :param default_value: Value that will be returned if no input happens
    """
    # click.BOOL coerces the textual answers above to a boolean.
    # See http://click.pocoo.org/4/api/#click.prompt
    return click.prompt(question, default=default_value, type=click.BOOL)
def read_repo_password(question):
    """Prompt the user to enter a password.

    :param str question: Question to the user
    """
    # hide_input keeps the password from being echoed to the terminal.
    # See http://click.pocoo.org/4/api/#click.prompt
    answer = click.prompt(question, hide_input=True)
    return answer
def read_user_choice(var_name, options):
    """Prompt the user to choose from several options for the given variable.

    The first item will be returned if no input happens.

    :param str var_name: Variable as specified in the context
    :param list options: Sequence of options that are available to select from
    :return: Exactly one item of ``options`` that has been chosen by the user
    """
    # See http://click.pocoo.org/4/api/#click.prompt
    if not isinstance(options, list):
        raise TypeError

    if not options:
        raise ValueError

    # map the 1-based menu number (as a string) to the option itself
    choice_map = OrderedDict(
        (u'{}'.format(number), option)
        for number, option in enumerate(options, 1)
    )
    choices = choice_map.keys()

    numbered_lines = [u'{} - {}'.format(number, option)
                      for number, option in choice_map.items()]
    prompt = u'\n'.join((
        u'Select {}:'.format(var_name),
        u'\n'.join(numbered_lines),
        u'Choose from {}'.format(u', '.join(choices))
    ))

    selected = click.prompt(
        prompt, type=click.Choice(choices), default=u'1', show_choices=False
    )
    return choice_map[selected]
def process_json(user_value):
    """Load user-supplied value as a JSON dict.

    :param str user_value: User-supplied value to load as a JSON dict
    """
    try:
        parsed = json.loads(user_value, object_pairs_hook=OrderedDict)
    except Exception:
        # Leave it up to click to ask the user again
        raise click.UsageError('Unable to decode to JSON.')

    if not isinstance(parsed, dict):
        # Leave it up to click to ask the user again
        raise click.UsageError('Requires JSON dict.')

    return parsed
def read_user_dict(var_name, default_value):
    """Prompt the user to provide a dictionary of data.

    :param str var_name: Variable as specified in the context
    :param default_value: Value that will be returned if no input is provided
    :return: A Python dictionary to use in the context.
    """
    # See http://click.pocoo.org/4/api/#click.prompt
    if not isinstance(default_value, dict):
        raise TypeError

    default_display = 'default'
    answer = click.prompt(
        var_name,
        default=default_display,
        type=click.STRING,
        value_proc=process_json,
    )

    # The sentinel string means the user accepted the default; hand the
    # original dict back without any processing.
    return default_value if answer == default_display else answer
def render_variable(env, raw, cookiecutter_dict):
    """Render a single context value through Jinja2.

    Strings are treated as templates and rendered against the values
    collected so far, so e.g. a project_name of "Peanut Butter Cookie"
    lets `{{ cookiecutter.project_name.replace(" ", "_") }}` become a
    concrete default. Dicts and lists are rendered recursively; ``None``
    passes through untouched, and any other scalar is coerced to ``str``
    before rendering.

    :param Environment env: A Jinja2 Environment object.
    :param raw: The next value to be prompted for by the user.
    :param dict cookiecutter_dict: The current context as it's gradually
        being populated with variables.
    :return: The rendered value for the default variable.
    """
    if raw is None:
        return None
    if isinstance(raw, dict):
        return {
            render_variable(env, key, cookiecutter_dict):
                render_variable(env, value, cookiecutter_dict)
            for key, value in raw.items()
        }
    if isinstance(raw, list):
        return [render_variable(env, item, cookiecutter_dict) for item in raw]
    if not isinstance(raw, six.string_types):
        raw = str(raw)
    return env.from_string(raw).render(cookiecutter=cookiecutter_dict)
def prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input):
    """Prompt user with a set of options to choose from.

    Each of the possible choices is rendered beforehand.
    """
    rendered = [render_variable(env, option, cookiecutter_dict)
                for option in options]
    # With no_input the first (default) option wins without prompting.
    return rendered[0] if no_input else read_user_choice(key, rendered)
def prompt_for_config(context, no_input=False):
    """Prompt user to enter a new config.

    :param dict context: Source for field names and sample values.
    :param no_input: Prompt the user at command line for manual configuration?
    """
    cookiecutter_dict = OrderedDict([])
    env = StrictEnvironment(context=context)

    # First pass: simple values, raw (underscore-prefixed) entries and
    # choice lists. These must be resolved before the dict variables,
    # whose keys and values might refer to them.
    for key, raw in context[u'cookiecutter'].items():
        if key.startswith(u'_'):
            # Private entries are copied through without rendering.
            cookiecutter_dict[key] = raw
            continue
        try:
            if isinstance(raw, list):
                # A list means a choice variable.
                cookiecutter_dict[key] = prompt_choice_for_config(
                    cookiecutter_dict, env, key, raw, no_input
                )
            elif not isinstance(raw, dict):
                # A plain variable: render it, then optionally let the
                # user override the rendered default.
                rendered = render_variable(env, raw, cookiecutter_dict)
                if not no_input:
                    rendered = read_user_variable(key, rendered)
                cookiecutter_dict[key] = rendered
        except UndefinedError as err:
            msg = "Unable to render variable '{}'".format(key)
            raise UndefinedVariableInTemplate(msg, err, context)

    # Second pass: dict variables, which may reference first-pass values.
    for key, raw in context[u'cookiecutter'].items():
        if not isinstance(raw, dict):
            continue
        try:
            rendered = render_variable(env, raw, cookiecutter_dict)
            if not no_input:
                rendered = read_user_dict(key, rendered)
            cookiecutter_dict[key] = rendered
        except UndefinedError as err:
            msg = "Unable to render variable '{}'".format(key)
            raise UndefinedVariableInTemplate(msg, err, context)
    return cookiecutter_dict
| |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pytest
from google.rpc import status_pb2
from google.cloud import bigtable_admin_v2
from google.cloud.bigtable_admin_v2 import enums
from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2
from google.cloud.bigtable_admin_v2.proto import instance_pb2
from google.iam.v1 import iam_policy_pb2
from google.iam.v1 import policy_pb2
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
class MultiCallableStub(object):
    """Stub for the grpc.UnaryUnaryMultiCallable interface.

    Records every request on the owning channel stub and replays canned
    entries from ``channel_stub.responses``; an Exception entry is raised
    instead of returned.
    """

    def __init__(self, method, channel_stub):
        # Name of the RPC method this callable represents.
        self.method = method
        # Owning channel stub; its requests/responses lists hold the state.
        self.channel_stub = channel_stub

    def __call__(self, request, timeout=None, metadata=None, credentials=None):
        """Record ``(method, request)`` and replay the next canned response."""
        self.channel_stub.requests.append((self.method, request))

        response = None
        if self.channel_stub.responses:
            # NOTE: pop() takes entries from the END of the list.
            response = self.channel_stub.responses.pop()

        if isinstance(response, Exception):
            raise response

        # Explicit None check: the previous truthiness test ("if response:")
        # silently dropped falsy-but-valid responses such as 0, '' or empty
        # containers. None (no canned response left) still yields None.
        if response is not None:
            return response
class ChannelStub(object):
    """Stub for the grpc.Channel interface.

    Collects the requests made through the channel and hands out canned
    responses to the callables it creates.
    """

    def __init__(self, responses=None):
        """Initialize the stub.

        :param responses: Canned responses (or exceptions) to replay.
            Defaults to a fresh empty list per instance; the previous
            mutable default (``responses=[]``) was shared across every
            ChannelStub created without arguments, leaking popped/append
            state between instances.
        """
        self.responses = [] if responses is None else responses
        self.requests = []

    def unary_unary(self,
                    method,
                    request_serializer=None,
                    response_deserializer=None):
        # Serializers are ignored; requests are recorded verbatim.
        return MultiCallableStub(method, self)
class CustomException(Exception):
    """Marker exception replayed by ChannelStub to test error propagation."""
    pass
class TestBigtableInstanceAdminClient(object):
    """Unit tests for BigtableInstanceAdminClient.

    Each test patches ``google.api_core.grpc_helpers.create_channel`` to
    return a ChannelStub preloaded with one canned response (or error),
    invokes a single client method, and then asserts on both the value
    returned to the caller and the request message captured by the stub.
    Long-running methods wrap the canned payload in an Operation proto.
    """
    def test_create_instance(self):
        # Setup Expected Response
        name = 'name3373707'
        display_name = 'displayName1615086568'
        expected_response = {'name': name, 'display_name': display_name}
        expected_response = instance_pb2.Instance(**expected_response)
        operation = operations_pb2.Operation(
            name='operations/test_create_instance', done=True)
        operation.response.Pack(expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[operation])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup Request
        parent = client.project_path('[PROJECT]')
        instance_id = 'instanceId-2101995259'
        instance = {}
        clusters = {}
        response = client.create_instance(parent, instance_id, instance,
                                          clusters)
        result = response.result()
        assert expected_response == result
        assert len(channel.requests) == 1
        expected_request = bigtable_instance_admin_pb2.CreateInstanceRequest(
            parent=parent,
            instance_id=instance_id,
            instance=instance,
            clusters=clusters)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_create_instance_exception(self):
        # Setup Response
        error = status_pb2.Status()
        operation = operations_pb2.Operation(
            name='operations/test_create_instance_exception', done=True)
        operation.error.CopyFrom(error)
        # Mock the API response
        channel = ChannelStub(responses=[operation])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup Request
        parent = client.project_path('[PROJECT]')
        instance_id = 'instanceId-2101995259'
        instance = {}
        clusters = {}
        response = client.create_instance(parent, instance_id, instance,
                                          clusters)
        exception = response.exception()
        assert exception.errors[0] == error
    def test_get_instance(self):
        # Setup Expected Response
        name_2 = 'name2-1052831874'
        display_name = 'displayName1615086568'
        expected_response = {'name': name_2, 'display_name': display_name}
        expected_response = instance_pb2.Instance(**expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup Request
        name = client.instance_path('[PROJECT]', '[INSTANCE]')
        response = client.get_instance(name)
        assert expected_response == response
        assert len(channel.requests) == 1
        expected_request = bigtable_instance_admin_pb2.GetInstanceRequest(
            name=name)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_get_instance_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup request
        name = client.instance_path('[PROJECT]', '[INSTANCE]')
        with pytest.raises(CustomException):
            client.get_instance(name)
    def test_list_instances(self):
        # Setup Expected Response
        next_page_token = 'nextPageToken-1530815211'
        expected_response = {'next_page_token': next_page_token}
        expected_response = bigtable_instance_admin_pb2.ListInstancesResponse(
            **expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup Request
        parent = client.project_path('[PROJECT]')
        response = client.list_instances(parent)
        assert expected_response == response
        assert len(channel.requests) == 1
        expected_request = bigtable_instance_admin_pb2.ListInstancesRequest(
            parent=parent)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_list_instances_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup request
        parent = client.project_path('[PROJECT]')
        with pytest.raises(CustomException):
            client.list_instances(parent)
    def test_update_instance(self):
        # Setup Expected Response
        name_2 = 'name2-1052831874'
        display_name_2 = 'displayName21615000987'
        expected_response = {'name': name_2, 'display_name': display_name_2}
        expected_response = instance_pb2.Instance(**expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup Request
        name = client.instance_path('[PROJECT]', '[INSTANCE]')
        display_name = 'displayName1615086568'
        type_ = enums.Instance.Type.TYPE_UNSPECIFIED
        labels = {}
        response = client.update_instance(name, display_name, type_, labels)
        assert expected_response == response
        assert len(channel.requests) == 1
        expected_request = instance_pb2.Instance(
            name=name, display_name=display_name, type=type_, labels=labels)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_update_instance_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup request
        name = client.instance_path('[PROJECT]', '[INSTANCE]')
        display_name = 'displayName1615086568'
        type_ = enums.Instance.Type.TYPE_UNSPECIFIED
        labels = {}
        with pytest.raises(CustomException):
            client.update_instance(name, display_name, type_, labels)
    def test_partial_update_instance(self):
        # Setup Expected Response
        name = 'name3373707'
        display_name = 'displayName1615086568'
        expected_response = {'name': name, 'display_name': display_name}
        expected_response = instance_pb2.Instance(**expected_response)
        operation = operations_pb2.Operation(
            name='operations/test_partial_update_instance', done=True)
        operation.response.Pack(expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[operation])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup Request
        instance = {}
        update_mask = {}
        response = client.partial_update_instance(instance, update_mask)
        result = response.result()
        assert expected_response == result
        assert len(channel.requests) == 1
        expected_request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest(
            instance=instance, update_mask=update_mask)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_partial_update_instance_exception(self):
        # Setup Response
        error = status_pb2.Status()
        operation = operations_pb2.Operation(
            name='operations/test_partial_update_instance_exception',
            done=True)
        operation.error.CopyFrom(error)
        # Mock the API response
        channel = ChannelStub(responses=[operation])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup Request
        instance = {}
        update_mask = {}
        response = client.partial_update_instance(instance, update_mask)
        exception = response.exception()
        assert exception.errors[0] == error
    def test_delete_instance(self):
        channel = ChannelStub()
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup Request
        name = client.instance_path('[PROJECT]', '[INSTANCE]')
        client.delete_instance(name)
        assert len(channel.requests) == 1
        expected_request = bigtable_instance_admin_pb2.DeleteInstanceRequest(
            name=name)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_delete_instance_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup request
        name = client.instance_path('[PROJECT]', '[INSTANCE]')
        with pytest.raises(CustomException):
            client.delete_instance(name)
    def test_create_cluster(self):
        # Setup Expected Response
        name = 'name3373707'
        location = 'location1901043637'
        serve_nodes = 1288838783
        expected_response = {
            'name': name,
            'location': location,
            'serve_nodes': serve_nodes
        }
        expected_response = instance_pb2.Cluster(**expected_response)
        operation = operations_pb2.Operation(
            name='operations/test_create_cluster', done=True)
        operation.response.Pack(expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[operation])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup Request
        parent = client.instance_path('[PROJECT]', '[INSTANCE]')
        cluster_id = 'clusterId240280960'
        cluster = {}
        response = client.create_cluster(parent, cluster_id, cluster)
        result = response.result()
        assert expected_response == result
        assert len(channel.requests) == 1
        expected_request = bigtable_instance_admin_pb2.CreateClusterRequest(
            parent=parent, cluster_id=cluster_id, cluster=cluster)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_create_cluster_exception(self):
        # Setup Response
        error = status_pb2.Status()
        operation = operations_pb2.Operation(
            name='operations/test_create_cluster_exception', done=True)
        operation.error.CopyFrom(error)
        # Mock the API response
        channel = ChannelStub(responses=[operation])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup Request
        parent = client.instance_path('[PROJECT]', '[INSTANCE]')
        cluster_id = 'clusterId240280960'
        cluster = {}
        response = client.create_cluster(parent, cluster_id, cluster)
        exception = response.exception()
        assert exception.errors[0] == error
    def test_get_cluster(self):
        # Setup Expected Response
        name_2 = 'name2-1052831874'
        location = 'location1901043637'
        serve_nodes = 1288838783
        expected_response = {
            'name': name_2,
            'location': location,
            'serve_nodes': serve_nodes
        }
        expected_response = instance_pb2.Cluster(**expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup Request
        name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]')
        response = client.get_cluster(name)
        assert expected_response == response
        assert len(channel.requests) == 1
        expected_request = bigtable_instance_admin_pb2.GetClusterRequest(
            name=name)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_get_cluster_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup request
        name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]')
        with pytest.raises(CustomException):
            client.get_cluster(name)
    def test_list_clusters(self):
        # Setup Expected Response
        next_page_token = 'nextPageToken-1530815211'
        expected_response = {'next_page_token': next_page_token}
        expected_response = bigtable_instance_admin_pb2.ListClustersResponse(
            **expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup Request
        parent = client.instance_path('[PROJECT]', '[INSTANCE]')
        response = client.list_clusters(parent)
        assert expected_response == response
        assert len(channel.requests) == 1
        expected_request = bigtable_instance_admin_pb2.ListClustersRequest(
            parent=parent)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_list_clusters_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup request
        parent = client.instance_path('[PROJECT]', '[INSTANCE]')
        with pytest.raises(CustomException):
            client.list_clusters(parent)
    def test_update_cluster(self):
        # Setup Expected Response
        name_2 = 'name2-1052831874'
        location = 'location1901043637'
        serve_nodes_2 = 1623486220
        expected_response = {
            'name': name_2,
            'location': location,
            'serve_nodes': serve_nodes_2
        }
        expected_response = instance_pb2.Cluster(**expected_response)
        operation = operations_pb2.Operation(
            name='operations/test_update_cluster', done=True)
        operation.response.Pack(expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[operation])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup Request
        name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]')
        serve_nodes = 1288838783
        response = client.update_cluster(name, serve_nodes)
        result = response.result()
        assert expected_response == result
        assert len(channel.requests) == 1
        expected_request = instance_pb2.Cluster(
            name=name, serve_nodes=serve_nodes)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_update_cluster_exception(self):
        # Setup Response
        error = status_pb2.Status()
        operation = operations_pb2.Operation(
            name='operations/test_update_cluster_exception', done=True)
        operation.error.CopyFrom(error)
        # Mock the API response
        channel = ChannelStub(responses=[operation])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup Request
        name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]')
        serve_nodes = 1288838783
        response = client.update_cluster(name, serve_nodes)
        exception = response.exception()
        assert exception.errors[0] == error
    def test_delete_cluster(self):
        channel = ChannelStub()
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup Request
        name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]')
        client.delete_cluster(name)
        assert len(channel.requests) == 1
        expected_request = bigtable_instance_admin_pb2.DeleteClusterRequest(
            name=name)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_delete_cluster_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup request
        name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]')
        with pytest.raises(CustomException):
            client.delete_cluster(name)
    def test_create_app_profile(self):
        # Setup Expected Response
        name = 'name3373707'
        etag = 'etag3123477'
        description = 'description-1724546052'
        expected_response = {
            'name': name,
            'etag': etag,
            'description': description
        }
        expected_response = instance_pb2.AppProfile(**expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup Request
        parent = client.instance_path('[PROJECT]', '[INSTANCE]')
        app_profile_id = 'appProfileId1262094415'
        app_profile = {}
        response = client.create_app_profile(parent, app_profile_id,
                                             app_profile)
        assert expected_response == response
        assert len(channel.requests) == 1
        expected_request = bigtable_instance_admin_pb2.CreateAppProfileRequest(
            parent=parent,
            app_profile_id=app_profile_id,
            app_profile=app_profile)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_create_app_profile_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup request
        parent = client.instance_path('[PROJECT]', '[INSTANCE]')
        app_profile_id = 'appProfileId1262094415'
        app_profile = {}
        with pytest.raises(CustomException):
            client.create_app_profile(parent, app_profile_id, app_profile)
    def test_get_app_profile(self):
        # Setup Expected Response
        name_2 = 'name2-1052831874'
        etag = 'etag3123477'
        description = 'description-1724546052'
        expected_response = {
            'name': name_2,
            'etag': etag,
            'description': description
        }
        expected_response = instance_pb2.AppProfile(**expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup Request
        name = client.app_profile_path('[PROJECT]', '[INSTANCE]',
                                       '[APP_PROFILE]')
        response = client.get_app_profile(name)
        assert expected_response == response
        assert len(channel.requests) == 1
        expected_request = bigtable_instance_admin_pb2.GetAppProfileRequest(
            name=name)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_get_app_profile_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup request
        name = client.app_profile_path('[PROJECT]', '[INSTANCE]',
                                       '[APP_PROFILE]')
        with pytest.raises(CustomException):
            client.get_app_profile(name)
    def test_list_app_profiles(self):
        # Setup Expected Response
        next_page_token = ''
        app_profiles_element = {}
        app_profiles = [app_profiles_element]
        expected_response = {
            'next_page_token': next_page_token,
            'app_profiles': app_profiles
        }
        expected_response = bigtable_instance_admin_pb2.ListAppProfilesResponse(
            **expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup Request
        parent = client.instance_path('[PROJECT]', '[INSTANCE]')
        paged_list_response = client.list_app_profiles(parent)
        resources = list(paged_list_response)
        assert len(resources) == 1
        assert expected_response.app_profiles[0] == resources[0]
        assert len(channel.requests) == 1
        expected_request = bigtable_instance_admin_pb2.ListAppProfilesRequest(
            parent=parent)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_list_app_profiles_exception(self):
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup request
        parent = client.instance_path('[PROJECT]', '[INSTANCE]')
        paged_list_response = client.list_app_profiles(parent)
        with pytest.raises(CustomException):
            list(paged_list_response)
    def test_update_app_profile(self):
        # Setup Expected Response
        name = 'name3373707'
        etag = 'etag3123477'
        description = 'description-1724546052'
        expected_response = {
            'name': name,
            'etag': etag,
            'description': description
        }
        expected_response = instance_pb2.AppProfile(**expected_response)
        operation = operations_pb2.Operation(
            name='operations/test_update_app_profile', done=True)
        operation.response.Pack(expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[operation])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup Request
        app_profile = {}
        update_mask = {}
        response = client.update_app_profile(app_profile, update_mask)
        result = response.result()
        assert expected_response == result
        assert len(channel.requests) == 1
        expected_request = bigtable_instance_admin_pb2.UpdateAppProfileRequest(
            app_profile=app_profile, update_mask=update_mask)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_update_app_profile_exception(self):
        # Setup Response
        error = status_pb2.Status()
        operation = operations_pb2.Operation(
            name='operations/test_update_app_profile_exception', done=True)
        operation.error.CopyFrom(error)
        # Mock the API response
        channel = ChannelStub(responses=[operation])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup Request
        app_profile = {}
        update_mask = {}
        response = client.update_app_profile(app_profile, update_mask)
        exception = response.exception()
        assert exception.errors[0] == error
    def test_delete_app_profile(self):
        channel = ChannelStub()
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup Request
        name = client.app_profile_path('[PROJECT]', '[INSTANCE]',
                                       '[APP_PROFILE]')
        ignore_warnings = True
        client.delete_app_profile(name, ignore_warnings)
        assert len(channel.requests) == 1
        expected_request = bigtable_instance_admin_pb2.DeleteAppProfileRequest(
            name=name, ignore_warnings=ignore_warnings)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_delete_app_profile_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup request
        name = client.app_profile_path('[PROJECT]', '[INSTANCE]',
                                       '[APP_PROFILE]')
        ignore_warnings = True
        with pytest.raises(CustomException):
            client.delete_app_profile(name, ignore_warnings)
    def test_get_iam_policy(self):
        # Setup Expected Response
        version = 351608024
        etag = b'etag3123477'
        expected_response = {'version': version, 'etag': etag}
        expected_response = policy_pb2.Policy(**expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup Request
        resource = client.instance_path('[PROJECT]', '[INSTANCE]')
        response = client.get_iam_policy(resource)
        assert expected_response == response
        assert len(channel.requests) == 1
        expected_request = iam_policy_pb2.GetIamPolicyRequest(
            resource=resource)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_get_iam_policy_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup request
        resource = client.instance_path('[PROJECT]', '[INSTANCE]')
        with pytest.raises(CustomException):
            client.get_iam_policy(resource)
    def test_set_iam_policy(self):
        # Setup Expected Response
        version = 351608024
        etag = b'etag3123477'
        expected_response = {'version': version, 'etag': etag}
        expected_response = policy_pb2.Policy(**expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup Request
        resource = client.instance_path('[PROJECT]', '[INSTANCE]')
        policy = {}
        response = client.set_iam_policy(resource, policy)
        assert expected_response == response
        assert len(channel.requests) == 1
        expected_request = iam_policy_pb2.SetIamPolicyRequest(
            resource=resource, policy=policy)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_set_iam_policy_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup request
        resource = client.instance_path('[PROJECT]', '[INSTANCE]')
        policy = {}
        with pytest.raises(CustomException):
            client.set_iam_policy(resource, policy)
    def test_test_iam_permissions(self):
        # Setup Expected Response
        expected_response = {}
        expected_response = iam_policy_pb2.TestIamPermissionsResponse(
            **expected_response)
        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup Request
        resource = client.instance_path('[PROJECT]', '[INSTANCE]')
        permissions = []
        response = client.test_iam_permissions(resource, permissions)
        assert expected_response == response
        assert len(channel.requests) == 1
        expected_request = iam_policy_pb2.TestIamPermissionsRequest(
            resource=resource, permissions=permissions)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request
    def test_test_iam_permissions_exception(self):
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        patch = mock.patch('google.api_core.grpc_helpers.create_channel')
        with patch as create_channel:
            create_channel.return_value = channel
            client = bigtable_admin_v2.BigtableInstanceAdminClient()
        # Setup request
        resource = client.instance_path('[PROJECT]', '[INSTANCE]')
        permissions = []
        with pytest.raises(CustomException):
            client.test_iam_permissions(resource, permissions)
| |
import functools
from urllib.parse import urlencode, urljoin
from django import http, forms
from django.conf import settings
from django.contrib import admin
from django.core import validators
from django.forms.models import modelformset_factory
from django.http.response import (
HttpResponseForbidden,
HttpResponseNotAllowed,
HttpResponseRedirect,
)
from django.shortcuts import get_object_or_404
from django.urls import re_path, resolve, reverse
from django.utils.encoding import force_str
from django.utils.html import format_html, format_html_join
from django.utils.translation import gettext, gettext_lazy as _
import olympia.core.logger
from olympia import amo
from olympia.access import acl
from olympia.activity.models import ActivityLog
from olympia.addons.models import Addon, AddonUser
from olympia.amo.utils import send_mail
from olympia.files.models import File
from olympia.git.models import GitExtractionEntry
from olympia.ratings.models import Rating
from olympia.versions.models import Version
from olympia.zadmin.admin import related_content_link
from . import models
from .forms import AdminBaseFileFormSet, FileStatusForm
log = olympia.core.logger.getLogger('z.addons.admin')
class AddonUserInline(admin.TabularInline):
    """Inline editor for the authors (AddonUser rows) of an add-on."""

    model = AddonUser
    raw_id_fields = ('user',)
    readonly_fields = ('user_profile_link',)
    extra = 0

    def user_profile_link(self, obj):
        """Link to the author's admin profile, or '' for unsaved rows."""
        if not obj.pk:
            return ''
        profile_url = reverse('admin:users_userprofile_change', args=(obj.user.pk,))
        return format_html(
            '<a href="{}">Admin User Profile</a> ({})',
            profile_url,
            obj.user.email,
        )

    user_profile_link.short_description = 'User Profile'
class FileInlineChecks(admin.checks.InlineModelAdminChecks):
    """Admin checks for FileInline with the FK-to-parent check disabled."""

    def _check_relation(self, obj, parent_model):
        # File reaches Addon through Version rather than a direct FK, so the
        # stock relation check would (wrongly) fail; report no errors.
        return []
class FileInline(admin.TabularInline):
    """Inline listing of an add-on's Files; only `status` is editable.

    Files hang off Version (not Addon directly), so this inline keeps the
    parent Addon on ``self.instance`` (set in get_formset) and disables the
    stock relation check via FileInlineChecks.
    """

    model = File
    extra = 0
    max_num = 0
    fields = (
        'created',
        'version__version',
        'version__channel',
        'status',
        'version__is_blocked',
        'hash_link',
    )
    editable_fields = ('status',)
    # Everything except the editable fields is rendered read-only.
    readonly_fields = tuple(set(fields) - set(editable_fields))
    can_delete = False
    view_on_site = False
    template = 'admin/addons/file_inline.html'
    checks_class = FileInlineChecks

    def version__version(self, obj):
        # Flag deleted versions so they remain distinguishable in the listing.
        return obj.version.version + (' - Deleted' if obj.version.deleted else '')

    version__version.short_description = 'Version'

    def version__channel(self, obj):
        # Human-readable channel of the file's version.
        return obj.version.get_channel_display()

    version__channel.short_description = 'Channel'

    def version__is_blocked(self, obj):
        # self.instance is the parent Addon, stashed by get_formset() below.
        block = self.instance.block
        if not (block and block.is_version_blocked(obj.version.version)):
            return ''
        url = block.get_admin_url_path()
        template = '<a href="{}">Blocked ({} - {})</a>'
        return format_html(template, url, block.min_version, block.max_version)

    version__is_blocked.short_description = 'Block status'

    def hash_link(self, obj):
        # Link triggering hash recalculation for this file.
        url = reverse('zadmin.recalc_hash', args=(obj.id,))
        template = '<a href="{}" class="recalc" title="{}">Recalc Hash</a>'
        return format_html(template, url, obj.hash)

    hash_link.short_description = 'Hash'

    def get_formset(self, request, obj=None, **kwargs):
        # Remember the parent Addon for version__is_blocked()/get_queryset().
        self.instance = obj
        Formset = modelformset_factory(
            File,
            form=FileStatusForm,
            formset=AdminBaseFileFormSet,
            extra=self.get_extra(request, obj, **kwargs),
            min_num=self.get_min_num(request, obj, **kwargs),
            max_num=self.get_max_num(request, obj, **kwargs),
        )
        return Formset

    def has_add_permission(self, request, obj=None):
        # Files are never created through the admin.
        return False

    def get_queryset(self, request):
        # Paginate by version pk (30 per page) and show files of those versions.
        self.pager = amo.utils.paginate(
            request,
            Version.unfiltered.filter(addon=self.instance).values_list('pk', flat=True),
            30,
        )
        # A list coercion so this doesn't result in a subquery with a LIMIT
        # which MySQL doesn't support (at this time).
        versions = list(self.pager.object_list)
        qs = (
            super()
            .get_queryset(request)
            .filter(version__in=versions)
            .order_by('-version__id')
        )
        return qs.select_related('version')
class AddonAdmin(admin.ModelAdmin):
    """Admin for Addon objects.

    Adds author and reviewer-tools links to the changelist, a git-extraction
    action/view, and redirects slug/guid change-page URLs to the canonical
    pk-based URL.
    """

    class Media:
        css = {
            'all': (
                'css/admin/l10n.css',
                'css/admin/pagination.css',
                'css/admin/addons.css',
            )
        }
        js = ('admin/js/jquery.init.js', 'js/admin/l10n.js', 'js/admin/recalc_hash.js')

    list_display = (
        '__str__',
        'type',
        'guid',
        'status',
        'average_daily_users',
        'average_rating',
        'authors_links',
        'reviewer_links',
    )
    list_filter = ('type', 'status')
    search_fields = ('id', '^guid', '^slug')
    inlines = (AddonUserInline, FileInline)

    readonly_fields = (
        'id',
        'created',
        'average_rating',
        'bayesian_rating',
        'guid',
        'total_ratings_link',
        'text_ratings_count',
        'weekly_downloads',
        'average_daily_users',
    )

    fieldsets = (
        (
            None,
            {
                'fields': (
                    'id',
                    'created',
                    'name',
                    'slug',
                    'guid',
                    'default_locale',
                    'type',
                    'status',
                ),
            },
        ),
        (
            'Details',
            {
                'fields': (
                    'summary',
                    'description',
                    'homepage',
                    'eula',
                    'privacy_policy',
                    'developer_comments',
                    'icon_type',
                ),
            },
        ),
        (
            'Support',
            {
                'fields': ('support_url', 'support_email'),
            },
        ),
        (
            'Stats',
            {
                'fields': (
                    'total_ratings_link',
                    'average_rating',
                    'bayesian_rating',
                    'text_ratings_count',
                    'weekly_downloads',
                    'average_daily_users',
                ),
            },
        ),
        (
            'Flags',
            {
                'fields': (
                    'disabled_by_user',
                    'requires_payment',
                    'is_experimental',
                    'reputation',
                ),
            },
        ),
        (
            'Dictionaries and Language Packs',
            {
                'fields': ('target_locale',),
            },
        ),
    )

    actions = ['git_extract_action']

    def get_queryset(self, request):
        # We want _unlisted_versions_exists/_listed_versions_exists to avoid
        # repeating that query for each add-on in the list. A cleaner way to do
        # this would be to use annotate like this:
        # sub_qs = Version.unfiltered.filter(addon=OuterRef('pk')).values_list('id')
        # (...).annotate(
        #     _unlisted_versions_exists=Exists(
        #         sub_qs.filter(channel=amo.RELEASE_CHANNEL_UNLISTED)
        #     ),
        #     _listed_versions_exists=Exists(
        #         sub_qs.filter(channel=amo.RELEASE_CHANNEL_LISTED)
        #     ),
        # )
        # But while this works, the subquery is a lot less optimized (it does a full
        # query instead of the SELECT 1 ... LIMIT 1) and to make things worse django
        # admin doesn't know it's only for displayed data (it doesn't realize we aren't
        # filtering on it, and even if it did can't remove the annotations from the
        # queryset anyway) so it uses it for the count() queries as well, making them a
        # lot slower.
        subquery = (
            'SELECT 1 FROM `versions` WHERE `channel` = %s'
            ' AND `addon_id` = `addons`.`id` LIMIT 1'
        )
        extra = {
            'select': {
                '_unlisted_versions_exists': subquery,
                '_listed_versions_exists': subquery,
            },
            'select_params': (
                amo.RELEASE_CHANNEL_UNLISTED,
                amo.RELEASE_CHANNEL_LISTED,
            ),
        }
        return (
            Addon.unfiltered.all()
            .only_translations()
            .transform(Addon.attach_all_authors)
            .extra(**extra)
        )

    def get_urls(self):
        def wrap(view):
            def wrapper(*args, **kwargs):
                return self.admin_site.admin_view(view)(*args, **kwargs)

            return functools.update_wrapper(wrapper, view)

        urlpatterns = super().get_urls()
        custom_urlpatterns = [
            re_path(
                r'^(?P<object_id>.+)/git_extract/$',
                wrap(self.git_extract_view),
                name='addons_git_extract',
            ),
        ]
        return custom_urlpatterns + urlpatterns

    def authors_links(self, obj):
        # Note: requires .transform(Addon.attach_all_authors) to have been
        # applied to fill all_authors property and role on each user in it.
        authors = obj.all_authors
        return (
            format_html(
                '<ul>{}</ul>',
                format_html_join(
                    '',
                    '<li><a href="{}">{} ({}{})</a></li>',
                    (
                        (
                            urljoin(
                                settings.EXTERNAL_SITE_URL,
                                reverse(
                                    'admin:users_userprofile_change', args=(author.pk,)
                                ),
                            ),
                            author.email,
                            dict(amo.AUTHOR_CHOICES_UNFILTERED)[author.role],
                            ', Not listed' if author.listed is False else '',
                        )
                        for author in authors
                    ),
                ),
            )
            if authors
            else '-'
        )

    authors_links.short_description = _('Authors')

    def total_ratings_link(self, obj):
        """Changelist link to the add-on's non-reply ratings."""
        return related_content_link(
            obj,
            Rating,
            'addon',
            related_manager='without_replies',
            text=obj.total_ratings,
        )

    total_ratings_link.short_description = _('Ratings')

    def reviewer_links(self, obj):
        """Links to the listed/unlisted reviewer tools, when relevant."""
        links = []
        # _listed_versions_exists and _unlisted_versions_exists are provided
        # by the extra select in get_queryset().
        if obj._listed_versions_exists:
            links.append(
                '<a href="{}">{}</a>'.format(
                    urljoin(
                        settings.EXTERNAL_SITE_URL,
                        reverse('reviewers.review', args=['listed', obj.id]),
                    ),
                    _('Reviewer Tools (listed)'),
                )
            )
        if obj._unlisted_versions_exists:
            links.append(
                '<a href="{}">{}</a>'.format(
                    urljoin(
                        settings.EXTERNAL_SITE_URL,
                        reverse('reviewers.review', args=['unlisted', obj.id]),
                    ),
                    _('Reviewer Tools (unlisted)'),
                )
            )
        return format_html(' | '.join(links))

    reviewer_links.short_description = _('Reviewer links')

    def change_view(self, request, object_id, form_url='', extra_context=None):
        # Allow the change page to be reached by slug or guid, permanently
        # redirecting to the canonical pk-based URL.
        lookup_field = Addon.get_lookup_field(object_id)
        if lookup_field != 'pk':
            addon = None
            try:
                if lookup_field in ('slug', 'guid'):
                    addon = self.get_queryset(request).get(**{lookup_field: object_id})
            except Addon.DoesNotExist:
                raise http.Http404
            # Don't get in an infinite loop if addon.slug.isdigit().
            if addon and addon.id and addon.id != object_id:
                url = request.path.replace(object_id, str(addon.id), 1)
                if request.GET:
                    url += '?' + request.GET.urlencode()
                return http.HttpResponsePermanentRedirect(url)
        return super().change_view(
            request, object_id, form_url, extra_context=extra_context
        )

    def render_change_form(
        self, request, context, add=False, change=False, form_url='', obj=None
    ):
        # Expose channel availability to the change-form template.
        context.update(
            {
                'external_site_url': settings.EXTERNAL_SITE_URL,
                'has_listed_versions': obj.has_listed_versions(include_deleted=True)
                if obj
                else False,
                'has_unlisted_versions': obj.has_unlisted_versions(include_deleted=True)
                if obj
                else False,
            }
        )
        return super().render_change_form(
            request=request,
            context=context,
            add=add,
            change=change,
            form_url=form_url,
            obj=obj,
        )

    def save_model(self, request, obj, form, change):
        super().save_model(request, obj, form, change)
        # Status changes are audit-logged.
        if 'status' in form.changed_data:
            ActivityLog.create(amo.LOG.CHANGE_STATUS, obj, form.cleaned_data['status'])
            log.info(
                'Addon "%s" status changed to: %s'
                % (obj.slug, form.cleaned_data['status'])
            )

    def git_extract_action(self, request, qs):
        """Admin action: queue a git extraction for each selected add-on."""
        addon_ids = []
        for addon in qs:
            GitExtractionEntry.objects.create(addon=addon)
            addon_ids.append(force_str(addon))
        kw = {'addons': ', '.join(addon_ids)}
        # BUGFIX: interpolate *after* gettext() so the msgid passed to the
        # translation catalog is the untranslated template. Previously the
        # already-interpolated string was passed to gettext(), which could
        # never match a catalog entry.
        self.message_user(
            request, gettext('Git extraction triggered for "%(addons)s".') % kw
        )

    git_extract_action.short_description = 'Git-Extract'

    def git_extract_view(self, request, object_id, extra_context=None):
        """POST-only per-add-on endpoint triggering git_extract_action."""
        if request.method != 'POST':
            return HttpResponseNotAllowed(['POST'])
        if not acl.action_allowed(request, amo.permissions.ADDONS_EDIT):
            return HttpResponseForbidden()
        obj = get_object_or_404(Addon, id=object_id)
        self.git_extract_action(request, (obj,))
        return HttpResponseRedirect(
            reverse('admin:addons_addon_change', args=(obj.pk,))
        )
class FrozenAddonAdmin(admin.ModelAdmin):
    """Minimal admin for FrozenAddon: only the addon FK, via a raw-id widget."""

    raw_id_fields = ('addon',)
class ReplacementAddonForm(forms.ModelForm):
    """ModelForm validating that `path` is either an external URL (outside of
    SITE_URL) or a relative path that resolves to a known view."""

    def clean_path(self):
        path = None
        try:
            path = self.data.get('path')
            site = settings.SITE_URL
            if not models.ReplacementAddon.path_is_external(path):
                # Internal path: normalize to a leading slash, then make sure
                # it resolves to one of our URLs.
                if not path.startswith('/'):
                    path = '/' + path
                resolve(path)
            else:
                # External URL: full URLs pointing at our own site should have
                # been given as relative paths instead.
                if path.startswith(site):
                    raise forms.ValidationError(
                        'Paths for [%s] should be relative, not full URLs '
                        'including the domain name' % site
                    )
                validators.URLValidator()(path)
        except forms.ValidationError as validation_error:
            # Re-raise the ValidationError about full paths for SITE_URL.
            raise validation_error
        except Exception:
            raise forms.ValidationError('Path [%s] is not valid' % path)
        return path
class ReplacementAddonAdmin(admin.ModelAdmin):
    """Admin for ReplacementAddon rows (guid -> replacement path mapping)."""

    list_display = ('guid', 'path', 'guid_slug', '_url')
    form = ReplacementAddonForm

    def _url(self, obj):
        # Link to the find_replacement view for this guid, for manual testing.
        guid_param = urlencode({'guid': obj.guid})
        return format_html(
            '<a href="{}">Test</a>',
            reverse('addons.find_replacement') + '?%s' % guid_param,
        )

    def guid_slug(self, obj):
        # Slug of the add-on matching this guid, or a placeholder when the
        # add-on is not hosted here.
        try:
            slug = models.Addon.objects.get(guid=obj.guid).slug
        except models.Addon.DoesNotExist:
            slug = gettext('- Add-on not on AMO -')
        return slug

    def has_module_permission(self, request):
        # If one can see the changelist, then they have access to the module.
        return self.has_change_permission(request)

    def has_change_permission(self, request, obj=None):
        # If an obj is passed, then we're looking at the individual change page
        # for a replacement addon, otherwise we're looking at the list. When
        # looking at the list, we also allow users with Addons:Edit - they
        # won't be able to make any changes but they can see the list.
        if obj is not None:
            return super().has_change_permission(request, obj=obj)
        else:
            return acl.action_allowed(
                request, amo.permissions.ADDONS_EDIT
            ) or super().has_change_permission(request, obj=obj)
@admin.register(models.AddonRegionalRestrictions)
class AddonRegionalRestrictionsAdmin(admin.ModelAdmin):
    """Admin for per-region add-on restrictions; every change is mailed to
    the admins list."""

    list_display = ('addon__name', 'excluded_regions')
    fields = ('created', 'modified', 'addon', 'excluded_regions')
    raw_id_fields = ('addon',)
    readonly_fields = ('created', 'modified')

    def get_readonly_fields(self, request, obj=None):
        # The target add-on cannot be changed once the restriction exists.
        return self.readonly_fields + (('addon',) if obj else ())

    def addon__name(self, obj):
        return str(obj.addon)

    addon__name.short_description = 'Addon'

    def _send_mail(self, obj, action):
        # Notify the admins mailing list of the add/change/delete.
        message = (
            f'Regional restriction for addon "{obj.addon.name}" '
            f'[{obj.addon.id}] {action}: {obj.excluded_regions}'
        )
        send_mail(
            f'Regional Restriction {action} for Add-on',
            message,
            recipient_list=('amo-admins@mozilla.com',),
        )

    def delete_model(self, request, obj):
        # Mail first, while obj is still fully loaded.
        self._send_mail(obj, 'deleted')
        super().delete_model(request, obj)

    def save_model(self, request, obj, form, change):
        super().save_model(request, obj, form, change)
        self._send_mail(obj, 'changed' if change else 'added')
# Wire the admin classes to their models (AddonRegionalRestrictions is
# registered via the @admin.register decorator on its class above).
admin.site.register(models.DeniedGuid)
admin.site.register(models.Addon, AddonAdmin)
admin.site.register(models.FrozenAddon, FrozenAddonAdmin)
admin.site.register(models.ReplacementAddon, ReplacementAddonAdmin)
| |
# Status
import sys
class Status(object):
    """Status

    The status object of the running pools and tasks.

    To retrieve the status of a pool, use:
    * my_pool.status

    To retrieve the status of a task, use:
    * my_task.status

    .. note:: Read-only class
    """
    def __init__(self, json):
        # `json` is the decoded API payload (a dict); missing keys raise KeyError
        # except where .get()/membership checks are used below.
        self.download_progress = json['downloadProgress']
        """:type: :class:`float`

        Resources download progress to the instances."""
        self.execution_progress = json['executionProgress']
        """:type: :class:`float`

        Task execution progress."""
        self.upload_progress = json['uploadProgress']
        """:type: :class:`float`

        Task results upload progress to the API."""
        self.instance_count = json['instanceCount']
        """:type: :class:`int`

        Number of running instances."""
        self.download_time = json['downloadTime']
        """:type: :class:`str`

        Resources download time to the instances."""
        self.download_time_sec = json['downloadTimeSec']
        """:type: :class:`float`

        Resources download time to the instances in seconds."""
        self.environment_time = json['environmentTime']
        """:type: :class:`str`

        Environment time to the instances."""
        self.environment_time_sec = json['environmentTimeSec']
        """:type: :class:`float`

        Environment time to the instances in seconds."""
        self.execution_time = json['executionTime']
        """:type: :class:`str`

        Task execution time."""
        self.execution_time_sec = json['executionTimeSec']
        """:type: :class:`float`

        Task execution time in seconds."""
        self.upload_time = json['uploadTime']
        """:type: :class:`str`

        Task results upload time to the API."""
        self.upload_time_sec = json['uploadTimeSec']
        """:type: :class:`float`

        Task results upload time to the API in seconds."""
        self.wall_time = json["wallTime"]
        """:type: :class:`str`

        Wall time of the task."""
        self.wall_time_sec = json["wallTimeSec"]
        """:type: :class:`float`

        Wall time of the task in seconds."""
        self.succeeded_range = json['succeededRange']
        """:type: :class:`str`

        Successful instances range."""
        self.executed_range = json['executedRange']
        """:type: :class:`str`

        Executed instances range."""
        self.failed_range = json['failedRange']
        """:type: :class:`str`

        Failed instances range."""
        self.last_update_timestamp = json["lastUpdateTimestamp"]
        """:type: :class:`str`

        Last update time (UTC)."""
        self.execution_time_by_cpu_model = [ExecutionTimeByCpuModel(timeCpu) for timeCpu in json["executionTimeByCpuModel"]]
        """:type: list(:class:`ExecutionTimeByCpuModel`)

        Execution time grouped by CPU model."""
        self.execution_time_ghz_by_cpu_model = [ExecutionTimeGhzByCpuModel(timeCpu) for timeCpu in json["executionTimeGhzByCpuModel"]]
        """:type: list(:class:`ExecutionTimeGhzByCpuModel`)

        Execution time in GHz grouped by CPU model."""
        self.running_instances_info = None
        """:type: :class:`RunningInstancesInfo`

        Running instances information (None when absent from the payload)."""
        if 'runningInstancesInfo' in json and json['runningInstancesInfo'] is not None:
            self.running_instances_info = RunningInstancesInfo(json['runningInstancesInfo'])

    def __repr__(self):
        # Render every attribute as "name=value"; the py2 branch is legacy.
        if sys.version_info > (3, 0):
            return ', '.join("{0}={1}".format(key, val) for (key, val) in self.__dict__.items())
        else:
            return ', '.join("{0}={1}".format(key, val) for (key, val) in self.__dict__.iteritems())  # pylint: disable=no-member
class RunningInstancesInfo(object):
    """Running Instances Information

    .. note:: Read-only class
    """
    def __init__(self, json):
        # `json` is the decoded 'runningInstancesInfo' payload (a dict).
        self.per_running_instance_info = []
        """:type: list(:class:`PerRunningInstanceInfo`)

        Per running instances information."""
        if 'perRunningInstanceInfo' in json and json['perRunningInstanceInfo'] is not None:
            self.per_running_instance_info = [PerRunningInstanceInfo(x) for x in json['perRunningInstanceInfo']]
        self.timestamp = json['timestamp']
        """:type: :class:`str`

        Last information update timestamp."""
        self.average_frequency_ghz = json['averageFrequencyGHz']
        """:type: :class:`float`

        Average Frequency in GHz."""
        self.max_frequency_ghz = json['maxFrequencyGHz']
        """:type: :class:`float`

        Maximum Frequency in GHz."""
        self.min_frequency_ghz = json['minFrequencyGHz']
        """:type: :class:`float`

        Minimum Frequency in GHz."""
        self.average_max_frequency_ghz = json['averageMaxFrequencyGHz']
        """:type: :class:`float`

        Average Maximum Frequency in GHz."""
        self.average_cpu_usage = json['averageCpuUsage']
        """:type: :class:`float`

        Average CPU Usage."""
        self.cluster_power_indicator = json['clusterPowerIndicator']
        """:type: :class:`float`

        Cluster Power Indicator."""
        self.average_memory_usage = json['averageMemoryUsage']
        """:type: :class:`float`

        Average Memory Usage."""
        self.average_network_in_kbps = json['averageNetworkInKbps']
        """:type: :class:`float`

        Average Network Input in Kbps."""
        self.average_network_out_kbps = json['averageNetworkOutKbps']
        """:type: :class:`float`

        Average Network Output in Kbps."""
        self.total_network_in_kbps = json['totalNetworkInKbps']
        """:type: :class:`float`

        Total Network Input in Kbps."""
        self.total_network_out_kbps = json['totalNetworkOutKbps']
        """:type: :class:`float`

        Total Network Output in Kbps."""
        self.snapshot_results = json['snapshotResults']
        """Snapshot results.

        NOTE(review): the previous doc ("Total Network Output in Kbps",
        :class:`float`) was a copy-paste error; actual type/semantics are
        defined by the API payload - TODO confirm."""
        self.running_core_count_by_cpu_model = json['runningCoreCountByCpuModel']
        """Running core count grouped by CPU model.

        NOTE(review): the previous doc ("Total Network Output in Kbps",
        :class:`float`) was a copy-paste error; actual type/semantics are
        defined by the API payload - TODO confirm."""

    def __repr__(self):
        # Render every attribute as "name=value"; the py2 branch is legacy.
        if sys.version_info > (3, 0):
            return ', '.join("{0}={1}".format(key, val) for (key, val) in self.__dict__.items())
        else:
            return ', '.join("{0}={1}".format(key, val) for (key, val) in self.__dict__.iteritems())  # pylint: disable=no-member
class PerRunningInstanceInfo(object):
    """Per Running Instance Information

    .. note:: Read-only class
    """
    def __init__(self, json):
        # `json` is one entry of the 'perRunningInstanceInfo' list (a dict).
        self.phase = json['phase']
        """:type: :class:`str`

        Instance phase."""
        self.instance_id = json['instanceId']
        """:type: :class:`int`

        Instance number."""
        self.max_frequency_ghz = json['maxFrequencyGHz']
        """:type: :class:`float`

        Maximum CPU frequency in GHz."""
        self.current_frequency_ghz = json['currentFrequencyGHz']
        """:type: :class:`float`

        Current CPU frequency in GHz."""
        self.cpu_usage = json['cpuUsage']
        """:type: :class:`float`

        Current CPU usage."""
        self.max_memory_mb = json['maxMemoryMB']
        """:type: :class:`int`

        Maximum memory size in MB."""
        self.current_memory_mb = json['currentMemoryMB']
        """:type: :class:`int`

        Current memory size in MB."""
        self.memory_usage = json['memoryUsage']
        """:type: :class:`float`

        Current memory usage."""
        self.network_in_kbps = json['networkInKbps']
        """:type: :class:`float`

        Network Input in Kbps."""
        self.network_out_kbps = json['networkOutKbps']
        """:type: :class:`float`

        Network Output in Kbps."""
        self.progress = json['progress']
        """:type: :class:`float`

        Instance progress."""
        self.execution_time_sec = json['executionTimeSec']
        """:type: :class:`float`

        Instance execution time in seconds."""
        self.execution_time_ghz = json['executionTimeGHz']
        """:type: :class:`float`

        Instance execution time GHz"""
        self.cpu_model = json['cpuModel']
        """:type: :class:`str`

        CPU model"""
        self.execution_attempt_count = json.get('executionAttemptCount', 0)
        """:type: :class:`int`

        Number of execution attempts of an instance (mainly in case of preemption)."""
        self.active_forward = []
        """type: list(:class:`TaskActiveForward`)

        Active forwards list."""
        if 'activeForwards' in json:
            self.active_forward = [TaskActiveForward(x) for x in json['activeForwards']]
        self.vpn_connections = []
        """type: list(:class:`TaskVpnConnection`)

        Vpn connection list."""
        if "vpnConnections" in json:
            self.vpn_connections = [TaskVpnConnection(x) for x in json["vpnConnections"]]

    def __repr__(self):
        # Render every attribute as "name=value"; the py2 branch is legacy.
        if sys.version_info > (3, 0):
            return ', '.join("{0}={1}".format(key, val) for (key, val) in self.__dict__.items())
        else:
            return ', '.join("{0}={1}".format(key, val) for (key, val) in self.__dict__.iteritems())  # pylint: disable=no-member
class TaskActiveForward(object):
    """One active port forward of a task.

    .. note:: Read-only class
    """

    def __init__(self, json):
        # Port the application listens on inside the instance.
        self.application_port = json['applicationPort']
        # Port exposed on the forwarder host.
        self.forwarder_port = json['forwarderPort']
        # Host relaying the traffic.
        self.forwarder_host = json['forwarderHost']
        # Bind address of the listening socket on the forwarder host
        # (optional in the payload, hence .get()).
        self.bind_address = json.get('bindAddress')

    def __repr__(self):
        attrs = self.__dict__
        pairs = attrs.items() if sys.version_info > (3, 0) else attrs.iteritems()  # pylint: disable=no-member
        return ', '.join("{0}={1}".format(name, value) for (name, value) in pairs)
class ExecutionTimeByCpuModel:
    """Execution time aggregated for one CPU model.

    .. note:: Read-only class
    """

    def __init__(self, json):
        # CPU model name.
        self.model = json["model"]
        # Execution time in seconds.
        self.time = json["time"]
        # Number of CPU cores.
        self.core = json["core"]

    def __repr__(self):
        pairs = ("{0}={1}".format(name, value) for (name, value) in self.__dict__.items())
        return ', '.join(pairs)
class ExecutionTimeGhzByCpuModel:
    """Execution time in GHz aggregated for one CPU model.

    .. note:: Read-only class
    """

    def __init__(self, json):
        # CPU model name.
        self.model = json["model"]
        # Execution time in Gigahertz.
        self.time_ghz = json["timeGhz"]
        # CPU clock ratio.
        self.clock_ratio = json["clockRatio"]
        # Number of CPU cores.
        self.core = json["core"]

    def __repr__(self):
        pairs = ("{0}={1}".format(name, value) for (name, value) in self.__dict__.items())
        return ', '.join(pairs)
class TaskVpnConnection(object):
    """VPN connection information of a task instance.

    .. note:: Read-only class
    """

    def __init__(self, json):
        # VPN classless inter-domain routing (CIDR) address.
        self.node_ip_address_cidr = json['nodeIPAddressCidr']
        # VPN name.
        self.vpn_name = json['vpnName']

    def __repr__(self):
        attrs = self.__dict__
        pairs = attrs.items() if sys.version_info > (3, 0) else attrs.iteritems()  # pylint: disable=no-member
        return ', '.join("{0}={1}".format(name, value) for (name, value) in pairs)
| |
from lxml import etree
from pytz import UTC
import copy
import dateutil.parser
from datetime import timedelta
from .interchange import WaypointType, Activity, ActivityStatistic, ActivityStatistics, ActivityStatisticUnit, ActivityType, Waypoint, Location, Lap, LapIntensity, LapTriggerMethod
from .devices import DeviceIdentifier, DeviceIdentifierType, Device
class TCXIO:
    """Reads (Parse) and writes (Dump) Garmin TCX activity documents."""

    # XML namespace map used by every find()/findall() and by serialization.
    # The None key is the default (TrainingCenterDatabase) namespace.
    Namespaces = {
        None: "http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2",
        "ns2": "http://www.garmin.com/xmlschemas/UserProfile/v2",
        "tpx": "http://www.garmin.com/xmlschemas/ActivityExtension/v2",
        "ns4": "http://www.garmin.com/xmlschemas/ProfileExtension/v1",
        "ns5": "http://www.garmin.com/xmlschemas/ActivityGoals/v1",
        "xsi": "http://www.w3.org/2001/XMLSchema-instance"
    }
def Parse(tcxData, act=None):
ns = copy.deepcopy(TCXIO.Namespaces)
ns["tcx"] = ns[None]
del ns[None]
act = act if act else Activity()
act.GPS = False
try:
root = etree.XML(tcxData)
except:
root = etree.fromstring(tcxData)
xacts = root.find("tcx:Activities", namespaces=ns)
if xacts is None:
raise ValueError("No activities element in TCX")
xact = xacts.find("tcx:Activity", namespaces=ns)
if xact is None:
raise ValueError("No activity element in TCX")
xauthor = root.find("tcx:Author", namespaces=ns)
if xauthor is not None:
xauthorname = xauthor.find("tcx:Name", namespaces=ns)
if xauthorname is not None:
if xauthorname.text == "tapiriik":
act.OriginatedFromTapiriik = True
if not act.Type or act.Type == ActivityType.Other:
if xact.attrib["Sport"] == "Biking":
act.Type = ActivityType.Cycling
elif xact.attrib["Sport"] == "Running":
act.Type = ActivityType.Running
xnotes = xact.find("tcx:Notes", namespaces=ns)
if xnotes is not None:
xnotes_lines = xnotes.splitlines()
act.Name = xnotes_lines[0]
if len(xnotes_lines)>1:
act.Notes = '\n'.join(xnotes.text[1:])
xcreator = xact.find("tcx:Creator", namespaces=ns)
if xcreator is not None and xcreator.attrib["{" + TCXIO.Namespaces["xsi"] + "}type"] == "Device_t":
devId = DeviceIdentifier.FindMatchingIdentifierOfType(DeviceIdentifierType.TCX, {"ProductID": int(xcreator.find("tcx:ProductID", namespaces=ns).text)}) # Who knows if this is unique in the TCX ecosystem? We'll find out!
xver = xcreator.find("tcx:Version", namespaces=ns)
verMaj = None
verMin = None
if xver is not None:
verMaj = int(xver.find("tcx:VersionMajor", namespaces=ns).text)
verMin = int(xver.find("tcx:VersionMinor", namespaces=ns).text)
act.Device = Device(devId, int(xcreator.find("tcx:UnitId", namespaces=ns).text), verMaj=verMaj, verMin=verMin) # ID vs Id: ???
xlaps = xact.findall("tcx:Lap", namespaces=ns)
startTime = None
endTime = None
for xlap in xlaps:
lap = Lap()
act.Laps.append(lap)
lap.StartTime = dateutil.parser.parse(xlap.attrib["StartTime"])
totalTimeEL = xlap.find("tcx:TotalTimeSeconds", namespaces=ns)
if totalTimeEL is None:
raise ValueError("Missing lap TotalTimeSeconds")
lap.Stats.TimerTime = ActivityStatistic(ActivityStatisticUnit.Seconds, float(totalTimeEL.text))
lap.EndTime = lap.StartTime + timedelta(seconds=float(totalTimeEL.text))
distEl = xlap.find("tcx:DistanceMeters", namespaces=ns)
energyEl = xlap.find("tcx:Calories", namespaces=ns)
triggerEl = xlap.find("tcx:TriggerMethod", namespaces=ns)
intensityEl = xlap.find("tcx:Intensity", namespaces=ns)
# Some applications slack off and omit these, despite the fact that they're required in the spec.
# I will, however, require lap distance, because, seriously.
if distEl is None:
raise ValueError("Missing lap DistanceMeters")
lap.Stats.Distance = ActivityStatistic(ActivityStatisticUnit.Meters, float(distEl.text))
if energyEl is not None and energyEl.text:
lap.Stats.Energy = ActivityStatistic(ActivityStatisticUnit.Kilocalories, float(energyEl.text))
if lap.Stats.Energy.Value == 0:
lap.Stats.Energy.Value = None # It's dumb to make this required, but I digress.
if intensityEl is not None:
lap.Intensity = LapIntensity.Active if intensityEl.text == "Active" else LapIntensity.Rest
else:
lap.Intensity = LapIntensity.Active
if triggerEl is not None:
lap.Trigger = ({
"Manual": LapTriggerMethod.Manual,
"Distance": LapTriggerMethod.Distance,
"Location": LapTriggerMethod.PositionMarked,
"Time": LapTriggerMethod.Time,
"HeartRate": LapTriggerMethod.Manual # I guess - no equivalent in FIT
})[triggerEl.text]
else:
lap.Trigger = LapTriggerMethod.Manual # One would presume
maxSpdEl = xlap.find("tcx:MaximumSpeed", namespaces=ns)
if maxSpdEl is not None:
lap.Stats.Speed = ActivityStatistic(ActivityStatisticUnit.MetersPerSecond, max=float(maxSpdEl.text))
avgHREl = xlap.find("tcx:AverageHeartRateBpm", namespaces=ns)
if avgHREl is not None:
lap.Stats.HR = ActivityStatistic(ActivityStatisticUnit.BeatsPerMinute, avg=float(avgHREl.find("tcx:Value", namespaces=ns).text))
maxHREl = xlap.find("tcx:MaximumHeartRateBpm", namespaces=ns)
if maxHREl is not None:
lap.Stats.HR.update(ActivityStatistic(ActivityStatisticUnit.BeatsPerMinute, max=float(maxHREl.find("tcx:Value", namespaces=ns).text)))
# WF fills these in with invalid values.
lap.Stats.HR.Max = lap.Stats.HR.Max if lap.Stats.HR.Max and lap.Stats.HR.Max > 10 else None
lap.Stats.HR.Average = lap.Stats.HR.Average if lap.Stats.HR.Average and lap.Stats.HR.Average > 10 else None
cadEl = xlap.find("tcx:Cadence", namespaces=ns)
if cadEl is not None:
lap.Stats.Cadence = ActivityStatistic(ActivityStatisticUnit.RevolutionsPerMinute, avg=float(cadEl.text))
extsEl = xlap.find("tcx:Extensions", namespaces=ns)
if extsEl is not None:
lxEls = extsEl.findall("tpx:LX", namespaces=ns)
for lxEl in lxEls:
avgSpeedEl = lxEl.find("tpx:AvgSpeed", namespaces=ns)
if avgSpeedEl is not None:
lap.Stats.Speed.update(ActivityStatistic(ActivityStatisticUnit.MetersPerSecond, avg=float(avgSpeedEl.text)))
maxBikeCadEl = lxEl.find("tpx:MaxBikeCadence", namespaces=ns)
if maxBikeCadEl is not None:
lap.Stats.Cadence.update(ActivityStatistic(ActivityStatisticUnit.RevolutionsPerMinute, max=float(maxBikeCadEl.text)))
maxPowerEl = lxEl.find("tpx:MaxWatts", namespaces=ns)
if maxPowerEl is not None:
lap.Stats.Power.update(ActivityStatistic(ActivityStatisticUnit.Watts, max=float(maxPowerEl.text)))
avgPowerEl = lxEl.find("tpx:AvgWatts", namespaces=ns)
if avgPowerEl is not None:
lap.Stats.Power.update(ActivityStatistic(ActivityStatisticUnit.Watts, avg=float(avgPowerEl.text)))
maxRunCadEl = lxEl.find("tpx:MaxRunCadence", namespaces=ns)
if maxRunCadEl is not None:
lap.Stats.RunCadence.update(ActivityStatistic(ActivityStatisticUnit.StepsPerMinute, max=float(maxRunCadEl.text)))
avgRunCadEl = lxEl.find("tpx:AvgRunCadence", namespaces=ns)
if avgRunCadEl is not None:
lap.Stats.RunCadence.update(ActivityStatistic(ActivityStatisticUnit.StepsPerMinute, avg=float(avgRunCadEl.text)))
stepsEl = lxEl.find("tpx:Steps", namespaces=ns)
if stepsEl is not None:
lap.Stats.Strides.update(ActivityStatistic(ActivityStatisticUnit.Strides, value=float(stepsEl.text)))
xtrkseg = xlap.find("tcx:Track", namespaces=ns)
if xtrkseg is None:
# Some TCX files have laps with no track - not sure if it's valid or not.
continue
for xtrkpt in xtrkseg.findall("tcx:Trackpoint", namespaces=ns):
wp = Waypoint()
tsEl = xtrkpt.find("tcx:Time", namespaces=ns)
if tsEl is None:
raise ValueError("Trackpoint without timestamp")
wp.Timestamp = dateutil.parser.parse(tsEl.text)
wp.Timestamp.replace(tzinfo=UTC)
if startTime is None or wp.Timestamp < startTime:
startTime = wp.Timestamp
if endTime is None or wp.Timestamp > endTime:
endTime = wp.Timestamp
xpos = xtrkpt.find("tcx:Position", namespaces=ns)
if xpos is not None:
act.GPS = True
wp.Location = Location(float(xpos.find("tcx:LatitudeDegrees", namespaces=ns).text), float(xpos.find("tcx:LongitudeDegrees", namespaces=ns).text), None)
eleEl = xtrkpt.find("tcx:AltitudeMeters", namespaces=ns)
if eleEl is not None:
wp.Location = wp.Location if wp.Location else Location(None, None, None)
wp.Location.Altitude = float(eleEl.text)
distEl = xtrkpt.find("tcx:DistanceMeters", namespaces=ns)
if distEl is not None:
wp.Distance = float(distEl.text)
hrEl = xtrkpt.find("tcx:HeartRateBpm", namespaces=ns)
if hrEl is not None:
wp.HR = float(hrEl.find("tcx:Value", namespaces=ns).text)
cadEl = xtrkpt.find("tcx:Cadence", namespaces=ns)
if cadEl is not None:
wp.Cadence = float(cadEl.text)
extsEl = xtrkpt.find("tcx:Extensions", namespaces=ns)
if extsEl is not None:
tpxEl = extsEl.find("tpx:TPX", namespaces=ns)
if tpxEl is not None:
powerEl = tpxEl.find("tpx:Watts", namespaces=ns)
if powerEl is not None:
wp.Power = float(powerEl.text)
speedEl = tpxEl.find("tpx:Speed", namespaces=ns)
if speedEl is not None:
wp.Speed = float(speedEl.text)
runCadEl = tpxEl.find("tpx:RunCadence", namespaces=ns)
if runCadEl is not None:
wp.RunCadence = float(runCadEl.text)
lap.Waypoints.append(wp)
xtrkpt.clear()
del xtrkpt
if len(lap.Waypoints):
lap.EndTime = lap.Waypoints[-1].Timestamp
act.StartTime = act.Laps[0].StartTime if len(act.Laps) else act.StartTime
act.EndTime = act.Laps[-1].EndTime if len(act.Laps) else act.EndTime
if act.CountTotalWaypoints():
act.Stationary = False
act.GetFlatWaypoints()[0].Type = WaypointType.Start
act.GetFlatWaypoints()[-1].Type = WaypointType.End
else:
act.Stationary = True
if len(act.Laps) == 1:
act.Laps[0].Stats.update(act.Stats) # External source is authorative
act.Stats = act.Laps[0].Stats
else:
sum_stats = ActivityStatistics() # Blank
for lap in act.Laps:
sum_stats.sumWith(lap.Stats)
sum_stats.update(act.Stats)
act.Stats = sum_stats
act.CalculateUID()
return act
def Dump(activity):
root = etree.Element("TrainingCenterDatabase", nsmap=TCXIO.Namespaces)
activities = etree.SubElement(root, "Activities")
act = etree.SubElement(activities, "Activity")
author = etree.SubElement(root, "Author")
author.attrib["{" + TCXIO.Namespaces["xsi"] + "}type"] = "Application_t"
etree.SubElement(author, "Name").text = "tapiriik"
build = etree.SubElement(author, "Build")
version = etree.SubElement(build, "Version")
etree.SubElement(version, "VersionMajor").text = "0"
etree.SubElement(version, "VersionMinor").text = "0"
etree.SubElement(version, "BuildMajor").text = "0"
etree.SubElement(version, "BuildMinor").text = "0"
etree.SubElement(author, "LangID").text = "en"
etree.SubElement(author, "PartNumber").text = "000-00000-00"
dateFormat = "%Y-%m-%dT%H:%M:%S.000Z"
if activity.Name is not None and activity.Notes is not None:
etree.SubElement(act, "Notes").text = '\n'.join(activity.Name, activity.Notes)
elif activity.Name is not None:
etree.SubElement(act, "Notes").text = activity.Name
elif activity.Notes is not None:
etree.SubElement(act, "Notes").text = '\n' + activity.Notes
if activity.Type == ActivityType.Cycling:
act.attrib["Sport"] = "Biking"
elif activity.Type == ActivityType.Running:
act.attrib["Sport"] = "Running"
else:
act.attrib["Sport"] = "Other"
etree.SubElement(act, "Id").text = activity.StartTime.astimezone(UTC).strftime(dateFormat)
def _writeStat(parent, elName, value, wrapValue=False, naturalValue=False, default=None):
if value is not None or default is not None:
xstat = etree.SubElement(parent, elName)
if wrapValue:
xstat = etree.SubElement(xstat, "Value")
value = value if value is not None else default
xstat.text = str(value) if not naturalValue else str(int(value))
xlaps = []
for lap in activity.Laps:
xlap = etree.SubElement(act, "Lap")
xlaps.append(xlap)
xlap.attrib["StartTime"] = lap.StartTime.astimezone(UTC).strftime(dateFormat)
_writeStat(xlap, "TotalTimeSeconds", lap.Stats.TimerTime.asUnits(ActivityStatisticUnit.Seconds).Value if lap.Stats.TimerTime.Value else None, default=(lap.EndTime - lap.StartTime).total_seconds())
_writeStat(xlap, "DistanceMeters", lap.Stats.Distance.asUnits(ActivityStatisticUnit.Meters).Value)
_writeStat(xlap, "MaximumSpeed", lap.Stats.Speed.asUnits(ActivityStatisticUnit.MetersPerSecond).Max)
_writeStat(xlap, "Calories", lap.Stats.Energy.asUnits(ActivityStatisticUnit.Kilocalories).Value, default=0, naturalValue=True)
_writeStat(xlap, "AverageHeartRateBpm", lap.Stats.HR.Average, naturalValue=True, wrapValue=True)
_writeStat(xlap, "MaximumHeartRateBpm", lap.Stats.HR.Max, naturalValue=True, wrapValue=True)
etree.SubElement(xlap, "Intensity").text = "Resting" if lap.Intensity == LapIntensity.Rest else "Active"
_writeStat(xlap, "Cadence", lap.Stats.Cadence.Average, naturalValue=True)
etree.SubElement(xlap, "TriggerMethod").text = ({
LapTriggerMethod.Manual: "Manual",
LapTriggerMethod.Distance: "Distance",
LapTriggerMethod.PositionMarked: "Location",
LapTriggerMethod.Time: "Time",
LapTriggerMethod.PositionStart: "Location",
LapTriggerMethod.PositionLap: "Location",
LapTriggerMethod.PositionMarked: "Location",
LapTriggerMethod.SessionEnd: "Manual",
LapTriggerMethod.FitnessEquipment: "Manual"
})[lap.Trigger]
if len([x for x in [lap.Stats.Cadence.Max, lap.Stats.RunCadence.Max, lap.Stats.RunCadence.Average, lap.Stats.Strides.Value, lap.Stats.Power.Max, lap.Stats.Power.Average, lap.Stats.Speed.Average] if x is not None]):
exts = etree.SubElement(xlap, "Extensions")
lapext = etree.SubElement(exts, "LX")
lapext.attrib["xmlns"] = "http://www.garmin.com/xmlschemas/ActivityExtension/v2"
_writeStat(lapext, "MaxBikeCadence", lap.Stats.Cadence.Max, naturalValue=True)
# This dividing-by-two stuff is getting silly
_writeStat(lapext, "MaxRunCadence", lap.Stats.RunCadence.Max if lap.Stats.RunCadence.Max is not None else None, naturalValue=True)
_writeStat(lapext, "AvgRunCadence", lap.Stats.RunCadence.Average if lap.Stats.RunCadence.Average is not None else None, naturalValue=True)
_writeStat(lapext, "Steps", lap.Stats.Strides.Value, naturalValue=True)
_writeStat(lapext, "MaxWatts", lap.Stats.Power.asUnits(ActivityStatisticUnit.Watts).Max, naturalValue=True)
_writeStat(lapext, "AvgWatts", lap.Stats.Power.asUnits(ActivityStatisticUnit.Watts).Average, naturalValue=True)
_writeStat(lapext, "AvgSpeed", lap.Stats.Speed.asUnits(ActivityStatisticUnit.MetersPerSecond).Average)
inPause = False
for lap in activity.Laps:
xlap = xlaps[activity.Laps.index(lap)]
track = None
for wp in lap.Waypoints:
if wp.Type == WaypointType.Pause:
if inPause:
continue # this used to be an exception, but I don't think that was merited
inPause = True
if inPause and wp.Type != WaypointType.Pause:
inPause = False
if track is None: # Defer creating the track until there are points
track = etree.SubElement(xlap, "Track") # TODO - pauses should create new tracks instead of new laps?
trkpt = etree.SubElement(track, "Trackpoint")
if wp.Timestamp.tzinfo is None:
raise ValueError("TCX export requires TZ info")
etree.SubElement(trkpt, "Time").text = wp.Timestamp.astimezone(UTC).strftime(dateFormat)
if wp.Location:
if wp.Location.Latitude is not None and wp.Location.Longitude is not None:
pos = etree.SubElement(trkpt, "Position")
etree.SubElement(pos, "LatitudeDegrees").text = str(wp.Location.Latitude)
etree.SubElement(pos, "LongitudeDegrees").text = str(wp.Location.Longitude)
if wp.Location.Altitude is not None:
etree.SubElement(trkpt, "AltitudeMeters").text = str(wp.Location.Altitude)
if wp.Distance is not None:
etree.SubElement(trkpt, "DistanceMeters").text = str(wp.Distance)
if wp.HR is not None:
xhr = etree.SubElement(trkpt, "HeartRateBpm")
xhr.attrib["{" + TCXIO.Namespaces["xsi"] + "}type"] = "HeartRateInBeatsPerMinute_t"
etree.SubElement(xhr, "Value").text = str(int(wp.HR))
if wp.Cadence is not None:
etree.SubElement(trkpt, "Cadence").text = str(int(wp.Cadence))
if wp.Power is not None or wp.RunCadence is not None or wp.Speed is not None:
exts = etree.SubElement(trkpt, "Extensions")
gpxtpxexts = etree.SubElement(exts, "TPX")
gpxtpxexts.attrib["xmlns"] = "http://www.garmin.com/xmlschemas/ActivityExtension/v2"
if wp.Speed is not None:
etree.SubElement(gpxtpxexts, "Speed").text = str(wp.Speed)
if wp.RunCadence is not None:
etree.SubElement(gpxtpxexts, "RunCadence").text = str(int(wp.RunCadence))
if wp.Power is not None:
etree.SubElement(gpxtpxexts, "Watts").text = str(int(wp.Power))
if track is not None:
exts = xlap.find("Extensions")
if exts is not None:
track.addnext(exts)
if activity.Device and activity.Device.Identifier:
devId = DeviceIdentifier.FindEquivalentIdentifierOfType(DeviceIdentifierType.TCX, activity.Device.Identifier)
if devId:
xcreator = etree.SubElement(act, "Creator")
xcreator.attrib["{" + TCXIO.Namespaces["xsi"] + "}type"] = "Device_t"
etree.SubElement(xcreator, "Name").text = devId.Name
etree.SubElement(xcreator, "UnitId").text = str(activity.Device.Serial) if activity.Device.Serial else "0"
etree.SubElement(xcreator, "ProductID").text = str(devId.ProductID)
xver = etree.SubElement(xcreator, "Version")
etree.SubElement(xver, "VersionMajor").text = str(activity.Device.VersionMajor) if activity.Device.VersionMajor else "0" # Blegh.
etree.SubElement(xver, "VersionMinor").text = str(activity.Device.VersionMinor) if activity.Device.VersionMinor else "0"
etree.SubElement(xver, "BuildMajor").text = "0"
etree.SubElement(xver, "BuildMinor").text = "0"
return etree.tostring(root, pretty_print=True, xml_declaration=True, encoding="UTF-8").decode("UTF-8")
| |
"""
Copyright (C) 2012 Alan J Lockett
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from pyec.config import Config
from pyec.history import *
from pyec.space import *
from pyec.distribution.convex import Convex
from pyec.distribution.convolution import Convolution, SelfConvolution
from pyec.distribution.pso import ParticleSwarmOptimization as DE
from pyec.distribution.pso import PSOHistory
from pyec.distribution.truncation import TrajectoryTruncation
# Shared 5-dimensional Euclidean search space used by every test below.
space = Euclidean(dim=5)
def run(opt):
    """Drive ``opt`` for ten generations against a sphere objective.

    Exercises the optimizer's update/sample cycle; results are discarded —
    the point is simply that nothing blows up.
    """
    def objective(x):
        return (x ** 2).sum()
    population = None
    search_space = opt.config.space
    hist = opt.config.history(opt.config)
    for _ in xrange(10):
        population = opt[hist.update(population, objective, search_space), objective]()
def test_cls_getitem():
    """Class-level ``[]`` specialization must propagate config and stay overridable."""
    SevenDE = DE[Config(populationSize=7)]
    assert SevenDE.config.populationSize == 7
    opt_default = SevenDE(space=space)
    assert opt_default.config.populationSize == 7
    opt_override = SevenDE(populationSize=5, space=space)
    assert opt_override.config.populationSize == 5
    run(opt_default)
    run(opt_override)
    assert isinstance(opt_default.history, PSOHistory)
    assert isinstance(opt_override.history, PSOHistory)
def test_cls_convolve():
DEDE = DE << DE
assert issubclass(DEDE, Convolution)
dede = DEDE(populationSize=11, space=space)
assert dede.config.populationSize == 11
assert len(dede.subs) == 2
de = dede.subs[0]
assert de.config.populationSize == 11
assert isinstance(de, DE)
assert isinstance(dede, DEDE)
run(dede)
assert isinstance(dede.history, CheckpointedMultipleHistory)
assert isinstance(de.history, PSOHistory)
print "evals: ", dede.history.evals, de.history.evals
assert dede.history.evals == de.history.evals
def test_cls_self_convolve():
    """``DE << 10`` yields a SelfConvolution applying the optimizer ten times."""
    Repeated = DE << 10
    assert issubclass(Repeated, SelfConvolution)
    repeated = Repeated(populationSize=13, space=space)
    assert repeated.config.populationSize == 13
    assert repeated.times == 10
    inner = repeated.opt
    assert inner.config.populationSize == 13
    assert isinstance(inner, DE)
    assert isinstance(repeated, Repeated)
    run(repeated)
    assert isinstance(repeated.history, CheckpointedHistory)
    assert isinstance(inner.history, PSOHistory)
    assert repeated.history.evals == inner.history.evals
def test_cls_scalar_multiply():
    """Scalar multiplication of optimizer *classes* accumulates weights multiplicatively."""
    Doubled = 2 * DE
    Rescaled = Doubled * 1.7
    assert issubclass(Doubled, DE)
    assert issubclass(Rescaled, DE)
    assert issubclass(Rescaled, Doubled)
    assert DE.weight == 1.0
    assert Doubled.weight == 1.0 * 2
    assert Rescaled.weight == 1.0 * 2 * 1.7
    plain = DE(populationSize=5, space=space)
    doubled = Doubled(populationSize=7, space=space)
    rescaled = Rescaled(populationSize=11, space=space)
    assert plain.weight == 1.0
    assert doubled.weight == 1.0 * 2
    assert rescaled.weight == 1.0 * 2 * 1.7
    assert plain.config.populationSize == 5
    assert doubled.config.populationSize == 7
    assert rescaled.config.populationSize == 11
    run(plain)
    run(doubled)
    run(rescaled)
    assert isinstance(doubled.history, PSOHistory)
def test_cls_convex():
    """A weighted sum of optimizer classes produces a Convex combination."""
    Blend = .1 * DE + .6 * DE
    assert issubclass(Blend, Convex)
    blend = Blend(populationSize=13, space=space)
    assert len(blend.subs) == 2
    assert blend.subs[0].weight == .1
    assert blend.subs[1].weight == .6
    assert isinstance(blend.subs[0], .1 * DE)
    assert isinstance(blend.subs[1], .6 * DE)
    run(blend)
    assert isinstance(blend.history, MultipleHistory)
    assert isinstance(blend.subs[0].history, PSOHistory)
    assert isinstance(blend.subs[1].history, PSOHistory)
    assert blend.subs[0].history.evals == blend.history.evals
def test_cls_truncate():
    """``DE >> 5`` wraps the optimizer class in a TrajectoryTruncation of delay 5."""
    Delayed = DE >> 5
    assert issubclass(Delayed, TrajectoryTruncation)
    delayed = Delayed(populationSize=17, space=space)
    assert isinstance(delayed.opt, DE)
    assert delayed.delay == 5
    run(delayed)
    assert isinstance(delayed.history, DelayedHistory)
    assert isinstance(delayed.opt.history, PSOHistory)
    assert delayed.history.delay == 5
    assert delayed.history.evals == delayed.opt.history.evals
def test_cls_truncate_convolve():
    """``DE >> DE`` convolves, truncating the right-hand optimizer by one step."""
    Piped = DE >> DE
    assert issubclass(Piped, Convolution)
    piped = Piped(populationSize=19, space=space)
    assert len(piped.subs) == 2
    assert isinstance(piped.subs[0], DE)
    assert isinstance(piped.subs[1], TrajectoryTruncation)
    assert piped.subs[1].delay == 1
    assert isinstance(piped.subs[1].opt, DE)
    run(piped)
    assert isinstance(piped.history, CheckpointedMultipleHistory)
    assert isinstance(piped.subs[0].history, PSOHistory)
    assert isinstance(piped.subs[1].history, DelayedHistory)
    assert isinstance(piped.subs[1].opt.history, PSOHistory)
    assert piped.history.evals == piped.subs[0].history.evals
    # The truncated side lags by one generation: one population of 19 evals.
    assert piped.subs[1].history.evals == piped.history.evals - 19
def test_obj_convolve():
    """Convolving optimizer *instances* mirrors the class-level behavior."""
    Sized = DE[Config(populationSize=11, space=space)]
    chained = Sized() << Sized()
    assert isinstance(chained, Convolution)
    assert chained.config.populationSize == 11
    assert len(chained.subs) == 2
    first = chained.subs[0]
    assert first.config.populationSize == 11
    assert isinstance(first, Sized)
    run(chained)
    assert isinstance(chained.history, CheckpointedMultipleHistory)
    assert isinstance(first.history, PSOHistory)
    assert chained.history.evals == first.history.evals
def test_obj_self_convolve():
    """An instance convolved with an int self-convolves that many times."""
    repeated = DE(populationSize=13, space=space) << 10
    assert isinstance(repeated, SelfConvolution)
    assert repeated.config.populationSize == 13
    assert repeated.times == 10
    inner = repeated.opt
    assert inner.config.populationSize == 13
    assert isinstance(inner, DE)
    run(repeated)
    assert isinstance(repeated.history, CheckpointedHistory)
    assert isinstance(inner.history, PSOHistory)
    assert repeated.history.evals == inner.history.evals
def test_obj_convex():
    """A weighted sum of optimizer instances produces a Convex combination."""
    Sized = DE[Config(populationSize=13, space=space)]
    blend = .1 * Sized() + .6 * Sized()
    assert isinstance(blend, Convex)
    assert len(blend.subs) == 2
    assert blend.subs[0].weight == .1
    assert blend.subs[1].weight == .6
    assert isinstance(blend.subs[0], Sized)
    assert isinstance(blend.subs[1], Sized)
    run(blend)
    assert isinstance(blend.history, MultipleHistory)
    assert isinstance(blend.subs[0].history, PSOHistory)
    assert isinstance(blend.subs[1].history, PSOHistory)
    assert blend.subs[0].history.evals == blend.history.evals
def test_obj_scalar_multiply():
    """Scalar multiplication of instances rescales weight and yields a new object."""
    Sized = DE[Config(populationSize=5, space=space)]
    plain = Sized()
    doubled = 2 * Sized(populationSize=7)
    rescaled = doubled * 1.7
    assert isinstance(rescaled, Sized)
    assert doubled is not plain
    assert rescaled is not doubled
    assert plain.weight == 1.0
    assert doubled.weight == 1.0 * 2
    assert rescaled.weight == 1.0 * 2 * 1.7
    assert plain.config.populationSize == 5
    assert doubled.config.populationSize == 7
    # Multiplying an instance keeps its existing config.
    assert rescaled.config.populationSize == 7
    run(plain)
    run(doubled)
    run(rescaled)
    assert isinstance(doubled.history, PSOHistory)
def test_obj_truncate():
    """``instance >> int`` delays the optimizer's trajectory by that many steps."""
    delayed = DE(populationSize=17, space=space) >> 5
    assert isinstance(delayed, TrajectoryTruncation)
    assert isinstance(delayed.opt, DE)
    assert delayed.delay == 5
    run(delayed)
    assert isinstance(delayed.history, DelayedHistory)
    assert isinstance(delayed.opt.history, PSOHistory)
    assert delayed.history.delay == 5
    assert delayed.history.evals == delayed.opt.history.evals
def test_obj_truncate_convolve():
    """``instance >> instance`` convolves with a one-step truncation on the right."""
    Sized = DE[Config(populationSize=19, space=space)]
    piped = Sized() >> Sized()
    assert isinstance(piped, Convolution)
    assert isinstance(piped.subs[0], DE)
    assert isinstance(piped.subs[1], TrajectoryTruncation)
    assert piped.subs[1].delay == 1
    assert isinstance(piped.subs[1].opt, DE)
    run(piped)
    assert isinstance(piped.history, CheckpointedMultipleHistory)
    assert isinstance(piped.subs[0].history, PSOHistory)
    assert isinstance(piped.subs[1].history, DelayedHistory)
    assert isinstance(piped.subs[1].opt.history, PSOHistory)
    assert piped.history.evals == piped.subs[0].history.evals
    # The truncated side lags by one generation: one population of 19 evals.
    assert piped.subs[1].history.evals == piped.history.evals - 19
| |
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2015 Alex Headley <aheadley@waysaboutstuff.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__all__ = ['decompress', 'compress', 'LZSS']
import struct
import logging
from naabal.util import StringIO
from naabal.util.bitio import BitReader, BitWriter
logger = logging.getLogger('naabal.util.lzss')
def MOD_WINDOW(value):
    """Wrap *value* into the 4096-byte LZSS window.

    Equivalent to ``value & (LZSS.WINDOW_SIZE - 1)`` since the window size
    is a power of two; the literal 4096 is used because LZSS is defined
    below this helper. A ``def`` replaces the original lambda assignment
    (PEP 8: never bind a lambda to a name).
    """
    return value % 4096
class LZSS(object):
    """LZSS compressor/decompressor over file-like byte streams.

    Token layout on the bit stream: a 1 bit followed by 8 literal bits,
    or a 0 bit followed by a 12-bit window index and a 4-bit match length.
    NOTE(review): assumes Python 2 semantics — ``/`` truncates so
    BREAK_EVEN == 1, and ``xrange``/``chr`` byte handling is used; confirm
    before porting to Python 3.
    """
    INDEX_BIT_COUNT = 12   # bits encoding a window position
    LENGTH_BIT_COUNT = 4   # bits encoding a match length
    WINDOW_SIZE = 1 << INDEX_BIT_COUNT # 4096
    RAW_LOOK_AHEAD_SIZE = 1 << LENGTH_BIT_COUNT # 16
    BREAK_EVEN = (1 + INDEX_BIT_COUNT + LENGTH_BIT_COUNT) / 9 # 1; shortest match worth encoding
    LOOK_AHEAD_SIZE = RAW_LOOK_AHEAD_SIZE + BREAK_EVEN
    TREE_ROOT = WINDOW_SIZE   # sentinel index of the match-tree root node
    END_OF_STREAM = 0x000     # reserved window index marking end of data
    UNUSED = 0                # null link in the match tree
    def compress_stream(self, input_buffer, output_buffer):
        """Compress bytes from *input_buffer* into *output_buffer*.

        Returns ``bit_writer.index`` after the end-of-stream marker —
        presumably the number of units written; confirm against BitWriter.
        NOTE(review): if *input_buffer* is empty the priming loop never
        binds ``i`` and the ``look_ahead_bytes = i + 1`` line raises
        NameError — confirm callers never pass empty input.
        """
        current_position = 1
        match_length = 0
        match_position = 0
        window = bytearray(self.WINDOW_SIZE)
        # Prime the look-ahead region of the window from the input.
        for i in xrange(self.LOOK_AHEAD_SIZE):
            c = input_buffer.read(1)
            if len(c) == 0:
                break
            window[current_position + i] = ord(c)
        look_ahead_bytes = i + 1
        tree = LZSSTree(current_position, window)
        with BitWriter(output_buffer) as bit_writer:
            while look_ahead_bytes > 0:
                if match_length > look_ahead_bytes:
                    match_length = look_ahead_bytes
                if match_length <= self.BREAK_EVEN:
                    # Too short to pay for an index/length pair: emit literal.
                    replace_count = 1
                    bit_writer.write_bit(1)
                    bit_writer.write_bits(window[current_position], 8)
                else:
                    # Emit a (position, length) match token; length is stored
                    # biased by BREAK_EVEN + 1 to extend the usable range.
                    bit_writer.write_bit(0)
                    bit_writer.write_bits(match_position, self.INDEX_BIT_COUNT)
                    bit_writer.write_bits(match_length - (self.BREAK_EVEN + 1), self.LENGTH_BIT_COUNT)
                    replace_count = match_length
                # Slide the window forward by the number of bytes consumed,
                # keeping the match tree in sync.
                for i in xrange(replace_count):
                    tree.delete_string(MOD_WINDOW(current_position + self.LOOK_AHEAD_SIZE))
                    c = input_buffer.read(1)
                    if len(c) == 0:
                        look_ahead_bytes -= 1
                    else:
                        window[MOD_WINDOW(current_position + self.LOOK_AHEAD_SIZE)] = ord(c)
                    current_position = MOD_WINDOW(current_position + 1)
                    if look_ahead_bytes:
                        match_length, match_position = tree.add_string(current_position, match_position)
            # end while
            # Terminate: a match token pointing at the reserved index 0.
            bit_writer.write_bit(0)
            bit_writer.write_bits(self.END_OF_STREAM, self.INDEX_BIT_COUNT)
            size = bit_writer.index
        return size
    def compress(self, input_data):
        """Compress the byte string *input_data* and return the result."""
        input_handle = StringIO(input_data)
        output_handle = StringIO()
        self.compress_stream(input_handle, output_handle)
        return output_handle.getvalue()
    def decompress_stream(self, input_buffer, output_buffer):
        """Decode LZSS tokens from *input_buffer* into *output_buffer*.

        Returns the number of bytes written to *output_buffer*.
        """
        current_position = 1
        window = bytearray(self.WINDOW_SIZE)
        output_buffer_pos_start = output_buffer.tell()
        with BitReader(input_buffer) as bit_reader:
            while True:
                pass_through = bit_reader.read_bit()
                if pass_through:
                    # Literal token: next 8 bits are a raw byte.
                    c = bit_reader.read_bits(8)
                    output_buffer.write(chr(c))
                    window[current_position] = c
                    current_position = MOD_WINDOW(current_position + 1)
                else:
                    # Match token: window index + biased length.
                    match_position = bit_reader.read_bits(self.INDEX_BIT_COUNT)
                    if match_position == self.END_OF_STREAM:
                        break
                    match_length = bit_reader.read_bits(self.LENGTH_BIT_COUNT)
                    match_length += self.BREAK_EVEN
                    # Copy match_length + 1 bytes back out of the window.
                    for i in xrange(match_length + 1):
                        c = window[MOD_WINDOW(match_position + i)]
                        output_buffer.write(chr(c))
                        window[current_position] = c
                        current_position = MOD_WINDOW(current_position + 1)
        return output_buffer.tell() - output_buffer_pos_start
    def decompress(self, input_data):
        """Decompress the byte string *input_data* and return the result."""
        input_handle = StringIO(input_data)
        output_handle = StringIO()
        self.decompress_stream(input_handle, output_handle)
        return output_handle.getvalue()
def decompress(data):
    """Module-level convenience wrapper: decompress *data* with a fresh LZSS codec."""
    codec = LZSS()
    return codec.decompress(data)
def compress(data):
    """Module-level convenience wrapper: compress *data* with a fresh LZSS codec."""
    codec = LZSS()
    return codec.compress(data)
class LZSSTreeNode(object):
    """One node of the LZSS match tree: three links stored as window indices.

    The links default to 0 (``LZSS.UNUSED``) at the class level so a fresh
    node starts fully detached.
    """
    parent = 0
    smaller_child = 0
    larger_child = 0

    def __repr__(self):
        fields = (self.parent, self.larger_child, self.smaller_child)
        return '<%4d . %4d . %4d>' % fields

    def copy_node(self, source_node):
        """Overwrite this node's links with those of *source_node*."""
        for attr in ('parent', 'smaller_child', 'larger_child'):
            setattr(self, attr, getattr(source_node, attr))
class LZSSTree(object):
    """Binary search tree over window positions, ordered by the byte strings
    starting at each position; used by the compressor to find the longest
    match for the current look-ahead buffer.
    """
    _data = None  # list of LZSSTreeNode, one per window slot plus the root sentinel
    def __init__(self, root_idx, window):
        """Build a tree over *window* with *root_idx* as the sole initial entry."""
        self._window = window
        self._data = [LZSSTreeNode() for i in xrange(LZSS.WINDOW_SIZE + 1)]
        self[LZSS.TREE_ROOT].larger_child = root_idx
        self[root_idx].parent = LZSS.TREE_ROOT
        self[root_idx].larger_child = LZSS.UNUSED
        self[root_idx].smaller_child = LZSS.UNUSED
        logger.debug('Init LZSS tree of %d elements', len(self._data))
    def __repr__(self):
        return repr(self._data)
    def __getitem__(self, key):
        # Index nodes directly by window position.
        return self._data[key]
    def __setitem__(self, key, value):
        self._data[key] = value
    def contract_node(self, old_node, new_node):
        """Splice *new_node* into *old_node*'s place (old node has <= 1 child)."""
        self[new_node].parent = self[old_node].parent
        if self[self[old_node].parent].larger_child == old_node:
            self[self[old_node].parent].larger_child = new_node
        else:
            self[self[old_node].parent].smaller_child = new_node
        self[old_node].parent = LZSS.UNUSED
    def replace_node(self, old_node, new_node):
        """Substitute *new_node* for *old_node*, adopting both of its children."""
        parent = self[old_node].parent
        if self[parent].smaller_child == old_node:
            self[parent].smaller_child = new_node
        else:
            self[parent].larger_child = new_node
        self[new_node].copy_node(self[old_node])
        self[self[new_node].smaller_child].parent = new_node
        self[self[new_node].larger_child].parent = new_node
        self[old_node].parent = LZSS.UNUSED
    def find_next_node(self, node):
        """Return the in-order predecessor: the largest entry of *node*'s smaller subtree."""
        next = self[node].smaller_child
        while self[next].larger_child != LZSS.UNUSED:
            next = self[next].larger_child
        return next
    def delete_string(self, p):
        """Remove window position *p* from the tree (no-op if not present)."""
        if self[p].parent == LZSS.UNUSED:
            return
        if self[p].larger_child == LZSS.UNUSED:
            self.contract_node(p, self[p].smaller_child)
        elif self[p].smaller_child == LZSS.UNUSED:
            self.contract_node(p, self[p].larger_child)
        else:
            # Two children: lift the in-order predecessor into p's slot.
            replacement = self.find_next_node(p)
            self.delete_string(replacement)
            self.replace_node(p, replacement)
    def add_string(self, new_node, match_position):
        """Insert window position *new_node* and return ``(match_length,
        match_position)`` for the best match found on the way down.
        """
        if new_node == LZSS.END_OF_STREAM:
            return (0, match_position)
        test_node = self[LZSS.TREE_ROOT].larger_child
        match_length = 0
        while True:
            # Compare up to LOOK_AHEAD_SIZE bytes; delta orders the descent.
            for i in xrange(LZSS.LOOK_AHEAD_SIZE):
                delta = self._window[MOD_WINDOW(new_node + i)] - \
                    self._window[MOD_WINDOW(test_node + i)]
                if delta != 0:
                    break
            if i >= match_length:
                match_length = i
                match_position = test_node
            if match_length >= LZSS.LOOK_AHEAD_SIZE:
                # Exact full-length match: take over the existing node's slot.
                self.replace_node(test_node, new_node)
                return (match_length, match_position)
            if delta >= 0:
                child_attr = 'larger_child'
            else:
                child_attr = 'smaller_child'
            if getattr(self[test_node], child_attr) == LZSS.UNUSED:
                # Reached a leaf edge: attach new_node here.
                setattr(self[test_node], child_attr, new_node)
                self[new_node].parent = test_node
                self[new_node].larger_child = LZSS.UNUSED
                self[new_node].smaller_child = LZSS.UNUSED
                return (match_length, match_position)
            test_node = getattr(self[test_node], child_attr)
| |
import unittest
import numpy
import chainer
from chainer import cuda
import chainer.functions as F
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
class BinaryOpTestBase(object):
    """Shared forward/backward tests for Variable binary operators.

    Subclasses mix this into a TestCase and implement :meth:`make_data`
    returning ``(x1, x2, gy)`` arrays. Forward tests check operator results
    against plain ndarray arithmetic; backward tests check analytic
    gradients against numerical ones.
    """
    def make_data(self):
        # Subclass hook: return (x1, x2, gy) ndarrays.
        raise NotImplementedError()
    def setUp(self):
        self.x1, self.x2, self.gy = self.make_data()
    def check_forward(self, op, x1_data, x2_data):
        """Apply *op* to Variables and compare with applying it to raw arrays."""
        x1 = chainer.Variable(x1_data)
        x2 = chainer.Variable(x2_data)
        y = op(x1, x2)
        gradient_check.assert_allclose(op(self.x1, self.x2), y.data)
    def forward_cpu(self, op):
        self.check_forward(op, self.x1, self.x2)
    # --- forward, CPU ---
    @condition.retry(3)
    def test_add_forward_cpu(self):
        self.forward_cpu(lambda x, y: x + y)
    @condition.retry(3)
    def test_sub_forward_cpu(self):
        self.forward_cpu(lambda x, y: x - y)
    @condition.retry(3)
    def test_mul_forward_cpu(self):
        self.forward_cpu(lambda x, y: x * y)
    @condition.retry(3)
    def test_div_forward_cpu(self):
        self.forward_cpu(lambda x, y: x / y)
    @condition.retry(3)
    def test_pow_forward_cpu(self):
        self.forward_cpu(lambda x, y: x ** y)
    # Reflected operators are invoked explicitly to bypass the left
    # operand's own implementation.
    @condition.retry(3)
    def test_radd_forward_cpu(self):
        self.forward_cpu(lambda x, y: y.__radd__(x))
    @condition.retry(3)
    def test_rsub_forward_cpu(self):
        self.forward_cpu(lambda x, y: y.__rsub__(x))
    @condition.retry(3)
    def test_rmul_forward_cpu(self):
        self.forward_cpu(lambda x, y: y.__rmul__(x))
    @condition.retry(3)
    def test_rdiv_forward_cpu(self):
        self.forward_cpu(lambda x, y: y.__rtruediv__(x))
    @condition.retry(3)
    def test_rpow_forward_cpu(self):
        self.forward_cpu(lambda x, y: y.__rpow__(x))
    def forward_gpu(self, op):
        self.check_forward(op, cuda.to_gpu(self.x1), cuda.to_gpu(self.x2))
    # --- forward, GPU ---
    @attr.gpu
    @condition.retry(3)
    def test_add_forward_gpu(self):
        self.forward_gpu(lambda x, y: x + y)
    @attr.gpu
    @condition.retry(3)
    def test_sub_forward_gpu(self):
        self.forward_gpu(lambda x, y: x - y)
    @attr.gpu
    @condition.retry(3)
    def test_mul_forward_gpu(self):
        self.forward_gpu(lambda x, y: x * y)
    @attr.gpu
    @condition.retry(3)
    def test_div_forward_gpu(self):
        self.forward_gpu(lambda x, y: x / y)
    @attr.gpu
    @condition.retry(3)
    def test_pow_forward_gpu(self):
        self.forward_gpu(lambda x, y: x ** y)
    @attr.gpu
    @condition.retry(3)
    def test_radd_forward_gpu(self):
        self.forward_gpu(lambda x, y: y.__radd__(x))
    @attr.gpu
    @condition.retry(3)
    def test_rsub_forward_gpu(self):
        self.forward_gpu(lambda x, y: y.__rsub__(x))
    @attr.gpu
    @condition.retry(3)
    def test_rmul_forward_gpu(self):
        self.forward_gpu(lambda x, y: y.__rmul__(x))
    @attr.gpu
    @condition.retry(3)
    def test_rdiv_forward_gpu(self):
        self.forward_gpu(lambda x, y: y.__rtruediv__(x))
    @attr.gpu
    @condition.retry(3)
    def test_rpow_forward_gpu(self):
        self.forward_gpu(lambda x, y: y.__rpow__(x))
    @attr.gpu
    def test_add_constant_allocation(self):
        # Adding the Python int 0 must still allocate a proper GPU result.
        x = 0
        y = chainer.Variable(cuda.ones((1,)))
        z = y + x
        self.assertEqual(1, z.data.get()[0])
    def check_backward(self, op, x1_data, x2_data, y_grad, atol):
        """Compare analytic gradients of *op* against numerical gradients."""
        x1 = chainer.Variable(x1_data)
        x2 = chainer.Variable(x2_data)
        y = op(x1, x2)
        y.grad = y_grad
        y.backward()
        func = y.creator
        f = lambda: func.forward((x1.data, x2.data))
        gx1, gx2 = gradient_check.numerical_grad(
            f, (x1.data, x2.data), (y.grad,))
        gradient_check.assert_allclose(gx1, x1.grad, atol=atol)
        gradient_check.assert_allclose(gx2, x2.grad, atol=atol)
    def backward_cpu(self, op, atol=1e-5):
        self.check_backward(op, self.x1, self.x2, self.gy, atol)
    # --- backward, CPU ---
    @condition.retry(3)
    def test_add_backward_cpu(self):
        self.backward_cpu(lambda x, y: x + y)
    @condition.retry(3)
    def test_sub_backward_cpu(self):
        self.backward_cpu(lambda x, y: x - y)
    @condition.retry(3)
    def test_mul_backward_cpu(self):
        self.backward_cpu(lambda x, y: x * y)
    @condition.retry(3)
    def test_div_backward_cpu(self):
        self.backward_cpu(lambda x, y: x / y)
    @condition.retry(3)
    def test_pow_backward_cpu(self):
        # pow gradients are numerically rougher; use a looser tolerance.
        self.backward_cpu(lambda x, y: x ** y, atol=1e-4)
    def backward_gpu(self, op, atol=1e-5):
        self.check_backward(
            op, cuda.to_gpu(self.x1), cuda.to_gpu(self.x2),
            cuda.to_gpu(self.gy), atol)
    # --- backward, GPU ---
    @attr.gpu
    @condition.retry(3)
    def test_add_backward_gpu(self):
        self.backward_gpu(lambda x, y: x + y)
    @attr.gpu
    @condition.retry(3)
    def test_sub_backward_gpu(self):
        self.backward_gpu(lambda x, y: x - y)
    @attr.gpu
    @condition.retry(3)
    def test_mul_backward_gpu(self):
        self.backward_gpu(lambda x, y: x * y)
    @attr.gpu
    @condition.retry(3)
    def test_div_backward_gpu(self):
        self.backward_gpu(lambda x, y: x / y)
    @attr.gpu
    @condition.retry(3)
    def test_pow_backward_gpu(self):
        self.backward_gpu(lambda x, y: x ** y, atol=1e-4)
class TestBinaryOpSimple(BinaryOpTestBase, unittest.TestCase):
    """Binary-operator tests on ordinary 3x2 float32 arrays."""
    def make_data(self):
        shape = (3, 2)
        # Operands stay in [.5, 1) so division and pow are well-behaved.
        x1 = numpy.random.uniform(.5, 1, shape).astype(numpy.float32)
        x2 = numpy.random.uniform(.5, 1, shape).astype(numpy.float32)
        gy = numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
        return x1, x2, gy
class TestBinaryOpZeroDimension(BinaryOpTestBase, unittest.TestCase):
    """Binary-operator tests on zero-dimensional (scalar) float32 arrays."""
    def make_data(self):
        shape = ()
        # Operands stay in [.5, 1) so division and pow are well-behaved.
        x1 = numpy.random.uniform(.5, 1, shape).astype(numpy.float32)
        x2 = numpy.random.uniform(.5, 1, shape).astype(numpy.float32)
        gy = numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
        return x1, x2, gy
class TestBinaryOpConstant(unittest.TestCase):
def _test_constant_one(self, func, lhs, rhs, gpu=False):
if gpu:
lhs = cuda.to_gpu(lhs)
x = chainer.Variable(lhs)
y = func(x, rhs)
self.assertEqual(y.data.dtype, numpy.float32)
y.backward()
self.assertEqual(x.grad.dtype, numpy.float32)
def _test_constant(self, func):
x_data = numpy.array(1, numpy.float32)
self._test_constant_one(func, x_data, 1)
self._test_constant_one(func, x_data, 1.0)
self._test_constant_one(func, x_data, numpy.int64(1))
self._test_constant_one(func, x_data, numpy.float64(1.0))
def _test_constant_gpu(self, func):
x_data = numpy.array(1, numpy.float32)
self._test_constant_one(func, x_data, 1, True)
self._test_constant_one(func, x_data, 1.0, True)
self._test_constant_one(func, x_data, numpy.int64(1), True)
self._test_constant_one(func, x_data, numpy.float64(1), True)
def _test_constant_array_one(self, func, lhs, rhs):
x = chainer.Variable(lhs)
y = func(x, rhs)
self.assertEqual(y.data.dtype, numpy.float32)
y.grad = numpy.ones_like(y.data, numpy.float32)
y.backward()
self.assertEqual(x.grad.dtype, numpy.float32)
def _test_constant_array(self, func):
x_data = numpy.array([1.0, 2.0], numpy.float32)
self._test_constant_array_one(
func, x_data, numpy.array([3.0, 4.0], numpy.int32))
self._test_constant_array_one(
func, x_data, numpy.array([3.0, 4.0], numpy.int64))
self._test_constant_array_one(
func, x_data, numpy.array([3.0, 4.0], numpy.float32))
self._test_constant_array_one(
func, x_data, numpy.array([3.0, 4.0], numpy.float64))
with self.assertRaises(ValueError):
self._test_constant_array_one(func, x_data, [3.0, 4.0])
with self.assertRaises(ValueError):
self._test_constant_array_one(func, x_data, (3.0, 4.0))
with self.assertRaises(ValueError):
self._test_constant_array_one(func, x_data, [3.0, 4.0, 5.0])
with self.assertRaises(ValueError):
self._test_constant_array_one(func, x_data, (3.0, 4.0, 5.0))
with self.assertRaises(ValueError):
self._test_constant_array_one(
func, x_data, numpy.array([3.0, 4.0, 5.0], numpy.float32))
def _test_constant_array_gpu_one(self, func, lhs, rhs):
x = chainer.Variable(cuda.to_gpu(lhs))
y = func(x, rhs)
self.assertEqual(y.data.dtype, numpy.float32)
y.grad = chainer.cuda.ones_like(y.data).astype(numpy.float32)
y.backward()
self.assertEqual(x.grad.dtype, numpy.float32)
def _test_constant_array_gpu(self, func, exception=TypeError):
x_data = numpy.array([1.0, 2.0], numpy.float32)
self._test_constant_array_gpu_one(
func, x_data, cuda.to_gpu(numpy.array([3.0, 4.0], numpy.int32)))
self._test_constant_array_gpu_one(
func, x_data, cuda.to_gpu(numpy.array([3.0, 4.0], numpy.int64)))
self._test_constant_array_gpu_one(
func, x_data, cuda.to_gpu(numpy.array([3.0, 4.0], numpy.float32)))
self._test_constant_array_gpu_one(
func, x_data, cuda.to_gpu(numpy.array([3.0, 4.0], numpy.float64)))
with self.assertRaises(exception):
self._test_constant_array_one(
func, x_data, cuda.to_gpu(
numpy.array([3.0, 4.0, 5.0], numpy.float32)))
    # Operator coverage: each arithmetic operator (and its reflected form) is
    # exercised with a python scalar constant and with a constant ndarray, on
    # CPU and GPU.
    def test_add_constant(self):
        self._test_constant(lambda x, y: x + y)

    @attr.gpu
    def test_add_constant_gpu(self):
        self._test_constant_gpu(lambda x, y: x + y)

    def test_add_constant_array(self):
        self._test_constant_array(lambda x, y: x + y)

    @attr.gpu
    def test_add_constant_array_gpu(self):
        self._test_constant_array_gpu(lambda x, y: x + y)

    def test_radd_constant(self):
        self._test_constant(lambda x, y: y + x)

    @attr.gpu
    def test_radd_constant_gpu(self):
        self._test_constant_gpu(lambda x, y: y + x)

    def test_radd_constant_array(self):
        self._test_constant_array(lambda x, y: y + x)

    @attr.gpu
    def test_radd_constant_array_gpu(self):
        self._test_constant_array_gpu(lambda x, y: y + x)

    def test_sub_constant(self):
        self._test_constant(lambda x, y: x - y)

    @attr.gpu
    def test_sub_constant_gpu(self):
        self._test_constant_gpu(lambda x, y: x - y)

    def test_sub_constant_array(self):
        self._test_constant_array(lambda x, y: x - y)

    @attr.gpu
    def test_sub_constant_array_gpu(self):
        self._test_constant_array_gpu(lambda x, y: x - y)

    def test_rsub_constant(self):
        self._test_constant(lambda x, y: y - x)

    @attr.gpu
    def test_rsub_constant_gpu(self):
        self._test_constant_gpu(lambda x, y: y - x)

    def test_rsub_constant_array(self):
        self._test_constant_array(lambda x, y: y - x)

    @attr.gpu
    def test_rsub_constant_array_gpu(self):
        self._test_constant_array_gpu(lambda x, y: y - x)

    def test_mul_constant(self):
        self._test_constant(lambda x, y: x * y)

    @attr.gpu
    def test_mul_constant_gpu(self):
        self._test_constant_gpu(lambda x, y: x * y)
def test_mul_constant_array(self):
self._test_constant_array(lambda x, y: x * y)
@attr.gpu
def test_mul_constant_array_gpu(self):
self._test_constant_array(lambda x, y: x * y)
    def test_rmul_constant(self):
        self._test_constant(lambda x, y: y * x)

    @attr.gpu
    def test_rmul_constant_gpu(self):
        self._test_constant_gpu(lambda x, y: y * x)

    def test_rmul_constant_array(self):
        self._test_constant_array(lambda x, y: y * x)

    @attr.gpu
    def test_rmul_constant_array_gpu(self):
        # _test_constant_array_one throws pycuda._pvt_struct.error
        self._test_constant_array_gpu(lambda x, y: y * x, exception=Exception)

    def test_div_constant(self):
        self._test_constant(lambda x, y: x / y)

    @attr.gpu
    def test_div_constant_gpu(self):
        self._test_constant_gpu(lambda x, y: x / y)

    def test_div_constant_array(self):
        self._test_constant_array(lambda x, y: x / y)

    @attr.gpu
    def test_div_constant_array_gpu(self):
        # _test_constant_array_one throws pycuda._pvt_struct.error
        self._test_constant_array_gpu(lambda x, y: x / y, exception=Exception)

    def test_rdiv_constant(self):
        self._test_constant(lambda x, y: y / x)

    @attr.gpu
    def test_rdiv_constant_gpu(self):
        self._test_constant_gpu(lambda x, y: y / x)

    def test_rdiv_constant_array(self):
        self._test_constant_array(lambda x, y: y / x)

    @attr.gpu
    def test_rdiv_constant_array_gpu(self):
        self._test_constant_array_gpu(lambda x, y: y / x)

    def test_pow_constant(self):
        self._test_constant(lambda x, y: x ** y)

    @attr.gpu
    def test_pow_constant_gpu(self):
        self._test_constant_gpu(lambda x, y: x ** y)

    def test_pow_constant_array(self):
        self._test_constant_array(lambda x, y: x ** y)

    @attr.gpu
    def test_pow_constant_array_gpu(self):
        self._test_constant_array_gpu(lambda x, y: x ** y, exception=TypeError)

    def test_rpow_constant(self):
        self._test_constant(lambda x, y: y ** x)

    @attr.gpu
    def test_rpow_constant_gpu(self):
        self._test_constant_gpu(lambda x, y: y ** x)

    def test_rpow_constant_array(self):
        self._test_constant_array(lambda x, y: y ** x)

    @attr.gpu
    def test_rpow_constant_array_gpu(self):
        # _test_constant_array_one throws pycuda._pvt_struct.error
        self._test_constant_array_gpu(lambda x, y: y ** x, exception=Exception)
class VariableConstantOpTestBase(object):
    """Mixin running forward/backward checks of Variable <op> constant.

    Concrete subclasses provide the test data via ``make_data``.
    """

    def make_data(self):
        # Abstract hook: subclasses must return an (x, gy, value) tuple.
        # Bug fix: this stub was named ``make_date`` (typo), so the abstract
        # placeholder was unreachable — setUp() calls self.make_data().
        raise NotImplementedError()

    # Keep the historical misspelling as a backward-compatible alias.
    make_date = make_data
    def setUp(self):
        # Test data comes from the concrete subclass.
        self.x, self.gy, self.value = self.make_data()

    def check_forward(self, op, x_data):
        # The Variable result must match the plain numpy computation.
        x = chainer.Variable(x_data)
        y = op(x, self.value)
        gradient_check.assert_allclose(
            op(self.x, self.value), y.data, atol=1e-7, rtol=1e-7)

    def forward_cpu(self, op):
        self.check_forward(op, self.x)
    # Forward checks for every operator, CPU then GPU.
    @condition.retry(3)
    def test_add_forward_cpu(self):
        self.forward_cpu(lambda x, y: x + y)

    @condition.retry(3)
    def test_radd_forward_cpu(self):
        self.forward_cpu(lambda x, y: y + x)

    @condition.retry(3)
    def test_sub_forward_cpu(self):
        self.forward_cpu(lambda x, y: x - y)

    @condition.retry(3)
    def test_rsub_forward_cpu(self):
        self.forward_cpu(lambda x, y: y - x)

    @condition.retry(3)
    def test_mul_forward_cpu(self):
        self.forward_cpu(lambda x, y: x * y)

    @condition.retry(3)
    def test_rmul_forward_cpu(self):
        self.forward_cpu(lambda x, y: y * x)

    @condition.retry(3)
    def test_div_forward_cpu(self):
        self.forward_cpu(lambda x, y: x / y)

    @condition.retry(3)
    def test_rdiv_forward_cpu(self):
        self.forward_cpu(lambda x, y: y / x)

    @condition.retry(3)
    def test_pow_forward_cpu(self):
        self.forward_cpu(lambda x, y: x ** y)

    @condition.retry(3)
    def test_rpow_forward_cpu(self):
        self.forward_cpu(lambda x, y: y ** x)

    def forward_gpu(self, op):
        self.check_forward(op, cuda.to_gpu(self.x))

    @attr.gpu
    @condition.retry(3)
    def test_add_forward_gpu(self):
        self.forward_gpu(lambda x, y: x + y)

    @attr.gpu
    @condition.retry(3)
    def test_radd_forward_gpu(self):
        self.forward_gpu(lambda x, y: y + x)

    @attr.gpu
    @condition.retry(3)
    def test_sub_forward_gpu(self):
        self.forward_gpu(lambda x, y: x - y)

    @attr.gpu
    @condition.retry(3)
    def test_rsub_forward_gpu(self):
        self.forward_gpu(lambda x, y: y - x)

    @attr.gpu
    @condition.retry(3)
    def test_mul_forward_gpu(self):
        self.forward_gpu(lambda x, y: x * y)

    @attr.gpu
    @condition.retry(3)
    def test_rmul_forward_gpu(self):
        self.forward_gpu(lambda x, y: y * x)

    @attr.gpu
    @condition.retry(3)
    def test_div_forward_gpu(self):
        self.forward_gpu(lambda x, y: x / y)

    @attr.gpu
    @condition.retry(3)
    def test_rdiv_forward_gpu(self):
        self.forward_gpu(lambda x, y: y / x)

    @attr.gpu
    @condition.retry(3)
    def test_pow_forward_gpu(self):
        self.forward_gpu(lambda x, y: x ** y)

    @attr.gpu
    @condition.retry(3)
    def test_rpow_forward_gpu(self):
        self.forward_gpu(lambda x, y: y ** x)
    def check_backward(self, op, x_data, y_grad):
        # Compare the analytic gradient against a numerical gradient obtained
        # by re-running the creator function's forward pass.
        x = chainer.Variable(x_data)
        y = op(x, self.value)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data,))
        gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,))
        gradient_check.assert_allclose(gx, x.grad)

    def backward_cpu(self, op):
        self.check_backward(op, self.x, self.gy)
    # Backward checks for every operator, CPU then GPU.
    @condition.retry(3)
    def test_add_backward_cpu(self):
        self.backward_cpu(lambda x, y: x + y)

    @condition.retry(3)
    def test_radd_backward_cpu(self):
        self.backward_cpu(lambda x, y: y + x)

    @condition.retry(3)
    def test_sub_backward_cpu(self):
        self.backward_cpu(lambda x, y: x - y)

    @condition.retry(3)
    def test_rsub_backward_cpu(self):
        self.backward_cpu(lambda x, y: y - x)

    @condition.retry(3)
    def test_mul_backward_cpu(self):
        self.backward_cpu(lambda x, y: x * y)

    @condition.retry(3)
    def test_rmul_backward_cpu(self):
        self.backward_cpu(lambda x, y: y * x)

    @condition.retry(3)
    def test_div_backward_cpu(self):
        self.backward_cpu(lambda x, y: x / y)

    @condition.retry(3)
    def test_rdiv_backward_cpu(self):
        self.backward_cpu(lambda x, y: y / x)

    @condition.retry(3)
    def test_pow_backward_cpu(self):
        self.backward_cpu(lambda x, y: x ** y)

    @condition.retry(3)
    def test_rpow_backward_cpu(self):
        self.backward_cpu(lambda x, y: y ** x)

    def backward_gpu(self, op):
        self.check_backward(op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy))

    @attr.gpu
    @condition.retry(3)
    def test_add_backward_gpu(self):
        self.backward_gpu(lambda x, y: x + y)

    @attr.gpu
    @condition.retry(3)
    def test_radd_backward_gpu(self):
        self.backward_gpu(lambda x, y: y + x)

    @attr.gpu
    @condition.retry(3)
    def test_sub_backward_gpu(self):
        self.backward_gpu(lambda x, y: x - y)

    @attr.gpu
    @condition.retry(3)
    def test_rsub_backward_gpu(self):
        self.backward_gpu(lambda x, y: y - x)

    @attr.gpu
    @condition.retry(3)
    def test_mul_backward_gpu(self):
        self.backward_gpu(lambda x, y: x * y)

    @attr.gpu
    @condition.retry(3)
    def test_rmul_backward_gpu(self):
        self.backward_gpu(lambda x, y: y * x)

    @attr.gpu
    @condition.retry(3)
    def test_div_backward_gpu(self):
        self.backward_gpu(lambda x, y: x / y)

    @attr.gpu
    @condition.retry(3)
    def test_rdiv_backward_gpu(self):
        self.backward_gpu(lambda x, y: y / x)

    @attr.gpu
    @condition.retry(3)
    def test_pow_backward_gpu(self):
        self.backward_gpu(lambda x, y: x ** y)

    @attr.gpu
    @condition.retry(3)
    def test_rpow_backward_gpu(self):
        self.backward_gpu(lambda x, y: y ** x)
class TestVariableConstantOpSimple(VariableConstantOpTestBase,
                                   unittest.TestCase):

    def make_data(self):
        """Return a random 3x2 float32 input, gradient, and the constant 0.5."""
        shape = (3, 2)
        inputs = numpy.random.uniform(.5, 1, shape).astype(numpy.float32)
        grads = numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
        return inputs, grads, .5
class TestVariableConstantOpZeroDimension(VariableConstantOpTestBase,
                                          unittest.TestCase):

    def make_data(self):
        """Return a random scalar (0-d) float32 input, gradient, and 0.5."""
        inputs = numpy.random.uniform(.5, 1, ()).astype(numpy.float32)
        grads = numpy.random.uniform(-1, 1, ()).astype(numpy.float32)
        return inputs, grads, .5
class TestVariableConstantArrayOp(unittest.TestCase):
    # Tests Variable <op> constant-ndarray for every arithmetic operator.

    def setUp(self):
        self.x = numpy.random.uniform(.5, 1, (3, 2)).astype(numpy.float32)
        self.gy = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
        self.value = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)

    def check_forward(self, op, x_data, gpu, positive):
        # ``positive`` forces a non-negative constant (needed for y ** x).
        value = self.value
        if positive:
            value = numpy.abs(value)
        v = value
        if gpu:
            v = cuda.to_gpu(v)
        x = chainer.Variable(x_data)
        y = op(x, v)
        gradient_check.assert_allclose(
            op(self.x, value), y.data, atol=1e-6, rtol=1e-6)
    def forward_cpu(self, op, positive=False):
        self.check_forward(op, self.x, False, positive)

    @condition.retry(3)
    def test_add_forward_cpu(self):
        self.forward_cpu(lambda x, y: x + y)

    @condition.retry(3)
    def test_radd_forward_cpu(self):
        self.forward_cpu(lambda x, y: y + x)

    @condition.retry(3)
    def test_sub_forward_cpu(self):
        self.forward_cpu(lambda x, y: x - y)

    @condition.retry(3)
    def test_rsub_forward_cpu(self):
        self.forward_cpu(lambda x, y: y - x)

    @condition.retry(3)
    def test_mul_forward_cpu(self):
        self.forward_cpu(lambda x, y: x * y)

    @condition.retry(3)
    def test_rmul_forward_cpu(self):
        self.forward_cpu(lambda x, y: y * x)

    @condition.retry(3)
    def test_div_forward_cpu(self):
        self.forward_cpu(lambda x, y: x / y)

    @condition.retry(3)
    def test_rdiv_forward_cpu(self):
        self.forward_cpu(lambda x, y: y / x)

    @condition.retry(3)
    def test_pow_forward_cpu(self):
        self.forward_cpu(lambda x, y: x ** y)

    @condition.retry(3)
    def test_rpow_forward_cpu(self):
        # rpow needs a positive base to stay real-valued.
        self.forward_cpu(lambda x, y: y ** x, positive=True)

    def forward_gpu(self, op, positive=False):
        self.check_forward(op, cuda.to_gpu(self.x), True, positive)

    @attr.gpu
    @condition.retry(3)
    def test_add_forward_gpu(self):
        self.forward_gpu(lambda x, y: x + y)

    @attr.gpu
    @condition.retry(3)
    def test_radd_forward_gpu(self):
        self.forward_gpu(lambda x, y: y + x)

    @attr.gpu
    @condition.retry(3)
    def test_sub_forward_gpu(self):
        self.forward_gpu(lambda x, y: x - y)

    @attr.gpu
    @condition.retry(3)
    def test_rsub_forward_gpu(self):
        self.forward_gpu(lambda x, y: y - x)

    @attr.gpu
    @condition.retry(3)
    def test_mul_forward_gpu(self):
        self.forward_gpu(lambda x, y: x * y)

    @attr.gpu
    @condition.retry(3)
    def test_rmul_forward_gpu(self):
        self.forward_gpu(lambda x, y: y * x)

    @attr.gpu
    @condition.retry(3)
    def test_div_forward_gpu(self):
        self.forward_gpu(lambda x, y: x / y)

    @attr.gpu
    @condition.retry(3)
    def test_rdiv_forward_gpu(self):
        self.forward_gpu(lambda x, y: y / x)

    @attr.gpu
    @condition.retry(3)
    def test_pow_forward_gpu(self):
        self.forward_gpu(lambda x, y: x ** y)

    @attr.gpu
    @condition.retry(3)
    def test_rpow_forward_gpu(self):
        self.forward_gpu(lambda x, y: y ** x, positive=True)
    def check_backward(self, op, x_data, y_grad, gpu, positive):
        # Numerical-vs-analytic gradient check; ``positive`` mirrors the
        # forward variant and is required for the rpow case.
        value = self.value
        if positive:
            value = numpy.abs(value)
        if gpu:
            value = cuda.to_gpu(value)
        x = chainer.Variable(x_data)
        y = op(x, value)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data,))
        gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,))
        gradient_check.assert_allclose(gx, x.grad, atol=1e-4, rtol=1e-4)

    def backward_cpu(self, op, positive=False):
        self.check_backward(op, self.x, self.gy, False, positive)
    @condition.retry(3)
    def test_add_backward_cpu(self):
        self.backward_cpu(lambda x, y: x + y)

    @condition.retry(3)
    def test_radd_backward_cpu(self):
        self.backward_cpu(lambda x, y: y + x)

    @condition.retry(3)
    def test_sub_backward_cpu(self):
        self.backward_cpu(lambda x, y: x - y)

    @condition.retry(3)
    def test_rsub_backward_cpu(self):
        self.backward_cpu(lambda x, y: y - x)

    @condition.retry(3)
    def test_mul_backward_cpu(self):
        self.backward_cpu(lambda x, y: x * y)

    @condition.retry(3)
    def test_rmul_backward_cpu(self):
        self.backward_cpu(lambda x, y: y * x)

    @condition.retry(3)
    def test_div_backward_cpu(self):
        self.backward_cpu(lambda x, y: x / y)

    @condition.retry(3)
    def test_rdiv_backward_cpu(self):
        self.backward_cpu(lambda x, y: y / x)

    @condition.retry(3)
    def test_pow_backward_cpu(self):
        self.backward_cpu(lambda x, y: x ** y)

    @condition.retry(3)
    def test_rpow_backward_cpu(self):
        self.backward_cpu(lambda x, y: y ** x, positive=True)

    def backward_gpu(self, op, positive=False):
        self.check_backward(
            op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy), True, positive)
@attr.gpu
@condition.retry(3)
def test_add_backward_gpu(self):
self.backward_gpu(lambda x, y: x + y)
@attr.gpu
@condition.retry(3)
def test_radd_backward_gpu(self):
self.backward_gpu(lambda x, y: y + x)
@attr.gpu
@condition.retry(3)
def test_sub_backward_gpu(self):
self.backward_gpu(lambda x, y: x - y)
@attr.gpu
@condition.retry(3)
def test_mul_backward_gpu(self):
self.backward_gpu(lambda x, y: x * y)
@attr.gpu
@condition.retry(3)
def test_rmul_backward_gpu(self):
self.backward_gpu(lambda x, y: y * x)
@attr.gpu
@condition.retry(3)
def test_div_backward_gpu(self):
self.backward_gpu(lambda x, y: x / y)
@attr.gpu
@condition.retry(3)
def test_rdiv_backward_gpu(self):
self.backward_gpu(lambda x, y: y / x)
@attr.gpu
@condition.retry(3)
def test_pow_backward_gpu(self):
self.backward_gpu(lambda x, y: x ** y)
@attr.gpu
@condition.retry(3)
def test_rpow_backward_gpu(self):
self.backward_gpu(lambda x, y: y ** x, positive=True)
class UnaryFunctionsTestBase(object):
    # Shared forward/backward checks for unary ops (neg, abs, exp, log, sin,
    # cos).  Subclasses supply the test data via make_data().

    def make_data(self):
        raise NotImplementedError()

    def setUp(self):
        self.x, self.gy = self.make_data()

    def check_forward(self, op, op_np, x_data):
        # The Variable result of ``op`` must match the numpy reference
        # ``op_np`` applied to the raw data.
        x = chainer.Variable(x_data)
        y = op(x)
        gradient_check.assert_allclose(
            op_np(self.x), y.data, atol=1e-7, rtol=1e-7)

    def forward_cpu(self, op, op_np):
        self.check_forward(op, op_np, self.x)

    @condition.retry(3)
    def test_neg_forward_cpu(self):
        self.forward_cpu(lambda x: -x, lambda x: -x)

    @condition.retry(3)
    def test_abs_forward_cpu(self):
        self.forward_cpu(lambda x: abs(x), lambda x: abs(x))

    @condition.retry(3)
    def test_exp_forward_cpu(self):
        self.forward_cpu(F.exp, numpy.exp)

    @condition.retry(3)
    def test_log_forward_cpu(self):
        self.forward_cpu(F.log, numpy.log)

    @condition.retry(3)
    def test_sin_forward_cpu(self):
        self.forward_cpu(F.sin, numpy.sin)

    @condition.retry(3)
    def test_cos_forward_cpu(self):
        self.forward_cpu(F.cos, numpy.cos)

    def forward_gpu(self, op, op_np):
        self.check_forward(op, op_np, cuda.to_gpu(self.x))

    @attr.gpu
    @condition.retry(3)
    def test_neg_forward_gpu(self):
        self.forward_gpu(lambda x: -x, lambda x: -x)

    @attr.gpu
    @condition.retry(3)
    def test_abs_forward_gpu(self):
        self.forward_gpu(lambda x: abs(x), lambda x: abs(x))

    @attr.gpu
    @condition.retry(3)
    def test_exp_forward_gpu(self):
        self.forward_gpu(F.exp, numpy.exp)

    @attr.gpu
    @condition.retry(3)
    def test_log_forward_gpu(self):
        self.forward_gpu(F.log, numpy.log)

    @attr.gpu
    @condition.retry(3)
    def test_sin_forward_gpu(self):
        self.forward_gpu(F.sin, numpy.sin)

    @attr.gpu
    @condition.retry(3)
    def test_cos_forward_gpu(self):
        self.forward_gpu(F.cos, numpy.cos)

    def check_backward(self, op, x_data, y_grad):
        # Numerical-vs-analytic gradient check via the creator function.
        x = chainer.Variable(x_data)
        y = op(x)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data,))
        gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,))
        gradient_check.assert_allclose(gx, x.grad)

    def backward_cpu(self, op):
        self.check_backward(op, self.x, self.gy)

    @condition.retry(3)
    def test_neg_backward_cpu(self):
        self.backward_cpu(lambda x: -x)

    @condition.retry(3)
    def test_abs_backward_cpu(self):
        self.backward_cpu(lambda x: abs(x))

    @condition.retry(3)
    def test_exp_backward_cpu(self):
        self.backward_cpu(F.exp)

    @condition.retry(3)
    def test_log_backward_cpu(self):
        self.backward_cpu(F.log)

    @condition.retry(3)
    def test_sin_backward_cpu(self):
        self.backward_cpu(F.sin)

    @condition.retry(3)
    def test_cos_backward_cpu(self):
        self.backward_cpu(F.cos)

    def backward_gpu(self, op):
        self.check_backward(op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy))

    @attr.gpu
    @condition.retry(3)
    def test_neg_backward_gpu(self):
        self.backward_gpu(lambda x: -x)

    @attr.gpu
    @condition.retry(3)
    def test_abs_backward_gpu(self):
        self.backward_gpu(lambda x: abs(x))

    @attr.gpu
    @condition.retry(3)
    def test_exp_backward_gpu(self):
        self.backward_gpu(F.exp)

    @attr.gpu
    @condition.retry(3)
    def test_log_backward_gpu(self):
        self.backward_gpu(F.log)

    @attr.gpu
    @condition.retry(3)
    def test_sin_backward_gpu(self):
        self.backward_gpu(F.sin)

    @attr.gpu
    @condition.retry(3)
    def test_cos_backward_gpu(self):
        self.backward_gpu(F.cos)
class TestUnaryFunctionsSimple(UnaryFunctionsTestBase, unittest.TestCase):

    def make_data(self):
        """Random 3x2 float32 input (bounded away from zero) and gradient."""
        dims = (3, 2)
        return (numpy.random.uniform(.5, 1, dims).astype(numpy.float32),
                numpy.random.uniform(-1, 1, dims).astype(numpy.float32))
class TestUnaryFunctionsZeroDimension(UnaryFunctionsTestBase,
                                      unittest.TestCase):

    def make_data(self):
        """Random scalar (0-d) float32 input and gradient."""
        return (numpy.random.uniform(.5, 1, ()).astype(numpy.float32),
                numpy.random.uniform(-1, 1, ()).astype(numpy.float32))
class TestNegativePow(unittest.TestCase):
    # x ** 2 with strictly negative x: the backward pass must remain correct.

    def setUp(self):
        self.x = numpy.random.uniform(-1, 0, (3, 2)).astype(numpy.float32)
        self.gy = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)

    def check_backward(self, x_data, y_grad):
        x = chainer.Variable(x_data)
        y = x ** 2
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data,))
        gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,))
        gradient_check.assert_allclose(gx, x.grad, atol=1e-4, rtol=1e-4)

    def test_cpu(self):
        self.check_backward(self.x, self.gy)

    @attr.gpu
    def test_gpu(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
class TestNotSupportOperation(unittest.TestCase):
    # Comparison (and truthiness) on Variable is undefined and must raise.

    def setUp(self):
        self.x = chainer.Variable(numpy.zeros(10))
        self.y = chainer.Variable(numpy.zeros(10))

    def test_lt(self):
        with self.assertRaises(NotImplementedError):
            self.x < self.y

    def test_le(self):
        with self.assertRaises(NotImplementedError):
            self.x <= self.y

    def test_eq(self):
        with self.assertRaises(NotImplementedError):
            self.x == self.y

    def test_ne(self):
        with self.assertRaises(NotImplementedError):
            self.x != self.y

    def test_gt(self):
        with self.assertRaises(NotImplementedError):
            self.x > self.y

    def test_ge(self):
        with self.assertRaises(NotImplementedError):
            self.x >= self.y

    def test_nonzero(self):
        # bool() coercion of a Variable is also unsupported.
        with self.assertRaises(NotImplementedError):
            if self.x:
                pass
testing.run_module(__name__, __file__)
| |
"""
This module implements the ssh (Secure SHell) protocol for encrypted
connections.
This depends on a generic session module that implements the actual
login procedure of the game, tracks sessions etc.
A standard ssh client can be used to connect and play.
"""
from __future__ import print_function
from builtins import object
import os
import re
from twisted.cred.checkers import credentials
from twisted.cred.portal import Portal
from twisted.conch.interfaces import IConchUser
# User-facing message shown when the SSH crypto dependency is missing.
# Fix: the original text read "'pip install pycrypto pyasn1 instead)" with
# the closing quote missing/misplaced.
_SSH_IMPORT_ERROR = """
ERROR: Missing crypto library for SSH. Install it with
pip install cryptography
(On older Twisted versions you may have to do 'pip install pycrypto pyasn1' instead).
If you get a compilation error you must install a C compiler and the
SSL dev headers (On Debian-derived systems this is the gcc and libssl-dev
packages).
"""
try:
    from twisted.conch.ssh.keys import Key
except ImportError:
    # Re-raise with the actionable explanation above instead of the raw error.
    raise ImportError(_SSH_IMPORT_ERROR)
from twisted.conch.ssh.userauth import SSHUserAuthServer
from twisted.conch.ssh import common
from twisted.conch.insults import insults
from twisted.conch.manhole_ssh import TerminalRealm, _Glue, ConchFactory
from twisted.conch.manhole import Manhole, recvline
from twisted.internet import defer
from twisted.conch import interfaces as iconch
from twisted.python import components
from django.conf import settings
from evennia.server import session
from evennia.players.models import PlayerDB
from evennia.utils import ansi
from evennia.utils.utils import to_str
# Matches a trailing "|n" (Evennia color-reset) marker so it can be stripped.
_RE_N = re.compile(r"\|n$")
# Pattern used to clean output for clients in screenreader mode.
_RE_SCREENREADER_REGEX = re.compile(r"%s" % settings.SCREENREADER_REGEX_STRIP, re.DOTALL + re.MULTILINE)
_GAME_DIR = settings.GAME_DIR

# Terminal control bytes handled by the protocol's key handlers.
CTRL_C = '\x03'
CTRL_D = '\x04'
CTRL_BACKSLASH = '\x1c'
CTRL_L = '\x0c'
class SshProtocol(Manhole, session.Session):
    """
    Each player connecting over ssh gets this protocol assigned to
    them. All communication between game and player goes through
    here.
    """

    def __init__(self, starttuple):
        """
        For setting up the player. If player is not None then we'll
        login automatically.

        Args:
            starttuple (tuple): A (player, factory) tuple.

        """
        self.authenticated_player = starttuple[0]
        # obs must not be called self.factory, that gets overwritten!
        self.cfactory = starttuple[1]

    def terminalSize(self, width, height):
        """
        Initialize the terminal and connect to the new session.

        Args:
            width (int): Width of terminal.
            height (int): Height of terminal.

        """
        # Clear the previous input line, redraw it at the new
        # cursor position
        self.terminal.eraseDisplay()
        self.terminal.cursorHome()
        self.width = width
        self.height = height

        # initialize the session
        client_address = self.getClientAddress()
        client_address = client_address.host if client_address else None
        self.init_session("ssh", client_address, self.cfactory.sessionhandler)

        # since we might have authenticated already, we might set this here.
        if self.authenticated_player:
            self.logged_in = True
            self.uid = self.authenticated_player.user.id
        self.sessionhandler.connect(self)

    def connectionMade(self):
        """
        This is called when the connection is first established.

        """
        # Wire the terminal control bytes to their handlers.
        recvline.HistoricRecvLine.connectionMade(self)
        self.keyHandlers[CTRL_C] = self.handle_INT
        self.keyHandlers[CTRL_D] = self.handle_EOF
        self.keyHandlers[CTRL_L] = self.handle_FF
        self.keyHandlers[CTRL_BACKSLASH] = self.handle_QUIT
        # initalize

    def handle_INT(self):
        """
        Handle ^C as an interrupt keystroke by resetting the current
        input variables to their initial state.

        """
        self.lineBuffer = []
        self.lineBufferIndex = 0
        self.terminal.nextLine()
        self.terminal.write("KeyboardInterrupt")
        self.terminal.nextLine()

    def handle_EOF(self):
        """
        Handles EOF generally used to exit.

        """
        # With pending input, just ring the bell; otherwise quit.
        if self.lineBuffer:
            self.terminal.write('\a')
        else:
            self.handle_QUIT()

    def handle_FF(self):
        """
        Handle a 'form feed' byte - generally used to request a screen
        refresh/redraw.

        """
        self.terminal.eraseDisplay()
        self.terminal.cursorHome()

    def handle_QUIT(self):
        """
        Quit, end, and lose the connection.

        """
        self.terminal.loseConnection()

    def connectionLost(self, reason=None):
        """
        This is executed when the connection is lost for whatever
        reason. It can also be called directly, from the disconnect
        method.

        Args:
            reason (str): Motivation for loosing connection.

        """
        insults.TerminalProtocol.connectionLost(self, reason)
        self.sessionhandler.disconnect(self)
        self.terminal.loseConnection()

    def getClientAddress(self):
        """
        Get client address.

        Returns:
            address_and_port (tuple): The client's address and port in
                a tuple. For example `('127.0.0.1', 41917)`.

        """
        return self.terminal.transport.getPeer()

    def lineReceived(self, string):
        """
        Communication User -> Evennia. Any line return indicates a
        command for the purpose of the MUD. So we take the user input
        and pass it on to the game engine.

        Args:
            string (str): Input text.

        """
        self.sessionhandler.data_in(self, text=string)

    def sendLine(self, string):
        """
        Communication Evennia -> User. Any string sent should
        already have been properly formatted and processed before
        reaching this point.

        Args:
            string (str): Output text.

        """
        for line in string.split('\n'):
            # the telnet-specific method for sending
            self.terminal.write(line)
            self.terminal.nextLine()

    # session-general method hooks

    def disconnect(self, reason="Connection closed. Goodbye for now."):
        """
        Disconnect from server.

        Args:
            reason (str): Motivation for disconnect.

        """
        if reason:
            self.data_out(text=reason)
        self.connectionLost(reason)

    def data_out(self, **kwargs):
        """
        Data Evennia -> User

        Kwargs:
            kwargs (any): Options to the protocol.

        """
        self.sessionhandler.data_out(self, **kwargs)

    def send_text(self, *args, **kwargs):
        """
        Send text data. This is an in-band telnet operation.

        Args:
            text (str): The first argument is always the text string to send. No other arguments
                are considered.
        Kwargs:
            options (dict): Send-option flags
                   - mxp: Enforce MXP link support.
                   - ansi: Enforce no ANSI colors.
                   - xterm256: Enforce xterm256 colors, regardless of TTYPE setting.
                   - nocolor: Strip all colors.
                   - raw: Pass string through without any ansi processing
                        (i.e. include Evennia ansi markers but do not
                        convert them into ansi tokens)
                   - echo: Turn on/off line echo on the client. Turn
                        off line echo for client, for example for password.
                        Note that it must be actively turned back on again!

        """
        # print "telnet.send_text", args,kwargs  # DEBUG
        text = args[0] if args else ""
        if text is None:
            return
        text = to_str(text, force_string=True)

        # handle arguments
        options = kwargs.get("options", {})
        flags = self.protocol_flags
        xterm256 = options.get("xterm256", flags.get('XTERM256', True))
        useansi = options.get("ansi", flags.get('ANSI', True))
        raw = options.get("raw", flags.get("RAW", False))
        nocolor = options.get("nocolor", flags.get("NOCOLOR") or not (xterm256 or useansi))
        # echo = options.get("echo", None)  # DEBUG
        screenreader = options.get("screenreader", flags.get("SCREENREADER", False))

        if screenreader:
            # screenreader mode cleans up output
            text = ansi.parse_ansi(text, strip_ansi=True, xterm256=False, mxp=False)
            text = _RE_SCREENREADER_REGEX.sub("", text)

        if raw:
            # no processing
            self.sendLine(text)
            return
        else:
            # we need to make sure to kill the color at the end in order
            # to match the webclient output.
            linetosend = ansi.parse_ansi(_RE_N.sub("", text) + ("||n" if text.endswith("|") else "|n"),
                                         strip_ansi=nocolor, xterm256=xterm256, mxp=False)
            self.sendLine(linetosend)

    def send_prompt(self, *args, **kwargs):
        # Prompts are plain text over ssh.
        self.send_text(*args, **kwargs)

    def send_default(self, *args, **kwargs):
        # Unhandled out-of-band commands are silently dropped.
        pass
class ExtraInfoAuthServer(SSHUserAuthServer):
    def auth_password(self, packet):
        """
        Password authentication.

        Used mostly for setting up the transport so we can query
        username and password later.

        Args:
            packet (Packet): Auth packet.

        """
        password, _ = common.getNS(packet[1:])
        creds = credentials.UsernamePassword(self.user, password)
        # Attach the transport so the checker/realm can reach it later.
        creds.transport = self.transport
        deferred = self.portal.login(creds, None, IConchUser)
        return deferred.addErrback(self._ebPassword)
class PlayerDBPasswordChecker(object):
    """
    Checks the django db for the correct credentials for
    username/password otherwise it returns the player or None which is
    useful for the Realm.
    """
    credentialInterfaces = (credentials.IUsernamePassword,)

    def __init__(self, factory):
        """
        Initialize the checker.

        Args:
            factory (SSHFactory): Checker factory.

        """
        super(PlayerDBPasswordChecker, self).__init__()
        self.factory = factory

    def requestAvatarId(self, c):
        """
        Generic credentials. Returns a deferred firing with a
        (player-or-None, factory) tuple.
        """
        up = credentials.IUsernamePassword(c, None)
        player = PlayerDB.objects.get_player_from_name(up.username)
        if player and player.check_password(up.password):
            matched = player
        else:
            matched = None
        return defer.succeed((matched, self.factory))
class PassAvatarIdTerminalRealm(TerminalRealm):
    """
    Returns an avatar that passes the avatarId through to the
    protocol. This is probably not the best way to do it.
    """

    def _getAvatar(self, avatarId):
        # Mirrors TerminalRealm._getAvatar, but forwards avatarId into the
        # chained protocol factory so the protocol knows who logged in.
        comp = components.Componentized()
        user = self.userFactory(comp, avatarId)
        sess = self.sessionFactory(comp)

        sess.transportFactory = self.transportFactory
        sess.chainedProtocolFactory = lambda: self.chainedProtocolFactory(avatarId)

        comp.setComponent(iconch.IConchUser, user)
        comp.setComponent(iconch.ISession, sess)

        return user
class TerminalSessionTransport_getPeer(object):
    """
    Taken from twisted's TerminalSessionTransport which doesn't
    provide getPeer to the transport. This one does.
    """

    def __init__(self, proto, chainedProtocol, avatar, width, height):
        self.proto = proto
        self.avatar = avatar
        self.chainedProtocol = chainedProtocol

        session = self.proto.session

        # Glue the ssh protocol and the chained (terminal) protocol together.
        self.proto.makeConnection(
            _Glue(write=self.chainedProtocol.dataReceived,
                  loseConnection=lambda: avatar.conn.sendClose(session),
                  name="SSH Proto Transport"))

        def loseConnection():
            self.proto.loseConnection()

        def getPeer():
            # Expose the remote address of the underlying ssh transport
            # (the whole point of this subclass).
            return session.conn.transport.transport.getPeer()

        self.chainedProtocol.makeConnection(
            _Glue(getPeer=getPeer, write=self.proto.write,
                  loseConnection=loseConnection,
                  name="Chained Proto Transport"))

        # Propagate the initial terminal geometry.
        self.chainedProtocol.terminalProtocol.terminalSize(width, height)
def getKeyPair(pubkeyfile, privkeyfile):
    """
    Return (public, private) twisted Key objects for the server.

    This function looks for RSA keypair files in the current directory. If they
    do not exist, the keypair is created and saved to those paths.

    Args:
        pubkeyfile (str): Path of the OPENSSH-format public key file.
        privkeyfile (str): Path of the OPENSSH-format private key file.

    """
    if not (os.path.exists(pubkeyfile) and os.path.exists(privkeyfile)):
        # No keypair exists. Generate a new RSA keypair
        print(" Generating SSH RSA keypair ...", end=' ')
        from Crypto.PublicKey import RSA
        KEY_LENGTH = 1024
        rsaKey = Key(RSA.generate(KEY_LENGTH))
        publicKeyString = rsaKey.public().toString(type="OPENSSH")
        privateKeyString = rsaKey.toString(type="OPENSSH")

        # Save keys for the future.  Bug fix: the original used the Python 2
        # only `file()` builtin (a NameError on Python 3) and never closed
        # the handles; use open() with context managers instead.
        with open(pubkeyfile, 'w+b') as pub:
            pub.write(publicKeyString)
        with open(privkeyfile, 'w+b') as priv:
            priv.write(privateKeyString)
        print(" done.")
    else:
        with open(pubkeyfile) as pub:
            publicKeyString = pub.read()
        with open(privkeyfile) as priv:
            privateKeyString = priv.read()
    return Key.fromString(publicKeyString), Key.fromString(privateKeyString)
def makeFactory(configdict):
    """
    Creates the ssh server factory.

    Args:
        configdict (dict): Must contain 'sessions' (the session handler) and
            'protocolFactory'; may contain 'protocolConfigdict' and
            'protocolKwArgs' forwarded to the protocol constructor.

    """
    pubkeyfile = os.path.join(_GAME_DIR, "server", "ssh-public.key")
    privkeyfile = os.path.join(_GAME_DIR, "server", "ssh-private.key")

    def chainProtocolFactory(username=None):
        # Build the insults server protocol wrapping the configured protocol,
        # passing the authenticated username through.
        return insults.ServerProtocol(
            configdict['protocolFactory'],
            *configdict.get('protocolConfigdict', (username,)),
            **configdict.get('protocolKwArgs', {}))

    rlm = PassAvatarIdTerminalRealm()
    rlm.transportFactory = TerminalSessionTransport_getPeer
    rlm.chainedProtocolFactory = chainProtocolFactory
    factory = ConchFactory(Portal(rlm))
    factory.sessionhandler = configdict['sessions']

    try:
        # create/get RSA keypair
        publicKey, privateKey = getKeyPair(pubkeyfile, privkeyfile)
        factory.publicKeys = {'ssh-rsa': publicKey}
        factory.privateKeys = {'ssh-rsa': privateKey}
    except Exception as err:
        # Fall back to conch's built-in keys rather than failing startup.
        print("getKeyPair error: {err}\n WARNING: Evennia could not "
              "auto-generate SSH keypair. Using conch default keys instead.\n"
              "If this error persists, create {pub} and "
              "{priv} yourself using third-party tools.".format(err=err, pub=pubkeyfile, priv=privkeyfile))

    # Copy before mutating: the services dict is shared on the class.
    factory.services = factory.services.copy()
    factory.services['ssh-userauth'] = ExtraInfoAuthServer

    factory.portal.registerChecker(PlayerDBPasswordChecker(factory))

    return factory
| |
import numpy as np
import hail as hl
import unittest
import pytest
from ..helpers import *
from hail.utils import new_temp_file
# Hook the Hail context lifecycle into unittest's module-level setup/teardown.
setUpModule = startTestHailContext
tearDownModule = stopTestHailContext
class Tests(unittest.TestCase):
    @skip_unless_spark_backend()
    def test_ld_score(self):
        # Regression test: univariate and annotation-stratified LD scores
        # computed from the bundled ldsc resources must match known values.
        ht = hl.import_table(doctest_resource('ldsc.annot'),
                             types={'BP': hl.tint,
                                    'CM': hl.tfloat,
                                    'binary': hl.tint,
                                    'continuous': hl.tfloat})
        ht = ht.annotate(locus=hl.locus(ht.CHR, ht.BP))
        ht = ht.key_by('locus')

        mt = hl.import_plink(bed=doctest_resource('ldsc.bed'),
                             bim=doctest_resource('ldsc.bim'),
                             fam=doctest_resource('ldsc.fam'))
        mt = mt.annotate_rows(binary=ht[mt.locus].binary,
                              continuous=ht[mt.locus].continuous)

        ht_univariate = hl.experimental.ld_score(
            entry_expr=mt.GT.n_alt_alleles(),
            locus_expr=mt.locus,
            radius=1.0,
            coord_expr=mt.cm_position)

        ht_annotated = hl.experimental.ld_score(
            entry_expr=mt.GT.n_alt_alleles(),
            locus_expr=mt.locus,
            radius=1.0,
            coord_expr=mt.cm_position,
            annotation_exprs=[mt.binary,
                              mt.continuous])

        # Spot-check two specific loci plus the overall mean.
        univariate = ht_univariate.aggregate(hl.struct(
            chr20=hl.agg.filter(
                (ht_univariate.locus.contig == '20') &
                (ht_univariate.locus.position == 82079),
                hl.agg.collect(ht_univariate.univariate))[0],
            chr22=hl.agg.filter(
                (ht_univariate.locus.contig == '22') &
                (ht_univariate.locus.position == 16894090),
                hl.agg.collect(ht_univariate.univariate))[0],
            mean=hl.agg.mean(ht_univariate.univariate)))

        self.assertAlmostEqual(univariate.chr20, 1.601, places=3)
        self.assertAlmostEqual(univariate.chr22, 1.140, places=3)
        self.assertAlmostEqual(univariate.mean, 3.507, places=3)

        annotated = ht_annotated.aggregate(
            hl.struct(
                chr20=hl.struct(binary=hl.agg.filter(
                    (ht_annotated.locus.contig == '20') &
                    (ht_annotated.locus.position == 82079),
                    hl.agg.collect(ht_annotated.binary))[0],
                    continuous=hl.agg.filter(
                        (ht_annotated.locus.contig == '20') &
                        (ht_annotated.locus.position == 82079),
                        hl.agg.collect(ht_annotated.continuous))[0]),
                chr22=hl.struct(
                    binary=hl.agg.filter(
                        (ht_annotated.locus.contig == '22') &
                        (ht_annotated.locus.position == 16894090),
                        hl.agg.collect(ht_annotated.binary))[0],
                    continuous=hl.agg.filter(
                        (ht_annotated.locus.contig == '22') &
                        (ht_annotated.locus.position == 16894090),
                        hl.agg.collect(ht_annotated.continuous))[0]),
                mean_stats=hl.struct(binary=hl.agg.mean(ht_annotated.binary),
                                     continuous=hl.agg.mean(ht_annotated.continuous))))

        self.assertAlmostEqual(annotated.chr20.binary, 1.152, places=3)
        self.assertAlmostEqual(annotated.chr20.continuous, 73.014, places=3)
        self.assertAlmostEqual(annotated.chr22.binary, 1.107, places=3)
        self.assertAlmostEqual(annotated.chr22.continuous, 102.174, places=3)
        self.assertAlmostEqual(annotated.mean_stats.binary, 0.965, places=3)
        self.assertAlmostEqual(annotated.mean_stats.continuous, 176.528, places=3)
@skip_unless_spark_backend()
def test_plot_roc_curve(self):
    """Smoke test: plot_roc_curve runs on random scores with simulated labels."""
    scores = hl.utils.range_table(100).annotate(score1=hl.rand_norm(), score2=hl.rand_norm())
    scores = scores.annotate(
        tp=hl.if_else(scores.score1 > 0, hl.rand_bool(0.7), False),
        score3=scores.score1 + hl.rand_norm())
    labeled = scores.annotate(fp=hl.if_else(~labeled_tp(scores), hl.rand_bool(0.2), False)) if False else scores.annotate(fp=hl.if_else(~scores.tp, hl.rand_bool(0.2), False))
    _, aucs = hl.experimental.plot_roc_curve(labeled, ['score1', 'score2', 'score3'])
def test_import_keyby_count_ldsc_lowered_shuffle(self):
    # Integration test pulled out of test_ld_score_regression to isolate issues
    # with lowered shuffles and RDD serialization, 2021-07-06.
    # If this comment no longer reflects the backend system, that's a really good thing.
    scores = hl.import_table(
        doctest_resource('ld_score_regression.univariate_ld_scores.tsv'),
        key='SNP', types={'L2': hl.tfloat, 'BP': hl.tint})
    sumstats = hl.import_table(
        doctest_resource('ld_score_regression.20160.sumstats.tsv'),
        key='SNP', types={'N': hl.tint, 'Z': hl.tfloat})
    score_row = scores[sumstats['SNP']]
    sumstats = sumstats.annotate(
        ld_score=score_row['L2'],
        locus=hl.locus(score_row['CHR'], score_row['BP']),
        alleles=hl.array([sumstats['A2'], sumstats['A1']]))
    sumstats = sumstats.key_by(sumstats['locus'], sumstats['alleles'])
    assert sumstats._force_count() == 151
@pytest.mark.unchecked_allocator
@skip_when_service_backend('hangs >5 minutes; last message is "all results compelte" in ServiceBackend.parallelizeAndComputeWithIndex')
def test_ld_score_regression(self):
    """
    End-to-end check of ld_score_regression against precomputed expectations.

    Exercises both calling conventions:
    1. a MatrixTable with one column per phenotype, and
    2. a Table with one chi-squared/N expression pair per phenotype,
    asserting both paths produce the same intercept and heritability numbers.
    """
    # Shared univariate LD scores, keyed by SNP id.
    ht_scores = hl.import_table(
        doctest_resource('ld_score_regression.univariate_ld_scores.tsv'),
        key='SNP', types={'L2': hl.tfloat, 'BP': hl.tint})
    # Phenotype 1: summary statistics for '50_irnt'.
    ht_50_irnt = hl.import_table(
        doctest_resource('ld_score_regression.50_irnt.sumstats.tsv'),
        key='SNP', types={'N': hl.tint, 'Z': hl.tfloat})
    ht_50_irnt = ht_50_irnt.annotate(
        chi_squared=ht_50_irnt['Z']**2,
        n=ht_50_irnt['N'],
        ld_score=ht_scores[ht_50_irnt['SNP']]['L2'],
        locus=hl.locus(ht_scores[ht_50_irnt['SNP']]['CHR'],
                       ht_scores[ht_50_irnt['SNP']]['BP']),
        alleles=hl.array([ht_50_irnt['A2'], ht_50_irnt['A1']]),
        phenotype='50_irnt')
    ht_50_irnt = ht_50_irnt.key_by(ht_50_irnt['locus'],
                                   ht_50_irnt['alleles'])
    ht_50_irnt = ht_50_irnt.select(ht_50_irnt['chi_squared'],
                                   ht_50_irnt['n'],
                                   ht_50_irnt['ld_score'],
                                   ht_50_irnt['phenotype'])
    # Phenotype 2: summary statistics for '20160', shaped identically.
    ht_20160 = hl.import_table(
        doctest_resource('ld_score_regression.20160.sumstats.tsv'),
        key='SNP', types={'N': hl.tint, 'Z': hl.tfloat})
    ht_20160 = ht_20160.annotate(
        chi_squared=ht_20160['Z']**2,
        n=ht_20160['N'],
        ld_score=ht_scores[ht_20160['SNP']]['L2'],
        locus=hl.locus(ht_scores[ht_20160['SNP']]['CHR'],
                       ht_scores[ht_20160['SNP']]['BP']),
        alleles=hl.array([ht_20160['A2'], ht_20160['A1']]),
        phenotype='20160')
    ht_20160 = ht_20160.key_by(ht_20160['locus'],
                               ht_20160['alleles'])
    ht_20160 = ht_20160.select(ht_20160['chi_squared'],
                               ht_20160['n'],
                               ht_20160['ld_score'],
                               ht_20160['phenotype'])
    # Path 1: stack the phenotypes and pivot into one column per phenotype.
    ht = ht_50_irnt.union(ht_20160)
    mt = ht.to_matrix_table(row_key=['locus', 'alleles'],
                            col_key=['phenotype'],
                            row_fields=['ld_score'],
                            col_fields=[])
    # Write/read round-trip so the regression runs on an on-disk matrix table.
    mt_tmp = new_temp_file()
    mt.write(mt_tmp, overwrite=True)
    mt = hl.read_matrix_table(mt_tmp)
    ht_results = hl.experimental.ld_score_regression(
        weight_expr=mt['ld_score'],
        ld_score_expr=mt['ld_score'],
        chi_sq_exprs=mt['chi_squared'],
        n_samples_exprs=mt['n'],
        n_blocks=20,
        two_step_threshold=5,
        n_reference_panel_variants=1173569)
    results = {
        x['phenotype']: {
            'mean_chi_sq': x['mean_chi_sq'],
            'intercept_estimate': x['intercept']['estimate'],
            'intercept_standard_error': x['intercept']['standard_error'],
            'snp_heritability_estimate': x['snp_heritability']['estimate'],
            'snp_heritability_standard_error':
                x['snp_heritability']['standard_error']}
        for x in ht_results.collect()}
    self.assertAlmostEqual(
        results['50_irnt']['mean_chi_sq'],
        3.4386, places=4)
    self.assertAlmostEqual(
        results['50_irnt']['intercept_estimate'],
        0.7727, places=4)
    self.assertAlmostEqual(
        results['50_irnt']['intercept_standard_error'],
        0.2461, places=4)
    self.assertAlmostEqual(
        results['50_irnt']['snp_heritability_estimate'],
        0.3845, places=4)
    self.assertAlmostEqual(
        results['50_irnt']['snp_heritability_standard_error'],
        0.1067, places=4)
    self.assertAlmostEqual(
        results['20160']['mean_chi_sq'],
        1.5209, places=4)
    self.assertAlmostEqual(
        results['20160']['intercept_estimate'],
        1.2109, places=4)
    self.assertAlmostEqual(
        results['20160']['intercept_standard_error'],
        0.2238, places=4)
    self.assertAlmostEqual(
        results['20160']['snp_heritability_estimate'],
        0.0486, places=4)
    self.assertAlmostEqual(
        results['20160']['snp_heritability_standard_error'],
        0.0416, places=4)
    # Path 2: a single table with one (chi_squared, n) field pair per
    # phenotype; here phenotypes are identified by position (0 and 1).
    ht = ht_50_irnt.annotate(
        chi_squared_50_irnt=ht_50_irnt['chi_squared'],
        n_50_irnt=ht_50_irnt['n'],
        chi_squared_20160=ht_20160[ht_50_irnt.key]['chi_squared'],
        n_20160=ht_20160[ht_50_irnt.key]['n'])
    ht_results = hl.experimental.ld_score_regression(
        weight_expr=ht['ld_score'],
        ld_score_expr=ht['ld_score'],
        chi_sq_exprs=[ht['chi_squared_50_irnt'],
                      ht['chi_squared_20160']],
        n_samples_exprs=[ht['n_50_irnt'],
                         ht['n_20160']],
        n_blocks=20,
        two_step_threshold=5,
        n_reference_panel_variants=1173569)
    results = {
        x['phenotype']: {
            'mean_chi_sq': x['mean_chi_sq'],
            'intercept_estimate': x['intercept']['estimate'],
            'intercept_standard_error': x['intercept']['standard_error'],
            'snp_heritability_estimate': x['snp_heritability']['estimate'],
            'snp_heritability_standard_error':
                x['snp_heritability']['standard_error']}
        for x in ht_results.collect()}
    # Expectations are identical to path 1 — same data, different plumbing.
    self.assertAlmostEqual(
        results[0]['mean_chi_sq'],
        3.4386, places=4)
    self.assertAlmostEqual(
        results[0]['intercept_estimate'],
        0.7727, places=4)
    self.assertAlmostEqual(
        results[0]['intercept_standard_error'],
        0.2461, places=4)
    self.assertAlmostEqual(
        results[0]['snp_heritability_estimate'],
        0.3845, places=4)
    self.assertAlmostEqual(
        results[0]['snp_heritability_standard_error'],
        0.1067, places=4)
    self.assertAlmostEqual(
        results[1]['mean_chi_sq'],
        1.5209, places=4)
    self.assertAlmostEqual(
        results[1]['intercept_estimate'],
        1.2109, places=4)
    self.assertAlmostEqual(
        results[1]['intercept_standard_error'],
        0.2238, places=4)
    self.assertAlmostEqual(
        results[1]['snp_heritability_estimate'],
        0.0486, places=4)
    self.assertAlmostEqual(
        results[1]['snp_heritability_standard_error'],
        0.0416, places=4)
@skip_when_service_backend('very slow / nonterminating')
def test_sparse(self):
    """sparse_split_multi should reproduce a conventionally split VCF."""
    expected = hl.import_vcf(resource('sparse_split_test_b.vcf'))
    unsplit = hl.import_vcf(resource('sparse_split_test.vcf'), call_fields=['LGT', 'LPGT'])
    split = hl.experimental.sparse_split_multi(unsplit)
    split = split.drop('a_index', 'was_split')
    split = split.select_entries(*expected.entry.keys())
    assert split._same(expected)
@fails_service_backend()
def test_define_function(self):
    """define_function handles stay valid when the same function is defined twice."""
    first = hl.experimental.define_function(
        lambda a, b: (a + 7) * b, hl.tint32, hl.tint32)
    self.assertEqual(hl.eval(first(1, 3)), 24)
    second = hl.experimental.define_function(
        lambda a, b: (a + 7) * b, hl.tint32, hl.tint32)
    # Re-defining must not invalidate either handle.
    self.assertEqual(hl.eval(first(1, 3)), 24)  # idempotent
    self.assertEqual(hl.eval(second(1, 3)), 24)  # idempotent
@fails_service_backend()
@fails_local_backend()
def test_pc_project(self):
    """pc_project should yield one score row per projected sample."""
    ref_mt = hl.balding_nichols_model(3, 100, 50)
    _, _, loadings = hl.hwe_normalized_pca(ref_mt.GT, k=10, compute_loadings=True)
    ref_mt = ref_mt.annotate_rows(af=hl.agg.mean(ref_mt.GT.n_alt_alleles()) / 2)
    loadings = loadings.annotate(af=ref_mt.rows()[loadings.key].af)
    projected = hl.balding_nichols_model(3, 100, 50)
    scores = hl.experimental.pc_project(projected.GT, loadings.loadings, loadings.af)
    assert scores._force_count() == 100
@skip_when_service_backend('slow >800s')
def test_mt_full_outer_join(self):
    """full_outer_join_mt preserves both sides' entries and handles duplicate keys."""
    def random_mt():
        mt = hl.utils.range_matrix_table(10, 10)
        mt = mt.annotate_cols(c1=hl.rand_unif(0, 1))
        mt = mt.annotate_rows(r1=hl.rand_unif(0, 1))
        return mt.annotate_entries(e1=hl.rand_unif(0, 1))

    left = random_mt()
    right = random_mt()
    joined = hl.experimental.full_outer_join_mt(left, right)
    assert joined.aggregate_entries(
        hl.agg.all(joined.left_entry == left.index_entries(joined.row_key, joined.col_key)))
    assert joined.aggregate_entries(
        hl.agg.all(joined.right_entry == right.index_entries(joined.row_key, joined.col_key)))
    # Re-keying introduces duplicated keys; the join expands each duplicated
    # key group, so 10 rows/cols become 15 each.
    right = right.key_cols_by(new_col_key=5 - (right.col_idx // 2))  # duplicate col keys
    left = left.key_rows_by(new_row_key=5 - (left.row_idx // 2))  # duplicate row keys
    joined = hl.experimental.full_outer_join_mt(left, right)
    assert joined.count() == (15, 15)
@skip_when_service_backend('hangs')
def test_mt_full_outer_join_self(self):
    """Joining a matrix table with itself keeps every row, col, and entry defined on both sides."""
    mt = hl.import_vcf(resource('sample.vcf'))
    joined = hl.experimental.full_outer_join_mt(mt, mt)
    both_cols = joined.filter_cols(hl.is_defined(joined.left_col) & hl.is_defined(joined.right_col))
    assert both_cols.count_cols() == mt.count_cols()
    both_rows = joined.filter_rows(hl.is_defined(joined.left_row) & hl.is_defined(joined.right_row))
    assert both_rows.count_rows() == mt.count_rows()
    both_entries = joined.filter_entries(hl.is_defined(joined.left_entry) & hl.is_defined(joined.right_entry))
    assert both_entries.entries().count() == mt.entries().count()
@fails_service_backend()
@fails_local_backend()
def test_block_matrices_tofiles(self):
    """block_matrices_tofiles should write each matrix's raw float data verbatim.

    Fix: removed the unused local `arrs` (the reshaped views were never read
    in this test — the raw flat `data` arrays are compared directly).
    """
    data = [
        np.random.rand(11 * 12),
        np.random.rand(5 * 17),
    ]
    bms = [
        hl.linalg.BlockMatrix._create(11, 12, data[0].tolist(), block_size=4),
        hl.linalg.BlockMatrix._create(5, 17, data[1].tolist(), block_size=8),
    ]
    with hl.TemporaryDirectory() as prefix:
        hl.experimental.block_matrices_tofiles(bms, f'{prefix}/files')
        for i in range(len(bms)):
            expected = data[i]
            # Files hold the raw row-major float64 bytes of each matrix.
            actual = np.frombuffer(
                hl.current_backend().fs.open(f'{prefix}/files/{i}', mode='rb').read())
            self.assertTrue(np.array_equal(expected, actual))
@fails_service_backend()
@fails_local_backend()
def test_export_block_matrices(self):
    """export_block_matrices writes round-trippable TSVs, with default and custom filenames."""
    data = [
        np.random.rand(11 * 12),
        np.random.rand(5 * 17),
    ]
    arrs = [
        data[0].reshape((11, 12)),
        data[1].reshape((5, 17)),
    ]
    bms = [
        hl.linalg.BlockMatrix._create(11, 12, data[0].tolist(), block_size=4),
        hl.linalg.BlockMatrix._create(5, 17, data[1].tolist(), block_size=8),
    ]
    # Default filenames: <index>.tsv
    with hl.TemporaryDirectory() as prefix:
        hl.experimental.export_block_matrices(bms, f'{prefix}/files')
        for i, expected in enumerate(arrs):
            actual = np.loadtxt(
                hl.current_backend().fs.open(f'{prefix}/files/{i}.tsv'))
            self.assertTrue(np.array_equal(expected, actual))
    # Custom filenames, including one nested in a subdirectory.
    with hl.TemporaryDirectory() as prefix2:
        custom_names = ["nameA", "inner/nameB.tsv"]
        hl.experimental.export_block_matrices(bms, f'{prefix2}/files', custom_filenames=custom_names)
        for name, expected in zip(custom_names, arrs):
            actual = np.loadtxt(
                hl.current_backend().fs.open(f'{prefix2}/files/{name}'))
            self.assertTrue(np.array_equal(expected, actual))
def test_loop(self):
    """Tail-recursive loops compute triangle numbers; misuse raises TypeError."""
    def int_triangle(n):
        return hl.experimental.loop(
            lambda f, x, c: hl.if_else(x > 0, f(x - 1, c + x), c),
            hl.tint32, n, 0)

    def tuple_triangle(n):
        return hl.experimental.loop(
            lambda f, xc: hl.if_else(xc[0] > 0, f((xc[0] - 1, xc[1] + xc[0])), xc[1]),
            hl.tint32, (n, 0))

    for triangle in [int_triangle, tuple_triangle]:
        assert_evals_to(triangle(20), sum(range(21)))
        assert_evals_to(triangle(0), 0)
        assert_evals_to(triangle(-1), 0)

    def fails_typecheck(regex, f):
        with self.assertRaisesRegex(TypeError, regex):
            hl.eval(hl.experimental.loop(f, hl.tint32, 1))

    # Each lambda below misuses the recursion handle `f` in a different way.
    fails_typecheck("outside of tail position", lambda f, x: x + f(x))
    fails_typecheck("wrong number of arguments", lambda f, x: f(x, x + 1))
    fails_typecheck("bound value", lambda f, x: hl.bind(lambda x: x, f(x)))
    fails_typecheck("branch condition", lambda f, x: hl.if_else(f(x) == 0, x, 1))
    fails_typecheck("Type error", lambda f, x: hl.if_else(x == 0, f("foo"), 1))
def test_nested_loops(self):
    """Loops may be nested, and an inner loop may recur into the outer loop's handle."""
    def triangle_loop(n, add_f):
        # Tail-recursively folds add_f over x = 0..n (inclusive), starting at 0.
        recur = lambda f, x, c: hl.if_else(x <= n, f(x + 1, add_f(x, c)), c)
        return hl.experimental.loop(recur, hl.tint32, 0, 0)
    assert_evals_to(triangle_loop(5, lambda x, c: c + x), 15)
    # The accumulator step is itself a loop: sums T(0)+...+T(5) = 35.
    assert_evals_to(triangle_loop(5, lambda x, c: c + triangle_loop(x, lambda x2, c2: c2 + x2)), 15 + 10 + 6 + 3 + 1)
    n1 = 5
    # Here the *inner* loop's exit branch calls the *outer* recursion handle
    # `f` — recurring across loop boundaries from a nested tail position.
    calls_recur_from_nested_loop = hl.experimental.loop(
        lambda f, x1, c1:
        hl.if_else(x1 <= n1,
                   hl.experimental.loop(
                       lambda f2, x2, c2:
                       hl.if_else(x2 <= x1,
                                  f2(x2 + 1, c2 + x2),
                                  f(x1 + 1, c1 + c2)),
                       'int32', 0, 0),
                   c1),
        'int32', 0, 0)
    assert_evals_to(calls_recur_from_nested_loop, 15 + 10 + 6 + 3 + 1)
def test_loop_errors(self):
    """A loop whose declared state type mismatches the inferred type must raise TypeError."""
    with pytest.raises(TypeError, match="requested type ndarray<int32, 2> does not match inferred type ndarray<float64, 2>"):
        hl.experimental.loop(
            lambda f, my_nd: hl.if_else(my_nd[0, 0] == 1000, my_nd, f(my_nd + 1)),
            hl.tndarray(hl.tint32, 2),
            hl.nd.zeros((20, 10), hl.tfloat64))
def test_loop_with_struct_of_strings(self):
    """Loop state may be a struct of strings; chars migrate from s2 to s1 until s1 is longer."""
    def step(recur, state):
        return hl.if_else(
            hl.len(state.s1) > hl.len(state.s2),
            state,
            recur(hl.struct(s1=state.s1 + state.s2[-1], s2=state.s2[:-1])))
    start = hl.struct(s1="a", s2="gfedcb")
    result = hl.eval(hl.experimental.loop(step, hl.tstruct(s1=hl.tstr, s2=hl.tstr), start))
    assert result == hl.Struct(s1="abcd", s2="gfe")
def test_loop_memory(self):
    """Array loop state grows per iteration without corrupting earlier elements."""
    def append_until(recur, acc, idx):
        return hl.if_else(idx > 10, acc, recur(acc.append(hl.str(idx)), idx + 1))
    result = hl.eval(
        hl.experimental.loop(append_until, hl.tarray(hl.tstr), hl.literal(['foo']), 1))
    assert result == ['foo', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
| |
from collections import namedtuple
import hashlib
import logging
import mimetypes
import os
import subprocess32 as subprocess
import time
from dropbox.client import DropboxClient
from dropbox.rest import ErrorResponse
from app import analytics
from app import celery
from app import db
from app import emailer
from app import filesystem
from app import redis
from app.models import User, Book
log = logging.getLogger()

# Lock expires in 30 minutes, in case there are lots of epubs to convert.
LOCK_EXPIRE = 60 * 30

# Email providers cap message sizes (SendGrid allows ~20MB, which shrinks to
# ~15MB of attachments after base64 encoding; Mailgun is about 25MB), so a
# batch is limited both by attachment count and by total size.
# Lower ATTACHMENTS_LIMIT to prevent users from hogging the celery workers.
ATTACHMENTS_LIMIT = 5
# Conversions are CPU-heavy, so convertible books are sent one at a time.
CONVERTIBLE_ATTACHMENTS_LIMIT = 1
# Maximum total size in bytes of the attachments in one email.
ATTACHMENTS_SIZE_LIMIT = 25 * (10**6)
# Amazon rejects individual documents over this size (bytes).
AMAZON_SIZE_LIMIT = 50 * (10**6)

# Try to send a file this many times before giving up. Sending a file means
# successful Dropbox download, file conversion, and correct response from
# SendGrid or Mailgun.
MAX_SEND_ATTEMPTS = 10

# Number of seconds to wait before timing out calibre conversion
CONVERSION_TIMEOUT = 1200

################################
# Book mimetypes
################################
# Amazon doesn't support these formats, but BookDrop does! Books with these
# mimetypes get converted before sending.
EPUB_MIMETYPE = 'application/epub+zip'
CBR_MIMETYPE = 'application/x-cbr'
CBZ_MIMETYPE = 'application/x-cbz'
AZW_MIMETYPE = 'application/vnd.amazon.ebook'  # not a real mimetype, but we need to recognize it.
CONVERTIBLE_MIMETYPES = {EPUB_MIMETYPE,
                         CBR_MIMETYPE,
                         CBZ_MIMETYPE,
                         AZW_MIMETYPE,
                         }
MOBI_MIMETYPE = 'application/x-mobipocket-ebook'
# Supported filetypes.
# According to:
# http://www.amazon.com/gp/help/customer/display.html?nodeId=200375630
BOOK_MIMETYPES = CONVERTIBLE_MIMETYPES.union({
    MOBI_MIMETYPE,
    'text/plain',
    'application/pdf',
    'application/msword',
    'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
    'application/rtf',
    'text/html',
    'image/jpeg',
    'image/gif',
    'image/x-ms-bmp',
    'image/png',
    })

# Teach the mimetypes module about ebook extensions it doesn't know natively.
mimetypes.add_type(MOBI_MIMETYPE, '.mobi')
mimetypes.add_type(MOBI_MIMETYPE, '.prc')
mimetypes.add_type(AZW_MIMETYPE, '.azw')
mimetypes.add_type(AZW_MIMETYPE, '.azw1')
mimetypes.add_type(AZW_MIMETYPE, '.azw3')
mimetypes.add_type(EPUB_MIMETYPE, '.epub')
@celery.task(ignore_result=True)
def upload_welcome_pdf(dropbox_id):
    """
    Upload the welcome PDF to a new user's Dropbox folder.

    Fix: replaced the bare ``except:`` with ``except Exception:`` so that
    SystemExit/KeyboardInterrupt from the worker are not swallowed.

    :param dropbox_id: Dropbox account id of the user to welcome.
    :return: True on success (or when delegating to `kindlebox`), False on
        failure or if the user is unknown/inactive.
    """
    user = User.query.filter_by(dropbox_id=dropbox_id,
                                active=True).first()
    if user is None:
        return False
    # If we've already sent the welcome PDF before, the Dropbox webhook won't
    # trigger for it again, so process the user's delta here instead.
    if user.uploaded_welcome_pdf:
        return kindlebox(dropbox_id)
    analytics.track(str(user.id), 'Sent welcome pdf')
    client = DropboxClient(user.access_token)
    try:
        with open('app/static/bookdrop_welcome.pdf', 'rb') as f:
            response = client.put_file('Welcome to BookDrop.pdf', f, overwrite=True)
            if response:
                log.info(u"Welcome PDF sent to user ID {0}.".format(user.id))
            else:
                raise Exception("No response received after sending welcome PDF")
        user.set_uploaded_welcome_pdf()
        db.session.commit()
    except Exception:
        log.error((u"Welcome PDF failed for user ID "
                   "{0}.").format(user.id), exc_info=True)
        return False
    return True
def _kindlebox(user, client):
    """
    The main body of a `kindlebox` task. Processes a single Dropbox delta for
    the given user. Adds and deletes any books from the database, and updates
    the user's Dropbox API cursor.

    :param user: User model instance whose delta is processed.
    :param client: authenticated DropboxClient for that user.
    :return: True when there is nothing further to process (empty delta or
        the user was deactivated); False when the caller should iterate again.
    """
    try:
        delta = client.delta(user.cursor)
    except ErrorResponse as e:
        # Dropbox rejected the request (e.g. revoked token): deactivate the
        # user so we stop polling on their behalf.
        log.info(u"Marking user id {0} inactive due to {1}".format(user.id, e.error_msg))
        user.active = False
        db.session.commit()
        return True
    # Process delta to get added and removed books. Also download any newly
    # added books and get the hashes.
    # NOTE: It's possible that the book failed to download here, in which case
    # each book in `added_books` has `book_hash` None. We still add it to the
    # database in case it can be downloaded later.
    added_books = get_added_books(delta['entries'], user.id, client)
    removed_books = get_removed_books(delta['entries'], user.id)
    log.debug(u"Delta contains {0} added books, {1} removed "
              "books".format(len(added_books), len(removed_books)))
    # If there are no more changes to process, update the cursor and we are
    # done.
    if len(added_books) == 0 and len(removed_books) == 0:
        user.cursor = delta['cursor']
        db.session.commit()
        return True
    # Add and delete books from the database.
    for book in added_books:
        db.session.add(book)
    for book in removed_books:
        db.session.delete(book)
    # Update the Dropbox delta cursor in database.
    user.cursor = delta['cursor']
    db.session.merge(user)
    db.session.commit()
    return False
@celery.task(ignore_result=True)
def kindlebox(dropbox_id):
    """
    Task that continually processes any Dropbox changes for the user associated
    with the given dropbox ID until there are no more changes. Any books
    removed from Dropbox are also deleted from the database. The first
    `ATTACHMENTS_LIMIT` books out of the books added to Dropbox are sent. The
    rest of the books are queued.

    Fix: replaced the bare ``except:`` with ``except Exception:`` so worker
    shutdown signals (SystemExit/KeyboardInterrupt) still propagate.

    :param dropbox_id: Dropbox account id whose delta stream to drain.
    """
    # Only process Dropbox changes for active users.
    user = User.query.filter_by(dropbox_id=dropbox_id, active=True).first()
    if user is None:
        return
    kindlebox_lock = acquire_kindlebox_lock(user.id)
    # Another worker is taking care of it, so I'm done.
    if kindlebox_lock is None:
        log.debug(u"Unable to acquire kindlebox lock for user id "
                  "{0}".format(user.id))
        return
    log.info(u"Processing dropbox webhook for user id {0}".format(user.id))
    # Loop until there is no delta.
    # NOTE: There is a slight chance of a race condition between dropbox
    # webhook and two celery workers that would result in a delta getting
    # dropped, but hopefully this is better than cluttering the task queues.
    client = DropboxClient(user.access_token)
    try:
        while True:
            log.debug(u"Processing one kindlebox iteration for user id "
                      "{0}".format(user.id))
            done = _kindlebox(user, client)
            if done:
                break
    except Exception:
        log.error((u"Failed to process dropbox webhook for user id "
                   "{0}.").format(user.id), exc_info=True)
    # Always release the lock and clean up, even after a failed iteration.
    kindlebox_lock.release()
    clear_user_files(user.id, u'kindlebox')
    if user.active:
        send_books(user.id)
def _send_books(user, books):
    """
    Helper function for the `send_books` celery task. Download, if necessary,
    and email all the given user's books. Mark each book as `unsent` or not in
    the database.

    :param user: User model instance (provides Dropbox token, sender address,
        and kindle destination addresses).
    :param books: iterable of Book rows to download/convert/send.
    """
    client = DropboxClient(user.access_token)
    email_from = user.emailer
    email_to = [row.kindle_name for row in user.kindle_names.all()]
    attachments = []
    attachment_size = 0
    for book in books:
        # If there's an error downloading or converting the book, don't try
        # to send it.
        download_book(client, book, u'send_books')
        if book.book_hash is None:
            # Download failed (hash is only set after a successful download).
            continue
        error = convert_book(book)
        if error:
            log.error(u"Failed to ebook-convert {book} for user id {user_id}\n"
                      "STDERR: {stderr}\n".format(book=book.pathname,
                                                  user_id=user.id,
                                                  stderr=error))
            continue
        # If the next book added will put us over the attachment size limit,
        # send this batch.
        # NOTE: An individual book with size over the limit will still get sent
        # using this code. We want to do this in case it actually is possible
        # to send the file (who knows what sendgrid's limits are?).
        if (attachment_size + book.get_size() > ATTACHMENTS_SIZE_LIMIT and
                len(attachments) > 0):
            email_attachments(email_from, email_to, attachments, user.id)
            attachments = []
            attachment_size = 0
        attachments.append(book)
        attachment_size += book.get_size()
    # Flush the final (possibly partial) batch.
    if len(attachments) > 0:
        email_attachments(email_from, email_to, attachments, user.id)
@celery.task(ignore_result=True)
def send_books(user_id, min_book_id=0, convert=False):
    """
    Task to send any books associated with the given user ID that are marked as
    `unsent`. Sends a batch of at most `ATTACHMENTS_LIMIT` books, all with
    Book.id greater than or equal to the given `min_book_id`. Download books.
    Convert books if `convert` is True.

    The task queues another `send_books` task for the next batch of (distinct)
    books.

    Fix: replaced the bare ``except:`` with ``except Exception:`` so worker
    shutdown signals are not swallowed.

    :param user_id: database id of the user whose books to send.
    :param min_book_id: lower bound (inclusive) on Book.id for this batch.
    :param convert: when True, send books needing conversion (one at a time,
        on the 'conversion' queue); otherwise send directly-compatible books.
    """
    send_lock = acquire_send_books_lock(user_id)
    if send_lock is None:
        return
    # Only resend books for active users.
    user = User.query.filter_by(id=user_id, active=True).first()
    if user is None:
        return
    # Get the next batch of books that haven't been sent yet and are still
    # under the maximum number of send attempts.
    unsent_books_query = (user.books.filter_by(unsent=True)
                          .filter(Book.num_attempts < MAX_SEND_ATTEMPTS)
                          .order_by(Book.id))
    unsent_books = unsent_books_query.filter(Book.id >= min_book_id).all()
    # Only short-circuit if there are no new books at all to send, not just
    # ones that don't need conversion.
    if len(unsent_books) == 0 and min_book_id == 0:
        send_lock.release()
        clear_user_files(user.id, u'send_books')
        return
    # Send either books that need conversion or books that don't.
    compatible_books, convertible_books = [], []
    for book in unsent_books:
        if convert_to_mobi_path(book.pathname) is None:
            compatible_books.append(book)
        else:
            convertible_books.append(book)
    if convert:
        unsent_books = convertible_books[:CONVERTIBLE_ATTACHMENTS_LIMIT]
    else:
        unsent_books = compatible_books[:ATTACHMENTS_LIMIT]
    log_string = ['{' + str(i) + '}' for i in range(len(unsent_books))]
    if len(unsent_books) > 0:
        log_string = ' '.join(log_string).format(*[book.id for book in unsent_books])
        if convert:
            log_string += ', with conversion'
    log.info(u"Processing book resend for user id {0}, book ids {1}".format(user_id, log_string))
    # Re-download and convert books that failed to send before.
    try:
        _send_books(user, unsent_books)
        # TODO: Reset all attempts to 0 before release.
        for book in unsent_books:
            book.num_attempts += 1
        db.session.commit()
    except Exception:
        log.error(u"Failed to resend books for user id {0}".format(user_id),
                  exc_info=True)
    next_unsent_book = None
    if len(unsent_books) > 0:
        # If there are any more books to send after this batch, requeue them.
        next_unsent_book = unsent_books_query.filter(Book.id > unsent_books[-1].id).first()
    send_lock.release()
    # For some reason, calibre is leaving a lot of garbage files...
    filesystem.clear_calibre_files()
    clear_user_files(user.id, u'send_books')
    if next_unsent_book is None and not convert:
        # Compatible books done; switch to the conversion queue for the rest.
        send_books.apply_async((user_id, ),
                               {'convert': True},
                               queue='conversion')
    elif next_unsent_book is not None:
        queue_kwarg = {}
        if convert:
            queue_kwarg['queue'] = 'conversion'
        send_books.apply_async((user_id, ),
                               {
                                   'min_book_id': next_unsent_book.id,
                                   'convert': convert,
                               },
                               **queue_kwarg)
def get_added_books(delta_entries, user_id, client):
    """
    Return a list of Books. All books in this list have the correct mimetype,
    are under the size limit, and don't have a duplicate hash in the database
    (i.e. not a filepath rename).

    :param delta_entries: Dropbox delta 'entries' list of (path, metadata)
        pairs; metadata is None for removals.
    :param user_id: database id of the owning user.
    :param client: authenticated DropboxClient used to download each book.
    """
    added_entries = []
    for entry in delta_entries:
        pathname, metadata = entry
        pathname = canonicalize(pathname)
        # First check that it's not a removed pathname.
        if metadata is None:
            continue
        # Check that pathname is a file, has an okay mimetype and is under the
        # size limit.
        if (metadata['is_dir'] or not mimetypes_filter(pathname) or
                metadata['bytes'] > AMAZON_SIZE_LIMIT):
            continue
        book = Book(user_id,
                    pathname,
                    metadata['bytes'])
        # Download now so we can hash the contents; on failure book_hash
        # stays None and the book is still recorded for a later retry.
        download_book(client, book, u'kindlebox')
        # Make sure that the book is not a duplicate of a previously added book
        # (probably a renamed file).
        # NOTE(review): when the download failed, book_hash is None and this
        # lookup matches any other None-hash book for the user — presumably
        # harmless (it only copies the `unsent` flag), but verify.
        duplicate = (Book.query.filter_by(user_id=user_id)
                     .filter_by(book_hash=book.book_hash).first())
        if (duplicate is not None):
            book.unsent = duplicate.unsent
        added_entries.append(book)
    return added_entries
def get_removed_books(delta_entries, user_id):
    """
    Return the user's Books whose paths were deleted during this delta
    (a delta entry with None metadata marks a removal).
    """
    removed_paths = [canonicalize(path) for path, metadata in delta_entries
                     if metadata is None]
    if not removed_paths:
        return []
    return (Book.query.filter_by(user_id=user_id)
            .filter(Book.pathname.in_(removed_paths)).all())
def convert_book(book):
    """
    Attempt to convert any books of type in `CONVERTIBLE_MIMETYPES` to .mobi,
    in the same folder as the given temporary path.

    Fix: the generic handler returned ``e.message``, which does not exist on
    most exceptions in Python 3 (and is unreliable on 2); use ``str(e)``.

    :param book: Book row whose temporary download will be converted.
    :return: None on success or when no conversion is needed; otherwise a
        string describing the failure (used by callers as an error flag).
    """
    tmp_path = book.get_tmp_pathname(u'send_books')
    mobi_tmp_path = convert_to_mobi_path(tmp_path)
    if mobi_tmp_path is None:
        # Not a convertible mimetype; nothing to do.
        return None
    log.info(u"Converting book for user id {0}".format(book.user_id))
    try:
        subprocess.check_output(['ebook-convert', tmp_path, mobi_tmp_path],
                                timeout=CONVERSION_TIMEOUT)
    except subprocess.CalledProcessError as e:
        return e.output
    except subprocess.TimeoutExpired:
        return "Timed out converting book"
    except Exception as e:
        return str(e)
    return None
def download_book(client, book, tag):
    """
    Download the given book from the Dropbox client to a temporary path. Make
    all the directories in the given book path at the temporary root folder if
    they don't already exist.

    Set the book's hash of the downloaded file.

    Fixes: open the temporary file in binary mode ('wb') — book data is
    binary (epub/mobi/pdf), and text mode corrupts it on Windows and fails
    outright on Python 3; narrowed the bare ``except:`` to ``Exception``.

    :param client: authenticated DropboxClient.
    :param book: Book row; `book.book_hash` is set on success, left as-is
        (None for new books) on failure.
    :param tag: task namespace ('kindlebox' or 'send_books') selecting the
        temporary directory.
    """
    # Make all the necessary nested directories in the temporary directory.
    tmp_path = book.get_tmp_pathname(tag)
    try:
        book_dir = os.path.dirname(tmp_path)
        if not os.path.exists(book_dir):
            os.makedirs(book_dir)
    except OSError:
        log.error(u"Error creating directories for book {0}".format(book.pathname),
                  exc_info=True)
    try:
        md5 = hashlib.md5()
        with open(tmp_path, 'wb') as tmp_book:
            with client.get_file(book.pathname) as book_file:
                data = book_file.read()
                tmp_book.write(data)
                md5.update(data)
        book.book_hash = md5.hexdigest()
    except Exception:
        log.error(u"Failed to download book {book_path} for user id "
                  "{user_id}".format(book_path=book.pathname,
                                     user_id=book.user_id), exc_info=True)
    return None
def email_attachments(email_from, email_to, attachments, user_id):
    """
    Given a 'from' email address and a list of 'to' email addresses, try to
    email as many of the attachments in the given list as possible. For each
    attachment, add the book to the user associated with the given ID and mark
    whether it was successfully emailed or not.

    Fix: narrowed both bare ``except:`` clauses to ``except Exception:`` so
    interpreter-exit signals are not swallowed mid-send.
    """
    attachment_paths = []
    for book in attachments:
        tmp_path = book.get_tmp_pathname(u'send_books')
        # If this book got converted, get the .mobi path instead.
        mobi_tmp_path = convert_to_mobi_path(tmp_path)
        if mobi_tmp_path is not None:
            tmp_path = mobi_tmp_path
        attachment_paths.append(tmp_path)
    log.debug(u"Sending email to " + ' '.join(email_to) + " " + ' '.join(attachment_paths))
    try:
        # First try to batch email.
        _email_attachments(email_from, email_to, attachment_paths)
        for book in attachments:
            book.mark_unsent(False)
    except Exception:
        log.error(u"Failed to send books for user id {0}".format(user_id),
                  exc_info=True)
        # If fail to batch email, try sending individually instead.
        for book, path in zip(attachments, attachment_paths):
            try:
                _email_attachments(email_from, email_to, [path])
                book.mark_unsent(False)
            except Exception:
                log.error(u"Failed to resend book for user id {0}".format(user_id),
                          exc_info=True)
                book.mark_unsent(True)
def _email_attachments(email_from, email_to, attachment_paths):
    """Send one email with the given attachments; raise on a non-200 status."""
    result = emailer.send_mail(email_from, email_to, attachment_paths)
    status, message = result
    if status == 200:
        return
    raise KindleboxException(message)
def convert_to_mobi_path(path):
    """Return the .mobi output path for a convertible book, else None."""
    mimetype = mimetypes.guess_type(path)[0]
    if mimetype not in CONVERTIBLE_MIMETYPES:
        return None
    root = os.path.splitext(path)[0]
    return root + u'.mobi'
def canonicalize(pathname):
    """Dropbox paths are case-insensitive; lowercase for stable comparisons."""
    lowered = pathname.lower()
    return lowered
def mimetypes_filter(path):
    """True when the path's guessed mimetype is a supported book type."""
    guessed, _ = mimetypes.guess_type(path)
    return guessed in BOOK_MIMETYPES
def _acquire_lock(method_name, user_id):
    """
    Try to grab the per-user redis lock for the named task without blocking.
    Returns the lock on success, None when another worker holds it.
    """
    # Lock per user.
    lock_id = '{0}-lock-{1}'.format(method_name, user_id)
    lock = redis.lock(lock_id, timeout=LOCK_EXPIRE)
    # If non-blocking and unable to acquire lock, discard the task and hope
    # that another worker finishes it.
    acquired = lock.acquire(blocking=False)
    if acquired:
        log.debug(u"Lock {0} acquired.".format(lock_id))
        return lock
    log.debug(u"Couldn't acquire lock {0}.".format(lock_id))
    return None
def acquire_kindlebox_lock(user_id):
    """
    Acquire the per-user lock guarding the `kindlebox` task.

    :return: the lock on success, None if another worker holds it.
    """
    return _acquire_lock(kindlebox.__name__,
                         user_id)
def acquire_send_books_lock(user_id):
    """
    Acquire the per-user lock guarding the `send_books` task.

    :return: the lock on success, None if another worker holds it.
    """
    return _acquire_lock(send_books.__name__,
                         user_id)
def clear_user_files(user_id, task):
    """
    Clears as many temporary files as possible for the given `user_id` and
    celery `task`. If `task` is not one of 'kindlebox' or 'send_books', does
    nothing.

    The shared parent directory is only removed when the *other* task's lock
    can be acquired, which proves no sibling task is mid-download.
    """
    if task == u'kindlebox':
        acquire_method = acquire_send_books_lock
    elif task == u'send_books':
        acquire_method = acquire_kindlebox_lock
    else:
        return
    # Always safe to clear this task's own subdirectory.
    task_directory = filesystem.get_user_directory(user_id, task)
    filesystem.clear_directory(task_directory)
    # May be downloading books to send, so don't clear the upper-level
    # directory yet.
    lock = acquire_method(user_id)
    if lock is not None:
        user_directory = filesystem.get_user_directory(user_id)
        filesystem.clear_empty_directory(user_directory)
        lock.release()
class KindleboxException(Exception):
    """Application error, e.g. a non-200 response from the email provider."""
    pass
| |
#!/usr/bin/python3
# encoding: utf-8
"""
Check ECR to see if a given version of the image exists in the repo
Notes:
all_tags: json string in the form '{"all_tags" : ["a", "b", "c"]}'
"""
import json
import sys
from sarge import get_stdout, run
# Absolute paths to the external CLI tools this script shells out to.
AWS = '/usr/local/bin/aws'
DOCKER = '/usr/bin/docker'
def get_image_ids_from_ecr(region: str=None, repo: str=None):
    """
    Get all the image ids from the ECR repo.

    Fix: on a missing region/repo the original returned ``False``, which
    crashed every in-file caller (they all iterate the result); return an
    empty list instead — still falsy, but safely iterable.

    :param region: AWS region of the registry
    :param repo: ECR repository name
    :return: list of imageId dicts (possibly empty)
    """
    if region is None or repo is None:
        return []
    output = get_stdout('''{AWS} ecr list-images --region {region} --repository-name {repo}'''
                        .format(AWS=AWS, region=region, repo=repo))
    json_output = json.loads(output)
    return json_output.get('imageIds', [])
def check_for_tag(image_ids=(), tag: str=None):
    """
    Check to see if the given tag is in the image_ids.

    Fix: replaced the mutable default argument (``[]``) with an immutable
    empty tuple, and collapsed the loop into ``any()``.

    :param image_ids: iterable of imageId dicts from ECR
    :param tag: tag to look for
    :return: True if any image carries exactly this tag
    """
    return any('imageTag' in image and image['imageTag'] == tag
               for image in image_ids)
def get_tags_from_image_ids(image_ids=()):
    """
    Get the tags for the given image_ids.

    Note that nulls and 'latest' are ignored.

    Fixes: replaced the mutable default argument (``[]``) with an immutable
    tuple, and replaced the quadratic ``tags = tags + [tag]`` rebuild with a
    single comprehension.

    :param image_ids: iterable of imageId dicts from ECR
    :return: list of tag strings, in input order
    """
    return [image['imageTag'] for image in image_ids
            if image.get('imageTag') not in (None, 'latest')]
def get_tags_from_all_tags(all_tags: str=None):
    """
    Convert the incoming newline-separated tag string into a list of tags.

    Fixes: the original built a JSON document by string concatenation, which
    broke (json.loads error) on tags containing quotes or backslashes, and
    crashed with AttributeError when *all_tags* was None. Splitting directly
    is equivalent for well-formed input and robust for the rest.

    :param all_tags: newline-separated tag names, e.g. "v1\\nv2\\nv3"
    :return: list of tag strings; empty list for None/empty input
    """
    if not all_tags:
        return []
    return all_tags.split('\n')
def get_tags_from_ecr(region: str=None, repo: str=None):
    """
    Fetch every usable tag for the repo from ECR (nulls/'latest' filtered out
    by get_tags_from_image_ids).

    :param region: AWS region of the registry
    :param repo: ECR repository name
    :return: list of tag strings
    """
    image_ids = get_image_ids_from_ecr(region=region, repo=repo)
    return get_tags_from_image_ids(image_ids)
def push_if_not_exist(region: str=None, registry_prefix: str=None, repo: str=None, tag: str=None):
    """
    Push the local image to the registry, unless the tag is already present
    in ECR.

    :param region: AWS region of the registry
    :param registry_prefix: registry host/path to prepend when tagging
    :param repo: repository (and local image) name
    :param tag: tag to push
    :return: False when the local image is missing; True otherwise
    """
    image_ids = get_image_ids_from_ecr(region=region, repo=repo)
    if not check_for_tag(image_ids=image_ids, tag=tag):
        # Make sure image exists, else NoOp
        image_exists = get_stdout('''{docker} images -q {repo}'''.format(docker=DOCKER,
                                                                         repo=repo))
        if not image_exists:
            return(False)
        target = '''{registry_prefix}/{repo}:{tag}'''.format(registry_prefix=registry_prefix,
                                                             repo=repo,
                                                             tag=tag)
        # Tag the repo
        run('''{docker} tag {repo} {target}'''.format(docker=DOCKER,
                                                      repo=repo,
                                                      target=target))
        # And push it to the registry
        run('''{docker} push {target}'''.format(docker=DOCKER, target=target))
    return(True)
def pull_if_not_exist(region: str=None, registry_prefix: str=None, repo: str=None, tag: str=None):
    """
    Pull the image from the registry if it doesn't exist locally.

    :param region: AWS region (unused here; kept for interface symmetry)
    :param registry_prefix: registry host/path prefix
    :param repo: local image name / ECR repository name
    :param tag: tag to pull
    :return: True
    """
    listing = get_stdout('''{docker} images {repo}:{tag}'''.format(
        docker=DOCKER, repo=repo, tag=tag))
    if repo not in listing:
        target = '''{registry_prefix}/{repo}:{tag}'''.format(
            registry_prefix=registry_prefix, repo=repo, tag=tag)
        # Fetch the fully-qualified image from the registry...
        run('''{docker} pull {target}'''.format(docker=DOCKER, target=target))
        # ...then restore the local names: bare repo, ':latest' and ':tag'.
        run('''{docker} tag {target} {repo}'''.format(
            docker=DOCKER, repo=repo, target=target))
        run('''{docker} tag {target} {repo}:latest'''.format(
            docker=DOCKER, repo=repo, target=target))
        run('''{docker} tag {target} {repo}:{tag}'''.format(
            docker=DOCKER, repo=repo, tag=tag, target=target))
    return True
def prune_repos(region: str=None, registry_prefix: str=None, repo: str=None, current_tag: str=None, all_tags: str=None):
    """
    Delete from ECR every tag of *repo* that is not in the recent set.

    :param region: AWS region of the registry
    :param registry_prefix: registry host/path prefix (unused here)
    :param repo: ECR repository name
    :param current_tag: tag that must always be kept
    :param all_tags: newline-separated tags to keep (stringified list)
    :return: True
    """
    # Tags we must keep: everything listed in all_tags plus, to be safe,
    # the current tag.
    keep = get_tags_from_all_tags(all_tags) + [current_tag]
    # Every tag currently in ECR that is not in the keep set is obsolete.
    stale = [t for t in get_tags_from_ecr(region, repo) if t not in keep]
    for stale_tag in stale:
        get_stdout('''{AWS} ecr batch-delete-image --region {region} --repository-name {repo} --image-ids imageTag={tag}'''
                   .format(AWS=AWS, region=region, repo=repo, tag=stale_tag))
    return True
def get_args(argv: list = None):
    """
    Validate and unpack the command-line arguments.

    Expected layout (argv[0] is the program name):
        command region registry_prefix repo tag [all_tags]

    :param argv: argument vector; defaults to sys.argv
    :return: (command, region, registry_prefix, repo, tag, all_tags) where
             all_tags is None when the sixth argument was not supplied and
             '' when it was the literal string "None"
    """
    if argv is None:
        argv = sys.argv
    try:
        # Use the argv parameter: the original read sys.argv directly and
        # silently ignored its argument.
        command, region, registry_prefix, repo, tag = argv[1:6]
    except ValueError:
        # Fewer than five positional arguments. The original caught
        # IndexError, set all_tags=None and fell through WITHOUT a return,
        # implicitly returning None and crashing the caller's unpacking.
        sys.exit("Whyfor you not send in args?")
    # The trailing all_tags argument is optional.
    all_tags = argv[6] if len(argv) > 6 else None
    if all_tags == "None":
        # Compare by value: the original used 'is "None"', an identity
        # check against a string literal that is not guaranteed to match.
        all_tags = ''
    return command, region, registry_prefix, repo, tag, all_tags
# Script entry: parse arguments then dispatch on the command verb.
command, region, registry_prefix, repo, tag, all_tags = get_args(sys.argv)
if command == "push":
    retval = push_if_not_exist(region=region, registry_prefix=registry_prefix, repo=repo, tag=tag)
    if not retval:
        print('FAILED')
elif command == "pull":
    retval = pull_if_not_exist(region=region, registry_prefix=registry_prefix, repo=repo, tag=tag)
    if not retval:
        print('FAILED')
elif command == "prune":
    # If an empty list is passed in for all_tags, fail violently
    if all_tags is None:
        print('FAILED')
    else:
        retval = prune_repos(region=region, registry_prefix=registry_prefix, repo=repo, current_tag=tag, all_tags=all_tags)
        if not retval:
            print('FAILED')
else:
    # Unknown command verb.
    print('FAILED')
| |
import unittest
from io import BytesIO, BufferedReader
try:
import http.client as httplib
except ImportError:
import httplib
from urllib3.response import HTTPResponse
from urllib3.exceptions import DecodeError, ResponseNotChunked
from base64 import b64decode
# A known random (i.e, not-too-compressible) payload generated with:
# "".join(random.choice(string.printable) for i in xrange(512))
# .encode("zlib").encode("base64")
# Randomness in tests == bad, and fixing a seed may not be sufficient.
ZLIB_PAYLOAD = b64decode(b"""\
eJwFweuaoQAAANDfineQhiKLUiaiCzvuTEmNNlJGiL5QhnGpZ99z8luQfe1AHoMioB+QSWHQu/L+
lzd7W5CipqYmeVTBjdgSATdg4l4Z2zhikbuF+EKn69Q0DTpdmNJz8S33odfJoVEexw/l2SS9nFdi
pis7KOwXzfSqarSo9uJYgbDGrs1VNnQpT9f8zAorhYCEZronZQF9DuDFfNK3Hecc+WHLnZLQptwk
nufw8S9I43sEwxsT71BiqedHo0QeIrFE01F/4atVFXuJs2yxIOak3bvtXjUKAA6OKnQJ/nNvDGKZ
Khe5TF36JbnKVjdcL1EUNpwrWVfQpFYJ/WWm2b74qNeSZeQv5/xBhRdOmKTJFYgO96PwrHBlsnLn
a3l0LwJsloWpMbzByU5WLbRE6X5INFqjQOtIwYz5BAlhkn+kVqJvWM5vBlfrwP42ifonM5yF4ciJ
auHVks62997mNGOsM7WXNG3P98dBHPo2NhbTvHleL0BI5dus2JY81MUOnK3SGWLH8HeWPa1t5KcW
S5moAj5HexY/g/F8TctpxwsvyZp38dXeLDjSQvEQIkF7XR3YXbeZgKk3V34KGCPOAeeuQDIgyVhV
nP4HF2uWHA==""")
class TestLegacyResponse(unittest.TestCase):
    """Exercise the httplib-compatibility accessors of HTTPResponse."""
    def test_getheaders(self):
        hdrs = {'host': 'example.com'}
        resp = HTTPResponse(headers=hdrs)
        self.assertEqual(resp.getheaders(), hdrs)
    def test_getheader(self):
        hdrs = {'host': 'example.com'}
        resp = HTTPResponse(headers=hdrs)
        self.assertEqual(resp.getheader('host'), 'example.com')
class TestResponse(unittest.TestCase):
    """Behavioral tests for urllib3.response.HTTPResponse.

    Covers: body preloading/caching, content decoding (deflate raw and
    zlib-wrapped, gzip), incremental reads, the io protocol
    (readable/fileno/close, BufferedReader, readinto), streaming with
    tell(), and chunked transfer-encoding via the mock classes defined
    below in this module.
    """
    def test_cache_content(self):
        r = HTTPResponse('foo')
        self.assertEqual(r.data, 'foo')
        self.assertEqual(r._body, 'foo')
    def test_default(self):
        r = HTTPResponse()
        self.assertEqual(r.data, None)
    def test_none(self):
        r = HTTPResponse(None)
        self.assertEqual(r.data, None)
    def test_preload(self):
        fp = BytesIO(b'foo')
        r = HTTPResponse(fp, preload_content=True)
        # preload_content=True consumes the file object at construction.
        self.assertEqual(fp.tell(), len(b'foo'))
        self.assertEqual(r.data, b'foo')
    def test_no_preload(self):
        fp = BytesIO(b'foo')
        r = HTTPResponse(fp, preload_content=False)
        # Nothing is read until .data is accessed.
        self.assertEqual(fp.tell(), 0)
        self.assertEqual(r.data, b'foo')
        self.assertEqual(fp.tell(), len(b'foo'))
    def test_decode_bad_data(self):
        fp = BytesIO(b'\x00' * 10)
        self.assertRaises(DecodeError, HTTPResponse, fp, headers={
            'content-encoding': 'deflate'
        })
    def test_reference_read(self):
        fp = BytesIO(b'foo')
        r = HTTPResponse(fp, preload_content=False)
        self.assertEqual(r.read(1), b'f')
        self.assertEqual(r.read(2), b'oo')
        self.assertEqual(r.read(), b'')
        self.assertEqual(r.read(), b'')
    def test_decode_deflate(self):
        import zlib
        data = zlib.compress(b'foo')
        fp = BytesIO(data)
        r = HTTPResponse(fp, headers={'content-encoding': 'deflate'})
        self.assertEqual(r.data, b'foo')
    def test_decode_deflate_case_insensitve(self):
        import zlib
        data = zlib.compress(b'foo')
        fp = BytesIO(data)
        r = HTTPResponse(fp, headers={'content-encoding': 'DeFlAtE'})
        self.assertEqual(r.data, b'foo')
    def test_chunked_decoding_deflate(self):
        import zlib
        data = zlib.compress(b'foo')
        fp = BytesIO(data)
        r = HTTPResponse(fp, headers={'content-encoding': 'deflate'},
                         preload_content=False)
        # First read returns b'': the decompressor buffers until it has
        # enough input to emit decoded bytes.
        self.assertEqual(r.read(3), b'')
        self.assertEqual(r.read(1), b'f')
        self.assertEqual(r.read(2), b'oo')
        self.assertEqual(r.read(), b'')
        self.assertEqual(r.read(), b'')
    def test_chunked_decoding_deflate2(self):
        import zlib
        # Raw deflate stream (no zlib header), -MAX_WBITS.
        compress = zlib.compressobj(6, zlib.DEFLATED, -zlib.MAX_WBITS)
        data = compress.compress(b'foo')
        data += compress.flush()
        fp = BytesIO(data)
        r = HTTPResponse(fp, headers={'content-encoding': 'deflate'},
                         preload_content=False)
        self.assertEqual(r.read(1), b'')
        self.assertEqual(r.read(1), b'f')
        self.assertEqual(r.read(2), b'oo')
        self.assertEqual(r.read(), b'')
        self.assertEqual(r.read(), b'')
    def test_chunked_decoding_gzip(self):
        import zlib
        # 16 + MAX_WBITS selects a gzip-wrapped stream.
        compress = zlib.compressobj(6, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
        data = compress.compress(b'foo')
        data += compress.flush()
        fp = BytesIO(data)
        r = HTTPResponse(fp, headers={'content-encoding': 'gzip'},
                         preload_content=False)
        self.assertEqual(r.read(11), b'')
        self.assertEqual(r.read(1), b'f')
        self.assertEqual(r.read(2), b'oo')
        self.assertEqual(r.read(), b'')
        self.assertEqual(r.read(), b'')
    def test_body_blob(self):
        resp = HTTPResponse(b'foo')
        self.assertEqual(resp.data, b'foo')
        self.assertTrue(resp.closed)
    def test_io(self):
        import socket
        fp = BytesIO(b'foo')
        resp = HTTPResponse(fp, preload_content=False)
        self.assertEqual(resp.closed, False)
        self.assertEqual(resp.readable(), True)
        self.assertEqual(resp.writable(), False)
        self.assertRaises(IOError, resp.fileno)
        resp.close()
        self.assertEqual(resp.closed, True)
        # Try closing with an `httplib.HTTPResponse`, because it has an
        # `isclosed` method.
        hlr = httplib.HTTPResponse(socket.socket())
        resp2 = HTTPResponse(hlr, preload_content=False)
        self.assertEqual(resp2.closed, False)
        resp2.close()
        self.assertEqual(resp2.closed, True)
        #also try when only data is present.
        resp3 = HTTPResponse('foodata')
        self.assertRaises(IOError, resp3.fileno)
        resp3._fp = 2
        # A corner case where _fp is present but doesn't have `closed`,
        # `isclosed`, or `fileno`. Unlikely, but possible.
        self.assertEqual(resp3.closed, True)
        self.assertRaises(IOError, resp3.fileno)
    def test_io_bufferedreader(self):
        fp = BytesIO(b'foo')
        resp = HTTPResponse(fp, preload_content=False)
        br = BufferedReader(resp)
        self.assertEqual(br.read(), b'foo')
        br.close()
        self.assertEqual(resp.closed, True)
        b = b'fooandahalf'
        fp = BytesIO(b)
        resp = HTTPResponse(fp, preload_content=False)
        br = BufferedReader(resp, 5)
        br.read(1)  # sets up the buffer, reading 5
        self.assertEqual(len(fp.read()), len(b) - 5)
        # This is necessary to make sure the "no bytes left" part of `readinto`
        # gets tested.
        while not br.closed:
            br.read(5)
    def test_io_readinto(self):
        # This test is necessary because in py2.6, `readinto` doesn't get called
        # in `test_io_bufferedreader` like it does for all the other python
        # versions. Probably this is because the `io` module in py2.6 is an
        # old version that has a different underlying implementation.
        fp = BytesIO(b'foo')
        resp = HTTPResponse(fp, preload_content=False)
        barr = bytearray(3)
        assert resp.readinto(barr) == 3
        assert b'foo' == barr
        # The reader should already be empty, so this should read nothing.
        assert resp.readinto(barr) == 0
        assert b'foo' == barr
    def test_streaming(self):
        fp = BytesIO(b'foo')
        resp = HTTPResponse(fp, preload_content=False)
        stream = resp.stream(2, decode_content=False)
        self.assertEqual(next(stream), b'fo')
        self.assertEqual(next(stream), b'o')
        self.assertRaises(StopIteration, next, stream)
    def test_streaming_tell(self):
        fp = BytesIO(b'foo')
        resp = HTTPResponse(fp, preload_content=False)
        stream = resp.stream(2, decode_content=False)
        position = 0
        position += len(next(stream))
        self.assertEqual(2, position)
        self.assertEqual(position, resp.tell())
        position += len(next(stream))
        self.assertEqual(3, position)
        self.assertEqual(position, resp.tell())
        self.assertRaises(StopIteration, next, stream)
    def test_gzipped_streaming(self):
        import zlib
        compress = zlib.compressobj(6, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
        data = compress.compress(b'foo')
        data += compress.flush()
        fp = BytesIO(data)
        resp = HTTPResponse(fp, headers={'content-encoding': 'gzip'},
                            preload_content=False)
        stream = resp.stream(2)
        self.assertEqual(next(stream), b'f')
        self.assertEqual(next(stream), b'oo')
        self.assertRaises(StopIteration, next, stream)
    def test_gzipped_streaming_tell(self):
        import zlib
        compress = zlib.compressobj(6, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
        uncompressed_data = b'foo'
        data = compress.compress(uncompressed_data)
        data += compress.flush()
        fp = BytesIO(data)
        resp = HTTPResponse(fp, headers={'content-encoding': 'gzip'},
                            preload_content=False)
        stream = resp.stream()
        # Read everything
        payload = next(stream)
        self.assertEqual(payload, uncompressed_data)
        # tell() reports *compressed* bytes consumed, not decoded bytes.
        self.assertEqual(len(data), resp.tell())
        self.assertRaises(StopIteration, next, stream)
    def test_deflate_streaming_tell_intermediate_point(self):
        # Ensure that ``tell()`` returns the correct number of bytes when
        # part-way through streaming compressed content.
        import zlib
        NUMBER_OF_READS = 10
        class MockCompressedDataReading(BytesIO):
            """
            A ByteIO-like reader returning ``payload`` in ``NUMBER_OF_READS``
            calls to ``read``.
            """
            def __init__(self, payload, payload_part_size):
                self.payloads = [
                    payload[i*payload_part_size:(i+1)*payload_part_size]
                    for i in range(NUMBER_OF_READS+1)]
                assert b"".join(self.payloads) == payload
            def read(self, _):
                # Amount is unused.
                if len(self.payloads) > 0:
                    return self.payloads.pop(0)
                return b""
        uncompressed_data = zlib.decompress(ZLIB_PAYLOAD)
        payload_part_size = len(ZLIB_PAYLOAD) // NUMBER_OF_READS
        fp = MockCompressedDataReading(ZLIB_PAYLOAD, payload_part_size)
        resp = HTTPResponse(fp, headers={'content-encoding': 'deflate'},
                            preload_content=False)
        stream = resp.stream()
        parts_positions = [(part, resp.tell()) for part in stream]
        end_of_stream = resp.tell()
        self.assertRaises(StopIteration, next, stream)
        parts, positions = zip(*parts_positions)
        # Check that the payload is equal to the uncompressed data
        payload = b"".join(parts)
        self.assertEqual(uncompressed_data, payload)
        # Check that the positions in the stream are correct
        expected = [(i+1)*payload_part_size for i in range(NUMBER_OF_READS)]
        self.assertEqual(expected, list(positions))
        # Check that the end of the stream is in the correct place
        self.assertEqual(len(ZLIB_PAYLOAD), end_of_stream)
    def test_deflate_streaming(self):
        import zlib
        data = zlib.compress(b'foo')
        fp = BytesIO(data)
        resp = HTTPResponse(fp, headers={'content-encoding': 'deflate'},
                            preload_content=False)
        stream = resp.stream(2)
        self.assertEqual(next(stream), b'f')
        self.assertEqual(next(stream), b'oo')
        self.assertRaises(StopIteration, next, stream)
    def test_deflate2_streaming(self):
        import zlib
        compress = zlib.compressobj(6, zlib.DEFLATED, -zlib.MAX_WBITS)
        data = compress.compress(b'foo')
        data += compress.flush()
        fp = BytesIO(data)
        resp = HTTPResponse(fp, headers={'content-encoding': 'deflate'},
                            preload_content=False)
        stream = resp.stream(2)
        self.assertEqual(next(stream), b'f')
        self.assertEqual(next(stream), b'oo')
        self.assertRaises(StopIteration, next, stream)
    def test_empty_stream(self):
        fp = BytesIO(b'')
        resp = HTTPResponse(fp, preload_content=False)
        stream = resp.stream(2, decode_content=False)
        self.assertRaises(StopIteration, next, stream)
    def test_mock_httpresponse_stream(self):
        # Mock out a HTTP Request that does enough to make it through urllib3's
        # read() and close() calls, and also exhausts and underlying file
        # object.
        class MockHTTPRequest(object):
            # NOTE(review): 'self' here resolves to the *enclosing test
            # method's* self (class bodies see enclosing function locals),
            # so this sets .fp on the TestCase instance, not on the mock.
            # Probably intended to be a plain class attribute 'fp = None'.
            self.fp = None
            def read(self, amt):
                data = self.fp.read(amt)
                if not data:
                    self.fp = None
                return data
            def close(self):
                self.fp = None
        bio = BytesIO(b'foo')
        fp = MockHTTPRequest()
        fp.fp = bio
        resp = HTTPResponse(fp, preload_content=False)
        stream = resp.stream(2)
        self.assertEqual(next(stream), b'fo')
        self.assertEqual(next(stream), b'o')
        self.assertRaises(StopIteration, next, stream)
    def test_mock_transfer_encoding_chunked(self):
        stream = [b"fo", b"o", b"bar"]
        fp = MockChunkedEncodingResponse(stream)
        r = httplib.HTTPResponse(MockSock)
        r.fp = fp
        resp = HTTPResponse(r, preload_content=False, headers={'transfer-encoding': 'chunked'})
        i = 0
        for c in resp.stream():
            self.assertEqual(c, stream[i])
            i += 1
    def test_mock_gzipped_transfer_encoding_chunked_decoded(self):
        """Show that we can decode the gizpped and chunked body."""
        def stream():
            # Set up a generator to chunk the gzipped body
            import zlib
            compress = zlib.compressobj(6, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
            data = compress.compress(b'foobar')
            data += compress.flush()
            for i in range(0, len(data), 2):
                yield data[i:i+2]
        fp = MockChunkedEncodingResponse(list(stream()))
        r = httplib.HTTPResponse(MockSock)
        r.fp = fp
        headers = {'transfer-encoding': 'chunked', 'content-encoding': 'gzip'}
        resp = HTTPResponse(r, preload_content=False, headers=headers)
        data = b''
        for c in resp.stream(decode_content=True):
            data += c
        self.assertEqual(b'foobar', data)
    def test_mock_transfer_encoding_chunked_custom_read(self):
        stream = [b"foooo", b"bbbbaaaaar"]
        fp = MockChunkedEncodingResponse(stream)
        r = httplib.HTTPResponse(MockSock)
        r.fp = fp
        r.chunked = True
        r.chunk_left = None
        resp = HTTPResponse(r, preload_content=False, headers={'transfer-encoding': 'chunked'})
        expected_response = [b'fo', b'oo', b'o', b'bb', b'bb', b'aa', b'aa', b'ar']
        response = list(resp.read_chunked(2))
        # py2.6's unittest lacks assertListEqual; fall back to item-wise checks.
        if getattr(self, "assertListEqual", False):
            self.assertListEqual(expected_response, response)
        else:
            for index, item in enumerate(response):
                v = expected_response[index]
                self.assertEqual(item, v)
    def test_mock_transfer_encoding_chunked_unlmtd_read(self):
        stream = [b"foooo", b"bbbbaaaaar"]
        fp = MockChunkedEncodingResponse(stream)
        r = httplib.HTTPResponse(MockSock)
        r.fp = fp
        r.chunked = True
        r.chunk_left = None
        resp = HTTPResponse(r, preload_content=False, headers={'transfer-encoding': 'chunked'})
        if getattr(self, "assertListEqual", False):
            self.assertListEqual(stream, list(resp.read_chunked()))
        else:
            for index, item in enumerate(resp.read_chunked()):
                v = stream[index]
                self.assertEqual(item, v)
    def test_read_not_chunked_response_as_chunks(self):
        fp = BytesIO(b'foo')
        resp = HTTPResponse(fp, preload_content=False)
        r = resp.read_chunked()
        self.assertRaises(ResponseNotChunked, next, r)
    def test_invalid_chunks(self):
        stream = [b"foooo", b"bbbbaaaaar"]
        fp = MockChunkedInvalidEncoding(stream)
        r = httplib.HTTPResponse(MockSock)
        r.fp = fp
        r.chunked = True
        r.chunk_left = None
        resp = HTTPResponse(r, preload_content=False, headers={'transfer-encoding': 'chunked'})
        self.assertRaises(httplib.IncompleteRead, next, resp.read_chunked())
    def test_chunked_response_without_crlf_on_end(self):
        stream = [b"foo", b"bar", b"baz"]
        fp = MockChunkedEncodingWithoutCRLFOnEnd(stream)
        r = httplib.HTTPResponse(MockSock)
        r.fp = fp
        r.chunked = True
        r.chunk_left = None
        resp = HTTPResponse(r, preload_content=False, headers={'transfer-encoding': 'chunked'})
        if getattr(self, "assertListEqual", False):
            self.assertListEqual(stream, list(resp.stream()))
        else:
            for index, item in enumerate(resp.stream()):
                v = stream[index]
                self.assertEqual(item, v)
    def test_chunked_response_with_extensions(self):
        stream = [b"foo", b"bar"]
        fp = MockChunkedEncodingWithExtensions(stream)
        r = httplib.HTTPResponse(MockSock)
        r.fp = fp
        r.chunked = True
        r.chunk_left = None
        resp = HTTPResponse(r, preload_content=False, headers={'transfer-encoding': 'chunked'})
        if getattr(self, "assertListEqual", False):
            self.assertListEqual(stream, list(resp.stream()))
        else:
            for index, item in enumerate(resp.stream()):
                v = stream[index]
                self.assertEqual(item, v)
    def test_get_case_insensitive_headers(self):
        headers = {'host': 'example.com'}
        r = HTTPResponse(headers=headers)
        self.assertEqual(r.headers.get('host'), 'example.com')
        self.assertEqual(r.headers.get('Host'), 'example.com')
class MockChunkedEncodingResponse(object):
    """File-like object that serves *content* as an HTTP chunked-encoded
    byte stream (each chunk prefixed with its hex length and terminated
    by CRLF), for feeding into httplib.HTTPResponse.fp in tests."""
    def __init__(self, content):
        """
        content: collection of str, each str is a chunk in response
        """
        self.content = content
        self.index = 0  # This class iterates over self.content.
        self.closed = False
        # Encoded bytes of the chunk currently being drained by read().
        self.cur_chunk = b''
        self.chunks_exhausted = False
    @staticmethod
    def _encode_chunk(chunk):
        # In the general case, we can't decode the chunk to unicode
        length = '%X\r\n' % len(chunk)
        return length.encode() + chunk + b'\r\n'
    def _pop_new_chunk(self):
        # Encode and return the next chunk of content; after the content
        # runs out, return the terminating zero-length chunk once, then b"".
        if self.chunks_exhausted:
            return b""
        try:
            chunk = self.content[self.index]
        except IndexError:
            # End of content: emit the final empty chunk ('0\r\n\r\n').
            chunk = b''
            self.chunks_exhausted = True
        else:
            self.index += 1
        chunk = self._encode_chunk(chunk)
        if not isinstance(chunk, bytes):
            # Subclasses may return str from _encode_chunk.
            chunk = chunk.encode()
        return chunk
    def pop_current_chunk(self, amt=-1, till_crlf=False):
        """Return bytes from the encoded stream: up to *amt* bytes,
        everything buffered (amt <= -1), or through the next CRLF
        (till_crlf=True). The two modes are mutually exclusive."""
        if amt > 0 and till_crlf:
            raise ValueError("Can't specify amt and till_crlf.")
        if len(self.cur_chunk) <= 0:
            self.cur_chunk = self._pop_new_chunk()
        if till_crlf:
            try:
                i = self.cur_chunk.index(b"\r\n")
            except ValueError:
                # No CRLF in current chunk -- probably caused by encoder.
                self.cur_chunk = b""
                return b""
            else:
                chunk_part = self.cur_chunk[:i+2]
                self.cur_chunk = self.cur_chunk[i+2:]
                return chunk_part
        elif amt <= -1:
            chunk_part = self.cur_chunk
            self.cur_chunk = b''
            return chunk_part
        else:
            try:
                chunk_part = self.cur_chunk[:amt]
            except IndexError:
                chunk_part = self.cur_chunk
                self.cur_chunk = b''
            else:
                self.cur_chunk = self.cur_chunk[amt:]
            return chunk_part
    def readline(self):
        # httplib uses readline() to fetch each chunk-size line.
        return self.pop_current_chunk(till_crlf=True)
    def read(self, amt=-1):
        return self.pop_current_chunk(amt)
    def flush(self):
        # Python 3 wants this method.
        pass
    def close(self):
        self.closed = True
class MockChunkedInvalidEncoding(MockChunkedEncodingResponse):
    """Chunked stream whose size prefix is not valid hexadecimal."""
    def _encode_chunk(self, chunk):
        # 'ZZZ' can never be parsed as a hex chunk length.
        return 'ZZZ\r\n{0}\r\n'.format(chunk.decode())
class MockChunkedEncodingWithoutCRLFOnEnd(MockChunkedEncodingResponse):
    """Chunked encoder that omits the trailing CRLF after chunk data."""
    def _encode_chunk(self, chunk):
        body = chunk.decode()
        # Only non-empty chunks keep their data-terminating CRLF.
        tail = "\r\n" if len(chunk) > 0 else ""
        return '{0:X}\r\n{1}{2}'.format(len(chunk), body, tail)
class MockChunkedEncodingWithExtensions(MockChunkedEncodingResponse):
    """Chunked encoder that appends a chunk extension to the size line."""
    def _encode_chunk(self, chunk):
        return '{0:X};asd=qwe\r\n{1}\r\n'.format(len(chunk), chunk.decode())
class MockSock(object):
    """Minimal socket stand-in: makefile() yields no file object."""
    @classmethod
    def makefile(cls, *args, **kwargs):
        # httplib only needs makefile() to exist; None suffices here.
        return None
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
# -*- coding: utf-8 -*-
#
# Copyright 2017 Pascual Martinez-Gomez
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from collections import OrderedDict
import logging
import re
import sys
from nltk import Tree
from normalization import denormalize_token
from tree_tools import tree_or_string, tree_contains
def find_final_subgoal_line_index(coq_output_lines):
    """Return the index of the last line ending in 'subgoal', or None."""
    last_index = None
    for position, text in enumerate(coq_output_lines):
        if text.endswith('subgoal'):
            last_index = position
    return last_index
def find_final_conclusion_sep_line_index(coq_output_lines):
    """Return the index of the last '===...===' separator line, or None."""
    result = None
    for position, text in enumerate(coq_output_lines):
        if text.startswith('===') and text.endswith('==='):
            result = position
    return result
def get_premise_lines(coq_output_lines):
    """Collect the premise lines sitting above the final conclusion separator.

    Premises are read upwards from the separator until a blank line is hit
    (line 0 is never included; it is the subgoal-count header).
    """
    sep_index = find_final_conclusion_sep_line_index(coq_output_lines)
    # Falsy index (None or 0) means no premises can be collected.
    if not sep_index:
        return []
    premises = []
    for text in coq_output_lines[sep_index - 1:0:-1]:
        if not text:
            break
        premises.append(text)
    return premises
def get_conclusion_line(coq_output_lines):
    """Return the line directly below the final conclusion separator.

    :param coq_output_lines: list of raw Coq output lines.
    :return: the conclusion line, or None when there is no separator or no
             line after it.
    """
    sep_index = find_final_conclusion_sep_line_index(coq_output_lines)
    # Compare explicitly against None: the original 'if not index' also
    # rejected a separator at index 0 and dropped a valid conclusion.
    if sep_index is None:
        return None
    # Guard against a separator on the very last line (IndexError before).
    if sep_index + 1 >= len(coq_output_lines):
        return None
    return coq_output_lines[sep_index + 1]
def get_premises_that_match_conclusion_args_(premises, conclusion):
    """
    Returns premises where the predicates have at least one argument
    in common with the conclusion.
    This function was used for EACL 2017.
    """
    # Token 0 of the conclusion is the predicate; the rest are arguments.
    conclusion_tokens = [token.strip(')(') for token in conclusion.split()]
    conclusion_args = set(conclusion_tokens[1:])
    matching = []
    for premise in premises:
        # Premise lines look like 'H : pred arg1 arg2 ...'; skip 'H :'.
        premise_tokens = [token.strip(')(') for token in premise.split()[2:]]
        premise_args = set(premise_tokens[1:])
        logging.debug('Conclusion args: ' + str(conclusion_args) +
                      '\nPremise args: ' + str(premise_args))
        if premise_args & conclusion_args:
            matching.append(premise)
    return matching
def get_premises_that_match_conclusion_args(premises, conclusion):
    """
    Returns premises where the predicates have at least one argument
    in common with the conclusion.

    Unlike the EACL-2017 variant above, arguments are compared
    structurally: a premise matches when its argument tree contains the
    conclusion's argument tree (see tree_contains in tree_tools).
    """
    candidate_premises = []
    # Convert anonymous variables of the form ?345 into ?x345 so they
    # parse as ordinary identifiers.
    conclusion = re.sub(r'\?([0-9]+)', r'?x\1', conclusion)
    conclusion_args = get_tree_pred_args(conclusion, is_conclusion=True)
    if conclusion_args is None:
        # The conclusion could not be parsed; nothing can match.
        return candidate_premises
    for premise_line in premises:
        # Convert anonymous variables of the form ?345 into ?x345.
        premise_line = re.sub(r'\?([0-9]+)', r'?x\1', premise_line)
        premise_args = get_tree_pred_args(premise_line)
        logging.debug('Conclusion args: ' + str(conclusion_args) +
                      '\nPremise args: ' + str(premise_args))
        if tree_contains(premise_args, conclusion_args):
            candidate_premises.append(premise_line)
    return candidate_premises
def make_axioms_from_premises_and_conclusion(premises, conclusion, coq_output_lines=None):
    """Build axiom candidates linking premise predicates to the conclusion.

    :param premises: raw premise lines ('H : _pred args...').
    :param conclusion: raw conclusion line ('_pred args...').
    :param coq_output_lines: full Coq output, used only for failure logging.
    :return: (axioms, failure_log) -- failure_log is populated only when no
             axiom could be built.
    NOTE(review): make_axioms_from_preds is defined elsewhere in this
    module (outside this view).
    """
    matching_premises = get_premises_that_match_conclusion_args(
        premises, conclusion)
    # Token 2 of a premise line 'H : _pred args...' is the predicate.
    premise_preds = [premise.split()[2] for premise in matching_premises]
    conclusion_pred = conclusion.split()[0]
    pred_args = get_predicate_arguments(premises, conclusion)
    axioms = make_axioms_from_preds(premise_preds, conclusion_pred, pred_args)
    # print('Has axioms: {0}'.format(axioms), file=sys.stderr)
    failure_log = OrderedDict()
    if not axioms:
        failure_log = make_failure_log(
            conclusion_pred, premise_preds, conclusion, premises, coq_output_lines)
        # print(json.dumps(failure_log), file=sys.stderr)
    return axioms, failure_log
def analyze_coq_output(output_lines):
    """
    Returns a failure log with information about the unproved subgoals.

    :param output_lines: raw Coq output split into lines.
    :return: OrderedDict (or a plain dict in the early-exit case)
             describing why the proof failed.
    """
    failure_log = OrderedDict()
    premise_lines = get_premise_lines(output_lines)
    conclusion = get_conclusion_line(output_lines)
    if not premise_lines or not conclusion:
        # No recognizable subgoal structure: report only the error class.
        failure_log = {"type error": has_type_error(output_lines),
                       "open formula": has_open_formula(output_lines)}
        return failure_log
    matching_premises = get_premises_that_match_conclusion_args(
        premise_lines, conclusion)
    # Token 2 of a premise line 'H : _pred args...' is the predicate.
    premise_preds = [premise.split()[2] for premise in matching_premises]
    conclusion_pred = conclusion.split()[0]
    failure_log = make_failure_log(
        conclusion_pred, premise_preds, conclusion, premise_lines, output_lines)
    return failure_log
def make_failure_log(conclusion_pred, premise_preds, conclusion, premises,
                     coq_output_lines=None):
    """
    Produces a dictionary with the following structure:
      {"unproved sub-goal" : "sub-goal_predicate",
       "matching premises" : ["premise1", "premise2", ...],
       "raw sub-goal" : "conclusion",
       "raw premises" : ["raw premise1", "raw premise2", ...]}
    Raw sub-goal and raw premises are the coq lines with the premise
    internal name and its predicates. E.g.
      H : premise (Acc x1)
    Note that this function is not capable of returning all unproved
    sub-goals in coq's stack. We only return the top unproved sub-goal.
    """
    failure_log = OrderedDict()
    # De-normalize the predicates back to their surface forms.
    conclusion_base = denormalize_token(conclusion_pred)
    # failure_log["unproved sub-goal"] = conclusion_base
    premises_base = [denormalize_token(p) for p in premise_preds]
    # failure_log["matching premises"] = premises_base
    # failure_log["raw sub-goal"] = conclusion
    # failure_log["raw premises"] = premises
    # Collect every internal (underscore-prefixed) predicate among premises.
    premise_preds = []
    for p in premises:
        try:
            pred = p.split()[2]
        except:
            # Malformed premise line (fewer than three tokens); skip it.
            continue
        if pred.startswith('_'):
            premise_preds.append(denormalize_token(pred))
    failure_log["all_premises"] = premise_preds
    failure_log["other_sub-goals"] = get_subgoals_from_coq_output(
        coq_output_lines, premises)
    # The top (current) sub-goal is inserted first with index 1.
    failure_log["other_sub-goals"].insert(0, {
        'subgoal': conclusion_base,
        'index': 1,
        'raw_subgoal': conclusion,
        # NOTE(review): both keys hold the de-normalized predicate names;
        # 'matching_raw_premises' probably ought to hold the raw premise
        # lines instead -- confirm against consumers of this log.
        'matching_premises' : premises_base,
        'matching_raw_premises' : premises_base})
    failure_log["type_error"] = has_type_error(coq_output_lines)
    failure_log["open_formula"] = has_open_formula(coq_output_lines)
    return failure_log
def has_type_error(coq_output_lines):
    """Return 'yes' if Coq reported a type mismatch, else 'no'."""
    mismatch = any('has type' in line and
                   'while it is expected to have type' in line
                   for line in coq_output_lines)
    return 'yes' if mismatch else 'no'
def has_open_formula(coq_output_lines):
    """Return 'yes' if the output indicates an open (unapplied) formula."""
    for text in coq_output_lines:
        product_error = (
            'The type of this term is a product while it is expected to be'
            in text)
        if product_error or '(fun F' in text:
            return 'yes'
    return 'no'
def get_subgoals_from_coq_output(coq_output_lines, premises):
    """
    When the proving is halted due to unprovable sub-goals,
    Coq produces an output similar to this:
      2 subgoals
      H1 : True
      H4 : True
      x1 : Event
      H6 : True
      H3 : _play x1
      H : _two (Subj x1)
      H2 : _man (Subj x1)
      H0 : _table (Acc x1)
      H5 : _tennis (Acc x1)
      ============================
       _ping (Acc x1)
      subgoal 2 is:
       _pong (Acc x1)
    This function returns the remaining sub-goals ("_pong" in this example).
    """
    subgoals = []
    # subgoal_index > 0 means the previous line announced that the next
    # non-blank line holds the body of sub-goal number subgoal_index.
    subgoal_index = -1
    for line in coq_output_lines:
        if line.strip() == '':
            continue
        line_tokens = line.split()
        if subgoal_index > 0:
            subgoal_line = line
            subgoal_tokens = subgoal_line.split()
            # NOTE(review): subgoal_pred is computed but never used.
            subgoal_pred = subgoal_tokens[0]
            if subgoal_index in [s['index'] for s in subgoals]:
                # This sub-goal has already appeared and is recorded.
                subgoal_index = -1
                continue
            subgoal = {
                'subgoal': denormalize_token(line_tokens[0]),
                'index': subgoal_index,
                'raw_subgoal': subgoal_line}
            matching_premises = get_premises_that_match_conclusion_args(
                premises, subgoal_line)
            subgoal['matching_raw_premises'] = matching_premises
            premise_preds = [
                denormalize_token(premise.split()[2]) for premise in matching_premises]
            subgoal['matching_premises'] = premise_preds
            subgoals.append(subgoal)
            subgoal_index = -1
        # A line of the form 'subgoal N is:' announces sub-goal N on the
        # following non-blank line.
        if len(line_tokens) >= 3 and line_tokens[0] == 'subgoal' and line_tokens[2] == 'is:':
            subgoal_index = int(line_tokens[1])
    return subgoals
def parse_coq_line(coq_line):
    """Parse a Coq line into a tree (or string); None when malformed."""
    try:
        return tree_or_string('(' + coq_line + ')')
    except ValueError:
        return None
def get_tree_pred_args(line, is_conclusion=False):
    """
    Given the string representation of a premise, where each premise is:
      pX : predicate1 (arg1 arg2 arg3)
      pY : predicate2 arg1
    or the conclusion, which is of the form:
      predicate3 (arg2 arg4)
    returns a tree or a string with the arguments of the predicate.

    Returns None when the line cannot be parsed or has no arguments.
    """
    tree_args = None
    if not is_conclusion:
        # Premise lines carry a 'pX :' prefix; strip the first two tokens.
        tree_args = parse_coq_line(' '.join(line.split()[2:]))
    else:
        tree_args = parse_coq_line(line)
    if tree_args is None or len(tree_args) < 1:
        return None
    # The parsed tree is (predicate args...); element 0 is the argument
    # subtree (or string) following the predicate.
    return tree_args[0]
def get_predicate_arguments(premises, conclusion):
    """
    Given the string representations of the premises, where each premises is:
      pX : predicate1 arg1 arg2 arg3
    and the conclusion, which is of the form:
      predicate3 arg2 arg4
    returns a dictionary where the key is a predicate, and the value
    is a list of argument names.
    If the same predicate is found with different arguments, then it is
    labeled as a conflicting predicate and removed from the output.
    Conflicting predicates are typically higher-order predicates, such
    as "Prog".
    """
    pred_args = {}
    pred_trees = []
    for premise in premises:
        try:
            # Strip the 'pX :' prefix, then parse '(predicate args...)'.
            pred_trees.append(
                Tree.fromstring('(' + ' '.join(premise.split()[2:]) + ')'))
        except ValueError:
            # Unparseable premise lines are silently ignored.
            continue
    try:
        conclusion_tree = Tree.fromstring('(' + conclusion + ')')
    except ValueError:
        # Without a parseable conclusion, return what we have (empty).
        return pred_args
    pred_trees.append(conclusion_tree)
    # Flatten each tree into [predicate, arg1, arg2, ...].
    pred_args_list = []
    for t in pred_trees:
        pred = t.label()
        args = t.leaves()
        pred_args_list.append([pred] + args)
    # A predicate observed with two different argument lists conflicts
    # and is dropped (typically higher-order predicates such as "Prog").
    conflicting_predicates = set()
    for pa in pred_args_list:
        pred = pa[0]
        args = pa[1:]
        if pred in pred_args and pred_args[pred] != args:
            conflicting_predicates.add(pred)
        pred_args[pred] = args
    logging.debug('Conflicting predicates: ' + str(conflicting_predicates))
    for conf_pred in conflicting_predicates:
        del pred_args[conf_pred]
    return pred_args
| |
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import locale
import random
import StringIO
import time
import threading
import uuid
import unittest
from nose import SkipTest
from ConfigParser import ConfigParser
from test import get_config
from test.functional.swift_test_client import Account, Connection, File, \
ResponseError
from swift.common.constraints import MAX_FILE_SIZE, MAX_META_NAME_LENGTH, \
MAX_META_VALUE_LENGTH, MAX_META_COUNT, MAX_META_OVERALL_SIZE, \
MAX_OBJECT_NAME_LENGTH, CONTAINER_LISTING_LIMIT, ACCOUNT_LISTING_LIMIT, \
MAX_ACCOUNT_NAME_LENGTH, MAX_CONTAINER_NAME_LENGTH
# Swift's compiled-in constraint defaults; applied when /etc/swift/swift.conf
# exists but the test config does not override a given key.
default_constraints = dict((
    ('max_file_size', MAX_FILE_SIZE),
    ('max_meta_name_length', MAX_META_NAME_LENGTH),
    ('max_meta_value_length', MAX_META_VALUE_LENGTH),
    ('max_meta_count', MAX_META_COUNT),
    ('max_meta_overall_size', MAX_META_OVERALL_SIZE),
    ('max_object_name_length', MAX_OBJECT_NAME_LENGTH),
    ('container_listing_limit', CONTAINER_LISTING_LIMIT),
    ('account_listing_limit', ACCOUNT_LISTING_LIMIT),
    ('max_account_name_length', MAX_ACCOUNT_NAME_LENGTH),
    ('max_container_name_length', MAX_CONTAINER_NAME_LENGTH)))
constraints_conf = ConfigParser()
# ConfigParser.read returns the list of files successfully read; an empty
# list (falsy) means swift.conf is absent on this host.
conf_exists = constraints_conf.read('/etc/swift/swift.conf')
# Constraints are set first from the test config, then from
# /etc/swift/swift.conf if it exists. If swift.conf doesn't exist,
# then limit test coverage. This allows SAIO tests to work fine but
# requires remote funtional testing to know something about the cluster
# that is being tested.
config = get_config('func_test')
for k in default_constraints:
    if k in config:
        # prefer what's in test.conf
        config[k] = int(config[k])
    elif conf_exists:
        # swift.conf exists, so use what's defined there (or swift defaults)
        # This normally happens when the test is running locally to the cluster
        # as in a SAIO.
        config[k] = default_constraints[k]
    else:
        # .functests don't know what the constraints of the tested cluster are,
        # so the tests can't reliably pass or fail. Therefore, skip those
        # tests.
        config[k] = '%s constraint is not defined' % k
def load_constraint(name):
    """Return the integer constraint *name* from the test config.

    When the stored value is not an int, the cluster's constraints are
    unknown (see the module-level config loop), so the calling test is
    skipped with the stored placeholder message.
    """
    value = config[name]
    if isinstance(value, int):
        return value
    raise SkipTest(value)
# Honor an optional 'collate' locale from the test config (default 'C') so
# listing-order assertions (locale.strcoll) match the tested cluster.
locale.setlocale(locale.LC_COLLATE, config.get('collate', 'C'))
def chunks(s, length=3):
    """Yield successive slices of *s*, each at most *length* items long."""
    for start in range(0, len(s), length):
        yield s[start:start + length]
def timeout(seconds, method, *args, **kwargs):
    """Run method(*args, **kwargs) in a worker thread, bounded by *seconds*.

    Returns False when the call finishes within the deadline, True when it
    is still running afterwards (the worker is then forcibly stopped).
    Any exception raised inside the call is re-raised in the caller.
    """

    class TimeoutThread(threading.Thread):

        def __init__(self, method, *args, **kwargs):
            threading.Thread.__init__(self)
            self.method = method
            self.args = args
            self.kwargs = kwargs
            self.exception = None

        def run(self):
            try:
                self.method(*self.args, **self.kwargs)
            except Exception as e:
                # 'as' form works on Python 2.6+ and 3.x
                # (the old 'except Exception, e' is 2.x-only syntax).
                self.exception = e

    t = TimeoutThread(method, *args, **kwargs)
    t.start()
    t.join(seconds)

    if t.exception:
        raise t.exception

    if t.is_alive():  # is_alive(): isAlive() is the deprecated alias
        # HACK: relies on a private CPython API; there is no supported way
        # to kill a thread, so this is best-effort only.
        t._Thread__stop()
        return True
    return False
class Utils:
    """Random-name generators for containers and objects."""

    @classmethod
    def create_ascii_name(cls, length=None):
        """Return a random ASCII (hex) name.

        *length*, when given, is now honored (it was previously silently
        ignored, unlike in create_utf8_name); the default behavior -- a
        32-char uuid4 hex string -- is unchanged.
        """
        name = uuid.uuid4().hex
        if length is None:
            return name
        length = int(length)
        # Draw more entropy when a name longer than one uuid is requested.
        while len(name) < length:
            name += uuid.uuid4().hex
        return name[:length]

    @classmethod
    def create_utf8_name(cls, length=None):
        """Return a random UTF-8-encoded name of *length* characters
        (default 15), drawn from a fixed set of multi-byte code points."""
        if length is None:
            length = 15
        else:
            length = int(length)
        utf8_chars = u'\uF10F\uD20D\uB30B\u9409\u8508\u5605\u3703\u1801'\
                     u'\u0900\uF110\uD20E\uB30C\u940A\u8509\u5606\u3704'\
                     u'\u1802\u0901\uF111\uD20F\uB30D\u940B\u850A\u5607'\
                     u'\u3705\u1803\u0902\uF112\uD210\uB30E\u940C\u850B'\
                     u'\u5608\u3706\u1804\u0903\u03A9\u2603'
        return ''.join([random.choice(utf8_chars)
                        for x in xrange(length)]).encode('utf-8')

    # default generator; Base2 swaps this to create_utf8_name per-test
    create_name = create_ascii_name
class Base(unittest.TestCase):
    """Common base for the functional tests.

    Runs the class-level environment setUp exactly once per test class,
    and provides helpers asserting on the last HTTP response seen by the
    shared connection.
    """

    def setUp(self):
        # Lazily run the env's one-shot setUp the first time any test of
        # this class runs; subclasses define `env` and `set_up = False`.
        cls = type(self)
        if not cls.set_up:
            cls.env.setUp()
            cls.set_up = True

    def assert_body(self, body):
        """Assert the last response body equals *body*."""
        response_body = self.env.conn.response.read()
        # assertTrue: the assert_ alias is deprecated (removed in Py3.12)
        self.assertTrue(response_body == body,
                        'Body returned: %s' % (response_body))

    def assert_status(self, status_or_statuses):
        """Assert the last response status equals, or is contained in,
        *status_or_statuses* (an int or an iterable of ints)."""
        self.assertTrue(
            self.env.conn.response.status == status_or_statuses or
            (hasattr(status_or_statuses, '__iter__') and
             self.env.conn.response.status in status_or_statuses),
            'Status returned: %d Expected: %s' %
            (self.env.conn.response.status, status_or_statuses))
class Base2(object):
    """Mixin that reruns a test class with UTF-8 names.

    Swaps Utils.create_name to the UTF-8 generator for the duration of
    each test, restoring the ASCII generator afterwards.
    """

    def setUp(self):
        Utils.create_name = Utils.create_utf8_name
        super(Base2, self).setUp()

    def tearDown(self):
        # Always restore the default so later classes see ASCII names.
        Utils.create_name = Utils.create_ascii_name
class TestAccountEnv:
    """Shared env: an authenticated account holding ten fresh containers."""

    @classmethod
    def setUp(cls):
        cls.conn = Connection(config)
        cls.conn.authenticate()
        cls.account = Account(cls.conn, config.get('account',
                                                   config['username']))
        # Start from a clean slate: remove any leftover containers.
        cls.account.delete_containers()
        cls.containers = []
        for i in range(10):
            cont = cls.account.container(Utils.create_name())
            if not cont.create():
                raise ResponseError(cls.conn.response)
            cls.containers.append(cont)
class TestAccountDev(Base):
    """Placeholder for dev-only account tests (ASCII names)."""
    env = TestAccountEnv
    set_up = False
class TestAccountDevUTF8(Base2, TestAccountDev):
    """TestAccountDev rerun with UTF-8 names via the Base2 mixin."""
    set_up = False
class TestAccount(Base):
    """Functional tests for account-level GET/HEAD/PUT behavior."""
    env = TestAccountEnv
    set_up = False

    def testNoAuthToken(self):
        """Requests without a token are rejected (401, or 412 pre-auth)."""
        self.assertRaises(ResponseError, self.env.account.info,
                          cfg={'no_auth_token': True})
        self.assert_status([401, 412])
        self.assertRaises(ResponseError, self.env.account.containers,
                          cfg={'no_auth_token': True})
        self.assert_status([401, 412])

    def testInvalidUTF8Path(self):
        # Reversing the encoded bytes yields invalid UTF-8.
        invalid_utf8 = Utils.create_utf8_name()[::-1]
        container = self.env.account.container(invalid_utf8)
        self.assert_(not container.create(cfg={'no_path_quote': True}))
        self.assert_status(412)
        self.assert_body('Invalid UTF8 or contains NULL')

    def testVersionOnlyPath(self):
        """A URL containing only the API version part is a Bad URL."""
        self.env.account.conn.make_request('PUT',
                                           cfg={'version_only_path': True})
        self.assert_status(412)
        self.assert_body('Bad URL')

    def testInvalidPath(self):
        was_url = self.env.account.conn.storage_url
        self.env.account.conn.storage_url = "/%s" % was_url
        self.env.account.conn.make_request('GET')
        try:
            self.assert_status(404)
        finally:
            # Restore the URL even if the assertion fails.
            self.env.account.conn.storage_url = was_url

    def testPUT(self):
        """PUT on the account itself is not allowed for plain users."""
        self.env.account.conn.make_request('PUT')
        self.assert_status([403, 405])

    def testAccountHead(self):
        # Retry a few times: container counts are eventually consistent.
        try_count = 0
        while try_count < 5:
            try_count += 1
            info = self.env.account.info()
            for field in ['object_count', 'container_count', 'bytes_used']:
                self.assert_(info[field] >= 0)
            if info['container_count'] == len(self.env.containers):
                break
            if try_count < 5:
                time.sleep(1)
        self.assertEquals(info['container_count'], len(self.env.containers))
        self.assert_status(204)

    def testContainerSerializedInfo(self):
        """json/xml account listings report sane counts, bytes and
        the matching content-type header."""
        container_info = {}
        for container in self.env.containers:
            info = {'bytes': 0}
            info['count'] = random.randint(10, 30)
            for i in range(info['count']):
                file = container.file(Utils.create_name())
                bytes = random.randint(1, 32768)
                file.write_random(bytes)
                info['bytes'] += bytes
            container_info[container.name] = info
        for format in ['json', 'xml']:
            for a in self.env.account.containers(parms={'format': format}):
                self.assert_(a['count'] >= 0)
                self.assert_(a['bytes'] >= 0)
            headers = dict(self.env.conn.response.getheaders())
            if format == 'json':
                self.assertEquals(headers['content-type'],
                                  'application/json; charset=utf-8')
            elif format == 'xml':
                self.assertEquals(headers['content-type'],
                                  'application/xml; charset=utf-8')

    def testListingLimit(self):
        limit = load_constraint('account_listing_limit')
        for l in (1, 100, limit / 2, limit - 1, limit, limit + 1, limit * 2):
            p = {'limit': l}
            if l <= limit:
                self.assert_(len(self.env.account.containers(parms=p)) <= l)
                self.assert_status(200)
            else:
                # Over-limit requests must be rejected, not truncated.
                self.assertRaises(ResponseError,
                                  self.env.account.containers, parms=p)
                self.assert_status(412)

    def testContainerListing(self):
        a = sorted([c.name for c in self.env.containers])
        for format in [None, 'json', 'xml']:
            b = self.env.account.containers(parms={'format': format})
            if isinstance(b[0], dict):
                # json/xml listings return dicts; normalize to names.
                b = [x['name'] for x in b]
            self.assertEquals(a, b)

    def testInvalidAuthToken(self):
        hdrs = {'X-Auth-Token': 'bogus_auth_token'}
        self.assertRaises(ResponseError, self.env.account.info, hdrs=hdrs)
        self.assert_status(401)

    def testLastContainerMarker(self):
        """A marker equal to the last item yields an empty listing."""
        for format in [None, 'json', 'xml']:
            containers = self.env.account.containers({'format': format})
            self.assertEquals(len(containers), len(self.env.containers))
            self.assert_status(200)
            containers = self.env.account.containers(
                parms={'format': format, 'marker': containers[-1]})
            self.assertEquals(len(containers), 0)
            if format is None:
                self.assert_status(204)
            else:
                self.assert_status(200)

    def testMarkerLimitContainerList(self):
        for format in [None, 'json', 'xml']:
            for marker in ['0', 'A', 'I', 'R', 'Z', 'a', 'i', 'r', 'z',
                           'abc123', 'mnop', 'xyz']:
                limit = random.randint(2, 9)
                containers = self.env.account.containers(
                    parms={'format': format, 'marker': marker, 'limit': limit})
                self.assert_(len(containers) <= limit)
                if containers:
                    if isinstance(containers[0], dict):
                        containers = [x['name'] for x in containers]
                    # everything listed sorts strictly after the marker
                    self.assert_(locale.strcoll(containers[0], marker) > 0)

    def testContainersOrderedByName(self):
        for format in [None, 'json', 'xml']:
            containers = self.env.account.containers(
                parms={'format': format})
            if isinstance(containers[0], dict):
                containers = [x['name'] for x in containers]
            self.assertEquals(sorted(containers, cmp=locale.strcoll),
                              containers)
class TestAccountUTF8(Base2, TestAccount):
    """TestAccount rerun with UTF-8 names via the Base2 mixin."""
    set_up = False
class TestAccountNoContainersEnv:
    """Shared env: an authenticated account with all containers removed."""

    @classmethod
    def setUp(cls):
        cls.conn = Connection(config)
        cls.conn.authenticate()
        cls.account = Account(cls.conn, config.get('account',
                                                   config['username']))
        cls.account.delete_containers()
class TestAccountNoContainers(Base):
    """Listing an empty account: empty body, 204 (plain) or 200 (json/xml)."""
    env = TestAccountNoContainersEnv
    set_up = False

    def testGetRequest(self):
        for format in [None, 'json', 'xml']:
            self.assert_(not self.env.account.containers(
                parms={'format': format}))
            if format is None:
                self.assert_status(204)
            else:
                self.assert_status(200)
class TestAccountNoContainersUTF8(Base2, TestAccountNoContainers):
    """TestAccountNoContainers rerun with UTF-8 names via Base2."""
    set_up = False
class TestContainerEnv:
    """Shared env: one container pre-filled with 10 random 128-byte files."""

    @classmethod
    def setUp(cls):
        cls.conn = Connection(config)
        cls.conn.authenticate()
        cls.account = Account(cls.conn, config.get('account',
                                                   config['username']))
        cls.account.delete_containers()
        cls.container = cls.account.container(Utils.create_name())
        if not cls.container.create():
            raise ResponseError(cls.conn.response)
        cls.file_count = 10
        cls.file_size = 128
        cls.files = list()
        for x in range(cls.file_count):
            file = cls.container.file(Utils.create_name())
            file.write_random(cls.file_size)
            # keep only the names; tests compare against listings
            cls.files.append(file.name)
class TestContainerDev(Base):
    """Placeholder for dev-only container tests (ASCII names)."""
    env = TestContainerEnv
    set_up = False
class TestContainerDevUTF8(Base2, TestContainerDev):
    """TestContainerDev rerun with UTF-8 names via Base2."""
    set_up = False
class TestContainer(Base):
    """Functional tests for container create/delete/listing behavior."""
    env = TestContainerEnv
    set_up = False

    def testContainerNameLimit(self):
        limit = load_constraint('max_container_name_length')
        for l in (limit - 100, limit - 10, limit - 1, limit,
                  limit + 1, limit + 10, limit + 100):
            cont = self.env.account.container('a' * l)
            if l <= limit:
                self.assert_(cont.create())
                self.assert_status(201)
            else:
                self.assert_(not cont.create())
                self.assert_status(400)

    def testFileThenContainerDelete(self):
        """Deleting the last object makes the container deletable."""
        cont = self.env.account.container(Utils.create_name())
        self.assert_(cont.create())
        file = cont.file(Utils.create_name())
        self.assert_(file.write_random())
        self.assert_(file.delete())
        self.assert_status(204)
        self.assert_(file.name not in cont.files())
        self.assert_(cont.delete())
        self.assert_status(204)
        self.assert_(cont.name not in self.env.account.containers())

    def testFileListingLimitMarkerPrefix(self):
        cont = self.env.account.container(Utils.create_name())
        self.assert_(cont.create())
        files = sorted([Utils.create_name() for x in xrange(10)])
        for f in files:
            file = cont.file(f)
            self.assert_(file.write_random())
        for i in xrange(len(files)):
            f = files[i]
            # limit+marker returns the j entries right after f
            for j in xrange(1, len(files) - i):
                self.assert_(cont.files(parms={'limit': j, 'marker': f}) ==
                             files[i + 1: i + j + 1])
            self.assert_(cont.files(parms={'marker': f}) == files[i + 1:])
            # marker == prefix excludes the marker itself
            self.assert_(cont.files(parms={'marker': f, 'prefix': f}) == [])
            self.assert_(cont.files(parms={'prefix': f}) == [f])

    def testPrefixAndLimit(self):
        load_constraint('container_listing_limit')
        cont = self.env.account.container(Utils.create_name())
        self.assert_(cont.create())
        prefix_file_count = 10
        limit_count = 2
        prefixs = ['alpha/', 'beta/', 'kappa/']
        prefix_files = {}
        all_files = []
        for prefix in prefixs:
            prefix_files[prefix] = []
            for i in range(prefix_file_count):
                file = cont.file(prefix + Utils.create_name())
                file.write()
                prefix_files[prefix].append(file.name)
        # NOTE(review): 'format' is iterated but never passed to files(),
        # so only the default listing format is actually exercised below.
        for format in [None, 'json', 'xml']:
            for prefix in prefixs:
                files = cont.files(parms={'prefix': prefix})
                self.assertEquals(files, sorted(prefix_files[prefix]))
        for format in [None, 'json', 'xml']:
            for prefix in prefixs:
                files = cont.files(parms={'limit': limit_count,
                                          'prefix': prefix})
                self.assertEquals(len(files), limit_count)
                for file in files:
                    self.assert_(file.startswith(prefix))

    def testCreate(self):
        cont = self.env.account.container(Utils.create_name())
        self.assert_(cont.create())
        self.assert_status(201)
        self.assert_(cont.name in self.env.account.containers())

    def testContainerFileListOnContainerThatDoesNotExist(self):
        for format in [None, 'json', 'xml']:
            container = self.env.account.container(Utils.create_name())
            self.assertRaises(ResponseError, container.files,
                              parms={'format': format})
            self.assert_status(404)

    def testUtf8Container(self):
        valid_utf8 = Utils.create_utf8_name()
        # reversed byte sequence is no longer valid UTF-8
        invalid_utf8 = valid_utf8[::-1]
        container = self.env.account.container(valid_utf8)
        self.assert_(container.create(cfg={'no_path_quote': True}))
        self.assert_(container.name in self.env.account.containers())
        self.assertEquals(container.files(), [])
        self.assert_(container.delete())
        container = self.env.account.container(invalid_utf8)
        self.assert_(not container.create(cfg={'no_path_quote': True}))
        self.assert_status(412)
        self.assertRaises(ResponseError, container.files,
                          cfg={'no_path_quote': True})
        self.assert_status(412)

    def testCreateOnExisting(self):
        """Re-PUT of an existing container returns 202, not 201."""
        cont = self.env.account.container(Utils.create_name())
        self.assert_(cont.create())
        self.assert_status(201)
        self.assert_(cont.create())
        self.assert_status(202)

    def testSlashInName(self):
        # Insert an unquoted '/' mid-name; must operate on characters,
        # not bytes, for UTF-8 names.
        if Utils.create_name == Utils.create_utf8_name:
            cont_name = list(unicode(Utils.create_name(), 'utf-8'))
        else:
            cont_name = list(Utils.create_name())
        cont_name[random.randint(2, len(cont_name) - 2)] = '/'
        cont_name = ''.join(cont_name)
        if Utils.create_name == Utils.create_utf8_name:
            cont_name = cont_name.encode('utf-8')
        cont = self.env.account.container(cont_name)
        self.assert_(not cont.create(cfg={'no_path_quote': True}),
                     'created container with name %s' % (cont_name))
        self.assert_status(404)
        self.assert_(cont.name not in self.env.account.containers())

    def testDelete(self):
        cont = self.env.account.container(Utils.create_name())
        self.assert_(cont.create())
        self.assert_status(201)
        self.assert_(cont.delete())
        self.assert_status(204)
        self.assert_(cont.name not in self.env.account.containers())

    def testDeleteOnContainerThatDoesNotExist(self):
        cont = self.env.account.container(Utils.create_name())
        self.assert_(not cont.delete())
        self.assert_status(404)

    def testDeleteOnContainerWithFiles(self):
        """A non-empty container refuses deletion with 409 Conflict."""
        cont = self.env.account.container(Utils.create_name())
        self.assert_(cont.create())
        file = cont.file(Utils.create_name())
        file.write_random(self.env.file_size)
        self.assert_(file.name in cont.files())
        self.assert_(not cont.delete())
        self.assert_status(409)

    def testFileCreateInContainerThatDoesNotExist(self):
        file = File(self.env.conn, self.env.account, Utils.create_name(),
                    Utils.create_name())
        self.assertRaises(ResponseError, file.write)
        self.assert_status(404)

    def testLastFileMarker(self):
        """A marker equal to the last file yields an empty listing."""
        for format in [None, 'json', 'xml']:
            files = self.env.container.files({'format': format})
            self.assertEquals(len(files), len(self.env.files))
            self.assert_status(200)
            files = self.env.container.files(
                parms={'format': format, 'marker': files[-1]})
            self.assertEquals(len(files), 0)
            if format is None:
                self.assert_status(204)
            else:
                self.assert_status(200)

    def testContainerFileList(self):
        for format in [None, 'json', 'xml']:
            files = self.env.container.files(parms={'format': format})
            self.assert_status(200)
            if isinstance(files[0], dict):
                files = [x['name'] for x in files]
            # listing and the known file set must match exactly
            for file in self.env.files:
                self.assert_(file in files)
            for file in files:
                self.assert_(file in self.env.files)

    def testMarkerLimitFileList(self):
        for format in [None, 'json', 'xml']:
            for marker in ['0', 'A', 'I', 'R', 'Z', 'a', 'i', 'r', 'z',
                           'abc123', 'mnop', 'xyz']:
                limit = random.randint(2, self.env.file_count - 1)
                files = self.env.container.files(parms={'format': format,
                                                        'marker': marker,
                                                        'limit': limit})
                if not files:
                    continue
                if isinstance(files[0], dict):
                    files = [x['name'] for x in files]
                self.assert_(len(files) <= limit)
                if files:
                    # NOTE(review): redundant re-normalization -- files is
                    # already a non-empty list of names at this point.
                    if isinstance(files[0], dict):
                        files = [x['name'] for x in files]
                    self.assert_(locale.strcoll(files[0], marker) > 0)

    def testFileOrder(self):
        for format in [None, 'json', 'xml']:
            files = self.env.container.files(parms={'format': format})
            if isinstance(files[0], dict):
                files = [x['name'] for x in files]
            self.assertEquals(sorted(files, cmp=locale.strcoll), files)

    def testContainerInfo(self):
        info = self.env.container.info()
        self.assert_status(204)
        self.assertEquals(info['object_count'], self.env.file_count)
        self.assertEquals(info['bytes_used'],
                          self.env.file_count * self.env.file_size)

    def testContainerInfoOnContainerThatDoesNotExist(self):
        container = self.env.account.container(Utils.create_name())
        self.assertRaises(ResponseError, container.info)
        self.assert_status(404)

    def testContainerFileListWithLimit(self):
        for format in [None, 'json', 'xml']:
            files = self.env.container.files(parms={'format': format,
                                                    'limit': 2})
            self.assertEquals(len(files), 2)

    def testTooLongName(self):
        cont = self.env.account.container('x' * 257)
        self.assert_(not cont.create(),
                     'created container with name %s' % (cont.name))
        self.assert_status(400)

    def testContainerExistenceCachingProblem(self):
        """A 404 on listing must not be cached past a later create."""
        cont = self.env.account.container(Utils.create_name())
        self.assertRaises(ResponseError, cont.files)
        self.assert_(cont.create())
        cont.files()
        cont = self.env.account.container(Utils.create_name())
        self.assertRaises(ResponseError, cont.files)
        self.assert_(cont.create())
        file = cont.file(Utils.create_name())
        file.write_random()
class TestContainerUTF8(Base2, TestContainer):
    """TestContainer rerun with UTF-8 names via Base2."""
    set_up = False
class TestContainerPathsEnv:
    """Shared env: a container filled with a fixed pseudo-directory tree,
    both with and without a leading slash, for path-listing tests."""

    @classmethod
    def setUp(cls):
        cls.conn = Connection(config)
        cls.conn.authenticate()
        cls.account = Account(cls.conn, config.get('account',
                                                   config['username']))
        cls.account.delete_containers()
        cls.file_size = 8
        cls.container = cls.account.container(Utils.create_name())
        if not cls.container.create():
            raise ResponseError(cls.conn.response)
        # names ending in '/' are directory markers; the rest are files
        cls.files = [
            '/file1',
            '/file A',
            '/dir1/',
            '/dir2/',
            '/dir1/file2',
            '/dir1/subdir1/',
            '/dir1/subdir2/',
            '/dir1/subdir1/file2',
            '/dir1/subdir1/file3',
            '/dir1/subdir1/file4',
            '/dir1/subdir1/subsubdir1/',
            '/dir1/subdir1/subsubdir1/file5',
            '/dir1/subdir1/subsubdir1/file6',
            '/dir1/subdir1/subsubdir1/file7',
            '/dir1/subdir1/subsubdir1/file8',
            '/dir1/subdir1/subsubdir2/',
            '/dir1/subdir1/subsubdir2/file9',
            '/dir1/subdir1/subsubdir2/file0',
            'file1',
            'dir1/',
            'dir2/',
            'dir1/file2',
            'dir1/subdir1/',
            'dir1/subdir2/',
            'dir1/subdir1/file2',
            'dir1/subdir1/file3',
            'dir1/subdir1/file4',
            'dir1/subdir1/subsubdir1/',
            'dir1/subdir1/subsubdir1/file5',
            'dir1/subdir1/subsubdir1/file6',
            'dir1/subdir1/subsubdir1/file7',
            'dir1/subdir1/subsubdir1/file8',
            'dir1/subdir1/subsubdir2/',
            'dir1/subdir1/subsubdir2/file9',
            'dir1/subdir1/subsubdir2/file0',
            'dir1/subdir with spaces/',
            'dir1/subdir with spaces/file B',
            'dir1/subdir+with{whatever/',
            'dir1/subdir+with{whatever/file D',
        ]
        for f in cls.files:
            file = cls.container.file(f)
            if f.endswith('/'):
                file.write(hdrs={'Content-Type': 'application/directory'})
            else:
                # NOTE(review): non-directory files are also written with
                # the 'application/directory' content type here -- looks
                # unintentional, but testContainerListing only checks the
                # content type of names ending in '/'. Confirm before
                # changing.
                file.write_random(cls.file_size, hdrs={'Content-Type':
                                  'application/directory'})
class TestContainerPaths(Base):
    """Tests for path-based (pseudo-directory) container listings."""
    env = TestContainerPathsEnv
    set_up = False

    def testTraverseContainer(self):
        found_files = []
        found_dirs = []

        def recurse_path(path, count=0):
            # guard against a listing bug causing infinite recursion
            if count > 10:
                raise ValueError('too deep recursion')
            for file in self.env.container.files(parms={'path': path}):
                self.assert_(file.startswith(path))
                if file.endswith('/'):
                    recurse_path(file, count + 1)
                    found_dirs.append(file)
                else:
                    found_files.append(file)

        # traversal from '' must see exactly the non-slash-prefixed tree
        recurse_path('')
        for file in self.env.files:
            if file.startswith('/'):
                self.assert_(file not in found_dirs)
                self.assert_(file not in found_files)
            elif file.endswith('/'):
                self.assert_(file in found_dirs)
                self.assert_(file not in found_files)
            else:
                self.assert_(file in found_files)
                self.assert_(file not in found_dirs)

        # and traversal from '/' exactly the slash-prefixed tree
        found_files = []
        found_dirs = []
        recurse_path('/')
        for file in self.env.files:
            if not file.startswith('/'):
                self.assert_(file not in found_dirs)
                self.assert_(file not in found_files)
            elif file.endswith('/'):
                self.assert_(file in found_dirs)
                self.assert_(file not in found_files)
            else:
                self.assert_(file in found_files)
                self.assert_(file not in found_dirs)

    def testContainerListing(self):
        for format in (None, 'json', 'xml'):
            files = self.env.container.files(parms={'format': format})
            if isinstance(files[0], dict):
                files = [str(x['name']) for x in files]
            self.assertEquals(files, sorted(self.env.files))
        for format in ('json', 'xml'):
            for file in self.env.container.files(parms={'format': format}):
                self.assert_(int(file['bytes']) >= 0)
                self.assert_('last_modified' in file)
                if file['name'].endswith('/'):
                    self.assertEquals(file['content_type'],
                                      'application/directory')

    def testStructure(self):
        def assert_listing(path, list):
            files = self.env.container.files(parms={'path': path})
            self.assertEquals(sorted(list, cmp=locale.strcoll), files)

        assert_listing('/', ['/dir1/', '/dir2/', '/file1', '/file A'])
        assert_listing('/dir1',
                       ['/dir1/file2', '/dir1/subdir1/', '/dir1/subdir2/'])
        assert_listing('/dir1/',
                       ['/dir1/file2', '/dir1/subdir1/', '/dir1/subdir2/'])
        assert_listing('/dir1/subdir1',
                       ['/dir1/subdir1/subsubdir2/', '/dir1/subdir1/file2',
                        '/dir1/subdir1/file3', '/dir1/subdir1/file4',
                        '/dir1/subdir1/subsubdir1/'])
        assert_listing('/dir1/subdir2', [])
        assert_listing('', ['file1', 'dir1/', 'dir2/'])
        assert_listing('dir1', ['dir1/file2', 'dir1/subdir1/',
                                'dir1/subdir2/', 'dir1/subdir with spaces/',
                                'dir1/subdir+with{whatever/'])
        assert_listing('dir1/subdir1',
                       ['dir1/subdir1/file4', 'dir1/subdir1/subsubdir2/',
                        'dir1/subdir1/file2', 'dir1/subdir1/file3',
                        'dir1/subdir1/subsubdir1/'])
        assert_listing('dir1/subdir1/subsubdir1',
                       ['dir1/subdir1/subsubdir1/file7',
                        'dir1/subdir1/subsubdir1/file5',
                        'dir1/subdir1/subsubdir1/file8',
                        'dir1/subdir1/subsubdir1/file6'])
        assert_listing('dir1/subdir1/subsubdir1/',
                       ['dir1/subdir1/subsubdir1/file7',
                        'dir1/subdir1/subsubdir1/file5',
                        'dir1/subdir1/subsubdir1/file8',
                        'dir1/subdir1/subsubdir1/file6'])
        assert_listing('dir1/subdir with spaces/',
                       ['dir1/subdir with spaces/file B'])
class TestFileEnv:
    """Shared env: a clean account with one empty container for file tests."""

    @classmethod
    def setUp(cls):
        cls.conn = Connection(config)
        cls.conn.authenticate()
        cls.account = Account(cls.conn, config.get('account',
                                                   config['username']))
        cls.account.delete_containers()
        cls.container = cls.account.container(Utils.create_name())
        if not cls.container.create():
            raise ResponseError(cls.conn.response)
        # default size for randomly written test objects
        cls.file_size = 128
class TestFileDev(Base):
    """Placeholder for dev-only file tests (ASCII names)."""
    env = TestFileEnv
    set_up = False
class TestFileDevUTF8(Base2, TestFileDev):
    """TestFileDev rerun with UTF-8 names via Base2."""
    set_up = False
class TestFile(Base):
    """Functional tests for object PUT/GET/HEAD/DELETE/COPY behavior."""
    env = TestFileEnv
    set_up = False
    def testCopy(self):
        """COPY an object (with metadata) within and across containers."""
        # makes sure to test encoded characters"
        source_filename = 'dealde%2Fl04 011e%204c8df/flash.png'
        file = self.env.container.file(source_filename)
        metadata = {}
        for i in range(1):
            metadata[Utils.create_ascii_name()] = Utils.create_name()
        data = file.write_random()
        file.sync_metadata(metadata)
        dest_cont = self.env.account.container(Utils.create_name())
        self.assert_(dest_cont.create())
        # copy both from within and across containers
        for cont in (self.env.container, dest_cont):
            # copy both with and without initial slash
            for prefix in ('', '/'):
                dest_filename = Utils.create_name()
                file = self.env.container.file(source_filename)
                # NOTE(review): relies on the container object formatting
                # to its name in '%s%s' -- confirm in swift_test_client.
                file.copy('%s%s' % (prefix, cont), dest_filename)
                self.assert_(dest_filename in cont.files())
                file = cont.file(dest_filename)
                self.assert_(data == file.read())
                self.assert_(file.initialize())
                self.assert_(metadata == file.metadata)
    def testCopy404s(self):
        """COPY with a missing source or destination returns 404."""
        source_filename = Utils.create_name()
        file = self.env.container.file(source_filename)
        file.write_random()
        dest_cont = self.env.account.container(Utils.create_name())
        self.assert_(dest_cont.create())
        for prefix in ('', '/'):
            # invalid source container
            source_cont = self.env.account.container(Utils.create_name())
            file = source_cont.file(source_filename)
            self.assert_(not file.copy('%s%s' % (prefix, self.env.container),
                                       Utils.create_name()))
            self.assert_status(404)
            self.assert_(not file.copy('%s%s' % (prefix, dest_cont),
                                       Utils.create_name()))
            self.assert_status(404)
            # invalid source object
            file = self.env.container.file(Utils.create_name())
            self.assert_(not file.copy('%s%s' % (prefix, self.env.container),
                                       Utils.create_name()))
            self.assert_status(404)
            self.assert_(not file.copy('%s%s' % (prefix, dest_cont),
                                       Utils.create_name()))
            self.assert_status(404)
            # invalid destination container
            file = self.env.container.file(source_filename)
            self.assert_(not file.copy('%s%s' % (prefix, Utils.create_name()),
                                       Utils.create_name()))
    def testCopyNoDestinationHeader(self):
        """COPY without a Destination header is rejected with 412."""
        source_filename = Utils.create_name()
        file = self.env.container.file(source_filename)
        file.write_random()
        file = self.env.container.file(source_filename)
        self.assert_(not file.copy(Utils.create_name(), Utils.create_name(),
                     cfg={'no_destination': True}))
        self.assert_status(412)
    def testCopyDestinationSlashProblems(self):
        """A Destination header without a container/object slash is 412."""
        source_filename = Utils.create_name()
        file = self.env.container.file(source_filename)
        file.write_random()
        # no slash
        self.assert_(not file.copy(Utils.create_name(), Utils.create_name(),
                     cfg={'destination': Utils.create_name()}))
        self.assert_status(412)
    def testCopyFromHeader(self):
        """PUT with X-Copy-From duplicates data and metadata."""
        source_filename = Utils.create_name()
        file = self.env.container.file(source_filename)
        metadata = {}
        for i in range(1):
            metadata[Utils.create_ascii_name()] = Utils.create_name()
        file.metadata = metadata
        data = file.write_random()
        dest_cont = self.env.account.container(Utils.create_name())
        self.assert_(dest_cont.create())
        # copy both from within and across containers
        for cont in (self.env.container, dest_cont):
            # copy both with and without initial slash
            for prefix in ('', '/'):
                dest_filename = Utils.create_name()
                file = cont.file(dest_filename)
                file.write(hdrs={'X-Copy-From': '%s%s/%s' % (prefix,
                           self.env.container.name, source_filename)})
                self.assert_(dest_filename in cont.files())
                file = cont.file(dest_filename)
                self.assert_(data == file.read())
                self.assert_(file.initialize())
                self.assert_(metadata == file.metadata)
    def testCopyFromHeader404s(self):
        """X-Copy-From referencing missing source/destination returns 404."""
        source_filename = Utils.create_name()
        file = self.env.container.file(source_filename)
        file.write_random()
        for prefix in ('', '/'):
            # invalid source container
            file = self.env.container.file(Utils.create_name())
            self.assertRaises(ResponseError, file.write,
                              hdrs={'X-Copy-From': '%s%s/%s' %
                                    (prefix,
                                     Utils.create_name(), source_filename)})
            self.assert_status(404)
            # invalid source object
            file = self.env.container.file(Utils.create_name())
            self.assertRaises(ResponseError, file.write,
                              hdrs={'X-Copy-From': '%s%s/%s' %
                                    (prefix,
                                     self.env.container.name, Utils.create_name())})
            self.assert_status(404)
            # invalid destination container
            dest_cont = self.env.account.container(Utils.create_name())
            file = dest_cont.file(Utils.create_name())
            self.assertRaises(ResponseError, file.write,
                              hdrs={'X-Copy-From': '%s%s/%s' %
                                    (prefix,
                                     self.env.container.name, source_filename)})
            self.assert_status(404)
    def testNameLimit(self):
        """Object names up to max_object_name_length succeed; longer fail."""
        limit = load_constraint('max_object_name_length')
        for l in (1, 10, limit / 2, limit - 1, limit, limit + 1, limit * 2):
            file = self.env.container.file('a' * l)
            if l <= limit:
                self.assert_(file.write())
                self.assert_status(201)
            else:
                self.assertRaises(ResponseError, file.write)
                self.assert_status(400)
    def testQuestionMarkInName(self):
        """An unquoted '?' starts the query string, truncating the name."""
        if Utils.create_name == Utils.create_ascii_name:
            file_name = list(Utils.create_name())
            file_name[random.randint(2, len(file_name) - 2)] = '?'
            file_name = "".join(file_name)
        else:
            file_name = Utils.create_name(6) + '?' + Utils.create_name(6)
        file = self.env.container.file(file_name)
        self.assert_(file.write(cfg={'no_path_quote': True}))
        # only the part before '?' becomes the object name
        self.assert_(file_name not in self.env.container.files())
        self.assert_(file_name.split('?')[0] in self.env.container.files())
    def testDeleteThen404s(self):
        """All object operations 404 after the object is deleted."""
        file = self.env.container.file(Utils.create_name())
        self.assert_(file.write_random())
        self.assert_status(201)
        self.assert_(file.delete())
        self.assert_status(204)
        file.metadata = {Utils.create_ascii_name(): Utils.create_name()}
        for method in (file.info, file.read, file.sync_metadata,
                       file.delete):
            self.assertRaises(ResponseError, method)
            self.assert_status(404)
    def testBlankMetadataName(self):
        """An empty metadata key is rejected with 400."""
        file = self.env.container.file(Utils.create_name())
        file.metadata = {'': Utils.create_name()}
        self.assertRaises(ResponseError, file.write_random)
        self.assert_status(400)
    def testMetadataNumberLimit(self):
        """At most max_meta_count metadata items (within overall size)."""
        number_limit = load_constraint('max_meta_count')
        size_limit = load_constraint('max_meta_overall_size')
        for i in (number_limit - 10, number_limit - 1, number_limit,
                  number_limit + 1, number_limit + 10, number_limit + 100):
            # cap each key+value so i items stay under the overall size
            # (Python 2 integer division)
            j = size_limit / (i * 2)
            size = 0
            metadata = {}
            while len(metadata.keys()) < i:
                key = Utils.create_ascii_name()
                val = Utils.create_name()
                if len(key) > j:
                    key = key[:j]
                    val = val[:j]
                size += len(key) + len(val)
                metadata[key] = val
            file = self.env.container.file(Utils.create_name())
            file.metadata = metadata
            if i <= number_limit:
                self.assert_(file.write())
                self.assert_status(201)
                self.assert_(file.sync_metadata())
                self.assert_status((201, 202))
            else:
                self.assertRaises(ResponseError, file.write)
                self.assert_status(400)
                file.metadata = {}
                self.assert_(file.write())
                self.assert_status(201)
                file.metadata = metadata
                self.assertRaises(ResponseError, file.sync_metadata)
                self.assert_status(400)
    def testContentTypeGuessing(self):
        """With no explicit content type, the type comes from the suffix."""
        file_types = {'wav': 'audio/x-wav', 'txt': 'text/plain',
                      'zip': 'application/zip'}
        container = self.env.account.container(Utils.create_name())
        self.assert_(container.create())
        for i in file_types.keys():
            file = container.file(Utils.create_name() + '.' + i)
            file.write('', cfg={'no_content_type': True})
        file_types_read = {}
        for i in container.files(parms={'format': 'json'}):
            file_types_read[i['name'].split('.')[1]] = i['content_type']
        self.assertEquals(file_types, file_types_read)
    def testRangedGets(self):
        """Exercise byte-range GETs: plain, suffix, open-ended, and
        unsatisfiable ranges."""
        file_length = 10000
        range_size = file_length / 10
        file = self.env.container.file(Utils.create_name())
        data = file.write_random(file_length)
        for i in range(0, file_length, range_size):
            range_string = 'bytes=%d-%d' % (i, i + range_size - 1)
            hdrs = {'Range': range_string}
            self.assert_(data[i: i + range_size] == file.read(hdrs=hdrs),
                         range_string)
            range_string = 'bytes=-%d' % (i)
            hdrs = {'Range': range_string}
            if i == 0:
                # RFC 2616 14.35.1
                # "If a syntactically valid byte-range-set includes ... at
                # least one suffix-byte-range-spec with a NON-ZERO
                # suffix-length, then the byte-range-set is satisfiable.
                # Otherwise, the byte-range-set is unsatisfiable.
                self.assertRaises(ResponseError, file.read, hdrs=hdrs)
                self.assert_status(416)
            else:
                self.assertEquals(file.read(hdrs=hdrs), data[-i:])
            range_string = 'bytes=%d-' % (i)
            hdrs = {'Range': range_string}
            self.assert_(file.read(hdrs=hdrs) == data[i - file_length:],
                         range_string)
        range_string = 'bytes=%d-%d' % (file_length + 1000, file_length + 2000)
        hdrs = {'Range': range_string}
        self.assertRaises(ResponseError, file.read, hdrs=hdrs)
        self.assert_status(416)
        range_string = 'bytes=%d-%d' % (file_length - 1000, file_length + 2000)
        hdrs = {'Range': range_string}
        self.assert_(file.read(hdrs=hdrs) == data[-1000:], range_string)
        # malformed Range (missing 'bytes=') is ignored: whole body returned
        # NOTE(review): the message reuses the previous range_string here.
        hdrs = {'Range': '0-4'}
        self.assert_(file.read(hdrs=hdrs) == data, range_string)
        # RFC 2616 14.35.1
        # "If the entity is shorter than the specified suffix-length, the
        # entire entity-body is used."
        range_string = 'bytes=-%d' % (file_length + 10)
        hdrs = {'Range': range_string}
        self.assert_(file.read(hdrs=hdrs) == data, range_string)
    def testRangedGetsWithLWSinHeader(self):
        """Range headers containing linear whitespace are still honored."""
        #Skip this test until webob 1.2 can tolerate LWS in Range header.
        file_length = 10000
        range_size = file_length / 10
        file = self.env.container.file(Utils.create_name())
        data = file.write_random(file_length)
        for r in ('BYTES=0-999', 'bytes = 0-999', 'BYTES = 0 - 999',
                  'bytes = 0 - 999', 'bytes=0 - 999', 'bytes=0-999 '):
            self.assert_(file.read(hdrs={'Range': r}) == data[0:1000])
    def testFileSizeLimit(self):
        """PUTs up to max_file_size succeed; larger ones are rejected.

        Each write is bounded by a few seconds via timeout() so a
        rejected oversized upload cannot hang the test run.
        """
        limit = load_constraint('max_file_size')
        tsecs = 3
        for i in (limit - 100, limit - 10, limit - 1, limit, limit + 1,
                  limit + 10, limit + 100):
            file = self.env.container.file(Utils.create_name())
            if i <= limit:
                self.assert_(timeout(tsecs, file.write,
                             cfg={'set_content_length': i}))
            else:
                self.assertRaises(ResponseError, timeout, tsecs,
                                  file.write, cfg={'set_content_length': i})
    def testNoContentLengthForPut(self):
        """PUT without Content-Length returns 411 Length Required."""
        file = self.env.container.file(Utils.create_name())
        self.assertRaises(ResponseError, file.write, 'testing',
                          cfg={'no_content_length': True})
        self.assert_status(411)
    def testDelete(self):
        """A deleted object disappears from the container listing."""
        file = self.env.container.file(Utils.create_name())
        file.write_random(self.env.file_size)
        self.assert_(file.name in self.env.container.files())
        self.assert_(file.delete())
        self.assert_(file.name not in self.env.container.files())
    def testBadHeaders(self):
        """Malformed headers and request methods fail cleanly."""
        file_length = 100
        # no content type on puts should be ok
        file = self.env.container.file(Utils.create_name())
        file.write_random(file_length, cfg={'no_content_type': True})
        self.assert_status(201)
        # content length x
        self.assertRaises(ResponseError, file.write_random, file_length,
                          hdrs={'Content-Length': 'X'},
                          cfg={'no_content_length': True})
        self.assert_status(400)
        # bad request types
        #for req in ('LICK', 'GETorHEAD_base', 'container_info',
        #            'best_response'):
        for req in ('LICK', 'GETorHEAD_base'):
            self.env.account.conn.make_request(req)
            self.assert_status(405)
        # bad range headers
        self.assert_(len(file.read(hdrs={'Range': 'parsecs=8-12'})) ==
                     file_length)
        self.assert_status(200)
def testMetadataLengthLimits(self):
    """Metadata keys/values at their limits succeed; oversized ones 400."""
    key_limit = load_constraint('max_meta_name_length')
    value_limit = load_constraint('max_meta_value_length')
    # (key length, value length) pairs straddling both limits
    lengths = [[key_limit, value_limit], [key_limit, value_limit + 1],
               [key_limit + 1, value_limit], [key_limit, 0],
               [key_limit, value_limit * 10],
               [key_limit * 10, value_limit]]
    for l in lengths:
        metadata = {'a' * l[0]: 'b' * l[1]}
        file = self.env.container.file(Utils.create_name())
        file.metadata = metadata
        if l[0] <= key_limit and l[1] <= value_limit:
            self.assert_(file.write())
            self.assert_status(201)
            self.assert_(file.sync_metadata())
        else:
            # oversized metadata is rejected on PUT...
            self.assertRaises(ResponseError, file.write)
            self.assert_status(400)
            # ...a bare PUT still works, and a later POST of the same
            # oversized metadata must be rejected as well
            file.metadata = {}
            self.assert_(file.write())
            self.assert_status(201)
            file.metadata = metadata
            self.assertRaises(ResponseError, file.sync_metadata)
            self.assert_status(400)
def testEtagWayoff(self):
    """A syntactically invalid etag supplied on PUT yields 422."""
    new_file = self.env.container.file(Utils.create_name())
    bad_etag = {'etag': 'reallylonganddefinitelynotavalidetagvalue'}
    self.assertRaises(ResponseError, new_file.write_random, hdrs=bad_etag)
    self.assert_status(422)
def testFileCreate(self):
    """Create several files and verify each round-trips on read."""
    for _ in range(10):
        new_file = self.env.container.file(Utils.create_name())
        data = new_file.write_random()
        self.assert_status(201)
        self.assert_(data == new_file.read())
        self.assert_status(200)
def testHead(self):
    """HEAD reports length, etag, content type and last-modified."""
    file_name = Utils.create_name()
    content_type = Utils.create_name()
    writer = self.env.container.file(file_name)
    writer.content_type = content_type
    writer.write_random(self.env.file_size)
    md5 = writer.md5
    # Re-open by name so the info comes from the server, not local state.
    reader = self.env.container.file(file_name)
    info = reader.info()
    self.assert_status(200)
    self.assertEquals(info['content_length'], self.env.file_size)
    self.assertEquals(info['etag'], md5)
    self.assertEquals(info['content_type'], content_type)
    self.assert_('last_modified' in info)
def testDeleteOfFileThatDoesNotExist(self):
    """DELETE of a missing object 404s whether its container exists."""
    # in container that exists
    missing = self.env.container.file(Utils.create_name())
    self.assertRaises(ResponseError, missing.delete)
    self.assert_status(404)
    # in container that does not exist
    ghost_container = self.env.account.container(Utils.create_name())
    orphan = ghost_container.file(Utils.create_name())
    self.assertRaises(ResponseError, orphan.delete)
    self.assert_status(404)
def testHeadOnFileThatDoesNotExist(self):
    """HEAD of a missing object 404s whether its container exists."""
    # in container that exists
    missing = self.env.container.file(Utils.create_name())
    self.assertRaises(ResponseError, missing.info)
    self.assert_status(404)
    # in container that does not exist
    ghost_container = self.env.account.container(Utils.create_name())
    orphan = ghost_container.file(Utils.create_name())
    self.assertRaises(ResponseError, orphan.info)
    self.assert_status(404)
def testMetadataOnPost(self):
    """Metadata POSTed repeatedly replaces and persists correctly."""
    file = self.env.container.file(Utils.create_name())
    file.write_random(self.env.file_size)
    for i in range(10):
        metadata = {}
        # Fix: the inner loop previously reused ``i``, shadowing the
        # outer counter (cf. testMetadataOnPut, which uses ``j``).
        for j in range(10):
            metadata[Utils.create_ascii_name()] = Utils.create_name()
        file.metadata = metadata
        self.assert_(file.sync_metadata())
        self.assert_status((201, 202))
        # Re-read through a fresh handle to confirm the server state.
        file = self.env.container.file(file.name)
        self.assert_(file.initialize())
        self.assert_status(200)
        self.assertEquals(file.metadata, metadata)
def testGetContentType(self):
    """The content type set at write time is returned on GET."""
    file_name = Utils.create_name()
    content_type = Utils.create_name()
    writer = self.env.container.file(file_name)
    writer.content_type = content_type
    writer.write_random()
    reader = self.env.container.file(file_name)
    reader.read()
    self.assertEquals(content_type, reader.content_type)
def testGetOnFileThatDoesNotExist(self):
    """GET of a missing object 404s whether its container exists."""
    # in container that exists
    missing = self.env.container.file(Utils.create_name())
    self.assertRaises(ResponseError, missing.read)
    self.assert_status(404)
    # in container that does not exist
    ghost_container = self.env.account.container(Utils.create_name())
    orphan = ghost_container.file(Utils.create_name())
    self.assertRaises(ResponseError, orphan.read)
    self.assert_status(404)
def testPostOnFileThatDoesNotExist(self):
    """POST of metadata to a missing object 404s in either case."""
    # in container that exists
    missing = self.env.container.file(Utils.create_name())
    missing.metadata['Field'] = 'Value'
    self.assertRaises(ResponseError, missing.sync_metadata)
    self.assert_status(404)
    # in container that does not exist
    ghost_container = self.env.account.container(Utils.create_name())
    orphan = ghost_container.file(Utils.create_name())
    orphan.metadata['Field'] = 'Value'
    self.assertRaises(ResponseError, orphan.sync_metadata)
    self.assert_status(404)
def testMetadataOnPut(self):
    """Metadata supplied on PUT is persisted and readable afterwards."""
    for _ in range(10):
        metadata = dict((Utils.create_ascii_name(), Utils.create_name())
                        for _ in range(10))
        created = self.env.container.file(Utils.create_name())
        created.metadata = metadata
        created.write_random(self.env.file_size)
        # Fresh handle: force the metadata to come back from the server.
        reread = self.env.container.file(created.name)
        self.assert_(reread.initialize())
        self.assert_status(200)
        self.assertEquals(reread.metadata, metadata)
def testSerialization(self):
    """JSON and XML listings agree with the files actually written."""
    container = self.env.account.container(Utils.create_name())
    self.assert_(container.create())
    # Seed files of widely varying sizes; per-file bookkeeping flags
    # record whether each shows up in the json/xml listings.
    files = []
    for i in (0, 1, 10, 100, 1000, 10000):
        files.append({'name': Utils.create_name(),
                      'content_type': Utils.create_name(), 'bytes': i})
    write_time = time.time()
    for f in files:
        file = container.file(f['name'])
        file.content_type = f['content_type']
        file.write_random(f['bytes'])
        f['hash'] = file.md5
        f['json'] = False
        f['xml'] = False
    write_time = time.time() - write_time
    for format in ['json', 'xml']:
        for file in container.files(parms={'format': format}):
            found = False
            for f in files:
                if f['name'] != file['name']:
                    continue
                self.assertEquals(file['content_type'],
                                  f['content_type'])
                self.assertEquals(int(file['bytes']), f['bytes'])
                # Parse the listing timestamp back to epoch seconds so
                # both formats can be cross-checked against each other.
                d = datetime.strptime(file['last_modified'].split('.')[0],
                                      "%Y-%m-%dT%H:%M:%S")
                lm = time.mktime(d.timetuple())
                if 'last_modified' in f:
                    self.assertEquals(f['last_modified'], lm)
                else:
                    f['last_modified'] = lm
                f[format] = True
                found = True
            self.assert_(found, 'Unexpected file %s found in '
                         '%s listing' % (file['name'], format))
        headers = dict(self.env.conn.response.getheaders())
        if format == 'json':
            self.assertEquals(headers['content-type'],
                              'application/json; charset=utf-8')
        elif format == 'xml':
            self.assertEquals(headers['content-type'],
                              'application/xml; charset=utf-8')
    # Sanity: the spread of last-modified times cannot exceed the time
    # it took to write all the files (plus one second of slack).
    lm_diff = max([f['last_modified'] for f in files]) -\
        min([f['last_modified'] for f in files])
    self.assert_(lm_diff < write_time + 1, 'Diff in last '
                 'modified times should be less than time to write files')
    for f in files:
        for format in ['json', 'xml']:
            self.assert_(f[format], 'File %s not found in %s listing'
                         % (f['name'], format))
def testStackedOverwrite(self):
    """Repeated overwrites leave the object equal to the last write."""
    target = self.env.container.file(Utils.create_name())
    data = None
    for _ in range(1, 11):
        data = target.write_random(512)
        target.write(data)
    self.assert_(target.read() == data)
def testTooLongName(self):
    """An object name longer than 1024 characters is rejected (400)."""
    too_long = self.env.container.file('x' * 1025)
    self.assertRaises(ResponseError, too_long.write)
    self.assert_status(400)
def testZeroByteFile(self):
    """A zero-byte PUT succeeds, is listed, and reads back empty."""
    empty = self.env.container.file(Utils.create_name())
    self.assert_(empty.write(''))
    self.assert_(empty.name in self.env.container.files())
    self.assert_(empty.read() == '')
def testEtagResponse(self):
    """The etag response header matches the MD5 of the stored body."""
    new_file = self.env.container.file(Utils.create_name())
    body = StringIO.StringIO(new_file.write_random(512))
    expected_etag = File.compute_md5sum(body)
    headers = dict(self.env.conn.response.getheaders())
    self.assert_('etag' in headers.keys())
    self.assertEquals(expected_etag, headers['etag'].strip('"'))
def testChunkedPut(self):
    """Chunked-transfer PUTs at several chunk sizes round-trip intact."""
    data = File.random_data(10000)
    etag = File.compute_md5sum(data)
    for chunk_size in (1, 10, 100, 1000):
        target = self.env.container.file(Utils.create_name())
        for piece in chunks(data, chunk_size):
            target.chunked_write(piece)
        # An empty chunked_write() call terminates the transfer.
        self.assert_(target.chunked_write())
        self.assert_(data == target.read())
        self.assertEquals(etag, target.info()['etag'])
class TestFileUTF8(Base2, TestFile):
    # Re-runs every TestFile case with UTF-8 names via the Base2 mixin.
    set_up = False
class TestFileComparisonEnv:
    """Shared fixture for conditional-GET tests: one container holding
    several random files, plus timestamps one day in the past and one
    day in the future."""

    @classmethod
    def setUp(cls):
        cls.conn = Connection(config)
        cls.conn.authenticate()
        cls.account = Account(cls.conn, config.get('account',
                              config['username']))
        cls.account.delete_containers()
        cls.container = cls.account.container(Utils.create_name())
        if not cls.container.create():
            raise ResponseError(cls.conn.response)
        cls.file_count = 20
        cls.file_size = 128
        cls.files = []
        for _ in range(cls.file_count):
            new_file = cls.container.file(Utils.create_name())
            new_file.write_random(cls.file_size)
            cls.files.append(new_file)
        cls.time_old = time.asctime(time.localtime(time.time() - 86400))
        cls.time_new = time.asctime(time.localtime(time.time() + 86400))
class TestFileComparison(Base):
    """Conditional GETs: If-Match, If-None-Match and *-Since headers."""
    env = TestFileComparisonEnv
    set_up = False

    def testIfMatch(self):
        for file in self.env.files:
            self.assert_(file.read(hdrs={'If-Match': file.md5}))
            self.assertRaises(ResponseError, file.read,
                              hdrs={'If-Match': 'bogus'})
            self.assert_status(412)

    def testIfNoneMatch(self):
        for file in self.env.files:
            self.assert_(file.read(hdrs={'If-None-Match': 'bogus'}))
            self.assertRaises(ResponseError, file.read,
                              hdrs={'If-None-Match': file.md5})
            self.assert_status(304)

    def testIfModifiedSince(self):
        for file in self.env.files:
            self.assert_(file.read(
                hdrs={'If-Modified-Since': self.env.time_old}))
            self.assertRaises(ResponseError, file.read,
                              hdrs={'If-Modified-Since': self.env.time_new})
            self.assert_status(304)

    def testIfUnmodifiedSince(self):
        for file in self.env.files:
            self.assert_(file.read(
                hdrs={'If-Unmodified-Since': self.env.time_new}))
            self.assertRaises(ResponseError, file.read,
                              hdrs={'If-Unmodified-Since': self.env.time_old})
            self.assert_status(412)

    def testIfMatchAndUnmodified(self):
        for file in self.env.files:
            good = {'If-Match': file.md5,
                    'If-Unmodified-Since': self.env.time_new}
            self.assert_(file.read(hdrs=good))
            # Either header failing alone must produce 412.
            for bad in ({'If-Match': 'bogus',
                         'If-Unmodified-Since': self.env.time_new},
                        {'If-Match': file.md5,
                         'If-Unmodified-Since': self.env.time_old}):
                self.assertRaises(ResponseError, file.read, hdrs=bad)
                self.assert_status(412)
class TestFileComparisonUTF8(Base2, TestFileComparison):
    # Re-runs every TestFileComparison case with UTF-8 names (Base2 mixin).
    set_up = False
# Allow running this functional test module directly.
if __name__ == '__main__':
    unittest.main()
| |
# -*- coding: utf-8 -*-
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from oslo_serialization import jsonutils
import routes
import six
from six.moves import http_client as http
import webob
import glance.api.common
import glance.common.config
from glance.common import timeutils
import glance.context
from glance.db.sqlalchemy import api as db_api
from glance.db.sqlalchemy import models as db_models
from glance.registry.api import v2 as rserver
from glance.tests.unit import base
from glance.tests import utils as test_utils
_gen_uuid = lambda: str(uuid.uuid4())
UUID1 = _gen_uuid()
UUID2 = _gen_uuid()
class TestRegistryRPC(base.IsolatedUnitTest):
def setUp(self):
    """Build the registry RPC API under test plus two image fixtures."""
    super(TestRegistryRPC, self).setUp()
    self.mapper = routes.Mapper()
    self.api = test_utils.FakeAuthMiddleware(rserver.API(self.mapper),
                                             is_admin=True)
    # uuid2 is created 5s after uuid1 so created_at ordering between
    # the two fixtures is deterministic in the listing tests.
    uuid1_time = timeutils.utcnow()
    uuid2_time = uuid1_time + datetime.timedelta(seconds=5)
    self.FIXTURES = [
        {'id': UUID1,
         'name': 'fake image #1',
         'status': 'active',
         'disk_format': 'ami',
         'container_format': 'ami',
         'visibility': 'shared',
         'created_at': uuid1_time,
         'updated_at': uuid1_time,
         'deleted_at': None,
         'deleted': False,
         'checksum': None,
         'min_disk': 0,
         'min_ram': 0,
         'size': 13,
         'locations': [{'url': "file:///%s/%s" % (self.test_dir, UUID1),
                        'metadata': {}, 'status': 'active'}],
         'properties': {'type': 'kernel'}},
        {'id': UUID2,
         'name': 'fake image #2',
         'status': 'active',
         'disk_format': 'vhd',
         'container_format': 'ovf',
         'visibility': 'public',
         'created_at': uuid2_time,
         'updated_at': uuid2_time,
         'deleted_at': None,
         'deleted': False,
         'checksum': None,
         'min_disk': 5,
         'min_ram': 256,
         'size': 19,
         'locations': [{'url': "file:///%s/%s" % (self.test_dir, UUID2),
                        'metadata': {}, 'status': 'active'}],
         'properties': {}}]
    self.context = glance.context.RequestContext(is_admin=True)
    # Prime the DB engine, then rebuild the schema and insert fixtures.
    db_api.get_engine()
    self.destroy_fixtures()
    self.create_fixtures()
def tearDown(self):
    """Clear the test environment"""
    super(TestRegistryRPC, self).tearDown()
    # Drop fixture rows so the next test starts from a clean schema.
    self.destroy_fixtures()
def create_fixtures(self):
    """Insert every FIXTURES row and back each with a fake image file."""
    for fixture in self.FIXTURES:
        db_api.image_create(self.context, fixture)
        # We write a fake image file to the filesystem
        path = "%s/%s" % (self.test_dir, fixture['id'])
        with open(path, 'wb') as image:
            image.write(b"chunk00000remainder")
            image.flush()
def destroy_fixtures(self):
    """Reset the database by dropping and re-creating all models."""
    # Easiest to just drop the models and re-create them...
    # Hoisted: one engine lookup instead of two identical calls.
    engine = db_api.get_engine()
    db_models.unregister_models(engine)
    db_models.register_models(engine)
def _compare_images_and_uuids(self, uuids, images):
    """Assert that *images* matches *uuids* exactly, in order."""
    actual_ids = [image['id'] for image in images]
    self.assertListEqual(uuids, actual_ids)
def test_show(self):
    """Tests that registry API endpoint returns the expected image."""
    fixture = {'id': UUID2,
               'name': 'fake image #2',
               'size': 19,
               'min_ram': 256,
               'min_disk': 5,
               'checksum': None}
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    cmd = [{
        'command': 'image_get',
        'kwargs': {'image_id': UUID2},
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    self.assertEqual(http.OK, res.status_int)
    res_dict = jsonutils.loads(res.body)[0]
    image = res_dict
    # Only the subset of keys named in the fixture is checked; the RPC
    # result carries additional fields.
    for k, v in six.iteritems(fixture):
        self.assertEqual(v, image[k])
def test_show_unknown(self):
    """Tests the registry API endpoint returns 404 for an unknown id."""
    command = [{'command': 'image_get',
                'kwargs': {'image_id': _gen_uuid()}}]
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    req.body = jsonutils.dump_as_bytes(command)
    res = req.get_response(self.api)
    # RPC errors come back as an "_error" mapping in the result list.
    error = jsonutils.loads(res.body)[0]["_error"]
    self.assertEqual('glance.common.exception.ImageNotFound',
                     error["cls"])
def test_get_index(self):
    """Tests that the image_get_all command returns list of images."""
    fixture = {'id': UUID2,
               'name': 'fake image #2',
               'size': 19,
               'checksum': None}
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    cmd = [{
        'command': 'image_get_all',
        'kwargs': {'filters': fixture},
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    self.assertEqual(http.OK, res.status_int)
    images = jsonutils.loads(res.body)[0]
    self.assertEqual(1, len(images))
    # Every filter key must round-trip on the single matching image.
    for k, v in six.iteritems(fixture):
        self.assertEqual(v, images[0][k])
def test_get_index_marker(self):
    """Tests that the registry API returns list of public images.

    Must conform to a marker query param.
    """
    # Timestamps are offset so created_at ordering is UUID3 > UUID4 >
    # UUID5 (newest first in the default sort).
    uuid5_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
    uuid4_time = uuid5_time + datetime.timedelta(seconds=5)
    uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
    UUID3 = _gen_uuid()
    extra_fixture = {'id': UUID3,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'new name! #123',
                     'size': 19,
                     'checksum': None,
                     'created_at': uuid3_time,
                     'updated_at': uuid3_time}
    db_api.image_create(self.context, extra_fixture)
    UUID4 = _gen_uuid()
    extra_fixture = {'id': UUID4,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'new name! #123',
                     'size': 20,
                     'checksum': None,
                     'created_at': uuid4_time,
                     'updated_at': uuid4_time}
    db_api.image_create(self.context, extra_fixture)
    UUID5 = _gen_uuid()
    extra_fixture = {'id': UUID5,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'new name! #123',
                     'size': 20,
                     'checksum': None,
                     'created_at': uuid5_time,
                     'updated_at': uuid5_time}
    db_api.image_create(self.context, extra_fixture)
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    cmd = [{
        'command': 'image_get_all',
        'kwargs': {'marker': UUID4, "is_public": True},
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    self.assertEqual(http.OK, res.status_int)
    images = jsonutils.loads(res.body)[0]
    # should be sorted by created_at desc, id desc
    # page should start after marker 4
    uuid_list = [UUID5, UUID2]
    self._compare_images_and_uuids(uuid_list, images)
def test_get_index_marker_and_name_asc(self):
    """Test marker and null name ascending

    Tests that the registry API returns 200 when a marker and a null
    name are combined, sorted in ascending order.
    """
    UUID3 = _gen_uuid()
    db_api.image_create(self.context,
                        {'id': UUID3,
                         'status': 'active',
                         'visibility': 'public',
                         'disk_format': 'vhd',
                         'container_format': 'ovf',
                         'name': None,
                         'size': 19,
                         'checksum': None})
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    req.body = jsonutils.dump_as_bytes([{
        'command': 'image_get_all',
        'kwargs': {'marker': UUID3, 'sort_key': ['name'],
                   'sort_dir': ['asc']},
    }])
    res = req.get_response(self.api)
    self.assertEqual(http.OK, res.status_int)
    images = jsonutils.loads(res.body)[0]
    # A NULL-name marker sorts before both named fixture images.
    self.assertEqual(2, len(images))
def test_get_index_marker_and_name_desc(self):
    """Test marker and null name descending

    Tests that the registry API returns 200 when a marker and a null
    name are combined, sorted in descending order.
    """
    UUID3 = _gen_uuid()
    extra_fixture = {'id': UUID3,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': None,
                     'size': 19,
                     'checksum': None}
    db_api.image_create(self.context, extra_fixture)
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    cmd = [{
        'command': 'image_get_all',
        'kwargs': {'marker': UUID3, 'sort_key': ['name'],
                   'sort_dir': ['desc']},
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    self.assertEqual(http.OK, res.status_int)
    images = jsonutils.loads(res.body)[0]
    # Descending from a NULL-name marker: nothing sorts after it.
    self.assertEqual(0, len(images))
def test_get_index_marker_and_disk_format_asc(self):
    """Test marker and null disk format ascending

    Tests that the registry API returns 200 when a marker and a null
    disk_format are combined, sorted in ascending order.
    """
    UUID3 = _gen_uuid()
    extra_fixture = {'id': UUID3,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': None,
                     'container_format': 'ovf',
                     'name': 'Fake image',
                     'size': 19,
                     'checksum': None}
    db_api.image_create(self.context, extra_fixture)
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    cmd = [{
        'command': 'image_get_all',
        'kwargs': {'marker': UUID3, 'sort_key': ['disk_format'],
                   'sort_dir': ['asc']},
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    self.assertEqual(http.OK, res.status_int)
    images = jsonutils.loads(res.body)[0]
    # The NULL disk_format marker sorts before both fixture images.
    self.assertEqual(2, len(images))
def test_get_index_marker_and_disk_format_desc(self):
    """Test marker and null disk format descending

    Tests that the registry API returns 200 when a marker and a null
    disk_format are combined, sorted in descending order.
    """
    UUID3 = _gen_uuid()
    extra_fixture = {'id': UUID3,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': None,
                     'container_format': 'ovf',
                     'name': 'Fake image',
                     'size': 19,
                     'checksum': None}
    db_api.image_create(self.context, extra_fixture)
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    cmd = [{
        'command': 'image_get_all',
        'kwargs': {'marker': UUID3, 'sort_key': ['disk_format'],
                   'sort_dir': ['desc']},
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    self.assertEqual(http.OK, res.status_int)
    images = jsonutils.loads(res.body)[0]
    # Descending from a NULL disk_format marker: nothing sorts after it.
    self.assertEqual(0, len(images))
def test_get_index_marker_and_container_format_asc(self):
    """Test marker and null container format ascending

    Tests that the registry API returns 200 when a marker and a null
    container_format are combined, sorted in ascending order.
    """
    UUID3 = _gen_uuid()
    extra_fixture = {'id': UUID3,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': None,
                     'name': 'Fake image',
                     'size': 19,
                     'checksum': None}
    db_api.image_create(self.context, extra_fixture)
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    cmd = [{
        'command': 'image_get_all',
        'kwargs': {'marker': UUID3, 'sort_key': ['container_format'],
                   'sort_dir': ['asc']},
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    self.assertEqual(http.OK, res.status_int)
    images = jsonutils.loads(res.body)[0]
    # The NULL container_format marker sorts before both fixture images.
    self.assertEqual(2, len(images))
def test_get_index_marker_and_container_format_desc(self):
    """Test marker and null container format descending

    Tests that the registry API returns 200 when a marker and a null
    container_format are combined, sorted in descending order.
    """
    UUID3 = _gen_uuid()
    extra_fixture = {'id': UUID3,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': None,
                     'name': 'Fake image',
                     'size': 19,
                     'checksum': None}
    db_api.image_create(self.context, extra_fixture)
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    cmd = [{
        'command': 'image_get_all',
        'kwargs': {'marker': UUID3, 'sort_key': ['container_format'],
                   'sort_dir': ['desc']},
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    self.assertEqual(http.OK, res.status_int)
    images = jsonutils.loads(res.body)[0]
    # Descending from a NULL container_format marker: nothing follows.
    self.assertEqual(0, len(images))
def test_get_index_unknown_marker(self):
    """Tests the registry API returns a NotFound with unknown marker."""
    payload = [{'command': 'image_get_all',
                'kwargs': {'marker': _gen_uuid()}}]
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    req.body = jsonutils.dump_as_bytes(payload)
    res = req.get_response(self.api)
    result = jsonutils.loads(res.body)[0]
    self.assertIn("_error", result)
    self.assertIn("NotFound", result["_error"]["cls"])
def test_get_index_limit(self):
    """Tests that the registry API returns list of public images.

    Must conform to a limit query param.
    """
    # uuid4_time > uuid3_time, so UUID4 is the newest image.
    uuid3_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
    uuid4_time = uuid3_time + datetime.timedelta(seconds=5)
    UUID3 = _gen_uuid()
    extra_fixture = {'id': UUID3,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'new name! #123',
                     'size': 19,
                     'checksum': None,
                     'created_at': uuid3_time,
                     'updated_at': uuid3_time}
    db_api.image_create(self.context, extra_fixture)
    UUID4 = _gen_uuid()
    extra_fixture = {'id': UUID4,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'new name! #123',
                     'size': 20,
                     'checksum': None,
                     'created_at': uuid4_time,
                     'updated_at': uuid4_time}
    db_api.image_create(self.context, extra_fixture)
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    cmd = [{
        'command': 'image_get_all',
        'kwargs': {'limit': 1},
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    images = jsonutils.loads(res.body)[0]
    self.assertEqual(http.OK, res.status_int)
    # limit=1 with the default created_at-desc sort returns only UUID4.
    self._compare_images_and_uuids([UUID4], images)
def test_get_index_limit_marker(self):
    """Tests that the registry API returns list of public images.

    Must conform to limit and marker query params.
    """
    uuid3_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
    uuid4_time = uuid3_time + datetime.timedelta(seconds=5)
    UUID3 = _gen_uuid()
    extra_fixture = {'id': UUID3,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'new name! #123',
                     'size': 19,
                     'checksum': None,
                     'created_at': uuid3_time,
                     'updated_at': uuid3_time}
    db_api.image_create(self.context, extra_fixture)
    extra_fixture = {'id': _gen_uuid(),
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'new name! #123',
                     'size': 20,
                     'checksum': None,
                     'created_at': uuid4_time,
                     'updated_at': uuid4_time}
    db_api.image_create(self.context, extra_fixture)
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    cmd = [{
        'command': 'image_get_all',
        'kwargs': {'marker': UUID3, 'limit': 1},
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    res_dict = jsonutils.loads(res.body)[0]
    self.assertEqual(http.OK, res.status_int)
    images = res_dict
    # Page starts just after UUID3; the next-newest image is UUID2.
    self._compare_images_and_uuids([UUID2], images)
def test_get_index_filter_name(self):
    """Tests that the registry API returns list of public images.

    Uses a specific name. This is really a sanity check; filtering is
    tested more in-depth using /images/detail.
    """
    # Two extra images share the filtered name but differ in size.
    for size in (19, 20):
        db_api.image_create(self.context,
                            {'id': _gen_uuid(),
                             'status': 'active',
                             'visibility': 'public',
                             'disk_format': 'vhd',
                             'container_format': 'ovf',
                             'name': 'new name! #123',
                             'size': size,
                             'checksum': None})
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    req.body = jsonutils.dump_as_bytes([{
        'command': 'image_get_all',
        'kwargs': {'filters': {'name': 'new name! #123'}},
    }])
    res = req.get_response(self.api)
    images = jsonutils.loads(res.body)[0]
    self.assertEqual(http.OK, res.status_int)
    self.assertEqual(2, len(images))
    for image in images:
        self.assertEqual('new name! #123', image['name'])
def test_get_index_filter_on_user_defined_properties(self):
    """Tests that the registry API returns list of public images.

    Uses filters on specific user-defined properties.
    """
    properties = {'distro': 'ubuntu', 'arch': 'i386', 'type': 'kernel'}
    extra_id = _gen_uuid()
    extra_fixture = {'id': extra_id,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'image-extra-1',
                     'size': 19, 'properties': properties,
                     'checksum': None}
    db_api.image_create(self.context, extra_fixture)

    req = webob.Request.blank('/rpc')
    req.method = "POST"

    def _filtered_images(filters):
        # Helper: run one image_get_all RPC with *filters* and return
        # the resulting image list (asserting a 200 on the way).
        cmd = [{
            'command': 'image_get_all',
            'kwargs': {'filters': filters},
        }]
        req.body = jsonutils.dump_as_bytes(cmd)
        res = req.get_response(self.api)
        self.assertEqual(http.OK, res.status_int)
        return jsonutils.loads(res.body)[0]

    # testing with a common property.
    images = _filtered_images({'type': 'kernel'})
    self.assertEqual(2, len(images))
    self.assertEqual(extra_id, images[0]['id'])
    self.assertEqual(UUID1, images[1]['id'])
    # testing with a non-existent value for a common property.
    # (the original issued this identical request twice back to back;
    # the duplicated block has been removed)
    self.assertEqual(0, len(_filtered_images({'type': 'random'})))
    # testing with a non-existent property.
    self.assertEqual(0, len(_filtered_images({'poo': 'random'})))
    # testing with multiple existing properties.
    images = _filtered_images({'type': 'kernel', 'distro': 'ubuntu'})
    self.assertEqual(1, len(images))
    self.assertEqual(extra_id, images[0]['id'])
    # testing with multiple existing properties but non-existent values.
    self.assertEqual(
        0, len(_filtered_images({'type': 'random', 'distro': 'random'})))
    # testing with multiple non-existing properties.
    self.assertEqual(
        0, len(_filtered_images({'typo': 'random', 'poo': 'random'})))
    # testing with one existing property and the other non-existing.
    self.assertEqual(
        0, len(_filtered_images({'type': 'kernel', 'poo': 'random'})))
def test_get_index_sort_default_created_at_desc(self):
    """Tests that the registry API returns list of public images.

    Must conform to a default sort key/dir (created_at descending).
    """
    # created_at ordering: UUID3 > UUID4 > UUID5 > UUID2 > UUID1.
    uuid5_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
    uuid4_time = uuid5_time + datetime.timedelta(seconds=5)
    uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
    UUID3 = _gen_uuid()
    extra_fixture = {'id': UUID3,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'new name! #123',
                     'size': 19,
                     'checksum': None,
                     'created_at': uuid3_time,
                     'updated_at': uuid3_time}
    db_api.image_create(self.context, extra_fixture)
    UUID4 = _gen_uuid()
    extra_fixture = {'id': UUID4,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'new name! #123',
                     'size': 20,
                     'checksum': None,
                     'created_at': uuid4_time,
                     'updated_at': uuid4_time}
    db_api.image_create(self.context, extra_fixture)
    UUID5 = _gen_uuid()
    extra_fixture = {'id': UUID5,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'new name! #123',
                     'size': 20,
                     'checksum': None,
                     'created_at': uuid5_time,
                     'updated_at': uuid5_time}
    db_api.image_create(self.context, extra_fixture)
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    cmd = [{
        'command': 'image_get_all',
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    res_dict = jsonutils.loads(res.body)[0]
    self.assertEqual(http.OK, res.status_int)
    images = res_dict
    # (flaper87)registry's v1 forced is_public to True
    # when no value was specified. This is not
    # the default behaviour anymore.
    uuid_list = [UUID3, UUID4, UUID5, UUID2, UUID1]
    self._compare_images_and_uuids(uuid_list, images)
def test_get_index_sort_name_asc(self):
    """Tests that the registry API returns list of public images.

    Must be sorted alphabetically by name in ascending order.
    """
    UUID3 = _gen_uuid()
    extra_fixture = {'id': UUID3,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'asdf',
                     'size': 19,
                     'checksum': None}
    db_api.image_create(self.context, extra_fixture)
    UUID4 = _gen_uuid()
    extra_fixture = {'id': UUID4,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'xyz',
                     'size': 20,
                     'checksum': None}
    db_api.image_create(self.context, extra_fixture)
    UUID5 = _gen_uuid()
    extra_fixture = {'id': UUID5,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': None,
                     'size': 20,
                     'checksum': None}
    db_api.image_create(self.context, extra_fixture)
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    cmd = [{
        'command': 'image_get_all',
        'kwargs': {'sort_key': ['name'], 'sort_dir': ['asc']}
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    self.assertEqual(http.OK, res.status_int)
    res_dict = jsonutils.loads(res.body)[0]
    images = res_dict
    # NULL name first, then 'asdf' < 'fake image #1' < 'fake image #2'
    # < 'xyz'.
    uuid_list = [UUID5, UUID3, UUID1, UUID2, UUID4]
    self._compare_images_and_uuids(uuid_list, images)
def test_get_index_sort_status_desc(self):
    """Tests that the registry API returns list of public images.

    NOTE(review): despite the method name, the request below sorts by
    status in *ascending* order (``sort_dir: ['asc']``) and the expected
    uuid ordering is ascending ('active' rows before 'queued'); the
    original docstring claimed descending.
    """
    uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
    UUID3 = _gen_uuid()
    extra_fixture = {'id': UUID3,
                     'status': 'queued',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'asdf',
                     'size': 19,
                     'checksum': None}
    db_api.image_create(self.context, extra_fixture)
    UUID4 = _gen_uuid()
    extra_fixture = {'id': UUID4,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'xyz',
                     'size': 20,
                     'checksum': None,
                     'created_at': uuid4_time,
                     'updated_at': uuid4_time}
    db_api.image_create(self.context, extra_fixture)
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    cmd = [{
        'command': 'image_get_all',
        'kwargs': {'sort_key': ['status'], 'sort_dir': ['asc']}
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    self.assertEqual(http.OK, res.status_int)
    res_dict = jsonutils.loads(res.body)[0]
    images = res_dict
    # 'active' images first (newest first among ties), 'queued' last.
    uuid_list = [UUID1, UUID2, UUID4, UUID3]
    self._compare_images_and_uuids(uuid_list, images)
def test_get_index_sort_disk_format_asc(self):
    """Tests that the registry API returns list of public images.

    Must be sorted alphabetically by disk_format in ascending order.
    """
    # Future timestamp keeps UUID3's creation time distinct from the
    # pre-existing fixtures.
    uuid3_time = timeutils.utcnow() + datetime.timedelta(seconds=5)
    UUID3 = _gen_uuid()
    extra_fixture = {'id': UUID3,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'ami',
                     'container_format': 'ami',
                     'name': 'asdf',
                     'size': 19,
                     'checksum': None,
                     'created_at': uuid3_time,
                     'updated_at': uuid3_time}
    db_api.image_create(self.context, extra_fixture)
    UUID4 = _gen_uuid()
    extra_fixture = {'id': UUID4,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vdi',
                     'container_format': 'ovf',
                     'name': 'xyz',
                     'size': 20,
                     'checksum': None}
    db_api.image_create(self.context, extra_fixture)
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    cmd = [{
        'command': 'image_get_all',
        'kwargs': {'sort_key': ['disk_format'], 'sort_dir': ['asc']}
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    self.assertEqual(http.OK, res.status_int)
    res_dict = jsonutils.loads(res.body)[0]
    images = res_dict
    # Expected interleaving presumes the UUID1/UUID2 base fixtures'
    # disk_formats sort between 'ami' and 'vdi' -- defined elsewhere.
    uuid_list = [UUID1, UUID3, UUID4, UUID2]
    self._compare_images_and_uuids(uuid_list, images)
def test_get_index_sort_container_format_desc(self):
    """Tests that the registry API returns list of public images.

    Must be sorted alphabetically by container_format in descending order.
    """
    # Future timestamp keeps UUID3's creation time distinct from the
    # pre-existing fixtures.
    uuid3_time = timeutils.utcnow() + datetime.timedelta(seconds=5)
    UUID3 = _gen_uuid()
    extra_fixture = {'id': UUID3,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'ami',
                     'container_format': 'ami',
                     'name': 'asdf',
                     'size': 19,
                     'checksum': None,
                     'created_at': uuid3_time,
                     'updated_at': uuid3_time}
    db_api.image_create(self.context, extra_fixture)
    UUID4 = _gen_uuid()
    extra_fixture = {'id': UUID4,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'iso',
                     'container_format': 'bare',
                     'name': 'xyz',
                     'size': 20,
                     'checksum': None}
    db_api.image_create(self.context, extra_fixture)
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    cmd = [{
        'command': 'image_get_all',
        'kwargs': {'sort_key': ['container_format'],
                   'sort_dir': ['desc']}
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    self.assertEqual(http.OK, res.status_int)
    res_dict = jsonutils.loads(res.body)[0]
    images = res_dict
    uuid_list = [UUID2, UUID4, UUID3, UUID1]
    self._compare_images_and_uuids(uuid_list, images)
def test_get_index_sort_size_asc(self):
    """Tests that the registry API returns list of public images.

    Must be sorted by size in ascending order.
    """
    UUID3 = _gen_uuid()
    # Largest fixture (size 100); should be returned last.
    extra_fixture = {'id': UUID3,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'ami',
                     'container_format': 'ami',
                     'name': 'asdf',
                     'size': 100,
                     'checksum': None}
    db_api.image_create(self.context, extra_fixture)
    UUID4 = _gen_uuid()
    # Smallest fixture (size 2); should be returned first.
    extra_fixture = {'id': UUID4,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'iso',
                     'container_format': 'bare',
                     'name': 'xyz',
                     'size': 2,
                     'checksum': None}
    db_api.image_create(self.context, extra_fixture)
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    cmd = [{
        'command': 'image_get_all',
        'kwargs': {'sort_key': ['size'],
                   'sort_dir': ['asc']}
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    self.assertEqual(http.OK, res.status_int)
    res_dict = jsonutils.loads(res.body)[0]
    images = res_dict
    uuid_list = [UUID4, UUID1, UUID2, UUID3]
    self._compare_images_and_uuids(uuid_list, images)
def test_get_index_sort_created_at_asc(self):
    """Tests that the registry API returns list of public images.

    Must be sorted by created_at in ascending order.
    """
    # UUID4 is created 10s in the future, UUID3 5s after that, so the
    # new fixtures sort after the base fixtures and UUID4 before UUID3.
    uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
    uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
    UUID3 = _gen_uuid()
    extra_fixture = {'id': UUID3,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'new name! #123',
                     'size': 19,
                     'checksum': None,
                     'created_at': uuid3_time,
                     'updated_at': uuid3_time}
    db_api.image_create(self.context, extra_fixture)
    UUID4 = _gen_uuid()
    extra_fixture = {'id': UUID4,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'new name! #123',
                     'size': 20,
                     'checksum': None,
                     'created_at': uuid4_time,
                     'updated_at': uuid4_time}
    db_api.image_create(self.context, extra_fixture)
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    cmd = [{
        'command': 'image_get_all',
        'kwargs': {'sort_key': ['created_at'],
                   'sort_dir': ['asc']}
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    self.assertEqual(http.OK, res.status_int)
    res_dict = jsonutils.loads(res.body)[0]
    images = res_dict
    uuid_list = [UUID1, UUID2, UUID4, UUID3]
    self._compare_images_and_uuids(uuid_list, images)
def test_get_index_sort_updated_at_desc(self):
    """Tests that the registry API returns list of public images.

    Must be sorted by updated_at in descending order.
    """
    # UUID3's updated_at is 5s later than UUID4's, so descending order
    # yields UUID3 first, then UUID4, then the base fixtures.
    uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
    uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
    UUID3 = _gen_uuid()
    # created_at None: only updated_at drives the expected ordering.
    extra_fixture = {'id': UUID3,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'new name! #123',
                     'size': 19,
                     'checksum': None,
                     'created_at': None,
                     'updated_at': uuid3_time}
    db_api.image_create(self.context, extra_fixture)
    UUID4 = _gen_uuid()
    extra_fixture = {'id': UUID4,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'new name! #123',
                     'size': 20,
                     'checksum': None,
                     'created_at': None,
                     'updated_at': uuid4_time}
    db_api.image_create(self.context, extra_fixture)
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    cmd = [{
        'command': 'image_get_all',
        'kwargs': {'sort_key': ['updated_at'],
                   'sort_dir': ['desc']}
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    self.assertEqual(http.OK, res.status_int)
    res_dict = jsonutils.loads(res.body)[0]
    images = res_dict
    uuid_list = [UUID3, UUID4, UUID2, UUID1]
    self._compare_images_and_uuids(uuid_list, images)
def test_get_index_sort_multiple_keys_one_sort_dir(self):
    """
    Tests that the registry API returns list of
    public images sorted by name-size and size-name with ascending
    sort direction.
    """
    uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
    uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
    UUID3 = _gen_uuid()
    # UUID3 and UUID5 share name 'asdf' but differ in size (19 vs 20),
    # exercising the secondary sort key.
    extra_fixture = {'id': UUID3,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'asdf',
                     'size': 19,
                     'checksum': None,
                     'created_at': None,
                     'updated_at': uuid3_time}
    db_api.image_create(self.context, extra_fixture)
    UUID4 = _gen_uuid()
    extra_fixture = {'id': UUID4,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'xyz',
                     'size': 20,
                     'checksum': None,
                     'created_at': None,
                     'updated_at': uuid4_time}
    db_api.image_create(self.context, extra_fixture)
    UUID5 = _gen_uuid()
    extra_fixture = {'id': UUID5,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'asdf',
                     'size': 20,
                     'checksum': None,
                     'created_at': None,
                     'updated_at': uuid4_time}
    db_api.image_create(self.context, extra_fixture)
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    # A single 'asc' direction applies to both sort keys.
    cmd = [{
        'command': 'image_get_all',
        'kwargs': {'sort_key': ['name', 'size'],
                   'sort_dir': ['asc']}
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    self.assertEqual(http.OK, res.status_int)
    res_dict = jsonutils.loads(res.body)[0]
    images = res_dict
    uuid_list = [UUID3, UUID5, UUID1, UUID2, UUID4]
    self._compare_images_and_uuids(uuid_list, images)
    # Same request with the key priority swapped: size first, name second.
    cmd = [{
        'command': 'image_get_all',
        'kwargs': {'sort_key': ['size', 'name'],
                   'sort_dir': ['asc']}
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    self.assertEqual(http.OK, res.status_int)
    res_dict = jsonutils.loads(res.body)[0]
    images = res_dict
    uuid_list = [UUID1, UUID3, UUID2, UUID5, UUID4]
    self._compare_images_and_uuids(uuid_list, images)
def test_get_index_sort_multiple_keys_multiple_sort_dirs(self):
    """
    Tests that the registry API returns list of
    public images sorted by name-size and size-name
    with ascending and descending directions.
    """
    uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
    uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
    UUID3 = _gen_uuid()
    # UUID3 and UUID5 share name 'asdf' but differ in size (19 vs 20),
    # exercising the secondary sort key in each direction combination.
    extra_fixture = {'id': UUID3,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'asdf',
                     'size': 19,
                     'checksum': None,
                     'created_at': None,
                     'updated_at': uuid3_time}
    db_api.image_create(self.context, extra_fixture)
    UUID4 = _gen_uuid()
    extra_fixture = {'id': UUID4,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'xyz',
                     'size': 20,
                     'checksum': None,
                     'created_at': None,
                     'updated_at': uuid4_time}
    db_api.image_create(self.context, extra_fixture)
    UUID5 = _gen_uuid()
    extra_fixture = {'id': UUID5,
                     'status': 'active',
                     'visibility': 'public',
                     'disk_format': 'vhd',
                     'container_format': 'ovf',
                     'name': 'asdf',
                     'size': 20,
                     'checksum': None,
                     'created_at': None,
                     'updated_at': uuid4_time}
    db_api.image_create(self.context, extra_fixture)
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    # name descending, size ascending.
    cmd = [{
        'command': 'image_get_all',
        'kwargs': {'sort_key': ['name', 'size'],
                   'sort_dir': ['desc', 'asc']}
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    self.assertEqual(http.OK, res.status_int)
    res_dict = jsonutils.loads(res.body)[0]
    images = res_dict
    uuid_list = [UUID4, UUID2, UUID1, UUID3, UUID5]
    self._compare_images_and_uuids(uuid_list, images)
    # size descending, name ascending.
    cmd = [{
        'command': 'image_get_all',
        'kwargs': {'sort_key': ['size', 'name'],
                   'sort_dir': ['desc', 'asc']}
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    self.assertEqual(http.OK, res.status_int)
    res_dict = jsonutils.loads(res.body)[0]
    images = res_dict
    uuid_list = [UUID5, UUID4, UUID3, UUID2, UUID1]
    self._compare_images_and_uuids(uuid_list, images)
    # name ascending, size descending.
    cmd = [{
        'command': 'image_get_all',
        'kwargs': {'sort_key': ['name', 'size'],
                   'sort_dir': ['asc', 'desc']}
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    self.assertEqual(http.OK, res.status_int)
    res_dict = jsonutils.loads(res.body)[0]
    images = res_dict
    uuid_list = [UUID5, UUID3, UUID1, UUID2, UUID4]
    self._compare_images_and_uuids(uuid_list, images)
    # size ascending, name descending.
    cmd = [{
        'command': 'image_get_all',
        'kwargs': {'sort_key': ['size', 'name'],
                   'sort_dir': ['asc', 'desc']}
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    self.assertEqual(http.OK, res.status_int)
    res_dict = jsonutils.loads(res.body)[0]
    images = res_dict
    uuid_list = [UUID1, UUID2, UUID3, UUID4, UUID5]
    self._compare_images_and_uuids(uuid_list, images)
def test_create_image(self):
    """The registry RPC endpoint creates an image from the given values."""
    values = {'name': 'fake public image',
              'status': 'active',
              'visibility': 'public',
              'disk_format': 'vhd',
              'container_format': 'ovf'}
    request = webob.Request.blank('/rpc')
    request.method = "POST"
    payload = [{'command': 'image_create', 'kwargs': {'values': values}}]
    request.body = jsonutils.dump_as_bytes(payload)
    response = request.get_response(self.api)
    self.assertEqual(http.OK, response.status_int)
    created = jsonutils.loads(response.body)[0]
    # Every submitted field must come back unchanged.
    for key, value in six.iteritems(values):
        self.assertEqual(value, created[key])
    # Test status was updated properly
    self.assertEqual('active', created['status'])
def test_create_image_with_min_disk(self):
    """Image creation persists an explicit min_disk value."""
    values = {'name': 'fake public image',
              'visibility': 'public',
              'status': 'active',
              'min_disk': 5,
              'disk_format': 'vhd',
              'container_format': 'ovf'}
    request = webob.Request.blank('/rpc')
    request.method = "POST"
    payload = [{'command': 'image_create', 'kwargs': {'values': values}}]
    request.body = jsonutils.dump_as_bytes(payload)
    response = request.get_response(self.api)
    self.assertEqual(http.OK, response.status_int)
    created = jsonutils.loads(response.body)[0]
    self.assertEqual(values['min_disk'], created['min_disk'])
def test_create_image_with_min_ram(self):
    """Image creation persists an explicit min_ram value."""
    values = {'name': 'fake public image',
              'visibility': 'public',
              'status': 'active',
              'min_ram': 256,
              'disk_format': 'vhd',
              'container_format': 'ovf'}
    request = webob.Request.blank('/rpc')
    request.method = "POST"
    payload = [{'command': 'image_create', 'kwargs': {'values': values}}]
    request.body = jsonutils.dump_as_bytes(payload)
    response = request.get_response(self.api)
    self.assertEqual(http.OK, response.status_int)
    created = jsonutils.loads(response.body)[0]
    self.assertEqual(values['min_ram'], created['min_ram'])
def test_create_image_with_min_ram_default(self):
    """min_ram defaults to 0 when not supplied at creation time."""
    values = {'name': 'fake public image',
              'status': 'active',
              'visibility': 'public',
              'disk_format': 'vhd',
              'container_format': 'ovf'}
    request = webob.Request.blank('/rpc')
    request.method = "POST"
    payload = [{'command': 'image_create', 'kwargs': {'values': values}}]
    request.body = jsonutils.dump_as_bytes(payload)
    response = request.get_response(self.api)
    self.assertEqual(http.OK, response.status_int)
    created = jsonutils.loads(response.body)[0]
    self.assertEqual(0, created['min_ram'])
def test_create_image_with_min_disk_default(self):
    """min_disk defaults to 0 when not supplied at creation time."""
    values = {'name': 'fake public image',
              'status': 'active',
              'visibility': 'public',
              'disk_format': 'vhd',
              'container_format': 'ovf'}
    request = webob.Request.blank('/rpc')
    request.method = "POST"
    payload = [{'command': 'image_create', 'kwargs': {'values': values}}]
    request.body = jsonutils.dump_as_bytes(payload)
    response = request.get_response(self.api)
    self.assertEqual(http.OK, response.status_int)
    created = jsonutils.loads(response.body)[0]
    self.assertEqual(0, created['min_disk'])
def test_update_image(self):
    """Tests that the registry API updates the image"""
    fixture = {'name': 'fake public image #2',
               'min_disk': 5,
               'min_ram': 256,
               'disk_format': 'raw'}
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    cmd = [{
        'command': 'image_update',
        'kwargs': {'values': fixture,
                   'image_id': UUID2}
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    self.assertEqual(http.OK, res.status_int)
    res_dict = jsonutils.loads(res.body)[0]
    # An update must advance updated_at past the creation timestamp.
    self.assertNotEqual(res_dict['created_at'],
                        res_dict['updated_at'])
    # Every updated field must be reflected in the returned record.
    for k, v in six.iteritems(fixture):
        self.assertEqual(v, res_dict[k])
def _send_request(self, command, kwargs, method):
    """Issue one RPC command and return (status_int, result dict)."""
    request = webob.Request.blank('/rpc')
    request.method = method
    request.body = jsonutils.dump_as_bytes([{'command': command,
                                             'kwargs': kwargs}])
    response = request.get_response(self.api)
    return response.status_int, jsonutils.loads(response.body)[0]
def _expect_fail(self, command, kwargs, error_cls, method='POST'):
    """Send an RPC command expected to fail and assert its error class.

    Returns the result dict (containing the '_error' entry) so callers
    can make further assertions.
    """
    # on any exception status_int is always 200, so have to check _error
    # dict
    code, res_dict = self._send_request(command, kwargs, method)
    self.assertIn('_error', res_dict)
    self.assertEqual(error_cls, res_dict['_error']['cls'])
    return res_dict
def _expect_ok(self, command, kwargs, method, expected_status=http.OK):
    """Send an RPC command expected to succeed and return its result.

    Args:
        command: RPC command name understood by the /rpc endpoint.
        kwargs: keyword arguments forwarded with the command.
        method: HTTP method used for the request.
        expected_status: HTTP status the response must carry.

    Returns:
        The decoded result dict of the RPC call.
    """
    # BUG FIX: the original called self._send_request(command, kwargs),
    # dropping the required `method` argument -- _send_request takes
    # three positional arguments, so every call raised TypeError.
    code, res_dict = self._send_request(command, kwargs, method)
    self.assertEqual(expected_status, code)
    return res_dict
def test_create_image_bad_name(self):
    """Creation fails when the image name contains invalid characters."""
    bad_values = {'name': u'A bad name \U0001fff2', 'status': 'queued'}
    self._expect_fail('image_create',
                      {'values': bad_values},
                      'glance.common.exception.Invalid')
def test_create_image_bad_location(self):
    """Creation fails when a location URL contains invalid characters."""
    bad_location = {'url': u'file:///tmp/tests/\U0001fee2',
                    'metadata': {},
                    'status': 'active'}
    values = {'status': 'queued', 'locations': [bad_location]}
    self._expect_fail('image_create',
                      {'values': values},
                      'glance.common.exception.Invalid')
def test_create_image_bad_property(self):
    """Creation fails for invalid property values and invalid keys."""
    invalid_cls = 'glance.common.exception.Invalid'
    # Invalid value under a valid key.
    values = {'status': 'queued',
              'properties': {'ok key': u' bad value \U0001f2aa'}}
    self._expect_fail('image_create', {'values': values}, invalid_cls)
    # Invalid key with a valid value.
    values = {'status': 'queued',
              'properties': {u'invalid key \U00010020': 'ok value'}}
    self._expect_fail('image_create', {'values': values}, invalid_cls)
def test_update_image_bad_tag(self):
    """Tag creation fails for a tag containing invalid characters."""
    tag_kwargs = {'value': u'\U0001fff2', 'image_id': UUID2}
    self._expect_fail('image_tag_create', tag_kwargs,
                      'glance.common.exception.Invalid')
def test_update_image_bad_name(self):
    """Update fails when the new name contains invalid characters."""
    new_values = {'name': u'A bad name \U0001fff2'}
    self._expect_fail('image_update',
                      {'values': new_values, 'image_id': UUID1},
                      'glance.common.exception.Invalid')
def test_update_image_bad_location(self):
    """Update fails when a location URL contains invalid characters."""
    bad_location = {'url': u'file:///tmp/glance-tests/\U0001fee2',
                    'metadata': {},
                    'status': 'active'}
    self._expect_fail('image_update',
                      {'values': {'locations': [bad_location]},
                       'image_id': UUID1},
                      'glance.common.exception.Invalid')
def test_update_bad_property(self):
    """Update fails for invalid property values and invalid keys."""
    invalid_cls = 'glance.common.exception.Invalid'
    # Invalid value under a valid key.
    values = {'properties': {'ok key': u' bad value \U0001f2aa'}}
    self._expect_fail('image_update',
                      {'values': values, 'image_id': UUID2}, invalid_cls)
    # Invalid key with a valid value.
    values = {'properties': {u'invalid key \U00010020': 'ok value'}}
    self._expect_fail('image_update',
                      {'values': values, 'image_id': UUID2}, invalid_cls)
def test_delete_image(self):
    """Tests that the registry API deletes the image"""
    # Grab the original number of (non-deleted) images.
    req = webob.Request.blank('/rpc')
    req.method = "POST"
    cmd = [{
        'command': 'image_get_all',
        'kwargs': {'filters': {'deleted': False}}
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    res_dict = jsonutils.loads(res.body)[0]
    self.assertEqual(http.OK, res.status_int)
    orig_num_images = len(res_dict)
    # Delete image #2 (reuses the same request object with a new body).
    cmd = [{
        'command': 'image_destroy',
        'kwargs': {'image_id': UUID2}
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    self.assertEqual(http.OK, res.status_int)
    # Verify one less image remains visible.
    cmd = [{
        'command': 'image_get_all',
        'kwargs': {'filters': {'deleted': False}}
    }]
    req.body = jsonutils.dump_as_bytes(cmd)
    res = req.get_response(self.api)
    res_dict = jsonutils.loads(res.body)[0]
    self.assertEqual(http.OK, res.status_int)
    new_num_images = len(res_dict)
    self.assertEqual(new_num_images, orig_num_images - 1)
def test_delete_image_response(self):
    """Deleting an image returns that image's metadata."""
    target = self.FIXTURES[0]
    request = webob.Request.blank('/rpc')
    request.method = 'POST'
    payload = [{'command': 'image_destroy',
                'kwargs': {'image_id': target['id']}}]
    request.body = jsonutils.dump_as_bytes(payload)
    response = request.get_response(self.api)
    self.assertEqual(http.OK, response.status_int)
    deleted_image = jsonutils.loads(response.body)[0]
    self.assertEqual(target['id'], deleted_image['id'])
    # The returned record must be flagged and timestamped as deleted.
    self.assertTrue(deleted_image['deleted'])
    self.assertTrue(deleted_image['deleted_at'])
def test_get_image_members(self):
    """Tests members listing for existing images."""
    request = webob.Request.blank('/rpc')
    request.method = 'POST'
    payload = [{'command': 'image_member_find',
                'kwargs': {'image_id': UUID2}}]
    request.body = jsonutils.dump_as_bytes(payload)
    response = request.get_response(self.api)
    self.assertEqual(http.OK, response.status_int)
    members = jsonutils.loads(response.body)[0]
    # UUID2 has no members assigned in the fixtures.
    self.assertEqual(0, len(members))
| |
# Build a chr16-only binary methylation-position matrix from per-cell
# RRBS files, aligning all cells on "position".
import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)  # show up to 50 columns when printing (not rows)
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
# Collect the per-cell binary-position files for each B-cell population.
normalB = glob.glob("binary_position_RRBS_normal_B_cell*")
mcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27mcell*")
pcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27pcell*")
cd19cell = glob.glob("binary_position_RRBS_NormalBCD19pcell*")
print(len(normalB))
print(len(mcell))
print(len(pcell))
print(len(cd19cell))
totalfiles = normalB + mcell + pcell + cd19cell
print(len(totalfiles))
df_list = []
for file in totalfiles:
    df = pd.read_csv(file)
    df = df.drop("Unnamed: 0", axis=1)  # drop the CSV's saved index column
    # First 5 characters of the position string are taken as the chromosome
    # label; assumes positions are formatted like "chr16..." -- TODO confirm
    # this cannot collide with other prefixes in the data.
    df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
    df = df[df["chromosome"] == "chr16"]  # keep chromosome 16 rows only
    df = df.drop("chromosome", axis=1)
    df_list.append(df)
print(len(df_list))
# Outer-join all files on "position", one column set per file.
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
# NOTE(review): reset_index() on an index named "position" restores a
# "position" column, not "index" -- verify this drop does not KeyError.
total_matrix = total_matrix.drop("index", axis=1)
len(total_matrix.columns)  # no-op in a script; useful only interactively
total_matrix.columns = ["RRBS_normal_B_cell_A1_24_TAAGGCGA.ACAACC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACCGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ACGTGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.AGGATG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATAGCG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.ATCGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CAAGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CATGAC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CGGTAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTATTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.CTCAGC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GACACG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GCTGCC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GGCATC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTGAGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.GTTGAG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TAGCGG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TATCTC",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TCTCTG",
"RRBS_normal_B_cell_A1_24_TAAGGCGA.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACAACC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACCGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ACTCAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.ATAGCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CAAGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CATGAC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CCTTCG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CGGTAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTATTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.CTCAGC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GACACG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GCATTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GGCATC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTGAGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.GTTGAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TAGCGG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TATCTC",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TCTCTG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGACAG",
"RRBS_normal_B_cell_B1_24_CGTACTAG.TGCTGC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACAACC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACCGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACGTGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ACTCAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.AGGATG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATAGCG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.ATCGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CAAGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CATGAC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CGGTAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.CTATTG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GACACG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCATTC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GCTGCC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GGCATC",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTGAGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTTGAG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TAGCGG",
"RRBS_normal_B_cell_C1_24_AGGCAGAA.TATCTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACAACC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACCGCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACGTGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ACTCAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.AGGATG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.ATCGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CAAGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CATGAC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CCTTCG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CGGTAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTATTG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.CTCAGC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GACACG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCATTC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GCTGCC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GGCATC",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.GTTGAG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TAGCGG",
"RRBS_normal_B_cell_D1_24_TCCTGAGC.TATCTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACAACC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACCGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACGTGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ACTCAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.AGGATG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATAGCG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.ATCGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CAAGAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CATGAC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CGGTAG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTATTG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.CTCAGC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GACACG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCATTC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GCTGCC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GGCATC",
"RRBS_normal_B_cell_G1_22_GGACTCCT.GTGAGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TAGCGG",
"RRBS_normal_B_cell_G1_22_GGACTCCT.TATCTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACCGCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACGTGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ACTCAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.AGGATG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.ATCGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CAAGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CATGAC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CCTTCG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTATTG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.CTCAGC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCATTC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GCTGCC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GGCATC",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTGAGG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.GTTGAG",
"RRBS_normal_B_cell_H1_22_TAGGCATG.TCTCTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CATGAC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTATTG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GACACG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCATTC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCTGCC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GGCATC",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACCGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GACACG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GCATTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTTGAG",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GTGAGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CAAGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GACACG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTTGAG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACAACC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.AGGATG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATCGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CATGAC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CGGTAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTATTG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTCAGC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCATTC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCTGCC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GGCATC",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTTGAG",
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACAACC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACCGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACGTGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACTCAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.AGGATG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATAGCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATCGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CAAGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CATGAC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CCTTCG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CGGTAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTATTG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTCAGC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GACACG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCATTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCTGCC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GGCATC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTGAGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTTGAG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TAGCGG",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TATCTC",
"RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TCTCTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACCGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACTCAC",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ATAGCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CAAGAG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CCTTCG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CTATTG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GACACG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GTGAGG",
"RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACAACC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACCGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACGTGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACTCAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.AGGATG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATAGCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATCGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CATGAC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CCTTCG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CGGTAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTATTG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTCAGC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GACACG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCATTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCTGCC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GGCATC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTGAGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTTGAG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TAGCGG",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TATCTC",
"RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TCTCTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACAACC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACCGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACGTGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACTCAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.AGGATG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATAGCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATCGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CAAGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CATGAC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CCTTCG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CGGTAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTATTG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTCAGC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GACACG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCATTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCTGCC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GGCATC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GTTGAG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TAGCGG",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TATCTC",
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.TCTCTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACAACC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACCGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACGTGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ACTCAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.AGGATG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATAGCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.ATCGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CATGAC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CCTTCG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CGGTAG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTATTG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.CTCAGC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GACACG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCATTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GCTGCC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GGCATC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.GTGAGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TAGCGG",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TATCTC",
"RRBS_NormalBCD19pcell23_44_CGTACTAG.TCTCTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACAACC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACCGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACGTGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACTCAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.AGGATG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATAGCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATCGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CAAGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CATGAC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CCTTCG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CGGTAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTATTG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTCAGC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GACACG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCATTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCTGCC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GGCATC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTGAGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTTGAG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TAGCGG",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TATCTC",
"RRBS_NormalBCD19pcell45_66_AGGCAGAA.TCTCTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACAACC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACCGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACGTGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACTCAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.AGGATG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATAGCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATCGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CAAGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CATGAC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CCTTCG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CGGTAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTATTG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTCAGC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCATTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCTGCC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GGCATC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTGAGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTTGAG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TAGCGG",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TATCTC",
"RRBS_NormalBCD19pcell67_88_TCCTGAGC.TCTCTG"]
# total_matrix: rows are methylation positions, columns are the single-cell
# sample names listed above — TODO confirm orientation against upstream code.
print(total_matrix.shape)
# Encode each value as an integer methylation state; missing data becomes "?"
# (the unknown-character symbol in PHYLIP alignments).
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
# Collapse each column (sample) into a single character string of states.
total_matrix = total_matrix.astype(str).apply(''.join)
# Build "sample_name <states>" lines (space-separated) for the .phy output.
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))
tott.to_csv("normal_chrom16.phy", header=None, index=None)
print(tott.shape)
| |
# Copyright (c) 2021, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See LICENSE
from typing import Dict, List
import frappe
import frappe.utils
from frappe import _
from frappe.website.website_generator import WebsiteGenerator
from frappe.utils.verified_command import get_signed_params, verify_request
from frappe.email.doctype.email_group.email_group import add_subscribers
from .exceptions import NewsletterAlreadySentError, NoRecipientFoundError, NewsletterNotSavedError
class Newsletter(WebsiteGenerator):
	"""Newsletter document: composes an email and queues it to the members
	of one or more Email Groups."""

	def validate(self):
		self.route = f"newsletters/{self.name}"
		self.validate_sender_address()
		self.validate_recipient_address()
		self.validate_publishing()

	@property
	def newsletter_recipients(self) -> List[str]:
		# Cached on the instance so repeated access does not re-query the
		# Email Group members.
		if getattr(self, "_recipients", None) is None:
			self._recipients = self.get_recipients()
		return self._recipients

	@frappe.whitelist()
	def get_sending_status(self):
		"""Return {'sent': n, 'total': m} aggregated from the linked Email Queue."""
		count_by_status = frappe.get_all("Email Queue",
			filters={"reference_doctype": self.doctype, "reference_name": self.name},
			fields=["status", "count(name) as count"],
			group_by="status",
			order_by="status"
		)
		sent = 0
		total = 0
		for row in count_by_status:
			if row.status == "Sent":
				sent = row.count
			total += row.count
		return {'sent': sent, 'total': total}

	@frappe.whitelist()
	def send_test_email(self, email):
		"""Send the newsletter to a single test address (validated first)."""
		test_emails = frappe.utils.validate_email_address(email, throw=True)
		self.send_newsletter(emails=test_emails)
		frappe.msgprint(_("Test email sent to {0}").format(email), alert=True)

	@frappe.whitelist()
	def find_broken_links(self):
		"""Return all <a>/<img> URLs in the rendered message that fail a HEAD request."""
		from bs4 import BeautifulSoup
		import requests
		html = self.get_message()
		soup = BeautifulSoup(html, "html.parser")
		links = soup.find_all("a")
		images = soup.find_all("img")
		broken_links = []
		for el in links + images:
			url = el.attrs.get("href") or el.attrs.get("src")
			try:
				response = requests.head(url, verify=False, timeout=5)
				if response.status_code >= 400:
					broken_links.append(url)
			except Exception:
				# Timeouts, DNS failures, malformed URLs etc. all count as
				# broken (previously a bare `except:` which also swallowed
				# KeyboardInterrupt/SystemExit).
				broken_links.append(url)
		return broken_links

	@frappe.whitelist()
	def send_emails(self):
		"""queue sending emails to recipients"""
		self.schedule_sending = False
		self.schedule_send = None
		self.queue_all()
		frappe.msgprint(_("Email queued to {0} recipients").format(self.total_recipients))

	def validate_send(self):
		"""Validate if Newsletter can be sent.
		"""
		self.validate_newsletter_status()
		self.validate_newsletter_recipients()

	def validate_newsletter_status(self):
		if self.email_sent:
			frappe.throw(_("Newsletter has already been sent"), exc=NewsletterAlreadySentError)
		if self.get("__islocal"):
			frappe.throw(_("Please save the Newsletter before sending"), exc=NewsletterNotSavedError)

	def validate_newsletter_recipients(self):
		if not self.newsletter_recipients:
			frappe.throw(_("Newsletter should have atleast one recipient"), exc=NoRecipientFoundError)
		self.validate_recipient_address()

	def validate_sender_address(self):
		"""Validate self.send_from is a valid email address or not.
		"""
		if self.sender_email:
			frappe.utils.validate_email_address(self.sender_email, throw=True)
			self.send_from = f"{self.sender_name} <{self.sender_email}>" if self.sender_name else self.sender_email

	def validate_recipient_address(self):
		"""Validate if self.newsletter_recipients are all valid email addresses or not.
		"""
		for recipient in self.newsletter_recipients:
			frappe.utils.validate_email_address(recipient, throw=True)

	def validate_publishing(self):
		if self.send_webview_link and not self.published:
			frappe.throw(_("Newsletter must be published to send webview link in email"))

	def get_linked_email_queue(self) -> List[str]:
		"""Get list of email queue linked to this newsletter.
		"""
		return frappe.get_all("Email Queue",
			filters={
				"reference_doctype": self.doctype,
				"reference_name": self.name,
			},
			pluck="name",
		)

	def get_success_recipients(self) -> List[str]:
		"""Recipients who have already received (or are queued to receive)
		the newsletter.
		"""
		return frappe.get_all("Email Queue Recipient",
			filters={
				"status": ("in", ["Not Sent", "Sending", "Sent"]),
				# BUGFIX: child rows link to their Email Queue via `parent`;
				# the old `parentfield` filter never matched any row, so every
				# recipient always looked pending and could be emailed twice
				# on a retry.
				"parent": ("in", self.get_linked_email_queue()),
			},
			pluck="recipient",
		)

	def get_pending_recipients(self) -> List[str]:
		"""Get list of pending recipients of the newsletter. These
		recipients may not have received the newsletter in the previous iteration.
		"""
		# Fetch once and use a set: the old comprehension re-ran the DB query
		# (and an O(n) list scan) for every single recipient.
		success = set(self.get_success_recipients())
		return [x for x in self.newsletter_recipients if x not in success]

	def queue_all(self):
		"""Queue Newsletter to all the recipients generated from the `Email Group` table
		"""
		self.validate()
		self.validate_send()

		recipients = self.get_pending_recipients()
		self.send_newsletter(emails=recipients)

		self.email_sent = True
		self.email_sent_at = frappe.utils.now()
		self.total_recipients = len(recipients)
		self.save()

	def get_newsletter_attachments(self) -> List[Dict[str, str]]:
		"""Get list of attachments on current Newsletter
		"""
		return [{"file_url": row.attachment} for row in self.attachments]

	def send_newsletter(self, emails: List[str]):
		"""Trigger email generation for `emails` and add it in Email Queue.
		"""
		attachments = self.get_newsletter_attachments()
		sender = self.send_from or frappe.utils.get_formatted_email(self.owner)
		args = self.as_dict()
		args["message"] = self.get_message()

		is_auto_commit_set = bool(frappe.db.auto_commit_on_many_writes)
		frappe.db.auto_commit_on_many_writes = not frappe.flags.in_test
		try:
			frappe.sendmail(
				subject=self.subject,
				sender=sender,
				recipients=emails,
				attachments=attachments,
				template="newsletter",
				add_unsubscribe_link=self.send_unsubscribe_link,
				unsubscribe_method="/unsubscribe",
				unsubscribe_params={"name": self.name},
				reference_doctype=self.doctype,
				reference_name=self.name,
				queue_separately=True,
				send_priority=0,
				args=args,
			)
		finally:
			# Restore the flag even if queueing raises, so a failure here
			# doesn't leak auto-commit behavior into unrelated writes.
			frappe.db.auto_commit_on_many_writes = is_auto_commit_set

	def get_message(self) -> str:
		"""Render the newsletter body (Rich Text / Markdown / HTML) to HTML."""
		message = self.message
		if self.content_type == "Markdown":
			message = frappe.utils.md_to_html(self.message_md)
		if self.content_type == "HTML":
			message = self.message_html
		return frappe.render_template(message, {"doc": self.as_dict()})

	def get_recipients(self) -> List[str]:
		"""Get recipients from Email Group"""
		emails = frappe.get_all(
			"Email Group Member",
			filters={"unsubscribed": 0, "email_group": ("in", self.get_email_groups())},
			pluck="email",
		)
		return list(set(emails))

	def get_email_groups(self) -> List[str]:
		# wondering why the 'or'? i can't figure out why both aren't equivalent - @gavin
		return [
			x.email_group for x in self.email_group
		] or frappe.get_all(
			"Newsletter Email Group",
			filters={"parent": self.name, "parenttype": "Newsletter"},
			pluck="email_group",
		)

	def get_attachments(self) -> List[Dict[str, str]]:
		"""List public File documents attached to this Newsletter."""
		return frappe.get_all(
			"File",
			fields=["name", "file_name", "file_url", "is_private"],
			filters={
				"attached_to_name": self.name,
				"attached_to_doctype": "Newsletter",
				"is_private": 0,
			},
		)
@frappe.whitelist(allow_guest=True)
def confirmed_unsubscribe(email, group):
	"""Mark *email* as unsubscribed from the Email Group *group*."""
	frappe.flags.ignore_permissions = True
	member = frappe.get_doc("Email Group Member", {"email": email, "email_group": group})
	# Guard clause: nothing to do when the member already opted out.
	if member.unsubscribed:
		return
	member.unsubscribed = 1
	member.save(ignore_permissions=True)
@frappe.whitelist(allow_guest=True)
def subscribe(email, email_group=_("Website")):
	"""Subscribe *email* to *email_group* by emailing a signed confirmation link."""
	# Link the recipient must visit to complete the subscription.
	api_endpoint = frappe.utils.get_url(
		"/api/method/frappe.email.doctype.newsletter.newsletter.confirm_subscription"
	)
	signed_params = get_signed_params({"email": email, "email_group": email_group})
	confirm_subscription_url = f"{api_endpoint}?{signed_params}"

	# Prefer the group's custom confirmation template when one is configured.
	email_confirmation_template = frappe.db.get_value(
		"Email Group", email_group, "confirmation_email_template"
	)

	if not email_confirmation_template:
		# Fall back to the built-in translatable confirmation message.
		email_subject = _("Confirm Your Email")
		translatable_content = (
			_("Thank you for your interest in subscribing to our updates"),
			_("Please verify your Email Address"),
			confirm_subscription_url,
			_("Click here to verify"),
		)
		content = """
		<p>{0}. {1}.</p>
		<p><a href="{2}">{3}</a></p>
		""".format(*translatable_content)
	else:
		email_template = frappe.get_doc("Email Template", email_confirmation_template)
		email_subject = email_template.subject
		content = frappe.render_template(
			email_template.response,
			{"email": email, "confirmation_url": confirm_subscription_url, "email_group": email_group},
		)

	frappe.sendmail(
		email,
		subject=email_subject,
		content=content,
		now=True,
	)
@frappe.whitelist(allow_guest=True)
def confirm_subscription(email, email_group=_("Website")):
	"""API endpoint to confirm email subscription.
	This endpoint is called when user clicks on the link sent to their mail.

	Silently does nothing when the request signature does not verify.
	"""
	# Reject links whose signed query params were tampered with.
	if not verify_request():
		return

	# Lazily create the Email Group if it doesn't exist yet.
	if not frappe.db.exists("Email Group", email_group):
		frappe.get_doc({"doctype": "Email Group", "title": email_group}).insert(
			ignore_permissions=True
		)

	frappe.flags.ignore_permissions = True

	add_subscribers(email_group, email)
	frappe.db.commit()

	frappe.respond_as_web_page(
		_("Confirmed"),
		_("{0} has been successfully added to the Email Group.").format(email),
		indicator_color="green",
	)
def get_list_context(context=None):
	# Website list-view settings for the /newsletters page: searchable,
	# no breadcrumbs, only published newsletters, custom row template.
	context.update({
		"show_search": True,
		"no_breadcrumbs": True,
		"title": _("Newsletters"),
		"filters": {"published": 1},
		"row_template": "email/doctype/newsletter/templates/newsletter_row.html",
	})
def send_scheduled_email():
	"""Send scheduled newsletter to the recipients."""
	due_newsletters = frappe.get_all(
		"Newsletter",
		filters={
			"schedule_send": ("<=", frappe.utils.now_datetime()),
			"email_sent": False,
			"schedule_sending": True,
		},
		ignore_ifnull=True,
		pluck="name",
	)

	for name in due_newsletters:
		try:
			frappe.get_doc("Newsletter", name).queue_all()
		except Exception:
			frappe.db.rollback()

			# Queueing failed: clear the sent flag so the newsletter is
			# retried on the next scheduler run, and log the traceback.
			frappe.db.set_value("Newsletter", name, "email_sent", 0)
			frappe.log_error(
				title="Send Newsletter",
				message=(
					f"Newsletter {name} failed to send"
					"\n\n"
					f"Traceback: {frappe.get_traceback()}"
				),
			)

	if not frappe.flags.in_test:
		frappe.db.commit()
| |
'''
Created on 2013-1-25
@author: desperedo
miniPascal Compiler Parser
'''
class Type(object):
    """Type node: a simple named type, or "Array [ low .. high ] Of <type>"."""
    def __init__(self):
        self.Type = None;       # type name (str), or element Type node for arrays
        self.Array = False;     # True when this is an array type
        self.ArrayLow = None;   # lower-bound Expression (arrays only)
        self.ArrayHigh = None;  # upper-bound Expression (arrays only)
    def Parse(self, Scanner):
        if Scanner.TokenExpected('Type'):
            # Simple named type: consume the token and record its name.
            self.Type = Scanner.NextToken()[1];
        elif Scanner.TokenExpected('Array'):
            # Array type: Array [ <expr> .. <expr> ] Of <type>
            self.Array = True;
            Scanner.NextToken();
            if not Scanner.TokenNeeded('['):
                raise SyntaxError('"[" expected');
            self.ArrayLow = Expression().Parse(Scanner);
            if not Scanner.TokenNeeded('..'):
                raise SyntaxError('".." expected');
            self.ArrayHigh = Expression().Parse(Scanner);
            if not Scanner.TokenNeeded(']'):
                raise SyntaxError('"]" expected');
            if not Scanner.TokenNeeded('Of'):
                raise SyntaxError('"Of" expected');
            # Element type; recursion allows nested arrays.
            self.Type = Type().Parse(Scanner);
        else:
            Token = Scanner.NextToken();
            raise SyntaxError('Type-name expected but "%s".' % (Token[1] or Token[0]));
        return self;
class Constant(object):
    """Constant declaration: "Name [: Type] = Value ;"."""
    def __init__(self):
        self.Name = ''
        self.Value = None
        self.Type = Type()
    def Parse(self, Scanner):
        self.Name = Scanner.NextToken()[1]
        # Optional explicit type annotation before '='.
        if Scanner.TokenExpected(':'):
            Scanner.NextToken()
            self.Type.Parse(Scanner)
        if not Scanner.TokenNeeded('='):
            raise SyntaxError('"=" expected.')
        # `is None` instead of `== None` (PEP 8 singleton comparison).
        if self.Type.Type is None:
            # No annotation: infer the constant's type from the literal token.
            self.Type.Type, self.Value = Scanner.NextToken()
        else:
            if self.Type.Array:
                raise SyntaxError('Arrays cannot be as constant.')
            # Renamed local (was `Type`) so it no longer shadows the Type class.
            TokenType, self.Value = Scanner.NextToken()
            if TokenType.lower() != self.Type.Type.lower():
                raise SyntaxError('Constant type inconsistent.')
        if not Scanner.TokenNeeded(';'):
            raise SyntaxError('";" expected.')
        return self
class Variable(object):
    """Variable (or formal-parameter) declaration: "a, b, c : <type>"."""
    def __init__(self):
        self.Names = [];              # identifiers declared with this shared type
        self.Array = False;           # NOTE(review): never written here; array-ness lives on self.Type
        self.Type = Type();
        self.ArrayLow = Expression();
        self.ArrayHigh = Expression();
    def Parse(self, Scanner, Parameter = False):
        # In parameter mode an optional leading 'Var' marks by-reference passing.
        if Parameter:
            self.Variable = False;
            if Scanner.TokenExpected('Var'):
                self.Variable = True;
                Scanner.NextToken();
        # Comma-separated identifier list, terminated by ':'.
        while Scanner.TokenExpected('Identifier'):
            self.Names.append(Scanner.NextToken()[1]);
            if not Scanner.TokenExpected(','):
                if not Scanner.TokenNeeded(':'):
                    raise SyntaxError('":" expected.');
                break;
            Scanner.NextToken();
        self.Type.Parse(Scanner);
        # Plain declarations end with ';'; parameter groups are delimited by the caller.
        if not Parameter and not Scanner.TokenNeeded(';'):
            raise SyntaxError('";" expected.');
        if Parameter and self.Type.Array and self.Variable:
            raise SyntaxError('Arrays cannot be as variable parameters.');
        return self;
class Function(object):
    """Function declaration: header, then either "forward" or declarations + body."""
    def __init__(self):
        self.Name = '';
        self.Forward = False;            # True for a forward declaration (no body)
        self.Parameters = [];            # Variable nodes, one per parameter group
        self.ReturnType = Type();
        self.Statements = Statements();  # body (unused when Forward)
        self.Declarations = Declarations();
    def Parse(self, Scanner):
        Scanner.NextToken();  # consume 'Function'
        Type, self.Name = Scanner.NextToken();  # NOTE(review): local shadows the Type class
        if Type != 'Identifier':
            raise SyntaxError('Identifier expected.');
        # Optional parenthesized parameter list: (a : T; Var b : U; ...)
        if Scanner.TokenExpected('('):
            Scanner.NextToken();
            if Scanner.TokensExpected(['Var', 'Identifier']):
                self.Parameters.append(Variable().Parse(Scanner, True));
                while Scanner.TokenExpected(';'):
                    Scanner.NextToken();
                    self.Parameters.append(Variable().Parse(Scanner, True));
            if not Scanner.TokenNeeded(')'):
                raise SyntaxError('")" expected.');
        if not Scanner.TokenNeeded(':'):
            raise SyntaxError('":" expected.');
        self.ReturnType.Parse(Scanner);
        if self.ReturnType.Array:
            raise SyntaxError('Arrays cannot be as return type.');
        if not Scanner.TokenNeeded(';'):
            raise SyntaxError('";" expected.');
        if Scanner.TokenExpected('Forward'):
            self.Forward = True;
            Scanner.NextToken();
        else:
            self.Declarations.Parse(Scanner);
            self.Statements.Parse(Scanner);
        # Both forms (forward and full body) are terminated by ';'.
        if not Scanner.TokenNeeded(';'):
            raise SyntaxError('";" expected.');
        return self;
class Procedure(object):
    """Procedure declaration: like Function, but without a return type."""
    def __init__(self):
        self.Name = '';
        self.Forward = False;            # True for a forward declaration (no body)
        self.Parameters = [];            # Variable nodes, one per parameter group
        self.Statements = Statements();  # body (unused when Forward)
        self.Declarations = Declarations();
    def Parse(self, Scanner):
        Scanner.NextToken();  # consume 'Procedure'
        Type, self.Name = Scanner.NextToken();  # NOTE(review): local shadows the Type class
        if Type != 'Identifier':
            raise SyntaxError('Identifier expected.');
        # Optional parenthesized parameter list.
        if Scanner.TokenExpected('('):
            Scanner.NextToken();
            if Scanner.TokensExpected(['Var', 'Identifier']):
                self.Parameters.append(Variable().Parse(Scanner, True));
                while Scanner.TokenExpected(';'):
                    Scanner.NextToken();
                    self.Parameters.append(Variable().Parse(Scanner, True));
            if not Scanner.TokenNeeded(')'):
                raise SyntaxError('")" expected.');
        if not Scanner.TokenNeeded(';'):
            raise SyntaxError('";" expected.');
        if Scanner.TokenExpected('Forward'):
            self.Forward = True;
            Scanner.NextToken();
        else:
            self.Declarations.Parse(Scanner);
            self.Statements.Parse(Scanner);
        # Both forms are terminated by ';'.
        if not Scanner.TokenNeeded(';'):
            raise SyntaxError('";" expected.');
        return self;
class Factor(object):
    """Smallest expression unit: literal, variable, call, array indexing,
    parenthesized expression, or a (+ / - / Not) prefixed factor."""
    def __init__(self):
        self.Type = None;    # kind tag or literal token type
        self.Value = None;   # literal value or nested parse node
        self.Prefix = None;  # '+', '-' or 'Not' when the factor is prefixed
    def Parse(self, Scanner):
        Type, Value = Scanner.PeekToken();
        if Type == '(':
            # Parenthesized sub-expression.
            Scanner.NextToken();
            self.Type = 'Expression';
            self.Value = Expression().Parse(Scanner);
            if not Scanner.TokenNeeded(')'):
                raise SyntaxError('")" expected.');
        elif Type == '+':
            Scanner.NextToken();
            self.Prefix = '+';
            self.Type = 'Factor';
            self.Value = Factor().Parse(Scanner);
        elif Type == '-':
            Scanner.NextToken();
            self.Prefix = '-';
            self.Type = 'Factor';
            self.Value = Factor().Parse(Scanner);
        elif Type == 'Not':
            Scanner.NextToken();
            self.Prefix = 'Not';
            self.Type = 'Factor';
            self.Value = Factor().Parse(Scanner);
        elif Type in ['Char', 'Real', 'String', 'Integer', 'Boolean']:
            # Literal constant: record its token type and value.
            Scanner.NextToken();
            self.Type = Type;
            self.Value = Value;
        elif Type in ['Type', 'Identifier']:
            self.Type = Type;
            self.Value = Value;
            # Look one token past the identifier to disambiguate.
            if Scanner.TokenExpected('(', 1):
                # Call: Invoking consumes the identifier itself.
                self.Type = 'Invoking';
                self.Value = Invoking().Parse(Scanner, True);
            elif Scanner.TokenExpected('[', 1):
                # Array indexing: consume identifier and '[', then the index list.
                Scanner.NextToken();
                Scanner.NextToken();
                self.Type = 'Indexing';
                self.Indexer = [Expression().Parse(Scanner)];
                while Scanner.TokenExpected(','):
                    Scanner.NextToken();
                    self.Indexer.append(Expression().Parse(Scanner));
                if not Scanner.TokenNeeded(']'):
                    raise SyntaxError('"]" expected.');
            else:
                # Plain variable reference: just consume the identifier.
                Scanner.NextToken();
        else:
            raise SyntaxError('Unexpected token "%s".' % (Value or Type));
        return self;
class Term(object):
    """Multiplicative-level node: one leading factor plus (operator, factor) pairs."""

    _OPERATORS = ('*', '/', 'Div', 'Mod', 'And', 'Shl', 'Shr')

    def __init__(self):
        self.Factors = []
        self.Factor = Factor()

    def Parse(self, Scanner):
        """Parse a factor, then greedily consume operator/factor pairs."""
        self.Factor.Parse(Scanner)
        while Scanner.PeekToken()[0] in self._OPERATORS:
            operator = Scanner.NextToken()[0]
            operand = Factor().Parse(Scanner)
            self.Factors.append((operator, operand))
        return self
class SimpleExpr(object):
    """Additive-level node: one leading term plus (operator, term) pairs."""

    _OPERATORS = ('+', '-', 'Or', 'Xor')

    def __init__(self):
        self.Terms = []
        self.Term = Term()

    def Parse(self, Scanner):
        """Parse a term, then greedily consume operator/term pairs."""
        self.Term.Parse(Scanner)
        while Scanner.PeekToken()[0] in self._OPERATORS:
            operator = Scanner.NextToken()[0]
            operand = Term().Parse(Scanner)
            self.Terms.append((operator, operand))
        return self
class Expression(object):
    """Top-level expression: a simple expression, optionally related to a second."""

    _RELATIONS = ('<', '>', '<=', '>=', '<>', '=')

    def __init__(self):
        self.Operator = None
        self.Operand1 = SimpleExpr()
        self.Operand2 = None

    def Parse(self, Scanner):
        """Parse the left operand; if a relational operator follows, the right too."""
        self.Operand1.Parse(Scanner)
        if Scanner.PeekToken()[0] in self._RELATIONS:
            self.Operator = Scanner.NextToken()[0]
            self.Operand2 = SimpleExpr().Parse(Scanner)
        return self
class If(object):
    """If-statement node: "if <expr> then <stmt> [else <stmt>]"."""
    def __init__(self):
        self.Expression = Expression();    # condition
        self.TrueStatement = Statement();  # then-branch
        self.FalseStatement = None;        # else-branch; None when absent
    def Parse(self, Scanner):
        Scanner.NextToken();  # consume 'If'
        self.Expression.Parse(Scanner);
        if not Scanner.TokenNeeded('Then'):
            raise SyntaxError('"then" expected.');
        self.TrueStatement.Parse(Scanner);
        # Optional else-branch.
        if Scanner.TokenExpected('Else'):
            Scanner.NextToken();
            self.FalseStatement = Statement().Parse(Scanner);
        return self;
class For(object):
    """For-loop node: "for <assignment> to|downto <expr> do <statement>"."""
    def __init__(self):
        self.Type = None;              # 'Inc' for "to", 'Dec' for "downto"
        self.Init = Assignment();      # loop-variable initialization
        self.Stop = Expression();      # final value
        self.Statement = Statement();  # loop body
    def Parse(self, Scanner):
        Scanner.NextToken();  # consume 'For'
        # The token after the loop variable must start an assignment ('[' or ':=').
        if not Scanner.TokensExpected(['[', ':='], 1):
            raise SyntaxError('For-loop must have an initialization.');
        self.Init.Parse(Scanner);
        if Scanner.TokenExpected('To'):
            self.Type = 'Inc';
            Scanner.NextToken();
        elif Scanner.TokenExpected('DownTo'):
            self.Type = 'Dec';
            Scanner.NextToken();
        else:
            raise SyntaxError('"to" or "downto" expected.');
        self.Stop.Parse(Scanner);
        if not Scanner.TokenNeeded('Do'):
            raise SyntaxError('"do" expected.');
        self.Statement.Parse(Scanner);
        return self;
class While(object):
    """While-loop node: "while <expr> do <statement>"."""

    def __init__(self):
        self.Statement = Statement()
        self.Expression = Expression()

    def Parse(self, Scanner):
        """Consume 'While', then parse the condition and the body."""
        Scanner.NextToken()
        self.Expression.Parse(Scanner)
        if not Scanner.TokenNeeded('Do'):
            raise SyntaxError('"do" expected.')
        self.Statement.Parse(Scanner)
        return self
class Repeat(object):
    """Repeat-loop node: "repeat <stmt>; ... until <expr>"."""
    def __init__(self):
        self.Statements = []            # body statements, in source order
        self.Expression = Expression()  # exit condition
    def Parse(self, Scanner):
        Scanner.NextToken()  # consume 'Repeat'
        while not Scanner.TokenExpected('Until'):
            # `is None` instead of `== None` (PEP 8); a None token type means EOF.
            if Scanner.PeekToken()[0] is None:
                raise SyntaxError('"until" expected.')
            self.Statements.append(Statement().Parse(Scanner))
            # Statements are ';'-separated; the last may omit it before 'Until'.
            if not Scanner.TokenExpected('Until') and not Scanner.TokenNeeded(';'):
                raise SyntaxError('";" expected.')
        Scanner.NextToken()  # consume 'Until'
        self.Expression.Parse(Scanner)
        return self
class Case(object):
    """Case-statement node: "case <expr> of <item>: <stmt>; ... [else <stmt>;] end"."""
    def __init__(self):
        self.Statements = {}           # case-item value -> Statement
        self.ConstantType = None       # token type of the items ('Char' or 'Integer')
        self.DefaultStatement = None   # else-branch Statement; None when absent
        self.Expression = Expression()
    def Parse(self, Scanner):
        Scanner.NextToken()  # consume 'Case'
        self.Expression.Parse(Scanner)
        if not Scanner.TokenNeeded('Of'):
            raise SyntaxError('"of" expected.')
        while not Scanner.TokenExpected('End'):
            if Scanner.TokenExpected('Else'):
                if len(self.Statements) == 0:
                    raise SyntaxError('Else-statement cannot be placed at top.')
                # `is not None` instead of `!= None` (PEP 8 singleton comparison).
                if self.DefaultStatement is not None:
                    raise SyntaxError('A case-statement should only have one else-statement.')
                Scanner.NextToken()
                self.DefaultStatement = Statement().Parse(Scanner)
                if not Scanner.TokenNeeded(';'):
                    raise SyntaxError('";" expected.')
            else:
                # The first item fixes the type every later item must match.
                if self.ConstantType is None:
                    self.ConstantType = Scanner.PeekToken()[0]
                    if self.ConstantType not in ['Char', 'Integer']:
                        raise SyntaxError('Case-item should be ordinary type.')
                # Renamed local (was `Type`) so it no longer shadows the Type class.
                TokenType, Key = Scanner.NextToken()
                if TokenType != self.ConstantType:
                    raise SyntaxError('Inconsistent case-item type')
                if not Scanner.TokenNeeded(':'):
                    raise SyntaxError('":" expected.')
                # Membership test directly on the dict (no `.keys()` needed).
                if Key in self.Statements:
                    raise SyntaxError('Duplicate case-item.')
                self.Statements[Key] = Statement().Parse(Scanner)
                if not Scanner.TokenNeeded(';'):
                    raise SyntaxError('";" expected.')
        Scanner.NextToken()  # consume 'End'
        return self
class Invoking(object):
    """Procedure/function call: "Name" or "Name(arg, ...)"."""
    def __init__(self):
        self.Name = '';
        self.Factor = False;   # True when the call appears as a value in an expression
        self.Parameters = [];  # actual-argument Expressions
    def Parse(self, Scanner, Factor = False):
        self.Factor = Factor;
        Type, self.Name = Scanner.NextToken();  # NOTE(review): local shadows the Type class
        if Type not in ['Type', 'Identifier']:
            raise SyntaxError('Identifier expected.');
        # Optional comma-separated argument list.
        if Scanner.TokenExpected('('):
            Scanner.NextToken();
            while not Scanner.TokenExpected(')'):
                self.Parameters.append(Expression().Parse(Scanner));
                if not Scanner.TokenExpected(','):
                    break;
                Scanner.NextToken();
            if not Scanner.TokenNeeded(')'):
                raise SyntaxError('")" expected.');
        return self;
class Assignment(object):
    """Assignment node: "Target := expr" or "Target[i, ...] := expr"."""
    def __init__(self):
        self.Target = '';
        self.Indexer = None;  # index Expressions when assigning into an array element
        self.Expression = Expression();
    def Parse(self, Scanner):
        Type, self.Target = Scanner.NextToken();
        if Type != 'Identifier':
            raise SyntaxError('Identifier expected.');
        # The next token is either ':=' (plain target) or '[' (indexed target);
        # NextToken consumes it either way.
        if Scanner.NextToken()[0] == ':=':
            self.Expression.Parse(Scanner);
        else:
            self.Indexer = [Expression().Parse(Scanner)];
            while Scanner.TokenExpected(','):
                Scanner.NextToken();
                self.Indexer.append(Expression().Parse(Scanner));
            if not Scanner.TokenNeeded(']'):
                raise SyntaxError('"]" expected.');
            if not Scanner.TokenNeeded(':='):
                raise SyntaxError('":=" expected.');
            self.Expression.Parse(Scanner);
        return self;
class Statement(object):
    """Dispatch node: parse one statement and tag it with its kind."""
    def __init__(self):
        self.Type = 'Empty'    # statement kind tag
        self.Statement = None  # parsed node; None for the empty statement
    def Parse(self, Scanner):
        if Scanner.TokenExpected(';'):
            # Empty statement; the separating ';' is consumed by the caller.
            pass
        elif Scanner.TokenExpected('If'):
            self.Type = 'If'
            self.Statement = If().Parse(Scanner)
        elif Scanner.TokenExpected('For'):
            self.Type = 'For'
            self.Statement = For().Parse(Scanner)
        elif Scanner.TokenExpected('Case'):
            self.Type = 'Case'
            self.Statement = Case().Parse(Scanner)
        elif Scanner.TokenExpected('While'):
            self.Type = 'While'
            self.Statement = While().Parse(Scanner)
        elif Scanner.TokenExpected('Repeat'):
            self.Type = 'Repeat'
            self.Statement = Repeat().Parse(Scanner)
        elif Scanner.TokenExpected('Begin'):
            self.Type = 'Block'
            self.Statement = Statements().Parse(Scanner)
        elif Scanner.TokensExpected(['[', ':='], 1):
            # An identifier followed by '[' or ':=' starts an assignment.
            self.Type = 'Assignment'
            self.Statement = Assignment().Parse(Scanner)
        elif Scanner.TokenExpected('(', 1) or (Scanner.TokenExpected('Identifier') and Scanner.TokensExpected([';', 'End'], 1)):
            # "name(...)", or a bare "name" right before ';'/'End', is a call.
            self.Type = 'Invoking'
            self.Statement = Invoking().Parse(Scanner)
        else:
            # BUGFIX: removed two leftover debug print() calls that dumped
            # peeked tokens to stdout before raising.
            Token = Scanner.NextToken()
            raise SyntaxError('Unexpected token "%s".' % (Token[1] or Token[0]))
        return self
class Declarations(object):
    """Declaration section: any sequence of Function / Procedure / Var / Const blocks."""
    def __init__(self):
        self.Declarations = [];  # list of (kind, node) tuples in source order
    def Parse(self, Scanner):
        while True:
            if Scanner.TokenExpected('Function'):
                self.Declarations.append(('Function', Function().Parse(Scanner)));
            elif Scanner.TokenExpected('Procedure'):
                self.Declarations.append(('Procedure', Procedure().Parse(Scanner)));
            elif Scanner.TokenExpected('Var'):
                Scanner.NextToken();
                # At least one declaration must follow the 'Var' keyword.
                if not Scanner.TokenExpected('Identifier'):
                    raise SyntaxError('Identifier expected.');
                while Scanner.TokenExpected('Identifier'):
                    self.Declarations.append(('Variable', Variable().Parse(Scanner)));
            elif Scanner.TokenExpected('Const'):
                Scanner.NextToken();
                # At least one declaration must follow the 'Const' keyword.
                if not Scanner.TokenExpected('Identifier'):
                    raise SyntaxError('Identifier expected.');
                while Scanner.TokenExpected('Identifier'):
                    self.Declarations.append(('Constant', Constant().Parse(Scanner)));
            else:
                # First token that starts no declaration ends the section.
                break;
        return self;
class Statements(object):
    """Compound statement: "begin <stmt>; ... end"."""
    def __init__(self):
        self.Statements = []  # body statements, in source order
    def Parse(self, Scanner):
        if not Scanner.TokenNeeded('Begin'):
            raise SyntaxError('"begin" expected.')
        while not Scanner.TokenExpected('End'):
            # `is None` instead of `== None` (PEP 8); a None token type means EOF.
            if Scanner.PeekToken()[0] is None:
                raise SyntaxError('"end" expected.')
            self.Statements.append(Statement().Parse(Scanner))
            # Statements are ';'-separated; the last may omit it before 'End'.
            if not Scanner.TokenExpected('End') and not Scanner.TokenNeeded(';'):
                raise SyntaxError('";" expected.')
        if not Scanner.TokenNeeded('End'):
            raise SyntaxError('"end" expected.')
        return self
class Program(object):
    """Root node: "program <name>; <declarations> <statements> ." """

    def __init__(self, Scanner):
        self.Name = ''
        self.Scanner = Scanner
        self.Statements = Statements()
        self.Declarations = Declarations()

    def Parse(self):
        """Parse the whole source: header, declarations, main block, final '.'."""
        if not self.Scanner.TokenNeeded('Program'):
            raise SyntaxError('"program" expected.')
        kind, self.Name = self.Scanner.NextToken()
        if kind != 'Identifier':
            raise SyntaxError('identifier expected.')
        if not self.Scanner.TokenNeeded(';'):
            raise SyntaxError('";" expected')
        self.Declarations.Parse(self.Scanner)
        self.Statements.Parse(self.Scanner)
        if not self.Scanner.TokenNeeded('.'):
            raise SyntaxError('"." expected')
        return self
| |
"""Support for Coinbase sensors."""
import logging
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import ATTR_ATTRIBUTION
from .const import (
API_ACCOUNT_AMOUNT,
API_ACCOUNT_BALANCE,
API_ACCOUNT_CURRENCY,
API_ACCOUNT_ID,
API_ACCOUNT_NAME,
API_ACCOUNT_NATIVE_BALANCE,
API_RATES,
API_RESOURCE_TYPE,
API_TYPE_VAULT,
CONF_CURRENCIES,
CONF_EXCHANGE_RATES,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)

# Extra state attribute: account balance expressed in the user's native currency.
ATTR_NATIVE_BALANCE = "Balance in native currency"

# Frontend icon per currency code; unknown currencies use DEFAULT_COIN_ICON.
CURRENCY_ICONS = {
    "BTC": "mdi:currency-btc",
    "ETH": "mdi:currency-eth",
    "EUR": "mdi:currency-eur",
    "LTC": "mdi:litecoin",
    "USD": "mdi:currency-usd",
}
DEFAULT_COIN_ICON = "mdi:currency-usd-circle"

ATTRIBUTION = "Data provided by coinbase.com"
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up Coinbase sensor platform."""
    instance = hass.data[DOMAIN][config_entry.entry_id]

    # Currencies actually available on the account (vaults excluded).
    provided_currencies = [
        account[API_ACCOUNT_CURRENCY]
        for account in instance.accounts
        if account[API_RESOURCE_TYPE] != API_TYPE_VAULT
    ]

    desired_currencies = config_entry.options.get(CONF_CURRENCIES, [])
    exchange_base_currency = instance.exchange_rates[API_ACCOUNT_CURRENCY]

    entities = []
    for currency in desired_currencies:
        if currency not in provided_currencies:
            _LOGGER.warning(
                "The currency %s is no longer provided by your account, please check "
                "your settings in Coinbase's developer tools",
                currency,
            )
            continue
        entities.append(AccountSensor(instance, currency))

    for rate in config_entry.options.get(CONF_EXCHANGE_RATES, []):
        entities.append(ExchangeRateSensor(instance, rate, exchange_base_currency))

    async_add_entities(entities)
class AccountSensor(SensorEntity):
    """Representation of a Coinbase.com sensor."""

    def __init__(self, coinbase_data, currency):
        """Initialize the sensor."""
        self._coinbase_data = coinbase_data
        self._currency = currency
        account = self._find_account()
        if account is not None:
            self._name = f"Coinbase {account[API_ACCOUNT_NAME]}"
            self._id = (
                f"coinbase-{account[API_ACCOUNT_ID]}-wallet-"
                f"{account[API_ACCOUNT_CURRENCY]}"
            )
            self._unit_of_measurement = account[API_ACCOUNT_CURRENCY]
            self._apply_balances(account)

    def _find_account(self):
        """Return the first non-vault account matching our currency, or None.

        Shared by __init__ and update; previously the matching loop was
        duplicated in both places.
        """
        for account in self._coinbase_data.accounts:
            if (
                account[API_ACCOUNT_CURRENCY] == self._currency
                and account[API_RESOURCE_TYPE] != API_TYPE_VAULT
            ):
                return account
        return None

    def _apply_balances(self, account):
        """Copy state and native-balance fields from an account payload."""
        self._state = account[API_ACCOUNT_BALANCE][API_ACCOUNT_AMOUNT]
        self._native_balance = account[API_ACCOUNT_NATIVE_BALANCE][
            API_ACCOUNT_AMOUNT
        ]
        self._native_currency = account[API_ACCOUNT_NATIVE_BALANCE][
            API_ACCOUNT_CURRENCY
        ]

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def unique_id(self):
        """Return the Unique ID of the sensor."""
        return self._id

    @property
    def native_value(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def native_unit_of_measurement(self):
        """Return the unit of measurement this sensor expresses itself in."""
        return self._unit_of_measurement

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return CURRENCY_ICONS.get(self._unit_of_measurement, DEFAULT_COIN_ICON)

    @property
    def extra_state_attributes(self):
        """Return the state attributes of the sensor."""
        return {
            ATTR_ATTRIBUTION: ATTRIBUTION,
            ATTR_NATIVE_BALANCE: f"{self._native_balance} {self._native_currency}",
        }

    def update(self):
        """Get the latest state of the sensor."""
        self._coinbase_data.update()
        account = self._find_account()
        if account is not None:
            self._apply_balances(account)
class ExchangeRateSensor(SensorEntity):
    """Sensor exposing a Coinbase exchange rate.

    The state is the price of one unit of ``exchange_currency`` expressed in
    ``exchange_base`` (the inverse of the API's base->currency rate), rounded
    to two decimal places.
    """

    def __init__(self, coinbase_data, exchange_currency, exchange_base):
        """Initialize the sensor."""
        self._coinbase_data = coinbase_data
        self.currency = exchange_currency
        self._name = f"{exchange_currency} Exchange Rate"
        self._id = f"coinbase-{coinbase_data.user_id}-xe-{exchange_currency}"
        self._state = round(
            1 / float(self._coinbase_data.exchange_rates[API_RATES][self.currency]), 2
        )
        self._unit_of_measurement = exchange_base

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def unique_id(self):
        """Return the unique ID of the sensor."""
        return self._id

    @property
    def native_value(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def native_unit_of_measurement(self):
        """Return the unit of measurement this sensor expresses itself in."""
        return self._unit_of_measurement

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return CURRENCY_ICONS.get(self.currency, DEFAULT_COIN_ICON)

    @property
    def extra_state_attributes(self):
        """Return the state attributes of the sensor."""
        return {ATTR_ATTRIBUTION: ATTRIBUTION}

    def update(self):
        """Get the latest state of the sensor."""
        self._coinbase_data.update()
        # Fix: read the refreshed rate the same way __init__ (and the setup
        # code) does -- ``exchange_rates`` is indexed with API_RATES; the
        # previous attribute-style ``exchange_rates.rates`` access was
        # inconsistent with the rest of this module.
        self._state = round(
            1 / float(self._coinbase_data.exchange_rates[API_RATES][self.currency]), 2
        )
| |
#!/usr/bin/env python
#
# "THE BEER-WARE LICENSE" (Revision 43~maze)
#
# <maze@pyth0n.org> wrote these files. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return.
import atexit
import math
import os
import random
import re
import sys
import time
# True when running under Python 3 (the module keeps Python 2.7 compatibility).
PY3 = sys.version_info >= (3,)

# Matches ANSI SGR / erase-in-line escape sequences so existing colour codes
# can be stripped from the input before recolouring.
STRIP_ANSI = re.compile(r'\x1b\[(\d+)(;\d+)?(;\d+)?[m|K]')

# RGB values of the 16 classic ANSI terminal colours (8 normal + 8 bright).
COLOR_ANSI = (
    (0x00, 0x00, 0x00), (0xcd, 0x00, 0x00),
    (0x00, 0xcd, 0x00), (0xcd, 0xcd, 0x00),
    (0x00, 0x00, 0xee), (0xcd, 0x00, 0xcd),
    (0x00, 0xcd, 0xcd), (0xe5, 0xe5, 0xe5),
    (0x7f, 0x7f, 0x7f), (0xff, 0x00, 0x00),
    (0x00, 0xff, 0x00), (0xff, 0xff, 0x00),
    (0x5c, 0x5c, 0xff), (0xff, 0x00, 0xff),
    (0x00, 0xff, 0xff), (0xff, 0xff, 0xff),
)
class LolCat(object):
    """Write text to *output* with a rainbow colour gradient (lolcat clone)."""

    def __init__(self, mode=256, output=sys.stdout):
        # mode: colour depth of the terminal -- 8, 16 or 256 colours.
        self.mode = mode
        self.output = output

    def _distance(self, rgb1, rgb2):
        # Squared Euclidean distance between two RGB triples.
        return sum(map(lambda c: (c[0] - c[1]) ** 2,
                       zip(rgb1, rgb2)))

    def ansi(self, rgb):
        """Return the ANSI SGR parameter string for the colour *rgb*."""
        r, g, b = rgb
        if self.mode in (8, 16):
            # Low-colour terminals: pick the nearest classic ANSI colour.
            colors = COLOR_ANSI[:self.mode]
            matches = [(self._distance(c, map(int, rgb)), i) for i, c in enumerate(colors)]
            matches.sort()
            color = matches[0][1]
            return '3%d' % (color,)
        else:
            # 256-colour terminals: use the grayscale ramp (232..255) when all
            # three channels fall inside the same 42.5-wide brightness band,
            # otherwise an index into the 6x6x6 colour cube starting at 16.
            gray_possible = True
            sep = 2.5
            while gray_possible:
                if r < sep or g < sep or b < sep:
                    gray = r < sep and g < sep and b < sep
                    gray_possible = False
                sep += 42.5
            if gray:
                color = 232 + int(float(sum(rgb) / 33.0))
            else:
                color = sum([16] + [int(6 * float(val) / 256) * mod
                                    for val, mod in zip(rgb, [36, 6, 1])])
            return '38;5;%d' % (color,)

    def wrap(self, *codes):
        # Wrap SGR parameter strings into a complete escape sequence.
        return '\x1b[%sm' % (''.join(codes),)

    def rainbow(self, freq, i):
        # Three phase-shifted sine waves (120 degrees apart) produce a smooth
        # rainbow; each channel is scaled into the 1..255 range.
        r = math.sin(freq * i) * 127 + 128
        g = math.sin(freq * i + 2 * math.pi / 3) * 127 + 128
        b = math.sin(freq * i + 4 * math.pi / 3) * 127 + 128
        return [r, g, b]

    def cat(self, fd, options):
        """Colourize every line read from *fd*; hides the cursor while animating."""
        if options.animate:
            self.output.write('\x1b[?25l')  # hide cursor
        for line in fd:
            options.os += 1  # advance the rainbow offset per line
            self.println(line, options)
        if options.animate:
            self.output.write('\x1b[?25h')  # show cursor again

    def get_line(self, s, offset, spread=3.0):
        """Return *s* recoloured with the rainbow starting at *offset*.

        NOTE(review): not called by println()/cat(); appears to be a one-shot
        string API for external callers -- confirm before removing.
        """
        if spread is None:
            spread = 99999.0  # effectively no colour drift along the line
        # if options.force or self.output.isatty():
        s = STRIP_ANSI.sub('', s)
        r = ''
        if isinstance(s, str) and not PY3:  # str is already unicode in python3
            s = s.decode('utf-8', 'replace')
        # We must consider the classic CMD as a no tty, as it's just too limited
        if os.name == 'nt':
            if os.environ.get('ANSICON', '') == '':
                return s
        for i, c in enumerate(s):
            rgb = self.rainbow(0.1, offset + i / spread)
            if isinstance(c, str) and not PY3:
                c = c.encode('utf-8', 'replace')
            r += u''.join([self.wrap(self.ansi(rgb)), c if PY3 else c, ])
        r += '\x1b[0m'  # reset colours at end of line
        return r

    def println(self, s, options):
        """Print one line; strips pre-existing ANSI codes when colouring."""
        s = s.rstrip()
        if options.force or self.output.isatty():
            s = STRIP_ANSI.sub('', s)
        if options.animate:
            self.println_ani(s, options)
        else:
            self.println_plain(s, options)
        self.output.write('\n')
        self.output.flush()

    def println_ani(self, s, options):
        """Animate one line by redrawing it with shifting colours."""
        if not s:
            return
        for i in range(1, options.duration):
            self.output.write('\x1b[%dD' % (len(s),))  # cursor back over the line
            self.output.flush()
            options.os += options.spread  # shift the rainbow each frame
            self.println_plain(s, options)
            time.sleep(1.0 / options.speed)

    def println_plain(self, s, options):
        """Write one line, colouring each character individually."""
        for i, c in enumerate(s if PY3 else s.decode(options.charset_py2, 'replace')):
            rgb = self.rainbow(options.freq, options.os + i / 3.0)
            self.output.write(''.join([
                self.wrap(self.ansi(rgb)),
                c if PY3 else c.encode(options.charset_py2, 'replace'),
            ]))
def detect_mode(term_hint='xterm-256color'):
    """Guess the terminal's colour depth (16 or 256 colours).

    Poor-man's detection from well-known environment variables; *term_hint*
    is used only when $TERM is unset.
    """
    if 'ANSICON' in os.environ:
        return 16
    if os.environ.get('ConEmuANSI', 'OFF') == 'ON':
        return 256
    term = os.environ.get('TERM', term_hint)
    if term.endswith('-256color') or term in ('xterm', 'screen'):
        return 256
    if term.endswith('-color') or term in ('rxvt',):
        return 16
    return 256  # optimistic default
def run():
    """Main entry point: parse options and colourize stdin or the given files."""
    import optparse

    # Reset terminal colors at exit
    def reset():
        sys.stdout.write('\x1b[0m')
        sys.stdout.flush()
    atexit.register(reset)

    parser = optparse.OptionParser(usage=r'%prog [<options>] [file ...]')
    parser.add_option('-p', '--spread', type='float', default=3.0,
                      help='Rainbow spread')
    parser.add_option('-F', '--freq', type='float', default=0.1,
                      help='Rainbow frequency')
    parser.add_option('-S', '--seed', type='int', default=0,
                      help='Rainbow seed')
    parser.add_option('-a', '--animate', action='store_true', default=False,
                      help='Enable psychedelics')
    parser.add_option('-d', '--duration', type='int', default=12,
                      help='Animation duration')
    parser.add_option('-s', '--speed', type='float', default=20.0,
                      help='Animation speed')
    parser.add_option('-f', '--force', action='store_true', default=False,
                      help='Force colour even when stdout is not a tty')
    parser.add_option('-3', action='store_const', dest='mode', const=8,
                      help='Force 3 bit colour mode')
    parser.add_option('-4', action='store_const', dest='mode', const=16,
                      help='Force 4 bit colour mode')
    parser.add_option('-8', action='store_const', dest='mode', const=256,
                      help='Force 8 bit colour mode')
    parser.add_option('-c', '--charset-py2', default='utf-8',
                      help='Manually set a charset to convert from, for python 2.7')
    options, args = parser.parse_args()
    # options.os doubles as the rainbow offset; seed 0 picks a random start.
    options.os = random.randint(0, 256) if options.seed == 0 else options.seed
    options.mode = options.mode or detect_mode()

    lolcat = LolCat(mode=options.mode)

    if not args:
        args = ['-']  # no file arguments: read from stdin
    for filename in args:
        if filename == '-':
            lolcat.cat(sys.stdin, options)
        else:
            try:
                with open(filename, 'r') as handle:
                    lolcat.cat(handle, options)
            except IOError as error:
                sys.stderr.write(str(error) + '\n')

# NOTE(review): placement inferred from a whitespace-flattened paste -- this
# statement sits between run() and the __main__ guard and calls detect_mode()
# at import time; confirm it is intentionally module-level.
lolcat = LolCat(mode=detect_mode())

if __name__ == '__main__':
    sys.exit(run())
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Optional callback passed as ``cls`` to operations: receives the pipeline
# response, the deserialized body and the response headers, and may transform
# the operation's return value.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkManagementClientOperationsMixin:
    async def _put_bastion_shareable_link_initial(
        self,
        resource_group_name: str,
        bastion_host_name: str,
        bsl_request: "_models.BastionShareableLinkListRequest",
        **kwargs: Any
    ) -> Optional["_models.BastionShareableLinkListResult"]:
        """Issue the initial POST of the createShareableLinks long-running operation.

        Returns the deserialized first page on 200, ``None`` on 202 (operation
        accepted; completion is observed by the caller's poller).
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.BastionShareableLinkListResult"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._put_bastion_shareable_link_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'bastionHostName': self._serialize.url("bastion_host_name", bastion_host_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the request body and send the POST through the pipeline.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(bsl_request, 'BastionShareableLinkListRequest')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('BastionShareableLinkListResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _put_bastion_shareable_link_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}/createShareableLinks'}  # type: ignore
    async def begin_put_bastion_shareable_link(
        self,
        resource_group_name: str,
        bastion_host_name: str,
        bsl_request: "_models.BastionShareableLinkListRequest",
        **kwargs: Any
    ) -> AsyncLROPoller[AsyncItemPaged["_models.BastionShareableLinkListResult"]]:
        """Creates a Bastion Shareable Links for all the VMs specified in the request.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param bastion_host_name: The name of the Bastion Host.
        :type bastion_host_name: str
        :param bsl_request: Post request for all the Bastion Shareable Link endpoints.
        :type bsl_request: ~azure.mgmt.network.v2020_05_01.models.BastionShareableLinkListRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns an iterator like instance of either BastionShareableLinkListResult or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.BastionShareableLinkListResult]]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BastionShareableLinkListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        content_type = "application/json"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # First page: POST to the operation URL.
                url = self.put_bastion_shareable_link.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'bastionHostName': self._serialize.url("bastion_host_name", bastion_host_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                body_content_kwargs = {}  # type: Dict[str, Any]
                body_content = self._serialize.body(bsl_request, 'BastionShareableLinkListRequest')
                body_content_kwargs['content'] = body_content
                request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
            else:
                # Subsequent pages: GET the service-provided next link.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                body_content_kwargs = {}  # type: Dict[str, Any]
                body_content = self._serialize.body(bsl_request, 'BastionShareableLinkListRequest')
                body_content_kwargs['content'] = body_content
                request = self._client.get(url, query_parameters, header_parameters, **body_content_kwargs)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items).
            deserialized = self._deserialize('BastionShareableLinkListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        # NOTE(review): 'cls' was already popped above, so this always binds None
        # (generated-code quirk; left untouched).
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BastionShareableLinkListResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = await self._put_bastion_shareable_link_initial(
                resource_group_name=resource_group_name,
                bastion_host_name=bastion_host_name,
                bsl_request=bsl_request,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Final LRO result: an async pager whose first "page" is the
            # response the poller finished with.
            async def internal_get_next(next_link=None):
                if next_link is None:
                    return pipeline_response
                else:
                    return await get_next(next_link)

            return AsyncItemPaged(
                internal_get_next, extract_data
            )
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'bastionHostName': self._serialize.url("bastion_host_name", bastion_host_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_put_bastion_shareable_link.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}/createShareableLinks'}  # type: ignore
    async def _delete_bastion_shareable_link_initial(
        self,
        resource_group_name: str,
        bastion_host_name: str,
        bsl_request: "_models.BastionShareableLinkListRequest",
        **kwargs: Any
    ) -> None:
        """Issue the initial POST of the deleteShareableLinks long-running operation.

        Accepts 200 or 202; the response body is not deserialized.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._delete_bastion_shareable_link_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'bastionHostName': self._serialize.url("bastion_host_name", bastion_host_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the request body and send the POST through the pipeline.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(bsl_request, 'BastionShareableLinkListRequest')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    _delete_bastion_shareable_link_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}/deleteShareableLinks'}  # type: ignore
    async def begin_delete_bastion_shareable_link(
        self,
        resource_group_name: str,
        bastion_host_name: str,
        bsl_request: "_models.BastionShareableLinkListRequest",
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the Bastion Shareable Links for all the VMs specified in the request.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param bastion_host_name: The name of the Bastion Host.
        :type bastion_host_name: str
        :param bsl_request: Post request for all the Bastion Shareable Link endpoints.
        :type bsl_request: ~azure.mgmt.network.v2020_05_01.models.BastionShareableLinkListRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Kick off the LRO; keep the raw pipeline response for the poller.
            raw_result = await self._delete_bastion_shareable_link_initial(
                resource_group_name=resource_group_name,
                bastion_host_name=bastion_host_name,
                bsl_request=bsl_request,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Operation returns no body; only invoke the user callback, if any.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'bastionHostName': self._serialize.url("bastion_host_name", bastion_host_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete_bastion_shareable_link.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}/deleteShareableLinks'}  # type: ignore
    def get_bastion_shareable_link(
        self,
        resource_group_name: str,
        bastion_host_name: str,
        bsl_request: "_models.BastionShareableLinkListRequest",
        **kwargs: Any
    ) -> AsyncIterable["_models.BastionShareableLinkListResult"]:
        """Return the Bastion Shareable Links for all the VMs specified in the request.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param bastion_host_name: The name of the Bastion Host.
        :type bastion_host_name: str
        :param bsl_request: Post request for all the Bastion Shareable Link endpoints.
        :type bsl_request: ~azure.mgmt.network.v2020_05_01.models.BastionShareableLinkListRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either BastionShareableLinkListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.BastionShareableLinkListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BastionShareableLinkListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        content_type = "application/json"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # First page: POST to the operation URL.
                url = self.get_bastion_shareable_link.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'bastionHostName': self._serialize.url("bastion_host_name", bastion_host_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                body_content_kwargs = {}  # type: Dict[str, Any]
                body_content = self._serialize.body(bsl_request, 'BastionShareableLinkListRequest')
                body_content_kwargs['content'] = body_content
                request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
            else:
                # Subsequent pages: GET the service-provided next link.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                body_content_kwargs = {}  # type: Dict[str, Any]
                body_content = self._serialize.body(bsl_request, 'BastionShareableLinkListRequest')
                body_content_kwargs['content'] = body_content
                request = self._client.get(url, query_parameters, header_parameters, **body_content_kwargs)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items).
            deserialized = self._deserialize('BastionShareableLinkListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    get_bastion_shareable_link.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}/getShareableLinks'}  # type: ignore
    async def _get_active_sessions_initial(
        self,
        resource_group_name: str,
        bastion_host_name: str,
        **kwargs: Any
    ) -> Optional["_models.BastionActiveSessionListResult"]:
        """Issue the initial POST of the getActiveSessions long-running operation.

        Returns the deserialized first page on 200, ``None`` on 202 (operation
        accepted; completion is observed by the caller's poller). No request
        body is sent.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.BastionActiveSessionListResult"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        accept = "application/json"

        # Construct URL
        url = self._get_active_sessions_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'bastionHostName': self._serialize.url("bastion_host_name", bastion_host_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('BastionActiveSessionListResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _get_active_sessions_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}/getActiveSessions'}  # type: ignore
    async def begin_get_active_sessions(
        self,
        resource_group_name: str,
        bastion_host_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[AsyncItemPaged["_models.BastionActiveSessionListResult"]]:
        """Returns the list of currently active sessions on the Bastion.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param bastion_host_name: The name of the Bastion Host.
        :type bastion_host_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns an iterator like instance of either BastionActiveSessionListResult or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.BastionActiveSessionListResult]]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BastionActiveSessionListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # First page: POST to the operation URL (no body).
                url = self.get_active_sessions.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'bastionHostName': self._serialize.url("bastion_host_name", bastion_host_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.post(url, query_parameters, header_parameters)
            else:
                # Subsequent pages: GET the service-provided next link.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items).
            deserialized = self._deserialize('BastionActiveSessionListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        # NOTE(review): 'cls' was already popped above, so this always binds None
        # (generated-code quirk; left untouched).
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BastionActiveSessionListResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = await self._get_active_sessions_initial(
                resource_group_name=resource_group_name,
                bastion_host_name=bastion_host_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Final LRO result: an async pager whose first "page" is the
            # response the poller finished with.
            async def internal_get_next(next_link=None):
                if next_link is None:
                    return pipeline_response
                else:
                    return await get_next(next_link)

            return AsyncItemPaged(
                internal_get_next, extract_data
            )
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'bastionHostName': self._serialize.url("bastion_host_name", bastion_host_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_active_sessions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}/getActiveSessions'} # type: ignore
    def disconnect_active_sessions(
        self,
        resource_group_name: str,
        bastion_host_name: str,
        session_ids: "_models.SessionIds",
        **kwargs: Any
    ) -> AsyncIterable["_models.BastionSessionDeleteResult"]:
        """Disconnects the given active sessions on the Bastion host, paging over the delete results.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param bastion_host_name: The name of the Bastion Host.
        :type bastion_host_name: str
        :param session_ids: The list of sessionids to disconnect.
        :type session_ids: ~azure.mgmt.network.v2020_05_01.models.SessionIds
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either BastionSessionDeleteResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.BastionSessionDeleteResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.BastionSessionDeleteResult"]
        # Map well-known ARM failure codes onto azure-core exception types;
        # callers may extend/override the mapping via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        content_type = "application/json"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the HTTP request: a POST to the operation URL for the first
            # page, or a request to ``next_link`` for continuation pages.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.disconnect_active_sessions.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'bastionHostName': self._serialize.url("bastion_host_name", bastion_host_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                body_content_kwargs = {} # type: Dict[str, Any]
                body_content = self._serialize.body(session_ids, 'SessionIds')
                body_content_kwargs['content'] = body_content
                request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                body_content_kwargs = {} # type: Dict[str, Any]
                body_content = self._serialize.body(session_ids, 'SessionIds')
                body_content_kwargs['content'] = body_content
                # NOTE(review): continuation pages are requested with GET but
                # still carry the serialized request body — an autogenerated
                # quirk; confirm against the service before changing.
                request = self._client.get(url, query_parameters, header_parameters, **body_content_kwargs)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (continuation link, items).
            deserialized = self._deserialize('BastionSessionDeleteResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch a single page, mapping HTTP failures onto typed exceptions.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    disconnect_active_sessions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/bastionHosts/{bastionHostName}/disconnectActiveSessions'} # type: ignore
async def check_dns_name_availability(
self,
location: str,
domain_name_label: str,
**kwargs: Any
) -> "_models.DnsNameAvailabilityResult":
"""Checks whether a domain name in the cloudapp.azure.com zone is available for use.
:param location: The location of the domain name.
:type location: str
:param domain_name_label: The domain name to be verified. It must conform to the following
regular expression: ^[a-z][a-z0-9-]{1,61}[a-z0-9]$.
:type domain_name_label: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DnsNameAvailabilityResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.DnsNameAvailabilityResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DnsNameAvailabilityResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.check_dns_name_availability.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['domainNameLabel'] = self._serialize.query("domain_name_label", domain_name_label, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DnsNameAvailabilityResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_dns_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/CheckDnsNameAvailability'} # type: ignore
async def supported_security_providers(
self,
resource_group_name: str,
virtual_wan_name: str,
**kwargs: Any
) -> "_models.VirtualWanSecurityProviders":
"""Gives the supported security providers for the virtual wan.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWAN for which supported security providers are
needed.
:type virtual_wan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualWanSecurityProviders, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.VirtualWanSecurityProviders
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualWanSecurityProviders"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.supported_security_providers.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualWanSecurityProviders', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
supported_security_providers.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/supportedSecurityProviders'} # type: ignore
async def _generatevirtualwanvpnserverconfigurationvpnprofile_initial(
self,
resource_group_name: str,
virtual_wan_name: str,
vpn_client_params: "_models.VirtualWanVpnProfileParameters",
**kwargs: Any
) -> Optional["_models.VpnProfileResponse"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VpnProfileResponse"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._generatevirtualwanvpnserverconfigurationvpnprofile_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_client_params, 'VirtualWanVpnProfileParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VpnProfileResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_generatevirtualwanvpnserverconfigurationvpnprofile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/GenerateVpnProfile'} # type: ignore
    async def begin_generatevirtualwanvpnserverconfigurationvpnprofile(
        self,
        resource_group_name: str,
        virtual_wan_name: str,
        vpn_client_params: "_models.VirtualWanVpnProfileParameters",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.VpnProfileResponse"]:
        """Generates a unique VPN profile for P2S clients for VirtualWan and associated
        VpnServerConfiguration combination in the specified resource group.

        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param virtual_wan_name: The name of the VirtualWAN whose associated VpnServerConfigurations is
         needed.
        :type virtual_wan_name: str
        :param vpn_client_params: Parameters supplied to the generate VirtualWan VPN profile generation
         operation.
        :type vpn_client_params: ~azure.mgmt.network.v2020_05_01.models.VirtualWanVpnProfileParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either VpnProfileResponse or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.VpnProfileResponse]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnProfileResponse"]
        # Delay between polls used when the service sends no Retry-After header.
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # Fresh start: issue the initial POST. ``cls`` is overridden so the
            # raw pipeline response is preserved for the poller to work on.
            raw_result = await self._generatevirtualwanvpnserverconfigurationvpnprofile_initial(
                resource_group_name=resource_group_name,
                virtual_wan_name=virtual_wan_name,
                vpn_client_params=vpn_client_params,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were only meaningful for the initial request and must
        # not leak into the polling method constructed below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Final deserialization once the LRO reaches a terminal state.
            deserialized = self._deserialize('VpnProfileResponse', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
        }
        # 'final-state-via: location' — the final result is fetched from the
        # Location header rather than the original request URL.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its saved token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_generatevirtualwanvpnserverconfigurationvpnprofile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWANName}/GenerateVpnProfile'} # type: ignore
| |
from __future__ import unicode_literals
from datetime import datetime
import unittest
from django.core.paginator import (Paginator, EmptyPage, InvalidPage,
PageNotAnInteger)
from django.test import TestCase
from django.utils import six
from .models import Article
from .custom import ValidAdjacentNumsPaginator
class PaginationTests(unittest.TestCase):
    """
    Tests for the Paginator and Page classes.
    """
    def check_paginator(self, params, output):
        """
        Helper method that instantiates a Paginator object from the passed
        params and then checks that its attributes match the passed output.
        """
        count, num_pages, page_range = output
        paginator = Paginator(*params)
        self.check_attribute('count', paginator, count, params)
        self.check_attribute('num_pages', paginator, num_pages, params)
        # page_range is coerced to a list so it compares equal to the expected
        # plain-list value regardless of the lazy type Paginator returns.
        self.check_attribute('page_range', paginator, page_range, params, coerce=list)
    def check_attribute(self, name, paginator, expected, params, coerce=None):
        """
        Helper method that checks a single attribute and gives a nice error
        message upon test failure.
        """
        got = getattr(paginator, name)
        if coerce is not None:
            got = coerce(got)
        self.assertEqual(expected, got,
            "For '%s', expected %s but got %s. Paginator parameters were: %s"
            % (name, expected, got, params))
    def test_paginator(self):
        """
        Tests the paginator attributes using varying inputs.
        """
        nine = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        ten = nine + [10]
        eleven = ten + [11]
        tests = (
            # Each item is two tuples:
            # First tuple is Paginator parameters - object_list, per_page,
            # orphans, and allow_empty_first_page.
            # Second tuple is resulting Paginator attributes - count,
            # num_pages, and page_range.
            # Ten items, varying orphans, no empty first page.
            ((ten, 4, 0, False), (10, 3, [1, 2, 3])),
            ((ten, 4, 1, False), (10, 3, [1, 2, 3])),
            ((ten, 4, 2, False), (10, 2, [1, 2])),
            ((ten, 4, 5, False), (10, 2, [1, 2])),
            ((ten, 4, 6, False), (10, 1, [1])),
            # Ten items, varying orphans, allow empty first page.
            ((ten, 4, 0, True), (10, 3, [1, 2, 3])),
            ((ten, 4, 1, True), (10, 3, [1, 2, 3])),
            ((ten, 4, 2, True), (10, 2, [1, 2])),
            ((ten, 4, 5, True), (10, 2, [1, 2])),
            ((ten, 4, 6, True), (10, 1, [1])),
            # One item, varying orphans, no empty first page.
            (([1], 4, 0, False), (1, 1, [1])),
            (([1], 4, 1, False), (1, 1, [1])),
            (([1], 4, 2, False), (1, 1, [1])),
            # One item, varying orphans, allow empty first page.
            (([1], 4, 0, True), (1, 1, [1])),
            (([1], 4, 1, True), (1, 1, [1])),
            (([1], 4, 2, True), (1, 1, [1])),
            # Zero items, varying orphans, no empty first page.
            (([], 4, 0, False), (0, 0, [])),
            (([], 4, 1, False), (0, 0, [])),
            (([], 4, 2, False), (0, 0, [])),
            # Zero items, varying orphans, allow empty first page.
            (([], 4, 0, True), (0, 1, [1])),
            (([], 4, 1, True), (0, 1, [1])),
            (([], 4, 2, True), (0, 1, [1])),
            # Number of items one less than per_page.
            (([], 1, 0, True), (0, 1, [1])),
            (([], 1, 0, False), (0, 0, [])),
            (([1], 2, 0, True), (1, 1, [1])),
            ((nine, 10, 0, True), (9, 1, [1])),
            # Number of items equal to per_page.
            (([1], 1, 0, True), (1, 1, [1])),
            (([1, 2], 2, 0, True), (2, 1, [1])),
            ((ten, 10, 0, True), (10, 1, [1])),
            # Number of items one more than per_page.
            (([1, 2], 1, 0, True), (2, 2, [1, 2])),
            (([1, 2, 3], 2, 0, True), (3, 2, [1, 2])),
            ((eleven, 10, 2, 0, True), (11, 2, [1, 2])) if False else ((eleven, 10, 0, True), (11, 2, [1, 2])),
            # Number of items one more than per_page with one orphan.
            (([1, 2], 1, 1, True), (2, 1, [1])),
            (([1, 2, 3], 2, 1, True), (3, 1, [1])),
            ((eleven, 10, 1, True), (11, 1, [1])),
            # Non-integer inputs (per_page / orphans given as strings).
            # NOTE(review): each pair below is listed twice in the original.
            ((ten, '4', 1, False), (10, 3, [1, 2, 3])),
            ((ten, '4', 1, False), (10, 3, [1, 2, 3])),
            ((ten, 4, '1', False), (10, 3, [1, 2, 3])),
            ((ten, 4, '1', False), (10, 3, [1, 2, 3])),
        )
        for params, output in tests:
            self.check_paginator(params, output)
    def test_invalid_page_number(self):
        """
        Tests that invalid page numbers result in the correct exception being
        raised.
        """
        paginator = Paginator([1, 2, 3], 2)
        self.assertRaises(InvalidPage, paginator.page, 3)
        self.assertRaises(PageNotAnInteger, paginator.validate_number, None)
        self.assertRaises(PageNotAnInteger, paginator.validate_number, 'x')
        # With no content and allow_empty_first_page=True, 1 is a valid page number
        paginator = Paginator([], 2)
        self.assertEqual(paginator.validate_number(1), 1)
    def test_paginate_misc_classes(self):
        class CountContainer(object):
            def count(self):
                return 42
        # Paginator can be passed other objects with a count() method.
        paginator = Paginator(CountContainer(), 10)
        self.assertEqual(42, paginator.count)
        self.assertEqual(5, paginator.num_pages)
        self.assertEqual([1, 2, 3, 4, 5], list(paginator.page_range))
        # Paginator can be passed other objects that implement __len__.
        class LenContainer(object):
            def __len__(self):
                return 42
        paginator = Paginator(LenContainer(), 10)
        self.assertEqual(42, paginator.count)
        self.assertEqual(5, paginator.num_pages)
        self.assertEqual([1, 2, 3, 4, 5], list(paginator.page_range))
    def check_indexes(self, params, page_num, indexes):
        """
        Helper method that instantiates a Paginator object from the passed
        params and then checks that the start and end indexes of the passed
        page_num match those given as a 2-tuple in indexes.
        """
        paginator = Paginator(*params)
        if page_num == 'first':
            page_num = 1
        elif page_num == 'last':
            page_num = paginator.num_pages
        page = paginator.page(page_num)
        start, end = indexes
        msg = ("For %s of page %s, expected %s but got %s."
            " Paginator parameters were: %s")
        self.assertEqual(start, page.start_index(),
            msg % ('start index', page_num, start, page.start_index(), params))
        self.assertEqual(end, page.end_index(),
            msg % ('end index', page_num, end, page.end_index(), params))
    def test_page_indexes(self):
        """
        Tests that paginator pages have the correct start and end indexes.
        """
        ten = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        tests = (
            # Each item is three tuples:
            # First tuple is Paginator parameters - object_list, per_page,
            # orphans, and allow_empty_first_page.
            # Second tuple is the start and end indexes of the first page.
            # Third tuple is the start and end indexes of the last page.
            # Ten items, varying per_page, no orphans.
            ((ten, 1, 0, True), (1, 1), (10, 10)),
            ((ten, 2, 0, True), (1, 2), (9, 10)),
            ((ten, 3, 0, True), (1, 3), (10, 10)),
            ((ten, 5, 0, True), (1, 5), (6, 10)),
            # Ten items, varying per_page, with orphans.
            ((ten, 1, 1, True), (1, 1), (9, 10)),
            ((ten, 1, 2, True), (1, 1), (8, 10)),
            ((ten, 3, 1, True), (1, 3), (7, 10)),
            ((ten, 3, 2, True), (1, 3), (7, 10)),
            ((ten, 3, 4, True), (1, 3), (4, 10)),
            ((ten, 5, 1, True), (1, 5), (6, 10)),
            ((ten, 5, 2, True), (1, 5), (6, 10)),
            ((ten, 5, 5, True), (1, 10), (1, 10)),
            # One item, varying orphans, no empty first page.
            (([1], 4, 0, False), (1, 1), (1, 1)),
            (([1], 4, 1, False), (1, 1), (1, 1)),
            (([1], 4, 2, False), (1, 1), (1, 1)),
            # One item, varying orphans, allow empty first page.
            (([1], 4, 0, True), (1, 1), (1, 1)),
            (([1], 4, 1, True), (1, 1), (1, 1)),
            (([1], 4, 2, True), (1, 1), (1, 1)),
            # Zero items, varying orphans, allow empty first page.
            (([], 4, 0, True), (0, 0), (0, 0)),
            (([], 4, 1, True), (0, 0), (0, 0)),
            (([], 4, 2, True), (0, 0), (0, 0)),
        )
        for params, first, last in tests:
            self.check_indexes(params, 'first', first)
            self.check_indexes(params, 'last', last)
        # When no items and no empty first page, we should get EmptyPage error.
        self.assertRaises(EmptyPage, self.check_indexes, ([], 4, 0, False), 1, None)
        self.assertRaises(EmptyPage, self.check_indexes, ([], 4, 1, False), 1, None)
        self.assertRaises(EmptyPage, self.check_indexes, ([], 4, 2, False), 1, None)
    def test_page_sequence(self):
        """
        Tests that a paginator page acts like a standard sequence.
        """
        eleven = 'abcdefghijk'
        page2 = Paginator(eleven, per_page=5, orphans=1).page(2)
        self.assertEqual(len(page2), 6)
        self.assertTrue('k' in page2)
        self.assertFalse('a' in page2)
        self.assertEqual(''.join(page2), 'fghijk')
        self.assertEqual(''.join(reversed(page2)), 'kjihgf')
    def test_get_page_hook(self):
        """
        Tests that a Paginator subclass can use the ``_get_page`` hook to
        return an alternative to the standard Page class.
        """
        eleven = 'abcdefghijk'
        paginator = ValidAdjacentNumsPaginator(eleven, per_page=6)
        page1 = paginator.page(1)
        page2 = paginator.page(2)
        self.assertIsNone(page1.previous_page_number())
        self.assertEqual(page1.next_page_number(), 2)
        self.assertEqual(page2.previous_page_number(), 1)
        self.assertIsNone(page2.next_page_number())
class ModelPaginationTests(TestCase):
    """
    Test pagination with Django model instances
    """
    def setUp(self):
        # Prepare a list of objects for pagination.
        for x in range(1, 10):
            a = Article(headline='Article %s' % x, pub_date=datetime(2005, 7, 29))
            a.save()
    def test_first_page(self):
        # Nine articles at five per page: page 1 holds articles 1-5.
        paginator = Paginator(Article.objects.all(), 5)
        p = paginator.page(1)
        self.assertEqual("<Page 1 of 2>", six.text_type(p))
        self.assertQuerysetEqual(p.object_list, [
            "<Article: Article 1>",
            "<Article: Article 2>",
            "<Article: Article 3>",
            "<Article: Article 4>",
            "<Article: Article 5>"
        ],
            ordered=False
        )
        self.assertTrue(p.has_next())
        self.assertFalse(p.has_previous())
        self.assertTrue(p.has_other_pages())
        self.assertEqual(2, p.next_page_number())
        self.assertRaises(InvalidPage, p.previous_page_number)
        self.assertEqual(1, p.start_index())
        self.assertEqual(5, p.end_index())
    def test_last_page(self):
        # Page 2 holds the remaining four articles (6-9).
        paginator = Paginator(Article.objects.all(), 5)
        p = paginator.page(2)
        self.assertEqual("<Page 2 of 2>", six.text_type(p))
        self.assertQuerysetEqual(p.object_list, [
            "<Article: Article 6>",
            "<Article: Article 7>",
            "<Article: Article 8>",
            "<Article: Article 9>"
        ],
            ordered=False
        )
        self.assertFalse(p.has_next())
        self.assertTrue(p.has_previous())
        self.assertTrue(p.has_other_pages())
        self.assertRaises(InvalidPage, p.next_page_number)
        self.assertEqual(1, p.previous_page_number())
        self.assertEqual(6, p.start_index())
        self.assertEqual(9, p.end_index())
    def test_page_getitem(self):
        """
        Tests proper behavior of a paginator page __getitem__ (queryset
        evaluation, slicing, exception raised).
        """
        paginator = Paginator(Article.objects.all(), 5)
        p = paginator.page(1)
        # Make sure object_list queryset is not evaluated by an invalid __getitem__ call.
        # (this happens from the template engine when using eg: {% page_obj.has_previous %})
        self.assertIsNone(p.object_list._result_cache)
        self.assertRaises(TypeError, lambda: p['has_previous'])
        self.assertIsNone(p.object_list._result_cache)
        self.assertNotIsInstance(p.object_list, list)
        # Make sure slicing the Page object with numbers and slice objects work.
        self.assertEqual(p[0], Article.objects.get(headline='Article 1'))
        self.assertQuerysetEqual(p[slice(2)], [
            "<Article: Article 1>",
            "<Article: Article 2>",
        ]
        )
        # After __getitem__ is called, object_list is a list
        self.assertIsInstance(p.object_list, list)
| |
from django.test import TestCase
from django.test import Client
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from datetime import datetime, timedelta
import json
from comments.models import Comment, Site, Thread
User = get_user_model()
class AdminUsertestCases(TestCase):
    def setUp(self):
        # Superuser account used to authenticate the test client below.
        # NOTE: the custom User model appears to use email as the username
        # field (create_superuser takes email + password only).
        self.admin = User.objects.create_superuser(
            "donald@duck.com",
            "password"
        )
        # Regular, visible user.
        self.user = User.objects.create_user(
            "scrooge@duck.com",
            "password",
        )
        # User that will be hidden on ``self.site``; also backdated one day.
        self.user_hidden = User.objects.create_user(
            "daffy@duck.com",
            "password",
        )
        self.site = Site.objects.create(domain='www.google.com')
        self.user_hidden.hidden.add(self.site)
        self.user_hidden.created = datetime.today() - timedelta(1)
        self.user_hidden.save()
        # Authenticated client shared by all tests in this case.
        self.client = Client()
        self.client.login(email="donald@duck.com", password="password")
def test_get_users_returns_all_users_except_admin(self):
thread = Thread.objects.create(site=self.site)
Comment.objects.create(thread=thread, user=self.user)
Comment.objects.create(thread=thread, user=self.user_hidden)
Comment.objects.create(thread=thread, user=self.admin)
resp = self.client.get(reverse("c4all_admin:get_users",
args=[self.site.id]))
self.assertEqual(resp.status_code, 200)
users = resp.context['users'].object_list
self.assertTrue(self.user in users)
self.assertTrue(self.user_hidden in users)
self.assertTrue(self.admin not in users)
def test_get_hidden_users_returns_hidden_users(self):
thread = Thread.objects.create(site=self.site)
Comment.objects.create(thread=thread, user=self.user)
Comment.objects.create(thread=thread, user=self.user_hidden)
resp = self.client.get(reverse("c4all_admin:get_users",
args=[self.site.id]), {"hidden": True})
self.assertTrue(resp.status_code, 200)
users = resp.context['users'].object_list
self.assertTrue(self.user_hidden in users)
self.assertFalse(self.user in users)
def test_user_bulk_actions_delete_successfully_deletes_user_comments(self):
thread = Thread.objects.create(site=self.site)
Comment.objects.create(thread=thread, user=self.user)
Comment.objects.create(thread=thread, user=self.user_hidden)
self.assertEqual(Comment.objects.count(), 2)
resp = self.client.post(
reverse("c4all_admin:user_bulk_actions"),
{
"site_id": self.site.id,
"action": ["delete"],
"choices": [self.user.id, self.user_hidden.id]
}
)
self.assertEqual(resp.status_code, 302)
users = User.objects.all()
self.assertEqual(users.count(), 3)
self.assertFalse(Comment.objects.count())
def test_user_bulk_actions_delete_successfully_deletes_user_comments_not_admin(self):
resp = self.client.post(
reverse("c4all_admin:user_bulk_actions"),
{
"site_id": self.site.id,
"action": ["delete"],
"choices": [self.user.id, self.admin.id]
}
)
users = User.objects.all()
self.assertEqual(users.count(), 3)
thread = Thread.objects.create(site=self.site)
Comment.objects.create(thread=thread, user=self.user)
Comment.objects.create(thread=thread, user=self.admin)
self.assertEqual(resp.status_code, 302)
users = User.objects.all()
self.assertEqual(users.count(), 3)
self.assertEqual(Comment.objects.filter(user=self.admin).count(), 1)
def test_user_bulk_actions_hide_successfully_hides_users(self):
users = User.objects.filter(hidden__isnull=False)
self.assertEqual(users.count(), 1)
resp = self.client.post(
reverse("c4all_admin:user_bulk_actions"),
{
"site_id": self.site.id,
"action": ["hide"],
"choices": [self.user.id]
}
)
self.assertEqual(resp.status_code, 302)
users = User.objects.filter(hidden__isnull=False)
self.assertEqual(users.count(), 2)
self.assertTrue(self.user in users)
def test_user_bulk_actions_hide_successfully_hides_user_not_admin(self):
site2 = Site.objects.create(domain='www.example.com')
users = Site.objects.get(id=site2.id).hidden_users.all()
self.assertEqual(users.count(), 0)
self.assertTrue(self.admin not in users)
resp = self.client.post(
reverse("c4all_admin:user_bulk_actions"),
{
"site_id": site2.id,
"action": ["hide"],
"choices": [self.user.id, self.admin.id]
}
)
self.assertEqual(resp.status_code, 302)
users = Site.objects.get(id=site2.id).hidden_users.all()
self.assertEqual(users.count(), 1)
self.assertTrue(self.admin not in users)
def test_user_bulk_actions_hide_hidden_user_doesnt_change_status(self):
    """
    Hiding an already-hidden user is a no-op: the hidden set is
    unchanged and still contains that user.
    """
    resp = self.client.post(
        reverse("c4all_admin:user_bulk_actions"),
        {
            "site_id": self.site.id,
            "action": ["hide"],
            "choices": [self.user_hidden.id]
        }
    )
    self.assertEqual(resp.status_code, 302)
    users = User.objects.filter(hidden__isnull=False)
    self.assertEqual(users.count(), 1)
    self.assertTrue(self.user_hidden in users)
def test_user_hide_not_ajax_call_fails(self):
    """
    Tests endpoint's response to non-ajax call. Endpoint should return
    a 400 response.
    """
    # no HTTP_X_REQUESTED_WITH header -> the view rejects the request
    r = self.client.post(
        reverse('c4all_admin:hide_user', args=(
            self.site.id, self.user.id, )),
    )
    self.assertEqual(r.status_code, 400)
def test_user_hide_succeeds(self):
    """
    Tests endpoint which serves for hiding users. Non hidden user's id is
    provided. After making a call to the endpoint, user's hidden state
    should change to True.
    """
    r = self.client.post(
        reverse('c4all_admin:hide_user', args=(
            self.site.id, self.user.id, )),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest'
    )
    self.assertEqual(r.status_code, 200)
    # re-fetch from the DB; the hidden M2M now contains this site
    user = User.objects.get(id=self.user.id)
    self.assertTrue(user.hidden.filter(id=self.site.id))
def test_user_hide_admin_fails(self):
    """
    Tests endpoint which serves for hiding users. Admin's id is
    provided. After making a call to the endpoint, endpoint should return
    404 response (admin's state should only be changed from "superadmin")
    """
    r = self.client.post(
        reverse('c4all_admin:hide_user', args=(
            self.site.id, self.admin.id, )),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest'
    )
    self.assertEqual(r.status_code, 404)
    # the admin must not have been added to the site's hidden set
    user = User.objects.get(id=self.admin.id)
    self.assertFalse(user.hidden.filter(id=self.site.id))
def test_user_hide_hidden_user_doesnt_change_state(self):
    """
    Tests endpoint which serves for hiding users. Hidden user's id is
    provided. After making a call to the endpoint, hidden users's
    state should not be changed.
    """
    r = self.client.post(
        reverse('c4all_admin:hide_user', args=(
            self.site.id, self.user_hidden.id, )),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest'
    )
    self.assertEqual(r.status_code, 200)
    # still hidden for this site, exactly as before the call
    user = User.objects.get(id=self.user_hidden.id)
    self.assertTrue(user.hidden.filter(id=self.site.id))
def test_user_hide_returns_404_for_nonexisting_user(self):
    """
    Tests endpoint which serves for hiding users. If non existent user id
    is provided, endpoint should return 404 response.
    """
    # 9999 is assumed not to collide with any fixture user id
    r = self.client.post(
        reverse('c4all_admin:hide_user', args=(self.site.id, 9999, )),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest'
    )
    self.assertEqual(r.status_code, 404)
def test_user_unhide_not_ajax_call_fails(self):
    """
    Tests endpoint's response to non-ajax call. Endpoint should return
    a 400 response.
    """
    # no HTTP_X_REQUESTED_WITH header -> the view rejects the request
    r = self.client.post(
        reverse('c4all_admin:unhide_user', args=(
            self.site.id, self.user.id, )),
    )
    self.assertEqual(r.status_code, 400)
def test_user_unhide_succeeds(self):
    """
    Tests endpoint which serves for unhiding users. Hidden user's id is
    provided. After making a call to the endpoint, user's hidden state
    should change to False.
    """
    r = self.client.post(
        reverse('c4all_admin:unhide_user', args=(
            self.site.id, self.user_hidden.id, )),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest'
    )
    self.assertEqual(r.status_code, 200)
    # the site is no longer in the user's hidden M2M set
    user = User.objects.get(id=self.user_hidden.id)
    self.assertFalse(user.hidden.filter(id=self.site.id))
def test_user_unhide_admin_fails(self):
    """
    Tests endpoint which serves for unhiding users. Admin's id is
    provided. After making a call to the endpoint, endpoint should return
    404 response (admin's state should only be changed from "superadmin")
    """
    # NOTE: this must hit the *unhide* endpoint; the previous version
    # copy/pasted 'hide_user' here and never exercised unhide_user.
    r = self.client.post(
        reverse('c4all_admin:unhide_user', args=(
            self.site.id, self.admin.id, )),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest'
    )
    self.assertEqual(r.status_code, 404)
    user = User.objects.get(id=self.admin.id)
    self.assertFalse(user.hidden.filter(id=self.site.id))
def test_user_unhide_returns_404_for_nonexisting_user(self):
    """
    Tests endpoint which serves for unhiding users. If non existent user id
    is provided, endpoint should return 404 response.
    """
    # 9999 is assumed not to collide with any fixture user id
    r = self.client.post(
        reverse('c4all_admin:unhide_user', args=(self.site.id, 9999, )),
        HTTP_X_REQUESTED_WITH='XMLHttpRequest'
    )
    self.assertEqual(r.status_code, 404)
def test_admin_changes_password_successfully(self):
    """
    Tests endpoint for changing user's password. If both password and
    repeated password are provided and are the same endpoint should
    return Http200 (and change password, of course). Test also asserts
    that endpoint returns correct user's id for whom the password change
    was attempted.
    """
    pass_1 = self.user.password
    resp = self.client.post(
        reverse("c4all_admin:change_password", args=[self.user.id]),
        data={
            "password1": "pass",
            "password2": "pass"
        },
        HTTP_X_REQUESTED_WITH='XMLHttpRequest'
    )
    # hash stored in the DB must differ after a successful change
    pass_2 = User.objects.get(id=self.user.id).password
    self.assertEqual(resp.status_code, 200)
    self.assertNotEqual(pass_1, pass_2)
    content = json.loads(resp.content)
    self.assertEqual(content['user_id'], self.user.id)
def test_admin_changes_password_passwords_different_doesnt_change_password(self):
    """
    If provided password doesn't match repeated password, the endpoint
    still responds with Http200 but leaves the password unchanged (the
    form error is reported in the JSON body). Test also asserts that
    endpoint returns correct user's id for whom the password change was
    attempted.
    """
    pass_1 = self.user.password
    resp = self.client.post(
        reverse("c4all_admin:change_password", args=[self.user.id]),
        data={
            "password1": "pass",
            "password2": "pass_pass"
        },
        HTTP_X_REQUESTED_WITH='XMLHttpRequest'
    )
    pass_2 = User.objects.get(id=self.user.id).password
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(pass_1, pass_2)
    content = json.loads(resp.content)
    self.assertEqual(content['user_id'], self.user.id)
def test_admin_password_change_not_provided_password2_doesnt_change_password(self):
    """
    If repeated password not provided, endpoint returns Http200 but doesn't
    change the password. Test also asserts that endpoint returns correct
    user's id for whom the password change was attempted.
    """
    pass_1 = self.user.password
    resp = self.client.post(
        reverse("c4all_admin:change_password", args=[self.user.id]),
        data={
            "password1": "pass",
        },
        HTTP_X_REQUESTED_WITH='XMLHttpRequest'
    )
    pass_2 = User.objects.get(id=self.user.id).password
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(pass_1, pass_2)
    content = json.loads(resp.content)
    self.assertEqual(content['user_id'], self.user.id)
def test_admin_password_change_not_provided_password1_doesnt_change_password(self):
    """
    If password not provided, endpoint returns Http200 but doesn't
    change the password. Test also asserts that endpoint returns correct
    user's id for whom the password change was attempted.
    """
    pass_1 = self.user.password
    resp = self.client.post(
        reverse("c4all_admin:change_password", args=[self.user.id]),
        data={
            "password2": "pass"
        },
        HTTP_X_REQUESTED_WITH='XMLHttpRequest'
    )
    pass_2 = User.objects.get(id=self.user.id).password
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(pass_1, pass_2)
    content = json.loads(resp.content)
    self.assertEqual(content['user_id'], self.user.id)
def test_admin_password_change_nonexistent_user_returns_404(self):
    """
    If user whose password admin wants to change does not exist,
    endpoint returns Http404.
    """
    pass_1 = self.user.password
    resp = self.client.post(
        reverse("c4all_admin:change_password", args=[9999]),
        data={
            "password1": "pass",
            "password2": "pass"
        },
        HTTP_X_REQUESTED_WITH='XMLHttpRequest'
    )
    # sanity check: nobody else's password was touched either
    pass_2 = User.objects.get(id=self.user.id).password
    self.assertEqual(resp.status_code, 404)
    self.assertEqual(pass_1, pass_2)
def test_change_admins_password_fails(self):
    """
    Admin's password should not be changed from c4all admin UI. Endpoint
    should return Http404.
    """
    pass_1 = self.user.password
    resp = self.client.post(
        reverse("c4all_admin:change_password", args=[self.admin.id]),
        data={
            "password1": "pass",
            "password2": "pass"
        },
        HTTP_X_REQUESTED_WITH='XMLHttpRequest'
    )
    # the regular user's password is untouched by the failed attempt
    pass_2 = User.objects.get(id=self.user.id).password
    self.assertEqual(resp.status_code, 404)
    self.assertEqual(pass_1, pass_2)
| |
"""Support for installing and building the "wheel" binary package format.
"""
import collections
import compileall
import contextlib
import csv
import importlib
import logging
import os.path
import re
import shutil
import sys
import warnings
from base64 import urlsafe_b64encode
from itertools import chain, filterfalse, starmap
from zipfile import ZipFile
from pip._vendor import pkg_resources
from pip._vendor.distlib.scripts import ScriptMaker
from pip._vendor.distlib.util import get_export_entry
from pip._vendor.six import ensure_str, ensure_text, reraise
from pip._internal.exceptions import InstallationError
from pip._internal.locations import get_major_minor_version
from pip._internal.models.direct_url import DIRECT_URL_METADATA_NAME, DirectUrl
from pip._internal.models.scheme import SCHEME_KEYS
from pip._internal.utils.filesystem import adjacent_tmp_file, replace
from pip._internal.utils.misc import captured_stdout, ensure_dir, hash_file, partition
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.unpacking import (
current_umask,
is_within_directory,
set_extracted_file_to_default_mode_plus_executable,
zip_item_is_executable,
)
from pip._internal.utils.wheel import parse_wheel, pkg_resources_distribution_for_wheel
# Use the custom cast function at runtime to make cast work,
# and import typing.cast when performing pre-commit and type
# checks
if not MYPY_CHECK_RUNNING:
from pip._internal.utils.typing import cast
else:
from email.message import Message
from typing import (
IO,
Any,
BinaryIO,
Callable,
Dict,
Iterable,
Iterator,
List,
NewType,
Optional,
Protocol,
Sequence,
Set,
Tuple,
Union,
cast,
)
from zipfile import ZipInfo
from pip._vendor.pkg_resources import Distribution
from pip._internal.models.scheme import Scheme
RecordPath = NewType('RecordPath', str)
InstalledCSVRow = Tuple[RecordPath, str, Union[int, str]]
class File(Protocol):
    """Structural interface for a single installable file from a wheel.

    Implementations in this module (ZipBackedFile, ScriptFile) track the
    file's path inside the wheel RECORD, its destination on disk, and
    whether its content was modified during installation.
    """
    # archive RECORD path of the file inside the wheel
    src_record_path = None  # type: RecordPath
    # filesystem path the file is written to
    dest_path = None  # type: str
    # True when the installed content differs from the wheel content
    changed = None  # type: bool

    def save(self):
        # type: () -> None
        pass
logger = logging.getLogger(__name__)
def rehash(path, blocksize=1 << 20):
    # type: (str, int) -> Tuple[str, str]
    """Return (encoded_digest, length) for path using hashlib.sha256()"""
    hasher, size = hash_file(path, blocksize)
    encoded = urlsafe_b64encode(hasher.digest()).decode('latin1')
    return ('sha256=' + encoded.rstrip('='), str(size))
def csv_io_kwargs(mode):
    # type: (str) -> Dict[str, Any]
    """Return keyword arguments to properly open a CSV file
    in the given mode.
    """
    # RECORD files are always UTF-8; newline='' lets the csv module
    # control line endings itself.
    return dict(mode=mode, newline='', encoding='utf-8')
def fix_script(path):
    # type: (str) -> bool
    """Replace #!python with #!/path/to/python
    Return True if file was changed.
    """
    # XXX RECORD hashes will need to be updated
    assert os.path.isfile(path)

    with open(path, 'rb') as script:
        shebang = script.readline()
        if not shebang.startswith(b'#!python'):
            # not a placeholder shebang; leave the file untouched
            return False
        remainder = script.read()

    interpreter = sys.executable.encode(sys.getfilesystemencoding())
    new_shebang = b'#!' + interpreter + os.linesep.encode("ascii")

    with open(path, 'wb') as script:
        script.write(new_shebang)
        script.write(remainder)
    return True
def wheel_root_is_purelib(metadata):
    # type: (Message) -> bool
    """True when the WHEEL metadata marks the archive root as purelib."""
    value = metadata.get("Root-Is-Purelib", "")
    return value.lower() == "true"
def get_entrypoints(distribution):
    # type: (Distribution) -> Tuple[Dict[str, str], Dict[str, str]]
    """Return ({console script: spec}, {gui script: spec}) mappings."""
    try:
        console_eps = distribution.get_entry_map('console_scripts')
        gui_eps = distribution.get_entry_map('gui_scripts')
    except KeyError:
        # Our dict-based Distribution raises KeyError if entry_points.txt
        # doesn't exist.
        return {}, {}

    def normalize(entry_point):
        # type: (pkg_resources.EntryPoint) -> Tuple[str, str]
        """Stringify an EntryPoint, drop spaces, split name from target."""
        parts = str(entry_point).replace(" ", "").split("=")
        return parts[0], parts[1]

    # map each EntryPoint object to a "module:function" spec string
    console = dict(normalize(ep) for ep in console_eps.values())
    gui = dict(normalize(ep) for ep in gui_eps.values())
    return console, gui
def message_about_scripts_not_on_PATH(scripts):
    # type: (Sequence[str]) -> Optional[str]
    """Determine if any scripts are not on PATH and format a warning.
    Returns a warning message if one or more scripts are not on PATH,
    otherwise None.
    """
    if not scripts:
        return None

    # Group scripts by the path they were installed in
    grouped_by_dir = collections.defaultdict(set)  # type: Dict[str, Set[str]]
    for destfile in scripts:
        parent_dir = os.path.dirname(destfile)
        script_name = os.path.basename(destfile)
        grouped_by_dir[parent_dir].add(script_name)

    # We don't want to warn for directories that are on PATH.
    # Normalize case and strip trailing separators for the comparison.
    not_warn_dirs = [
        os.path.normcase(i).rstrip(os.sep) for i in
        os.environ.get("PATH", "").split(os.pathsep)
    ]
    # If an executable sits with sys.executable, we don't warn for it.
    # This covers the case of venv invocations without activating the venv.
    not_warn_dirs.append(os.path.normcase(os.path.dirname(sys.executable)))
    warn_for = {
        parent_dir: scripts for parent_dir, scripts in grouped_by_dir.items()
        if os.path.normcase(parent_dir) not in not_warn_dirs
    }  # type: Dict[str, Set[str]]
    if not warn_for:
        return None

    # Format a message: one line per offending directory, naming its scripts
    msg_lines = []
    for parent_dir, dir_scripts in warn_for.items():
        sorted_scripts = sorted(dir_scripts)  # type: List[str]
        if len(sorted_scripts) == 1:
            start_text = "script {} is".format(sorted_scripts[0])
        else:
            start_text = "scripts {} are".format(
                ", ".join(sorted_scripts[:-1]) + " and " + sorted_scripts[-1]
            )

        msg_lines.append(
            "The {} installed in '{}' which is not on PATH."
            .format(start_text, parent_dir)
        )

    last_line_fmt = (
        "Consider adding {} to PATH or, if you prefer "
        "to suppress this warning, use --no-warn-script-location."
    )
    if len(msg_lines) == 1:
        msg_lines.append(last_line_fmt.format("this directory"))
    else:
        msg_lines.append(last_line_fmt.format("these directories"))

    # Add a note if any directory starts with ~
    warn_for_tilde = any(
        i[0] == "~" for i in os.environ.get("PATH", "").split(os.pathsep) if i
    )
    if warn_for_tilde:
        tilde_warning_msg = (
            "NOTE: The current PATH contains path(s) starting with `~`, "
            "which may not be expanded by all applications."
        )
        msg_lines.append(tilde_warning_msg)

    # Returns the formatted multiline message
    return "\n".join(msg_lines)
def _normalized_outrows(outrows):
    # type: (Iterable[InstalledCSVRow]) -> List[Tuple[str, str, str]]
    """Normalize the given rows of a RECORD file.

    Items in each row are converted into str. Rows are then sorted to make
    the value more predictable for tests.

    Each row is a 3-tuple (path, hash, size) and corresponds to a record of
    a RECORD file (see PEP 376 and PEP 427 for details). For the rows
    passed to this function, the size can be an integer as an int or string,
    or the empty string.
    """
    # Coerce every element to str so that rows sort without a TypeError
    # even when a path occurs twice and the size column mixes int/str;
    # see https://github.com/pypa/pip/issues/5868 for background.
    normalized = [
        (ensure_str(record_path, encoding='utf-8'), hash_, str(size))
        for record_path, hash_, size in outrows
    ]
    normalized.sort()
    return normalized
def _record_to_fs_path(record_path):
    # type: (RecordPath) -> str
    # RECORD paths are already usable as filesystem paths; this helper
    # exists only to make the RecordPath -> str conversion explicit.
    return record_path
def _fs_to_record_path(path, relative_to=None):
# type: (str, Optional[str]) -> RecordPath
if relative_to is not None:
# On Windows, do not handle relative paths if they belong to different
# logical disks
if os.path.splitdrive(path)[0].lower() == \
os.path.splitdrive(relative_to)[0].lower():
path = os.path.relpath(path, relative_to)
path = path.replace(os.path.sep, '/')
return cast('RecordPath', path)
def _parse_record_path(record_column):
    # type: (str) -> RecordPath
    # Decode the first RECORD CSV column to text (py2 compatibility) and
    # brand it as a RecordPath for the type checker.
    p = ensure_text(record_column, encoding='utf-8')
    return cast('RecordPath', p)
def get_csv_rows_for_installed(
    old_csv_rows,  # type: List[List[str]]
    installed,  # type: Dict[RecordPath, RecordPath]
    changed,  # type: Set[RecordPath]
    generated,  # type: List[str]
    lib_dir,  # type: str
):
    # type: (...) -> List[InstalledCSVRow]
    """
    :param installed: A map from archive RECORD path to installation RECORD
        path.
    """
    rows = []  # type: List[InstalledCSVRow]
    for old_row in old_csv_rows:
        if len(old_row) > 3:
            logger.warning('RECORD line has more than three elements: %s', old_row)
        source_path = _parse_record_path(old_row[0])
        # pop() so that whatever remains in `installed` afterwards is a
        # file that was installed but missing from the original RECORD
        dest_path = installed.pop(source_path, source_path)
        if dest_path in changed:
            # content was modified during install: recompute hash/size
            digest, length = rehash(_record_to_fs_path(dest_path))
        else:
            digest = old_row[1] if len(old_row) > 1 else ''
            length = old_row[2] if len(old_row) > 2 else ''
        rows.append((dest_path, digest, length))

    for generated_path in generated:
        record_path = _fs_to_record_path(generated_path, lib_dir)
        digest, length = rehash(generated_path)
        rows.append((record_path, digest, length))

    # installed files that never appeared in the old RECORD get recorded
    # without hash or size
    rows.extend((leftover, '', '') for leftover in installed.values())
    return rows
def get_console_script_specs(console):
    # type: (Dict[str, str]) -> List[str]
    """
    Given the mapping from entrypoint name to callable, return the relevant
    console script specs (strings of the form "name = module:function").
    """
    # Don't mutate caller's version
    console = console.copy()

    scripts_to_generate = []

    # Special case pip and setuptools to generate versioned wrappers
    #
    # The issue is that some projects (specifically, pip and setuptools) use
    # code in setup.py to create "versioned" entry points - pip2.7 on Python
    # 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
    # the wheel metadata at build time, and so if the wheel is installed with
    # a *different* version of Python the entry points will be wrong. The
    # correct fix for this is to enhance the metadata to be able to describe
    # such versioned entry points, but that won't happen till Metadata 2.0 is
    # available.
    # In the meantime, projects using versioned entry points will either have
    # incorrect versioned entry points, or they will not be able to distribute
    # "universal" wheels (i.e., they will need a wheel per Python version).
    #
    # Because setuptools and pip are bundled with _ensurepip and virtualenv,
    # we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
    # override the versioned entry points in the wheel and generate the
    # correct ones. This code is purely a short-term measure until Metadata 2.0
    # is available.
    #
    # To add the level of hack in this section of code, in order to support
    # ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment
    # variable which will control which version scripts get installed.
    #
    # ENSUREPIP_OPTIONS=altinstall
    #   - Only pipX.Y and easy_install-X.Y will be generated and installed
    # ENSUREPIP_OPTIONS=install
    #   - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
    #     that this option is technically if ENSUREPIP_OPTIONS is set and is
    #     not altinstall
    # DEFAULT
    #   - The default behavior is to install pip, pipX, pipX.Y, easy_install
    #     and easy_install-X.Y.
    pip_script = console.pop('pip', None)
    if pip_script:
        if "ENSUREPIP_OPTIONS" not in os.environ:
            scripts_to_generate.append('pip = ' + pip_script)

        if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
            scripts_to_generate.append(
                'pip{} = {}'.format(sys.version_info[0], pip_script)
            )

        scripts_to_generate.append(
            f'pip{get_major_minor_version()} = {pip_script}'
        )
        # Delete any other versioned pip entry points
        pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
        for k in pip_ep:
            del console[k]
    easy_install_script = console.pop('easy_install', None)
    if easy_install_script:
        if "ENSUREPIP_OPTIONS" not in os.environ:
            scripts_to_generate.append(
                'easy_install = ' + easy_install_script
            )

        scripts_to_generate.append(
            'easy_install-{} = {}'.format(
                get_major_minor_version(), easy_install_script
            )
        )
        # Delete any other versioned easy_install entry points
        easy_install_ep = [
            k for k in console if re.match(r'easy_install(-\d\.\d)?$', k)
        ]
        for k in easy_install_ep:
            del console[k]

    # Generate the console entry points specified in the wheel
    scripts_to_generate.extend(starmap('{} = {}'.format, console.items()))

    return scripts_to_generate
class ZipBackedFile:
    """A File whose content is read lazily from an open wheel ZipFile."""

    def __init__(self, src_record_path, dest_path, zip_file):
        # type: (RecordPath, str, ZipFile) -> None
        self.src_record_path = src_record_path
        self.dest_path = dest_path
        self._zip_file = zip_file
        # extraction never alters content, so this stays False
        self.changed = False

    def _getinfo(self):
        # type: () -> ZipInfo
        """Return the ZipInfo entry for this file inside the wheel."""
        return self._zip_file.getinfo(self.src_record_path)

    def save(self):
        # type: () -> None
        """Extract the file from the wheel to dest_path."""
        # directory creation is lazy and after file filtering
        # to ensure we don't install empty dirs; empty dirs can't be
        # uninstalled.
        parent_dir = os.path.dirname(self.dest_path)
        ensure_dir(parent_dir)

        # When we open the output file below, any existing file is truncated
        # before we start writing the new contents. This is fine in most
        # cases, but can cause a segfault if pip has loaded a shared
        # object (e.g. from pyopenssl through its vendored urllib3)
        # Since the shared object is mmap'd an attempt to call a
        # symbol in it will then cause a segfault. Unlinking the file
        # allows writing of new contents while allowing the process to
        # continue to use the old copy.
        if os.path.exists(self.dest_path):
            os.unlink(self.dest_path)

        zipinfo = self._getinfo()

        with self._zip_file.open(zipinfo) as f:
            with open(self.dest_path, "wb") as dest:
                shutil.copyfileobj(f, dest)

        # propagate the executable bit recorded in the zip entry
        if zip_item_is_executable(zipinfo):
            set_extracted_file_to_default_mode_plus_executable(self.dest_path)
class ScriptFile:
    """A File wrapper that rewrites a placeholder #!python shebang on save."""

    def __init__(self, file):
        # type: (File) -> None
        self._file = file
        self.src_record_path = self._file.src_record_path
        self.dest_path = self._file.dest_path
        self.changed = False

    def save(self):
        # type: () -> None
        self._file.save()
        # fix_script returns True only when the shebang was rewritten
        self.changed = fix_script(self.dest_path)
class MissingCallableSuffix(InstallationError):
    """Raised for a script entry point that lacks the ':callable' suffix."""

    def __init__(self, entry_point):
        # type: (str) -> None
        super().__init__(
            "Invalid script entry point: {} - A callable "
            "suffix is required. Cf https://packaging.python.org/"
            "specifications/entry-points/#use-for-scripts for more "
            "information.".format(entry_point)
        )
def _raise_for_invalid_entrypoint(specification):
    # type: (str) -> None
    # A parsed entry with no suffix means "name = module" without the
    # ":callable" part, which cannot be wrapped as a script.
    entry = get_export_entry(specification)
    if entry is not None and entry.suffix is None:
        raise MissingCallableSuffix(str(entry))
class PipScriptMaker(ScriptMaker):
    """ScriptMaker that validates entry points before generating wrappers."""

    def make(self, specification, options=None):
        # type: (str, Dict[str, Any]) -> List[str]
        _raise_for_invalid_entrypoint(specification)
        return super().make(specification, options)
def _install_wheel(
    name,  # type: str
    wheel_zip,  # type: ZipFile
    wheel_path,  # type: str
    scheme,  # type: Scheme
    pycompile=True,  # type: bool
    warn_script_location=True,  # type: bool
    direct_url=None,  # type: Optional[DirectUrl]
    requested=False,  # type: bool
):
    # type: (...) -> None
    """Install a wheel.

    :param name: Name of the project to install
    :param wheel_zip: open ZipFile for wheel being installed
    :param scheme: Distutils scheme dictating the install directories
    :param req_description: String used in place of the requirement, for
        logging
    :param pycompile: Whether to byte-compile installed Python files
    :param warn_script_location: Whether to check that scripts are installed
        into a directory on PATH
    :raises UnsupportedWheel:
        * when the directory holds an unpacked wheel with incompatible
          Wheel-Version
        * when the .dist-info dir does not match the wheel
    """
    info_dir, metadata = parse_wheel(wheel_zip, name)

    if wheel_root_is_purelib(metadata):
        lib_dir = scheme.purelib
    else:
        lib_dir = scheme.platlib

    # Record details of the files moved
    #   installed = files copied from the wheel to the destination
    #   changed = files changed while installing (scripts #! line typically)
    #   generated = files newly generated during the install (script wrappers)
    installed = {}  # type: Dict[RecordPath, RecordPath]
    changed = set()  # type: Set[RecordPath]
    generated = []  # type: List[str]

    def record_installed(srcfile, destfile, modified=False):
        # type: (RecordPath, str, bool) -> None
        """Map archive RECORD paths to installation RECORD paths."""
        newpath = _fs_to_record_path(destfile, lib_dir)
        installed[srcfile] = newpath
        if modified:
            changed.add(_fs_to_record_path(destfile))

    def all_paths():
        # type: () -> Iterable[RecordPath]
        names = wheel_zip.namelist()
        # If a flag is set, names may be unicode in Python 2. We convert to
        # text explicitly so these are valid for lookup in RECORD.
        decoded_names = map(ensure_text, names)
        for name in decoded_names:
            yield cast("RecordPath", name)

    def is_dir_path(path):
        # type: (RecordPath) -> bool
        return path.endswith("/")

    def assert_no_path_traversal(dest_dir_path, target_path):
        # type: (str, str) -> None
        # guard against malicious wheels writing outside the target dir
        if not is_within_directory(dest_dir_path, target_path):
            message = (
                "The wheel {!r} has a file {!r} trying to install"
                " outside the target directory {!r}"
            )
            raise InstallationError(
                message.format(wheel_path, target_path, dest_dir_path)
            )

    def root_scheme_file_maker(zip_file, dest):
        # type: (ZipFile, str) -> Callable[[RecordPath], File]
        def make_root_scheme_file(record_path):
            # type: (RecordPath) -> File
            normed_path = os.path.normpath(record_path)
            dest_path = os.path.join(dest, normed_path)
            assert_no_path_traversal(dest, dest_path)
            return ZipBackedFile(record_path, dest_path, zip_file)

        return make_root_scheme_file

    def data_scheme_file_maker(zip_file, scheme):
        # type: (ZipFile, Scheme) -> Callable[[RecordPath], File]
        scheme_paths = {}
        for key in SCHEME_KEYS:
            encoded_key = ensure_text(key)
            scheme_paths[encoded_key] = ensure_text(
                getattr(scheme, key), encoding=sys.getfilesystemencoding()
            )

        def make_data_scheme_file(record_path):
            # type: (RecordPath) -> File
            normed_path = os.path.normpath(record_path)
            try:
                # expected layout: <name>.data/<scheme key>/<subpath>
                _, scheme_key, dest_subpath = normed_path.split(os.path.sep, 2)
            except ValueError:
                message = (
                    "Unexpected file in {}: {!r}. .data directory contents"
                    " should be named like: '<scheme key>/<path>'."
                ).format(wheel_path, record_path)
                raise InstallationError(message)

            try:
                scheme_path = scheme_paths[scheme_key]
            except KeyError:
                valid_scheme_keys = ", ".join(sorted(scheme_paths))
                message = (
                    "Unknown scheme key used in {}: {} (for file {!r}). .data"
                    " directory contents should be in subdirectories named"
                    " with a valid scheme key ({})"
                ).format(
                    wheel_path, scheme_key, record_path, valid_scheme_keys
                )
                raise InstallationError(message)

            dest_path = os.path.join(scheme_path, dest_subpath)
            assert_no_path_traversal(scheme_path, dest_path)
            return ZipBackedFile(record_path, dest_path, zip_file)

        return make_data_scheme_file

    def is_data_scheme_path(path):
        # type: (RecordPath) -> bool
        return path.split("/", 1)[0].endswith(".data")

    paths = all_paths()
    file_paths = filterfalse(is_dir_path, paths)
    root_scheme_paths, data_scheme_paths = partition(
        is_data_scheme_path, file_paths
    )

    make_root_scheme_file = root_scheme_file_maker(
        wheel_zip,
        ensure_text(lib_dir, encoding=sys.getfilesystemencoding()),
    )
    files = map(make_root_scheme_file, root_scheme_paths)

    def is_script_scheme_path(path):
        # type: (RecordPath) -> bool
        parts = path.split("/", 2)
        return (
            len(parts) > 2 and
            parts[0].endswith(".data") and
            parts[1] == "scripts"
        )

    other_scheme_paths, script_scheme_paths = partition(
        is_script_scheme_path, data_scheme_paths
    )

    make_data_scheme_file = data_scheme_file_maker(wheel_zip, scheme)
    other_scheme_files = map(make_data_scheme_file, other_scheme_paths)
    files = chain(files, other_scheme_files)

    # Get the defined entry points
    distribution = pkg_resources_distribution_for_wheel(
        wheel_zip, name, wheel_path
    )
    console, gui = get_entrypoints(distribution)

    def is_entrypoint_wrapper(file):
        # type: (File) -> bool
        # EP, EP.exe and EP-script.py are scripts generated for
        # entry point EP by setuptools
        path = file.dest_path
        name = os.path.basename(path)
        if name.lower().endswith('.exe'):
            matchname = name[:-4]
        elif name.lower().endswith('-script.py'):
            matchname = name[:-10]
        elif name.lower().endswith(".pya"):
            matchname = name[:-4]
        else:
            matchname = name
        # Ignore setuptools-generated scripts
        return (matchname in console or matchname in gui)

    script_scheme_files = map(make_data_scheme_file, script_scheme_paths)
    script_scheme_files = filterfalse(
        is_entrypoint_wrapper, script_scheme_files
    )
    script_scheme_files = map(ScriptFile, script_scheme_files)
    files = chain(files, script_scheme_files)

    # Extract every selected file and record it for the new RECORD
    for file in files:
        file.save()
        record_installed(file.src_record_path, file.dest_path, file.changed)

    def pyc_source_file_paths():
        # type: () -> Iterator[str]
        # We de-duplicate installation paths, since there can be overlap (e.g.
        # file in .data maps to same location as file in wheel root).
        # Sorting installation paths makes it easier to reproduce and debug
        # issues related to permissions on existing files.
        for installed_path in sorted(set(installed.values())):
            full_installed_path = os.path.join(lib_dir, installed_path)
            if not os.path.isfile(full_installed_path):
                continue
            if not full_installed_path.endswith('.py'):
                continue
            yield full_installed_path

    def pyc_output_path(path):
        # type: (str) -> str
        """Return the path the pyc file would have been written to.
        """
        return importlib.util.cache_from_source(path)

    # Compile all of the pyc files for the installed files
    if pycompile:
        with captured_stdout() as stdout:
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore')
                for path in pyc_source_file_paths():
                    # Python 2's `compileall.compile_file` requires a str in
                    # error cases, so we must convert to the native type.
                    path_arg = ensure_str(
                        path, encoding=sys.getfilesystemencoding()
                    )
                    success = compileall.compile_file(
                        path_arg, force=True, quiet=True
                    )
                    if success:
                        pyc_path = pyc_output_path(path)
                        assert os.path.exists(pyc_path)
                        pyc_record_path = cast(
                            "RecordPath", pyc_path.replace(os.path.sep, "/")
                        )
                        record_installed(pyc_record_path, pyc_path)
        logger.debug(stdout.getvalue())

    maker = PipScriptMaker(None, scheme.scripts)

    # Ensure old scripts are overwritten.
    # See https://github.com/pypa/pip/issues/1800
    maker.clobber = True

    # Ensure we don't generate any variants for scripts because this is almost
    # never what somebody wants.
    # See https://bitbucket.org/pypa/distlib/issue/35/
    maker.variants = {''}

    # This is required because otherwise distlib creates scripts that are not
    # executable.
    # See https://bitbucket.org/pypa/distlib/issue/32/
    maker.set_mode = True

    # Generate the console and GUI entry points specified in the wheel
    scripts_to_generate = get_console_script_specs(console)

    gui_scripts_to_generate = list(starmap('{} = {}'.format, gui.items()))

    generated_console_scripts = maker.make_multiple(scripts_to_generate)
    generated.extend(generated_console_scripts)

    generated.extend(
        maker.make_multiple(gui_scripts_to_generate, {'gui': True})
    )

    if warn_script_location:
        msg = message_about_scripts_not_on_PATH(generated_console_scripts)
        if msg is not None:
            logger.warning(msg)

    generated_file_mode = 0o666 & ~current_umask()

    @contextlib.contextmanager
    def _generate_file(path, **kwargs):
        # type: (str, **Any) -> Iterator[BinaryIO]
        # write to a temp file next to `path`, then atomically replace
        with adjacent_tmp_file(path, **kwargs) as f:
            yield f
        os.chmod(f.name, generated_file_mode)
        replace(f.name, path)

    dest_info_dir = os.path.join(lib_dir, info_dir)

    # Record pip as the installer
    installer_path = os.path.join(dest_info_dir, 'INSTALLER')
    with _generate_file(installer_path) as installer_file:
        installer_file.write(b'pip\n')
    generated.append(installer_path)

    # Record the PEP 610 direct URL reference
    if direct_url is not None:
        direct_url_path = os.path.join(dest_info_dir, DIRECT_URL_METADATA_NAME)
        with _generate_file(direct_url_path) as direct_url_file:
            direct_url_file.write(direct_url.to_json().encode("utf-8"))
        generated.append(direct_url_path)

    # Record the REQUESTED file
    if requested:
        requested_path = os.path.join(dest_info_dir, 'REQUESTED')
        with open(requested_path, "w"):
            pass
        generated.append(requested_path)

    record_text = distribution.get_metadata('RECORD')
    record_rows = list(csv.reader(record_text.splitlines()))

    rows = get_csv_rows_for_installed(
        record_rows,
        installed=installed,
        changed=changed,
        generated=generated,
        lib_dir=lib_dir)

    # Record details of all files installed
    record_path = os.path.join(dest_info_dir, 'RECORD')

    with _generate_file(record_path, **csv_io_kwargs('w')) as record_file:
        # The type mypy infers for record_file is different for Python 3
        # (typing.IO[Any]) and Python 2 (typing.BinaryIO). We explicitly
        # cast to typing.IO[str] as a workaround.
        writer = csv.writer(cast('IO[str]', record_file))
        writer.writerows(_normalized_outrows(rows))
@contextlib.contextmanager
def req_error_context(req_description):
    # type: (str) -> Iterator[None]
    """Context manager that prefixes any InstallationError raised inside
    it with the requirement's description, preserving the original
    traceback via ``reraise``.
    """
    try:
        yield
    except InstallationError as exc:
        augmented = InstallationError(
            "For req: {}. {}".format(req_description, exc.args[0])
        )
        reraise(InstallationError, augmented, sys.exc_info()[2])
def install_wheel(
    name,  # type: str
    wheel_path,  # type: str
    scheme,  # type: Scheme
    req_description,  # type: str
    pycompile=True,  # type: bool
    warn_script_location=True,  # type: bool
    direct_url=None,  # type: Optional[DirectUrl]
    requested=False,  # type: bool
):
    # type: (...) -> None
    """Install the wheel at *wheel_path* into *scheme*.

    Opens the wheel as a zip archive and delegates to ``_install_wheel``;
    any InstallationError raised during installation is augmented with
    *req_description* by ``req_error_context``.
    """
    # Combine the archive handle and the error-augmenting context into a
    # single with-statement; semantics are identical to nesting them.
    with ZipFile(wheel_path, allowZip64=True) as wheel_zip, \
            req_error_context(req_description):
        _install_wheel(
            name=name,
            wheel_zip=wheel_zip,
            wheel_path=wheel_path,
            scheme=scheme,
            pycompile=pycompile,
            warn_script_location=warn_script_location,
            direct_url=direct_url,
            requested=requested,
        )
| |
# Natural Language Toolkit: Viterbi Probabilistic Parser
#
# Copyright (C) 2001-2008 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
from nltk.tree import Tree, ProbabilisticTree
from api import *
##//////////////////////////////////////////////////////
## Viterbi PCFG Parser
##//////////////////////////////////////////////////////
class ViterbiParser(ParserI):
"""
A bottom-up C{PCFG} parser that uses dynamic programming to find
the single most likely parse for a text. The C{ViterbiParser} parser
parses texts by filling in a X{most likely constituent table}.
This table records the most probable tree representation for any
given span and node value. In particular, it has an entry for
every start index, end index, and node value, recording the most
likely subtree that spans from the start index to the end index,
and has the given node value.
The C{ViterbiParser} parser fills in this table incrementally. It starts
by filling in all entries for constituents that span one element
of text (i.e., entries where the end index is one greater than the
start index). After it has filled in all table entries for
constituents that span one element of text, it fills in the
entries for constitutants that span two elements of text. It
continues filling in the entries for constituents spanning larger
and larger portions of the text, until the entire table has been
filled. Finally, it returns the table entry for a constituent
spanning the entire text, whose node value is the grammar's start
symbol.
In order to find the most likely constituent with a given span and
node value, the C{ViterbiParser} parser considers all productions that
could produce that node value. For each production, it finds all
children that collectively cover the span and have the node values
specified by the production's right hand side. If the probability
of the tree formed by applying the production to the children is
greater than the probability of the current entry in the table,
then the table is updated with this new tree.
A pseudo-code description of the algorithm used by
C{ViterbiParser} is:
- Create an empty most likely constituent table, M{MLC}.
- For M{width} in 1...len(M{text}):
- For M{start} in 1...len(M{text})-M{width}:
- For M{prod} in grammar.productions:
- For each sequence of subtrees [M{t[1]}, M{t[2]}, ...,
M{t[n]}] in M{MLC}, where M{t[i]}.node==M{prod}.rhs[i],
and the sequence covers [M{start}:M{start}+M{width}]:
- M{old_p} = M{MLC}[M{start}, M{start+width}, M{prod}.lhs]
- M{new_p} = P(M{t[1]})*P(M{t[1]})*...*P(M{t[n]})*P(M{prod})
- if M{new_p} > M{old_p}:
- M{new_tree} = Tree(M{prod}.lhs, M{t[1]}, M{t[2]},
..., M{t[n]})
- M{MLC}[M{start}, M{start+width}, M{prod}.lhs]
= M{new_tree}
- Return M{MLC}[0, len(M{text}), M{start_symbol}]
@type _grammar: C{pcfg.Grammar}
@ivar _grammar: The grammar used to parse sentences.
@type _trace: C{int}
@ivar _trace: The level of tracing output that should be generated
when parsing a text.
"""
def __init__(self, grammar, trace=0):
"""
Create a new C{ViterbiParser} parser, that uses {grammar} to
parse texts.
@type grammar: C{pcfg.Grammar}
@param grammar: The grammar used to parse texts.
@type trace: C{int}
@param trace: The level of tracing that should be used when
parsing a text. C{0} will generate no tracing output;
and higher numbers will produce more verbose tracing
output.
"""
self._grammar = grammar
self._trace = trace
def grammar(self):
return self._grammar
def trace(self, trace=2):
"""
Set the level of tracing output that should be generated when
parsing a text.
@type trace: C{int}
@param trace: The trace level. A trace level of C{0} will
generate no tracing output; and higher trace levels will
produce more verbose tracing output.
@rtype: C{None}
"""
self._trace = trace
def nbest_parse(self, tokens, n=None):
# Inherit docs from ParserI
tokens = list(tokens)
self._grammar.check_coverage(tokens)
# The most likely constituent table. This table specifies the
# most likely constituent for a given span and type.
# Constituents can be either Trees or tokens. For
# Trees, the "type" is the Nonterminal for the tree's
# root node value. For Tokens, the "type" is the token's
# type. The table is stored as a dictionary, since it is
# sparse.
constituents = {}
# Initialize the constituents dictionary with the words from
# the text.
if self._trace: print ('Inserting tokens into the most likely'+
' constituents table...')
for index in range(len(tokens)):
token = tokens[index]
constituents[index,index+1,token] = token
if self._trace > 1:
self._trace_lexical_insertion(token, index, len(tokens))
# Consider each span of length 1, 2, ..., n; and add any trees
# that might cover that span to the constituents dictionary.
for length in range(1, len(tokens)+1):
if self._trace:
print ('Finding the most likely constituents'+
' spanning %d text elements...' % length)
#print constituents
for start in range(len(tokens)-length+1):
span = (start, start+length)
self._add_constituents_spanning(span, constituents,
tokens)
# Find all trees that span the entire text & have the right cat
trees = [constituents.get((0, len(tokens),
self._grammar.start()), [])]
# Sort the trees, and return the requested number of them.
trees.sort(lambda t1,t2: cmp(t2.prob(), t1.prob()))
return trees[:n]
def _add_constituents_spanning(self, span, constituents, tokens):
"""
Find any constituents that might cover C{span}, and add them
to the most likely constituents table.
@rtype: C{None}
@type span: C{(int, int)}
@param span: The section of the text for which we are
trying to find possible constituents. The span is
specified as a pair of integers, where the first integer
is the index of the first token that should be included in
the constituent; and the second integer is the index of
the first token that should not be included in the
constituent. I.e., the constituent should cover
C{M{text}[span[0]:span[1]]}, where C{M{text}} is the text
that we are parsing.
@type constituents: C{dictionary} from
C{(int,int,Nonterminal)} to (C{ProbabilisticToken} or
C{ProbabilisticTree}).
@param constituents: The most likely constituents table. This
table records the most probable tree representation for
any given span and node value. In particular,
C{constituents(M{s},M{e},M{nv})} is the most likely
C{ProbabilisticTree} that covers C{M{text}[M{s}:M{e}]}
and has a node value C{M{nv}.symbol()}, where C{M{text}}
is the text that we are parsing. When
C{_add_constituents_spanning} is called, C{constituents}
should contain all possible constituents that are shorter
than C{span}.
@type tokens: C{list} of tokens
@param tokens: The text we are parsing. This is only used for
trace output.
"""
# Since some of the grammar productions may be unary, we need to
# repeatedly try all of the productions until none of them add any
# new constituents.
changed = 1
while changed:
changed = 0
# Find all ways instantiations of the grammar productions that
# cover the span.
instantiations = self._find_instantiations(span, constituents)
# For each production instantiation, add a new
# ProbabilisticTree whose probability is the product
# of the childrens' probabilities and the production's
# probability.
for (production, children) in instantiations:
subtrees = [c for c in children if isinstance(c, Tree)]
p = reduce(lambda pr,t:pr*t.prob(),
subtrees, production.prob())
node = production.lhs().symbol()
tree = ProbabilisticTree(node, children, prob=p)
# If it's new a constituent, then add it to the
# constituents dictionary.
c = constituents.get((span[0], span[1], production.lhs()),
None)
if self._trace > 1:
if c is None or c != tree:
if c is None or c.prob() < tree.prob():
print ' Insert:',
else:
print ' Discard:',
self._trace_production(production, p, span, len(tokens))
if c is None or c.prob() < tree.prob():
constituents[span[0], span[1], production.lhs()] = tree
changed = 1
def _find_instantiations(self, span, constituents):
"""
@return: a list of the production instantiations that cover a
given span of the text. A X{production instantiation} is
a tuple containing a production and a list of children,
where the production's right hand side matches the list of
children; and the children cover C{span}. @rtype: C{list}
of C{pair} of C{Production}, (C{list} of
(C{ProbabilisticTree} or token.
@type span: C{(int, int)}
@param span: The section of the text for which we are
trying to find production instantiations. The span is
specified as a pair of integers, where the first integer
is the index of the first token that should be covered by
the production instantiation; and the second integer is
the index of the first token that should not be covered by
the production instantiation.
@type constituents: C{dictionary} from
C{(int,int,Nonterminal)} to (C{ProbabilisticToken} or
C{ProbabilisticTree}).
@param constituents: The most likely constituents table. This
table records the most probable tree representation for
any given span and node value. See the module
documentation for more information.
"""
rv = []
for production in self._grammar.productions():
childlists = self._match_rhs(production.rhs(), span, constituents)
for childlist in childlists:
rv.append( (production, childlist) )
return rv
def _match_rhs(self, rhs, span, constituents):
"""
@return: a set of all the lists of children that cover C{span}
and that match C{rhs}.
@rtype: C{list} of (C{list} of C{ProbabilisticTree} or
C{Token})
@type rhs: C{list} of C{Nonterminal} or (any)
@param rhs: The list specifying what kinds of children need to
cover C{span}. Each nonterminal in C{rhs} specifies
that the corresponding child should be a tree whose node
value is that nonterminal's symbol. Each terminal in C{rhs}
specifies that the corresponding child should be a token
whose type is that terminal.
@type span: C{(int, int)}
@param span: The section of the text for which we are
trying to find child lists. The span is specified as a
pair of integers, where the first integer is the index of
the first token that should be covered by the child list;
and the second integer is the index of the first token
that should not be covered by the child list.
@type constituents: C{dictionary} from
C{(int,int,Nonterminal)} to (C{ProbabilisticToken} or
C{ProbabilisticTree}).
@param constituents: The most likely constituents table. This
table records the most probable tree representation for
any given span and node value. See the module
documentation for more information.
"""
(start, end) = span
# Base case
if start >= end and rhs == (): return [[]]
if start >= end or rhs == (): return []
# Find everything that matches the 1st symbol of the RHS
childlists = []
for split in range(start, end+1):
l=constituents.get((start,split,rhs[0]))
if l is not None:
rights = self._match_rhs(rhs[1:], (split,end), constituents)
childlists += [[l]+r for r in rights]
return childlists
def _trace_production(self, production, p, span, width):
"""
Print trace output indicating that a given production has been
applied at a given location.
@param production: The production that has been applied
@type production: C{Production}
@param p: The probability of the tree produced by the production.
@type p: C{float}
@param span: The span of the production
@type span: C{tuple}
@rtype: C{None}
"""
str = '|' + '.' * span[0]
str += '=' * (span[1] - span[0])
str += '.' * (width - span[1]) + '| '
str += '%s' % production
if self._trace > 2: str = '%-40s %12.10f ' % (str, p)
print str
def _trace_lexical_insertion(self, token, index, width):
str = ' Insert: |' + '.' * index + '=' + '.' * (width-index-1) + '| '
str += '%s' % (token,)
print str
def __repr__(self):
return '<ViterbiParser for %r>' % self._grammar
##//////////////////////////////////////////////////////
## Test Code
##//////////////////////////////////////////////////////
def demo():
"""
A demonstration of the probabilistic parsers. The user is
prompted to select which demo to run, and how many parses should
be found; and then each parser is run on the same demo, and a
summary of the results are displayed.
"""
import sys, time
import nltk
from nltk import tokenize
from nltk.parse import ViterbiParser
# Define two demos. Each demo has a sentence and a grammar.
demos = [('I saw the man with my telescope', nltk.toy_pcfg1),
('the boy saw Jack with Bob under the table with a telescope', nltk.toy_pcfg2)]
# Ask the user which demo they want to use.
print
for i in range(len(demos)):
print '%3s: %s' % (i+1, demos[i][0])
print ' %r' % demos[i][1]
print
print 'Which demo (%d-%d)? ' % (1, len(demos)),
try:
snum = int(sys.stdin.readline().strip())-1
sent, grammar = demos[snum]
except:
print 'Bad sentence number'
return
# Tokenize the sentence.
tokens = sent.split()
parser = ViterbiParser(grammar)
all_parses = {}
print '\nsent: %s\nparser: %s\ngrammar: %s' % (sent,parser,grammar)
parser.trace(3)
t = time.time()
parses = parser.nbest_parse(tokens)
time = time.time()-t
if parses:
average = reduce(lambda a,b:a+b.prob(), parses, 0)/len(parses)
else:
average = 0
num_parses = len(parses)
for p in parses:
all_parses[p.freeze()] = 1
# Print some summary statistics
print
print 'Time (secs) # Parses Average P(parse)'
print '-----------------------------------------'
print '%11.4f%11d%19.14f' % (time, num_parses, average)
parses = all_parses.keys()
if parses:
p = reduce(lambda a,b:a+b.prob(), parses, 0)/len(parses)
else: p = 0
print '------------------------------------------'
print '%11s%11d%19.14f' % ('n/a', len(parses), p)
# Ask the user if we should draw the parses.
print
print 'Draw parses (y/n)? ',
if sys.stdin.readline().strip().lower().startswith('y'):
from nltk.draw.tree import draw_trees
print ' please wait...'
draw_trees(*parses)
# Ask the user if we should print the parses.
print
print 'Print parses (y/n)? ',
if sys.stdin.readline().strip().lower().startswith('y'):
for parse in parses:
print parse
# Run the interactive demo when this module is executed as a script.
if __name__ == '__main__':
    demo()
| |
#!/usr/bin/env python
"""
Copyright (c) 2014-2018 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import eth_ep
import ip_ep
# Device under test and the Verilog sources needed to simulate it.
module = 'ip_eth_tx_64'
testbench = 'test_%s' % module

srcs = ["../rtl/%s.v" % module, "%s.v" % testbench]
src = ' '.join(srcs)

# Icarus Verilog command that compiles the cosimulation binary.
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
s_ip_hdr_valid = Signal(bool(0))
s_eth_dest_mac = Signal(intbv(0)[48:])
s_eth_src_mac = Signal(intbv(0)[48:])
s_eth_type = Signal(intbv(0)[16:])
s_ip_dscp = Signal(intbv(0)[6:])
s_ip_ecn = Signal(intbv(0)[2:])
s_ip_length = Signal(intbv(0)[16:])
s_ip_identification = Signal(intbv(0)[16:])
s_ip_flags = Signal(intbv(0)[3:])
s_ip_fragment_offset = Signal(intbv(0)[13:])
s_ip_ttl = Signal(intbv(0)[8:])
s_ip_protocol = Signal(intbv(0)[8:])
s_ip_source_ip = Signal(intbv(0)[32:])
s_ip_dest_ip = Signal(intbv(0)[32:])
s_ip_payload_axis_tdata = Signal(intbv(0)[64:])
s_ip_payload_axis_tkeep = Signal(intbv(0)[8:])
s_ip_payload_axis_tvalid = Signal(bool(0))
s_ip_payload_axis_tlast = Signal(bool(0))
s_ip_payload_axis_tuser = Signal(bool(0))
m_eth_payload_axis_tready = Signal(bool(0))
m_eth_hdr_ready = Signal(bool(0))
# Outputs
s_ip_hdr_ready = Signal(bool(0))
s_ip_payload_axis_tready = Signal(bool(0))
m_eth_hdr_valid = Signal(bool(0))
m_eth_dest_mac = Signal(intbv(0)[48:])
m_eth_src_mac = Signal(intbv(0)[48:])
m_eth_type = Signal(intbv(0)[16:])
m_eth_payload_axis_tdata = Signal(intbv(0)[64:])
m_eth_payload_axis_tkeep = Signal(intbv(0)[8:])
m_eth_payload_axis_tvalid = Signal(bool(0))
m_eth_payload_axis_tlast = Signal(bool(0))
m_eth_payload_axis_tuser = Signal(bool(0))
busy = Signal(bool(0))
error_payload_early_termination = Signal(bool(0))
# sources and sinks
source_pause = Signal(bool(0))
sink_pause = Signal(bool(0))
source = ip_ep.IPFrameSource()
source_logic = source.create_logic(
clk,
rst,
ip_hdr_ready=s_ip_hdr_ready,
ip_hdr_valid=s_ip_hdr_valid,
eth_dest_mac=s_eth_dest_mac,
eth_src_mac=s_eth_src_mac,
eth_type=s_eth_type,
ip_dscp=s_ip_dscp,
ip_ecn=s_ip_ecn,
ip_length=s_ip_length,
ip_identification=s_ip_identification,
ip_flags=s_ip_flags,
ip_fragment_offset=s_ip_fragment_offset,
ip_ttl=s_ip_ttl,
ip_protocol=s_ip_protocol,
ip_source_ip=s_ip_source_ip,
ip_dest_ip=s_ip_dest_ip,
ip_payload_tdata=s_ip_payload_axis_tdata,
ip_payload_tkeep=s_ip_payload_axis_tkeep,
ip_payload_tvalid=s_ip_payload_axis_tvalid,
ip_payload_tready=s_ip_payload_axis_tready,
ip_payload_tlast=s_ip_payload_axis_tlast,
ip_payload_tuser=s_ip_payload_axis_tuser,
pause=source_pause,
name='source'
)
sink = eth_ep.EthFrameSink()
sink_logic = sink.create_logic(
clk,
rst,
eth_hdr_ready=m_eth_hdr_ready,
eth_hdr_valid=m_eth_hdr_valid,
eth_dest_mac=m_eth_dest_mac,
eth_src_mac=m_eth_src_mac,
eth_type=m_eth_type,
eth_payload_tdata=m_eth_payload_axis_tdata,
eth_payload_tkeep=m_eth_payload_axis_tkeep,
eth_payload_tvalid=m_eth_payload_axis_tvalid,
eth_payload_tready=m_eth_payload_axis_tready,
eth_payload_tlast=m_eth_payload_axis_tlast,
eth_payload_tuser=m_eth_payload_axis_tuser,
pause=sink_pause,
name='sink'
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
s_ip_hdr_valid=s_ip_hdr_valid,
s_ip_hdr_ready=s_ip_hdr_ready,
s_eth_dest_mac=s_eth_dest_mac,
s_eth_src_mac=s_eth_src_mac,
s_eth_type=s_eth_type,
s_ip_dscp=s_ip_dscp,
s_ip_ecn=s_ip_ecn,
s_ip_length=s_ip_length,
s_ip_identification=s_ip_identification,
s_ip_flags=s_ip_flags,
s_ip_fragment_offset=s_ip_fragment_offset,
s_ip_ttl=s_ip_ttl,
s_ip_protocol=s_ip_protocol,
s_ip_source_ip=s_ip_source_ip,
s_ip_dest_ip=s_ip_dest_ip,
s_ip_payload_axis_tdata=s_ip_payload_axis_tdata,
s_ip_payload_axis_tkeep=s_ip_payload_axis_tkeep,
s_ip_payload_axis_tvalid=s_ip_payload_axis_tvalid,
s_ip_payload_axis_tready=s_ip_payload_axis_tready,
s_ip_payload_axis_tlast=s_ip_payload_axis_tlast,
s_ip_payload_axis_tuser=s_ip_payload_axis_tuser,
m_eth_hdr_valid=m_eth_hdr_valid,
m_eth_hdr_ready=m_eth_hdr_ready,
m_eth_dest_mac=m_eth_dest_mac,
m_eth_src_mac=m_eth_src_mac,
m_eth_type=m_eth_type,
m_eth_payload_axis_tdata=m_eth_payload_axis_tdata,
m_eth_payload_axis_tkeep=m_eth_payload_axis_tkeep,
m_eth_payload_axis_tvalid=m_eth_payload_axis_tvalid,
m_eth_payload_axis_tready=m_eth_payload_axis_tready,
m_eth_payload_axis_tlast=m_eth_payload_axis_tlast,
m_eth_payload_axis_tuser=m_eth_payload_axis_tuser,
busy=busy,
error_payload_early_termination=error_payload_early_termination
)
@always(delay(4))
def clkgen():
clk.next = not clk
error_payload_early_termination_asserted = Signal(bool(0))
@always(clk.posedge)
def monitor():
if (error_payload_early_termination):
error_payload_early_termination_asserted.next = 1
def wait_normal():
while s_ip_payload_axis_tvalid or m_eth_payload_axis_tvalid or s_ip_hdr_valid:
yield clk.posedge
def wait_pause_source():
while s_ip_payload_axis_tvalid or m_eth_payload_axis_tvalid or s_ip_hdr_valid:
yield clk.posedge
yield clk.posedge
source_pause.next = False
yield clk.posedge
source_pause.next = True
yield clk.posedge
source_pause.next = False
def wait_pause_sink():
while s_ip_payload_axis_tvalid or m_eth_payload_axis_tvalid or s_ip_hdr_valid:
sink_pause.next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
sink_pause.next = False
yield clk.posedge
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
for payload_len in range(1,18):
yield clk.posedge
print("test 1: test packet, length %d" % payload_len)
current_test.next = 1
test_frame = ip_ep.IPFrame()
test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame.eth_src_mac = 0x5A5152535455
test_frame.eth_type = 0x0800
test_frame.ip_version = 4
test_frame.ip_ihl = 5
test_frame.ip_length = None
test_frame.ip_identification = 0
test_frame.ip_flags = 2
test_frame.ip_fragment_offset = 0
test_frame.ip_ttl = 64
test_frame.ip_protocol = 0x11
test_frame.ip_header_checksum = None
test_frame.ip_source_ip = 0xc0a80164
test_frame.ip_dest_ip = 0xc0a80165
test_frame.payload = bytearray(range(payload_len))
test_frame.build()
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source.send(test_frame)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink.wait()
rx_frame = sink.recv()
check_frame = ip_ep.IPFrame()
check_frame.parse_eth(rx_frame)
assert check_frame == test_frame
assert sink.empty()
yield delay(100)
yield clk.posedge
print("test 2: back-to-back packets, length %d" % payload_len)
current_test.next = 2
test_frame1 = ip_ep.IPFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A5152535455
test_frame1.eth_type = 0x0800
test_frame1.ip_version = 4
test_frame1.ip_ihl = 5
test_frame1.ip_length = None
test_frame1.ip_identification = 0
test_frame1.ip_flags = 2
test_frame1.ip_fragment_offset = 0
test_frame1.ip_ttl = 64
test_frame1.ip_protocol = 0x11
test_frame1.ip_header_checksum = None
test_frame1.ip_source_ip = 0xc0a80164
test_frame1.ip_dest_ip = 0xc0a80165
test_frame1.payload = bytearray(range(payload_len))
test_frame1.build()
test_frame2 = ip_ep.IPFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A5152535455
test_frame2.eth_type = 0x0800
test_frame2.ip_version = 4
test_frame2.ip_ihl = 5
test_frame2.ip_length = None
test_frame2.ip_identification = 0
test_frame2.ip_flags = 2
test_frame2.ip_fragment_offset = 0
test_frame2.ip_ttl = 64
test_frame2.ip_protocol = 0x11
test_frame2.ip_header_checksum = None
test_frame2.ip_source_ip = 0xc0a80164
test_frame2.ip_dest_ip = 0xc0a80166
test_frame2.payload = bytearray(range(payload_len))
test_frame2.build()
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source.send(test_frame1)
source.send(test_frame2)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink.wait()
rx_frame = sink.recv()
check_frame = ip_ep.IPFrame()
check_frame.parse_eth(rx_frame)
assert check_frame == test_frame1
yield sink.wait()
rx_frame = sink.recv()
check_frame = ip_ep.IPFrame()
check_frame.parse_eth(rx_frame)
assert check_frame == test_frame2
assert sink.empty()
yield delay(100)
yield clk.posedge
print("test 3: tuser assert, length %d" % payload_len)
current_test.next = 3
test_frame1 = ip_ep.IPFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A5152535455
test_frame1.eth_type = 0x0800
test_frame1.ip_version = 4
test_frame1.ip_ihl = 5
test_frame1.ip_length = None
test_frame1.ip_identification = 0
test_frame1.ip_flags = 2
test_frame1.ip_fragment_offset = 0
test_frame1.ip_ttl = 64
test_frame1.ip_protocol = 0x11
test_frame1.ip_header_checksum = None
test_frame1.ip_source_ip = 0xc0a80164
test_frame1.ip_dest_ip = 0xc0a80165
test_frame1.payload = bytearray(range(payload_len))
test_frame1.build()
test_frame2 = ip_ep.IPFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A5152535455
test_frame2.eth_type = 0x0800
test_frame2.ip_version = 4
test_frame2.ip_ihl = 5
test_frame2.ip_length = None
test_frame2.ip_identification = 0
test_frame2.ip_flags = 2
test_frame2.ip_fragment_offset = 0
test_frame2.ip_ttl = 64
test_frame2.ip_protocol = 0x11
test_frame2.ip_header_checksum = None
test_frame2.ip_source_ip = 0xc0a80164
test_frame2.ip_dest_ip = 0xc0a80166
test_frame2.payload = bytearray(range(payload_len))
test_frame2.build()
test_frame1.payload.user = 1
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source.send(test_frame1)
source.send(test_frame2)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink.wait()
rx_frame = sink.recv()
check_frame = ip_ep.IPFrame()
check_frame.parse_eth(rx_frame)
assert check_frame == test_frame1
assert rx_frame.payload.user[-1]
yield sink.wait()
rx_frame = sink.recv()
check_frame = ip_ep.IPFrame()
check_frame.parse_eth(rx_frame)
assert check_frame == test_frame2
assert sink.empty()
yield delay(100)
yield clk.posedge
print("test 4: trailing bytes (1), length %d" % payload_len)
current_test.next = 4
test_frame1 = ip_ep.IPFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A5152535455
test_frame1.eth_type = 0x0800
test_frame1.ip_version = 4
test_frame1.ip_ihl = 5
test_frame1.ip_length = None
test_frame1.ip_identification = 0
test_frame1.ip_flags = 2
test_frame1.ip_fragment_offset = 0
test_frame1.ip_ttl = 64
test_frame1.ip_protocol = 0x11
test_frame1.ip_header_checksum = None
test_frame1.ip_source_ip = 0xc0a80164
test_frame1.ip_dest_ip = 0xc0a80165
test_frame1.payload = bytearray(range(payload_len))
test_frame1.build()
test_frame2 = ip_ep.IPFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A5152535455
test_frame2.eth_type = 0x0800
test_frame2.ip_version = 4
test_frame2.ip_ihl = 5
test_frame2.ip_length = None
test_frame2.ip_identification = 0
test_frame2.ip_flags = 2
test_frame2.ip_fragment_offset = 0
test_frame2.ip_ttl = 64
test_frame2.ip_protocol = 0x11
test_frame2.ip_header_checksum = None
test_frame2.ip_source_ip = 0xc0a80164
test_frame2.ip_dest_ip = 0xc0a80166
test_frame2.payload = bytearray(range(payload_len))
test_frame2.build()
test_frame1a = ip_ep.IPFrame(test_frame1)
test_frame1a.payload.data += bytearray(b'\x00')
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source.send(test_frame1a)
source.send(test_frame2)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink.wait()
rx_frame = sink.recv()
check_frame = ip_ep.IPFrame()
check_frame.parse_eth(rx_frame)
assert check_frame == test_frame1
yield sink.wait()
rx_frame = sink.recv()
check_frame = ip_ep.IPFrame()
check_frame.parse_eth(rx_frame)
assert check_frame == test_frame2
assert sink.empty()
yield delay(100)
yield clk.posedge
print("test 5: trailing bytes (10), length %d" % payload_len)
current_test.next = 5
test_frame1 = ip_ep.IPFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A5152535455
test_frame1.eth_type = 0x0800
test_frame1.ip_version = 4
test_frame1.ip_ihl = 5
test_frame1.ip_length = None
test_frame1.ip_identification = 0
test_frame1.ip_flags = 2
test_frame1.ip_fragment_offset = 0
test_frame1.ip_ttl = 64
test_frame1.ip_protocol = 0x11
test_frame1.ip_header_checksum = None
test_frame1.ip_source_ip = 0xc0a80164
test_frame1.ip_dest_ip = 0xc0a80165
test_frame1.payload = bytearray(range(payload_len))
test_frame1.build()
test_frame2 = ip_ep.IPFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A5152535455
test_frame2.eth_type = 0x0800
test_frame2.ip_version = 4
test_frame2.ip_ihl = 5
test_frame2.ip_length = None
test_frame2.ip_identification = 0
test_frame2.ip_flags = 2
test_frame2.ip_fragment_offset = 0
test_frame2.ip_ttl = 64
test_frame2.ip_protocol = 0x11
test_frame2.ip_header_checksum = None
test_frame2.ip_source_ip = 0xc0a80164
test_frame2.ip_dest_ip = 0xc0a80166
test_frame2.payload = bytearray(range(payload_len))
test_frame2.build()
test_frame1a = ip_ep.IPFrame(test_frame1)
test_frame1a.payload.data += bytearray(b'\x00'*10)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source.send(test_frame1a)
source.send(test_frame2)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink.wait()
rx_frame = sink.recv()
check_frame = ip_ep.IPFrame()
check_frame.parse_eth(rx_frame)
assert check_frame == test_frame1
yield sink.wait()
rx_frame = sink.recv()
check_frame = ip_ep.IPFrame()
check_frame.parse_eth(rx_frame)
assert check_frame == test_frame2
assert sink.empty()
yield delay(100)
yield clk.posedge
print("test 6: trailing bytes with tuser assert (1), length %d" % payload_len)
current_test.next = 6
test_frame1 = ip_ep.IPFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A5152535455
test_frame1.eth_type = 0x0800
test_frame1.ip_version = 4
test_frame1.ip_ihl = 5
test_frame1.ip_length = None
test_frame1.ip_identification = 0
test_frame1.ip_flags = 2
test_frame1.ip_fragment_offset = 0
test_frame1.ip_ttl = 64
test_frame1.ip_protocol = 0x11
test_frame1.ip_header_checksum = None
test_frame1.ip_source_ip = 0xc0a80164
test_frame1.ip_dest_ip = 0xc0a80165
test_frame1.payload = bytearray(range(payload_len))
test_frame1.build()
test_frame2 = ip_ep.IPFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A5152535455
test_frame2.eth_type = 0x0800
test_frame2.ip_version = 4
test_frame2.ip_ihl = 5
test_frame2.ip_length = None
test_frame2.ip_identification = 0
test_frame2.ip_flags = 2
test_frame2.ip_fragment_offset = 0
test_frame2.ip_ttl = 64
test_frame2.ip_protocol = 0x11
test_frame2.ip_header_checksum = None
test_frame2.ip_source_ip = 0xc0a80164
test_frame2.ip_dest_ip = 0xc0a80166
test_frame2.payload = bytearray(range(payload_len))
test_frame2.build()
test_frame1a = ip_ep.IPFrame(test_frame1)
test_frame1a.payload.data += bytearray(b'\x00')
test_frame1a.payload.user = 1
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source.send(test_frame1a)
source.send(test_frame2)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink.wait()
rx_frame = sink.recv()
check_frame = ip_ep.IPFrame()
check_frame.parse_eth(rx_frame)
assert check_frame == test_frame1
assert rx_frame.payload.user[-1]
yield sink.wait()
rx_frame = sink.recv()
check_frame = ip_ep.IPFrame()
check_frame.parse_eth(rx_frame)
assert check_frame == test_frame2
assert sink.empty()
yield delay(100)
yield clk.posedge
print("test 7: trailing bytes with tuser assert (10), length %d" % payload_len)
current_test.next = 7
test_frame1 = ip_ep.IPFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A5152535455
test_frame1.eth_type = 0x0800
test_frame1.ip_version = 4
test_frame1.ip_ihl = 5
test_frame1.ip_length = None
test_frame1.ip_identification = 0
test_frame1.ip_flags = 2
test_frame1.ip_fragment_offset = 0
test_frame1.ip_ttl = 64
test_frame1.ip_protocol = 0x11
test_frame1.ip_header_checksum = None
test_frame1.ip_source_ip = 0xc0a80164
test_frame1.ip_dest_ip = 0xc0a80165
test_frame1.payload = bytearray(range(payload_len))
test_frame1.build()
test_frame2 = ip_ep.IPFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A5152535455
test_frame2.eth_type = 0x0800
test_frame2.ip_version = 4
test_frame2.ip_ihl = 5
test_frame2.ip_length = None
test_frame2.ip_identification = 0
test_frame2.ip_flags = 2
test_frame2.ip_fragment_offset = 0
test_frame2.ip_ttl = 64
test_frame2.ip_protocol = 0x11
test_frame2.ip_header_checksum = None
test_frame2.ip_source_ip = 0xc0a80164
test_frame2.ip_dest_ip = 0xc0a80166
test_frame2.payload = bytearray(range(payload_len))
test_frame2.build()
test_frame1a = ip_ep.IPFrame(test_frame1)
test_frame1a.payload.data += bytearray(b'\x00'*10)
test_frame1a.payload.user = 1
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source.send(test_frame1a)
source.send(test_frame2)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink.wait()
rx_frame = sink.recv()
check_frame = ip_ep.IPFrame()
check_frame.parse_eth(rx_frame)
assert check_frame == test_frame1
assert rx_frame.payload.user[-1]
yield sink.wait()
rx_frame = sink.recv()
check_frame = ip_ep.IPFrame()
check_frame.parse_eth(rx_frame)
assert check_frame == test_frame2
assert sink.empty()
yield delay(100)
yield clk.posedge
print("test 8: truncated payload (1), length %d" % payload_len)
current_test.next = 8
test_frame1 = ip_ep.IPFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A5152535455
test_frame1.eth_type = 0x0800
test_frame1.ip_version = 4
test_frame1.ip_ihl = 5
test_frame1.ip_length = None
test_frame1.ip_identification = 0
test_frame1.ip_flags = 2
test_frame1.ip_fragment_offset = 0
test_frame1.ip_ttl = 64
test_frame1.ip_protocol = 0x11
test_frame1.ip_header_checksum = None
test_frame1.ip_source_ip = 0xc0a80164
test_frame1.ip_dest_ip = 0xc0a80165
test_frame1.payload = bytearray(range(payload_len+1))
test_frame1.build()
test_frame2 = ip_ep.IPFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A5152535455
test_frame2.eth_type = 0x0800
test_frame2.ip_version = 4
test_frame2.ip_ihl = 5
test_frame2.ip_length = None
test_frame2.ip_identification = 0
test_frame2.ip_flags = 2
test_frame2.ip_fragment_offset = 0
test_frame2.ip_ttl = 64
test_frame2.ip_protocol = 0x11
test_frame2.ip_header_checksum = None
test_frame2.ip_source_ip = 0xc0a80164
test_frame2.ip_dest_ip = 0xc0a80166
test_frame2.payload = bytearray(range(payload_len))
test_frame2.build()
test_frame1a = ip_ep.IPFrame(test_frame1)
test_frame1a.payload.data = test_frame1a.payload.data[:-1]
for wait in wait_normal, wait_pause_source, wait_pause_sink:
error_payload_early_termination_asserted.next = 0
source.send(test_frame1a)
source.send(test_frame2)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame.payload.user[-1]
assert error_payload_early_termination_asserted
yield sink.wait()
rx_frame = sink.recv()
check_frame = ip_ep.IPFrame()
check_frame.parse_eth(rx_frame)
assert check_frame == test_frame2
assert sink.empty()
yield delay(100)
yield clk.posedge
print("test 9: truncated payload (10), length %d" % payload_len)
current_test.next = 9
test_frame1 = ip_ep.IPFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A5152535455
test_frame1.eth_type = 0x0800
test_frame1.ip_version = 4
test_frame1.ip_ihl = 5
test_frame1.ip_length = None
test_frame1.ip_identification = 0
test_frame1.ip_flags = 2
test_frame1.ip_fragment_offset = 0
test_frame1.ip_ttl = 64
test_frame1.ip_protocol = 0x11
test_frame1.ip_header_checksum = None
test_frame1.ip_source_ip = 0xc0a80164
test_frame1.ip_dest_ip = 0xc0a80165
test_frame1.payload = bytearray(range(payload_len+10))
test_frame1.build()
test_frame2 = ip_ep.IPFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A5152535455
test_frame2.eth_type = 0x0800
test_frame2.ip_version = 4
test_frame2.ip_ihl = 5
test_frame2.ip_length = None
test_frame2.ip_identification = 0
test_frame2.ip_flags = 2
test_frame2.ip_fragment_offset = 0
test_frame2.ip_ttl = 64
test_frame2.ip_protocol = 0x11
test_frame2.ip_header_checksum = None
test_frame2.ip_source_ip = 0xc0a80164
test_frame2.ip_dest_ip = 0xc0a80166
test_frame2.payload = bytearray(range(payload_len))
test_frame2.build()
test_frame1a = ip_ep.IPFrame(test_frame1)
test_frame1a.payload.data = test_frame1a.payload.data[:-10]
for wait in wait_normal, wait_pause_source, wait_pause_sink:
error_payload_early_termination_asserted.next = 0
source.send(test_frame1a)
source.send(test_frame2)
yield clk.posedge
yield clk.posedge
yield wait()
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame.payload.user[-1]
assert error_payload_early_termination_asserted
yield sink.wait()
rx_frame = sink.recv()
check_frame = ip_ep.IPFrame()
check_frame.parse_eth(rx_frame)
assert check_frame == test_frame2
assert sink.empty()
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
    """Assemble the MyHDL simulation from the testbench and run it to completion."""
    Simulation(bench()).run()
# Allow running this testbench directly from the command line.
if __name__ == '__main__':
    print("Running test...")
    test_bench()
| |
from __future__ import absolute_import
import logging
import operator
import os
import tempfile
import shutil
import warnings
try:
import wheel
except ImportError:
wheel = None
from pip.req import RequirementSet
from pip.basecommand import RequirementCommand
from pip.locations import virtualenv_no_global, distutils_scheme
from pip.exceptions import (
InstallationError, CommandError, PreviousBuildDirError,
)
from pip import cmdoptions
from pip.utils import ensure_dir, get_installed_version
from pip.utils.build import BuildDirectory
from pip.utils.deprecation import RemovedInPip10Warning
from pip.utils.filesystem import check_path_owner
from pip.wheel import WheelCache, WheelBuilder
# Module-level logger, named after this module per stdlib logging convention.
logger = logging.getLogger(__name__)
class InstallCommand(RequirementCommand):
    """
    Install packages from:

    - PyPI (and other indexes) using requirement specifiers.
    - VCS project urls.
    - Local project directories.
    - Local or remote source archives.

    pip also supports installing from "requirements files", which provide
    an easy way to specify a whole environment to be installed.
    """
    # Command metadata consumed by the base command/parser machinery.
    name = 'install'

    usage = """
      %prog [options] <requirement specifier> [package-index-options] ...
      %prog [options] -r <requirements file> [package-index-options] ...
      %prog [options] [-e] <vcs project url> ...
      %prog [options] [-e] <local project path> ...
      %prog [options] <archive url/path> ..."""

    summary = 'Install packages.'

    def __init__(self, *args, **kw):
        """Register every ``pip install`` command-line option on the parser.

        Option registration order matters: it controls the order options are
        listed in ``--help`` output.
        """
        super(InstallCommand, self).__init__(*args, **kw)

        cmd_opts = self.cmd_opts

        # Requirement-selection options shared with other commands.
        cmd_opts.add_option(cmdoptions.requirements())
        cmd_opts.add_option(cmdoptions.constraints())
        cmd_opts.add_option(cmdoptions.no_deps())
        cmd_opts.add_option(cmdoptions.pre())

        cmd_opts.add_option(cmdoptions.editable())
        cmd_opts.add_option(
            '-t', '--target',
            dest='target_dir',
            metavar='dir',
            default=None,
            help='Install packages into <dir>. '
                 'By default this will not replace existing files/folders in '
                 '<dir>. Use --upgrade to replace existing packages in <dir> '
                 'with new versions.'
        )
        cmd_opts.add_option(
            '--user',
            dest='use_user_site',
            action='store_true',
            help="Install to the Python user install directory for your "
                 "platform. Typically ~/.local/, or %APPDATA%\Python on "
                 "Windows. (See the Python documentation for site.USER_BASE "
                 "for full details.)")
        cmd_opts.add_option(
            '--root',
            dest='root_path',
            metavar='dir',
            default=None,
            help="Install everything relative to this alternate root "
                 "directory.")
        cmd_opts.add_option(
            '--prefix',
            dest='prefix_path',
            metavar='dir',
            default=None,
            help="Installation prefix where lib, bin and other top-level "
                 "folders are placed")

        # Deprecated in favor of 'pip download' (see the warning in run()).
        cmd_opts.add_option(
            '-d', '--download', '--download-dir', '--download-directory',
            dest='download_dir',
            metavar='dir',
            default=None,
            help=("Download packages into <dir> instead of installing them, "
                  "regardless of what's already installed."),
        )

        cmd_opts.add_option(cmdoptions.build_dir())

        cmd_opts.add_option(cmdoptions.src())

        cmd_opts.add_option(
            '-U', '--upgrade',
            dest='upgrade',
            action='store_true',
            help='Upgrade all specified packages to the newest available '
                 'version. The handling of dependencies depends on the '
                 'upgrade-strategy used.'
        )

        cmd_opts.add_option(
            '--upgrade-strategy',
            dest='upgrade_strategy',
            default='eager',
            choices=['only-if-needed', 'eager'],
            help='Determines how dependency upgrading should be handled '
                 '(default: %(default)s). '
                 '"eager" - dependencies are upgraded regardless of '
                 'whether the currently installed version satisfies the '
                 'requirements of the upgraded package(s). '
                 '"only-if-needed" -  are upgraded only when they do not '
                 'satisfy the requirements of the upgraded package(s).'
        )

        cmd_opts.add_option(
            '--force-reinstall',
            dest='force_reinstall',
            action='store_true',
            help='When upgrading, reinstall all packages even if they are '
                 'already up-to-date.')

        cmd_opts.add_option(
            '-I', '--ignore-installed',
            dest='ignore_installed',
            action='store_true',
            help='Ignore the installed packages (reinstalling instead).')

        cmd_opts.add_option(cmdoptions.ignore_requires_python())

        cmd_opts.add_option(cmdoptions.install_options())
        cmd_opts.add_option(cmdoptions.global_options())

        # Deprecated egg-style installs (see the warning in run()).
        cmd_opts.add_option(
            '--egg',
            dest='as_egg',
            action='store_true',
            help="Install packages as eggs, not 'flat', like pip normally "
                 "does. This option is not about installing *from* eggs. "
                 "(WARNING: Because this option overrides pip's normal install"
                 " logic, requirements files may not behave as expected.)")

        # --compile/--no-compile share the 'compile' dest; compile wins by
        # default (store_true with default=True, store_false to disable).
        cmd_opts.add_option(
            "--compile",
            action="store_true",
            dest="compile",
            default=True,
            help="Compile py files to pyc",
        )

        cmd_opts.add_option(
            "--no-compile",
            action="store_false",
            dest="compile",
            help="Do not compile py files to pyc",
        )

        cmd_opts.add_option(cmdoptions.use_wheel())
        cmd_opts.add_option(cmdoptions.no_use_wheel())
        cmd_opts.add_option(cmdoptions.no_binary())
        cmd_opts.add_option(cmdoptions.only_binary())

        cmd_opts.add_option(cmdoptions.no_clean())
        cmd_opts.add_option(cmdoptions.require_hashes())
        cmd_opts.add_option(cmdoptions.progress_bar())

        # Package-index options (--index-url etc.) come from a shared group.
        index_opts = cmdoptions.make_option_group(
            cmdoptions.index_group,
            self.parser,
        )

        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)

    def run(self, options, args):
        """Resolve, (optionally) build wheels for, and install requirements.

        Returns the populated RequirementSet so callers/tests can inspect
        what was installed or downloaded.
        """
        cmdoptions.resolve_wheel_no_use_binary(options)
        cmdoptions.check_install_build_global(options)

        # Emit deprecation warnings for options kept only for backwards
        # compatibility; none of them change behavior below except
        # --download, which forces ignore_installed.
        if options.as_egg:
            warnings.warn(
                "--egg has been deprecated and will be removed in the future. "
                "This flag is mutually exclusive with large parts of pip, and "
                "actually using it invalidates pip's ability to manage the "
                "installation process.",
                RemovedInPip10Warning,
            )

        if options.allow_external:
            warnings.warn(
                "--allow-external has been deprecated and will be removed in "
                "the future. Due to changes in the repository protocol, it no "
                "longer has any effect.",
                RemovedInPip10Warning,
            )

        if options.allow_all_external:
            warnings.warn(
                "--allow-all-external has been deprecated and will be removed "
                "in the future. Due to changes in the repository protocol, it "
                "no longer has any effect.",
                RemovedInPip10Warning,
            )

        if options.allow_unverified:
            warnings.warn(
                "--allow-unverified has been deprecated and will be removed "
                "in the future. Due to changes in the repository protocol, it "
                "no longer has any effect.",
                RemovedInPip10Warning,
            )

        if options.download_dir:
            warnings.warn(
                "pip install --download has been deprecated and will be "
                "removed in the future. Pip now has a download command that "
                "should be used instead.",
                RemovedInPip10Warning,
            )
            # Downloads never consult what is already installed.
            options.ignore_installed = True

        if options.build_dir:
            options.build_dir = os.path.abspath(options.build_dir)

        options.src_dir = os.path.abspath(options.src_dir)
        install_options = options.install_options or []
        if options.use_user_site:
            if options.prefix_path:
                raise CommandError(
                    "Can not combine '--user' and '--prefix' as they imply "
                    "different installation locations"
                )
            if virtualenv_no_global():
                raise InstallationError(
                    "Can not perform a '--user' install. User site-packages "
                    "are not visible in this virtualenv."
                )
            install_options.append('--user')
            install_options.append('--prefix=')

        temp_target_dir = None
        if options.target_dir:
            # --target installs into a temp dir first, then moves the
            # results into place at the end of this method.
            options.ignore_installed = True
            temp_target_dir = tempfile.mkdtemp()
            options.target_dir = os.path.abspath(options.target_dir)
            if (os.path.exists(options.target_dir) and not
                    os.path.isdir(options.target_dir)):
                raise CommandError(
                    "Target path exists but is not a directory, will not "
                    "continue."
                )
            install_options.append('--home=' + temp_target_dir)

        global_options = options.global_options or []

        with self._build_session(options) as session:
            finder = self._build_package_finder(options, session)
            build_delete = (not (options.no_clean or options.build_dir))
            wheel_cache = WheelCache(options.cache_dir, options.format_control)

            # Caching wheels into a directory pip can't own would create
            # root-owned cache entries; disable the cache instead.
            if options.cache_dir and not check_path_owner(options.cache_dir):
                logger.warning(
                    "The directory '%s' or its parent directory is not owned "
                    "by the current user and caching wheels has been "
                    "disabled. check the permissions and owner of that "
                    "directory. If executing pip with sudo, you may want "
                    "sudo's -H flag.",
                    options.cache_dir,
                )
                options.cache_dir = None

            with BuildDirectory(options.build_dir,
                                delete=build_delete) as build_dir:
                requirement_set = RequirementSet(
                    build_dir=build_dir,
                    src_dir=options.src_dir,
                    download_dir=options.download_dir,
                    upgrade=options.upgrade,
                    upgrade_strategy=options.upgrade_strategy,
                    as_egg=options.as_egg,
                    ignore_installed=options.ignore_installed,
                    ignore_dependencies=options.ignore_dependencies,
                    ignore_requires_python=options.ignore_requires_python,
                    force_reinstall=options.force_reinstall,
                    use_user_site=options.use_user_site,
                    target_dir=temp_target_dir,
                    session=session,
                    pycompile=options.compile,
                    isolated=options.isolated_mode,
                    wheel_cache=wheel_cache,
                    require_hashes=options.require_hashes,
                    progress_bar=options.progress_bar,
                )

                self.populate_requirement_set(
                    requirement_set, args, options, finder, session, self.name,
                    wheel_cache
                )

                try:
                    if (options.download_dir or not wheel or not
                            options.cache_dir):
                        # on -d don't do complex things like building
                        # wheels, and don't try to build wheels when wheel is
                        # not installed.
                        requirement_set.prepare_files(finder)
                    else:
                        # build wheels before install.
                        wb = WheelBuilder(
                            requirement_set,
                            finder,
                            build_options=[],
                            global_options=[],
                        )
                        # Ignore the result: a failed wheel will be
                        # installed from the sdist/vcs whatever.
                        wb.build(autobuilding=True)

                    if not options.download_dir:
                        requirement_set.install(
                            install_options,
                            global_options,
                            root=options.root_path,
                            prefix=options.prefix_path,
                        )

                        possible_lib_locations = get_lib_location_guesses(
                            user=options.use_user_site,
                            home=temp_target_dir,
                            root=options.root_path,
                            prefix=options.prefix_path,
                            isolated=options.isolated_mode,
                        )
                        # Build the "Successfully installed foo-1.0 ..."
                        # summary; version lookup is best-effort only.
                        reqs = sorted(
                            requirement_set.successfully_installed,
                            key=operator.attrgetter('name'))
                        items = []
                        for req in reqs:
                            item = req.name
                            try:
                                installed_version = get_installed_version(
                                    req.name, possible_lib_locations
                                )
                                if installed_version:
                                    item += '-' + installed_version
                            except Exception:
                                pass
                            items.append(item)
                        installed = ' '.join(items)
                        if installed:
                            logger.info('Successfully installed %s', installed)
                    else:
                        downloaded = ' '.join([
                            req.name
                            for req in requirement_set.successfully_downloaded
                        ])
                        if downloaded:
                            logger.info(
                                'Successfully downloaded %s', downloaded
                            )
                except PreviousBuildDirError:
                    # Keep the pre-existing build dir for inspection.
                    options.no_clean = True
                    raise
                finally:
                    # Clean up
                    if not options.no_clean:
                        requirement_set.cleanup_files()

        if options.target_dir:
            ensure_dir(options.target_dir)

            # Checking both purelib and platlib directories for installed
            # packages to be moved to target directory
            lib_dir_list = []

            purelib_dir = distutils_scheme('', home=temp_target_dir)['purelib']
            platlib_dir = distutils_scheme('', home=temp_target_dir)['platlib']

            if os.path.exists(purelib_dir):
                lib_dir_list.append(purelib_dir)
            if os.path.exists(platlib_dir) and platlib_dir != purelib_dir:
                lib_dir_list.append(platlib_dir)

            for lib_dir in lib_dir_list:
                for item in os.listdir(lib_dir):
                    target_item_dir = os.path.join(options.target_dir, item)
                    if os.path.exists(target_item_dir):
                        if not options.upgrade:
                            logger.warning(
                                'Target directory %s already exists. Specify '
                                '--upgrade to force replacement.',
                                target_item_dir
                            )
                            continue
                        if os.path.islink(target_item_dir):
                            logger.warning(
                                'Target directory %s already exists and is '
                                'a link. Pip will not automatically replace '
                                'links, please remove if replacement is '
                                'desired.',
                                target_item_dir
                            )
                            continue
                        if os.path.isdir(target_item_dir):
                            shutil.rmtree(target_item_dir)
                        else:
                            os.remove(target_item_dir)

                    shutil.move(
                        os.path.join(lib_dir, item),
                        target_item_dir
                    )
            shutil.rmtree(temp_target_dir)
        return requirement_set
def get_lib_location_guesses(*args, **kwargs):
    """Return candidate library directories (purelib, platlib) for a scheme.

    All arguments are forwarded to ``distutils_scheme``.
    """
    scheme = distutils_scheme('', *args, **kwargs)
    return [scheme[key] for key in ('purelib', 'platlib')]
| |
# -*- coding: utf-8 -*-
import re
import os
import toml
import warnings
try:
import pypandoc
except ImportError:
pypandoc = None
from packaging.version import Version as PackageVersion
from packaging.utils import canonicalize_name
from .exceptions.poet import MissingElement, InvalidElement
from .version_parser import VersionParser
from .build import Builder
from .package import Dependency, PipDependency
from .utils.helpers import call
class Poet(object):
    """In-memory model of a ``poetry.toml`` project file.

    Parses the TOML configuration at *path* on construction and exposes the
    package metadata, dependencies, scripts and packaging information as
    read-only properties.
    """

    EXCLUDES = ()
    INCLUDES = ()

    def __init__(self, path, builder=None):
        """Load and parse the configuration file.

        :param path: path to the poetry.toml configuration file.
        :param builder: Builder used by :meth:`build`; a fresh one is created
            per instance when omitted.

        Bug fix: the previous signature used ``builder=Builder()``, a default
        evaluated once at import time and silently shared by every Poet
        instance (mutable-default-argument pitfall).
        """
        self._path = path
        self._dir = os.path.realpath(os.path.dirname(path))
        self._builder = builder if builder is not None else Builder()
        self._git_config = None
        self._name = None
        self._version = None
        self._description = None
        self._authors = []
        self._homepage = None
        self._repository = None
        self._keywords = []
        self._python_versions = []
        self._dependencies = []
        self._dev_dependencies = []
        self._pip_dependencies = []
        self._pip_dev_dependencies = []
        self._features = {}
        self._scripts = {}
        self._entry_points = {}
        self._license = None
        self._readme = None
        self._include = []
        self._exclude = []
        self._extensions = {}

        with open(self._path) as f:
            self._config = toml.loads(f.read())

        self.load()

    @property
    def base_dir(self):
        return self._dir

    @property
    def git_config(self):
        """Return the effective git configuration as a dict (cached)."""
        if self._git_config is not None:
            return self._git_config

        config_list = call(['git', 'config', '-l'])

        self._git_config = {}

        # ``git config -l`` prints one "key=value" pair per line; (?ms)
        # makes ^/$ match at line boundaries.  Raw string fixes the
        # escape-sequence handling of the pattern.
        m = re.findall(r'(?ms)^([^=]+)=(.*?)$', config_list)
        if m:
            for group in m:
                self._git_config[group[0]] = group[1]

        return self._git_config

    @property
    def ignore(self):
        """Return ignore patterns read from .gitignore, skipping comments."""
        ignore_files = [
            '.gitignore'
        ]

        ignore = []
        for filename in ignore_files:
            filepath = os.path.join(self._dir, filename)
            if not os.path.exists(filepath):
                continue

            with open(filepath) as fd:
                for line in fd.readlines():
                    # Bug fix: raw string -- '\s' in a plain string literal
                    # is an invalid escape sequence (DeprecationWarning on
                    # modern Pythons, SyntaxWarning on 3.12+).
                    if re.match(r'^\s*#.*$', line):
                        continue

                    ignore.append(line.strip())

        return ignore

    @property
    def name(self):
        return self._name

    @property
    def version(self):
        return self._version

    @property
    def normalized_version(self):
        """
        Return a PEP 440 compatible version.

        :rtype: str
        """
        return str(PackageVersion(self._version))

    @property
    def description(self):
        return self._description

    @property
    def authors(self):
        return self._authors

    @property
    def homepage(self):
        return self._homepage

    @property
    def repository(self):
        return self._repository

    @property
    def keywords(self):
        return self._keywords

    @property
    def python_versions(self):
        return self._python_versions

    @property
    def dependencies(self):
        return self._dependencies

    @property
    def dev_dependencies(self):
        return self._dev_dependencies

    @property
    def pip_dependencies(self):
        return self._pip_dependencies

    @property
    def pip_dev_dependencies(self):
        return self._pip_dev_dependencies

    @property
    def features(self):
        return self._features

    @property
    def scripts(self):
        return self._scripts

    @property
    def entry_points(self):
        return self._entry_points

    @property
    def license(self):
        return self._license

    @property
    def readme(self):
        return self._readme

    @property
    def include(self):
        return self._include

    @property
    def exclude(self):
        return self._exclude

    @property
    def extensions(self):
        return self._extensions

    @property
    def lock_file(self):
        return os.path.join(self._dir, 'poetry.lock')

    @property
    def lock(self):
        # Imported lazily to avoid a circular import with .lock.
        from .lock import Lock

        return Lock(self.lock_file)

    @property
    def path(self):
        return self._path

    @property
    def archive(self):
        """Return the sdist archive file name for this package."""
        return '{}-{}.tar.gz'.format(self.name, self.normalized_version)

    def load(self):
        """
        Load data from the config.
        """
        self._name = self._config['package']['name']
        self._version = self._config['package']['version']
        self._description = self._config['package']['description']
        self._authors = self._config['package']['authors']
        self._license = self._config['package'].get('license')
        self._homepage = self._config['package'].get('homepage')
        self._repository = self._config['package'].get('repository')
        self._keywords = self._config['package'].get('keywords', [])
        self._python_versions = self._config['package']['python']
        self._dependencies = self._get_dependencies(self._config.get('dependencies', {}))
        self._dev_dependencies = self._get_dependencies(self._config.get('dev-dependencies', {}), category='dev')
        self._pip_dependencies = self._get_dependencies(self._config.get('dependencies', {}), 'pip')
        self._pip_dev_dependencies = self._get_dependencies(self._config.get('dev-dependencies', {}), 'pip', category='dev')
        self._features = self._get_features()
        self._scripts = self._config.get('scripts', {})
        self._entry_points = self._config.get('entry-points', {})

        self._load_readme()

        self._include = self._config['package'].get('include', []) + list(self.INCLUDES)
        self._exclude = self._config['package'].get('exclude', []) + list(self.EXCLUDES)
        self._extensions = self._config.get('extensions', {})

    def _load_readme(self):
        """Read the README; Markdown is converted to reST via pypandoc."""
        readme = self._config['package']['readme']
        readme_path = os.path.join(self._dir, readme)

        if self.has_markdown_readme():
            if not pypandoc:
                warnings.warn(
                    'Markdown README files require the pandoc utility '
                    'and the pypandoc package.'
                )
            else:
                self._readme = pypandoc.convert_file(readme_path, 'rst')
        else:
            with open(readme_path) as f:
                self._readme = f.read()

    def has_markdown_readme(self):
        """
        Return whether the README is a markdown one.

        :rtype: boolean
        """
        readme = self._config['package']['readme']
        _, ext = os.path.splitext(readme)

        return ext == '.md'

    def is_lock(self):
        # Overridden by the Lock subclass.
        return False

    def build(self, **options):
        """Validate the configuration, then delegate to the builder."""
        self.check()

        self._builder.build(self, **options)

    def check(self):
        """
        Checks if the poetry.toml file is valid.

        :raises MissingElement: when a mandatory section/key is absent.
        :raises InvalidElement: when a value has the wrong shape.
        """
        package = self._config.get('package')
        if not package:
            raise MissingElement('package')

        self._check_package(package)

        dependencies = self._config.get('dependencies')
        if dependencies:
            self._check_dependencies(dependencies)

        dev_dependencies = self._config.get('dev-dependencies')
        if dev_dependencies:
            self._check_dependencies(dev_dependencies)

    def _check_package(self, package):
        """Validate the mandatory keys of the [package] section."""
        if 'name' not in package:
            raise MissingElement('package.name')

        if 'version' not in package:
            raise MissingElement('package.version')

        authors = package.get('authors')
        if not authors:
            raise MissingElement('package.authors')

        if not isinstance(authors, list):
            raise InvalidElement('package.authors', 'it must be a list')

        license = package.get('license')
        if license:
            self._check_license(license)

        readme = package.get('readme')
        if not readme:
            raise MissingElement('package.readme')

        self._check_readme(readme)

    def _check_license(self, license):
        # Placeholder: license values are currently accepted as-is.
        pass

    def _check_readme(self, readme):
        """Ensure the README path exists and has a supported extension."""
        readme_path = os.path.join(self._dir, readme)

        if not os.path.exists(readme_path):
            raise InvalidElement('package.readme', 'invalid path provided')

        _, ext = os.path.splitext(readme)

        if ext not in ['.md', '.rst', '.txt']:
            raise InvalidElement('package.readme', 'extension [{}] is not supported'.format(ext))

    def _check_dependencies(self, dependencies):
        for name, constraint in dependencies.items():
            self._check_package_constraint(name, constraint)

    def _check_package_constraint(self, name, constraint):
        """Validate one dependency constraint (VCS dict or version string)."""
        message = 'Invalid constraint [{}]'.format(constraint)

        if isinstance(constraint, dict):
            return self._check_vcs_constraint(name, constraint)
        else:
            try:
                return VersionParser().parse_constraints(constraint)
            except ValueError:
                pass

        raise InvalidElement('dependencies.{}'.format(name), message)

    def _check_vcs_constraint(self, name, constraint):
        if 'git' in constraint:
            self._check_git_constraint(name, constraint)

    def _check_git_constraint(self, name, constraint):
        """A git constraint must pin a branch, revision or tag."""
        if all(['branch' not in constraint, 'rev' not in constraint, 'tag' not in constraint]):
            raise InvalidElement(
                'dependencies.{}'.format(name),
                'Git constraint should have one of [branch, rev, tag]'
            )

    def _get_dependencies(self, dependencies, kind='default', category='main'):
        """Build Dependency (or PipDependency) objects, sorted by name."""
        keys = sorted(list(dependencies.keys()))

        klass = Dependency
        if kind == 'pip':
            klass = PipDependency

        return [klass(k, dependencies[k], category=category) for k in keys]

    def _get_features(self):
        return self._config.get('features', {})
| |
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implements vlans, bridges, and iptables rules using linux utilities."""
import binascii
import calendar
import inspect
import itertools
import os
import re
from oslo.config import cfg
import six
from wormhole import exception
from wormhole.i18n import _
from wormhole.common import excutils
from wormhole.common import importutils
from wormhole.common import jsonutils
from wormhole.common import log as logging
from wormhole.common import processutils
from wormhole.common import timeutils
from wormhole import paths
from wormhole.common import utils
# Module-level logger for this network-helper module.
LOG = logging.getLogger(__name__)

# oslo.config options owned by this module.
linux_net_opts = [
    cfg.IntOpt('ovs_vsctl_timeout',
               default=120,
               help='Amount of time, in seconds, that ovs_vsctl should wait '
                    'for a response from the database. 0 is to wait forever.'),
]

CONF = cfg.CONF
# Register the options so CONF.ovs_vsctl_timeout is resolvable below.
CONF.register_opts(linux_net_opts)
def clean_conntrack(fixed_ip):
    """Delete conntrack entries replying to *fixed_ip*; log (not raise) on failure."""
    cmd = ('conntrack', '-D', '-r', fixed_ip)
    try:
        # Exit code 1 means "nothing matched", which is fine.
        _execute(*cmd, run_as_root=True, check_exit_code=[0, 1])
    except processutils.ProcessExecutionError:
        LOG.exception(_('Error deleting conntrack entries for %s'), fixed_ip)
def _enable_ipv4_forwarding():
    """Enable the net.ipv4.ip_forward sysctl unless it is already '1'.

    Bug fix: the original used ``stdout.strip() is not '1'`` -- an object
    *identity* comparison against a string literal, which only behaved as
    intended thanks to CPython string interning and raises a SyntaxWarning
    on modern Pythons.  Use ``!=`` for a value comparison.
    """
    sysctl_key = 'net.ipv4.ip_forward'
    stdout, stderr = _execute('sysctl', '-n', sysctl_key)
    if stdout.strip() != '1':
        _execute('sysctl', '-w', '%s=1' % sysctl_key, run_as_root=True)
def _execute(*cmd, **kwargs):
    """Wrapper around utils.execute for fake_network."""
    result = utils.execute(*cmd, **kwargs)
    return result
def _dnsmasq_pid_for(dev):
    """Returns the pid for prior dnsmasq instance for a bridge/device.

    Returns None if no pid file exists, or if it is unreadable/corrupt.

    If machine has rebooted pid might be incorrect (caller should check).
    """
    pid_path = _dhcp_file(dev, 'pid')
    if not os.path.exists(pid_path):
        return None
    try:
        with open(pid_path, 'r') as handle:
            return int(handle.read())
    except (ValueError, IOError):
        return None
def _ra_pid_for(dev):
    """Returns the pid for prior radvd instance for a bridge/device.

    Returns None if no pid file exists, or if it is unreadable/corrupt.

    If machine has rebooted pid might be incorrect (caller should check).
    """
    pid_file = _ra_file(dev, 'pid')

    if os.path.exists(pid_file):
        try:
            with open(pid_file, 'r') as f:
                return int(f.read())
        except (ValueError, IOError):
            # Consistency/robustness fix: _dnsmasq_pid_for already tolerates
            # a corrupt or unreadable pid file by returning None; do the same
            # here instead of propagating the exception to the caller.
            return None
def _ip_bridge_cmd(action, params, device):
"""Build commands to add/del ips to bridges/devices."""
cmd = ['ip', 'addr', action]
cmd.extend(params)
cmd.extend(['dev', device])
return cmd
def _set_device_mtu(dev, mtu=None):
    """Set the MTU on *dev*, defaulting to CONF.network_device_mtu; no-op if unset."""
    mtu = mtu or CONF.network_device_mtu
    if mtu:
        # Exit codes 2/254 are tolerated (device busy / unsupported op).
        utils.execute('ip', 'link', 'set', dev, 'mtu', mtu,
                      run_as_root=True, check_exit_code=[0, 2, 254])
def _create_veth_pair(dev1_name, dev2_name):
    """Create a veth pair with the given names, replacing any pre-existing
    devices with those names, then bring both ends up in promiscuous mode.
    """
    delete_net_dev(dev1_name)
    delete_net_dev(dev2_name)

    utils.execute('ip', 'link', 'add', dev1_name, 'type', 'veth', 'peer',
                  'name', dev2_name, run_as_root=True)

    for endpoint in (dev1_name, dev2_name):
        utils.execute('ip', 'link', 'set', endpoint, 'up', run_as_root=True)
        utils.execute('ip', 'link', 'set', endpoint, 'promisc', 'on',
                      run_as_root=True)
        _set_device_mtu(endpoint)
def _ovs_vsctl(args):
    """Run ovs-vsctl with the configured timeout; raise AgentError on failure."""
    full_args = ['ovs-vsctl', '--timeout=%s' % CONF.ovs_vsctl_timeout]
    full_args.extend(args)
    try:
        return utils.execute(*full_args, run_as_root=True)
    except Exception as e:
        LOG.error(_("Unable to execute %(cmd)s. Exception: %(exception)s"),
                  {'cmd': full_args, 'exception': e})
        raise exception.AgentError(method=full_args)
def _ovs_ofctl(args):
    """Run ovs-ofctl with the configured timeout; raise AgentError on failure."""
    full_args = ['ovs-ofctl', '--timeout=%s' % CONF.ovs_vsctl_timeout]
    full_args.extend(args)
    try:
        return utils.execute(*full_args, run_as_root=True)
    except Exception as e:
        LOG.error(_("Unable to execute %(cmd)s. Exception: %(exception)s"),
                  {'cmd': full_args, 'exception': e})
        raise exception.AgentError(method=full_args)
def create_ovs_bridge(bridge_name):
    """Idempotently create an OVS bridge and park it on dead VLAN tag 4095."""
    _ovs_vsctl(['--', '--may-exist', 'add-br', bridge_name])
    set_db_attribute("Port", bridge_name, "tag", "4095")
    _set_device_mtu(bridge_name)
def create_ovs_vif_port(bridge, dev, iface_id, mac, instance_id,
                        internal=False):
    """(Re)plug *dev* into *bridge*, tagging it with the Neutron-style
    external ids (iface-id, attached-mac, vm-uuid); optionally internal type.
    """
    args = ['--', '--if-exists', 'del-port', dev,
            '--', 'add-port', bridge, dev,
            '--', 'set', 'Interface', dev,
            'external-ids:iface-id=%s' % iface_id,
            'external-ids:iface-status=active',
            'external-ids:attached-mac=%s' % mac,
            'external-ids:vm-uuid=%s' % instance_id]
    if internal:
        args += ["type=internal"]
    _ovs_vsctl(args)
    _set_device_mtu(dev)
def create_ovs_patch_port(bridge_name, local_name, remote_name):
    """Idempotently add patch port *local_name* to *bridge_name*, peered with *remote_name*."""
    _ovs_vsctl(['--', '--may-exist', 'add-port', bridge_name, local_name,
                '--', 'set', 'Interface', local_name,
                'type=patch', 'options:peer=%s' % remote_name])
def get_ovs_port_ofport(port_name):
    """Return the OpenFlow port number of *port_name* (None when vsctl yields nothing)."""
    output = _ovs_vsctl(["get", "Interface", port_name, "ofport"])
    if not output:
        return None
    return output[0].rstrip("\n\r")
def delete_ovs_bridge(bridge_name):
    """Delete the OVS bridge if it exists (no error when absent)."""
    _ovs_vsctl(['--', '--if-exists', 'del-br', bridge_name])
def delete_ovs_vif_port(bridge, dev):
    """Detach *dev* from *bridge*, then remove the network device itself."""
    args = ['--', '--if-exists', 'del-port', bridge, dev]
    _ovs_vsctl(args)
    delete_net_dev(dev)
def delete_ovs_flows(bridge, ofport):
    """Remove every flow on *bridge* whose match includes in_port=*ofport*."""
    _ovs_ofctl(['del-flows', bridge, 'in_port=%s' % ofport])
def create_evs_dpdk_br(bridge):
    """Create a DPDK-datapath EVS bridge and park it on dead VLAN tag 4095."""
    _ovs_vsctl(['add-br', bridge,
                '--', 'set', 'bridge', bridge, 'datapath_type=dpdk'])
    _ovs_vsctl(['set', "port", bridge, 'tag=4095'])
def create_evs_patch_port(bridge, port, patch_port):
    """Add patch *port* to EVS *bridge*, peered with *patch_port*."""
    args = ['--', 'add-port', bridge, port,
            '--', 'set', 'interface', port,
            'type=patch', 'options:peer=%s' % patch_port]
    _ovs_vsctl(args)
def create_evs_virtio_port(bridge, dev, iface_id, mac, instance_id,
                           internal=False):
    """(Re)plug virtio port *dev* into EVS *bridge*, carrying over any
    service-chain settings (sf_port_id / sc_type) already stored in the
    port's other_config column.
    """
    sc_type = None
    sf_port_id = None
    # Probe the existing port record; only when the listing mentions
    # sf_port_id do we attempt to extract the old service-chain settings.
    list_args = ['--', '--if-exists', 'list', 'port', dev]
    if str(_ovs_vsctl(list_args)).find('sf_port_id') != -1:
        columns_args = ['--', '--columns=other_config', 'list', 'port', dev]
        # Take the text between the first '(' and ')' of the vsctl output.
        # NOTE(review): assumes the output always contains that pair of
        # parentheses -- confirm against the actual ovs-vsctl format.
        result = str(_ovs_vsctl(columns_args)).split('(')[1].split(')')[0]
        re_sf_port_id = re.compile('.*sf_port_id="(.*?)".*', re.M | re.X)
        match_sf_port_id = re_sf_port_id.search(result)
        if match_sf_port_id:
            sf_port_id = match_sf_port_id.group(1)
        re_sc_type = re.compile('.*sc_type=(.*?),.*', re.M | re.X)
        match_sc_type = re_sc_type.search(result)
        if match_sc_type:
            sc_type = match_sc_type.group(1)
    # Re-create the port with the standard Neutron-style external ids.
    interface_args = ['--', '--if-exists', 'del-port', dev, '--',
                      'add-port', bridge, dev,
                      '--', 'set', 'port', dev,
                      'other_config:port_type=virtio',
                      '--', 'set', 'Interface', dev,
                      'external-ids:iface-id=%s' % iface_id,
                      'external-ids:iface-status=active',
                      'external-ids:attached-mac=%s' % mac,
                      'external-ids:vm-uuid=%s' % instance_id]
    if internal:
        interface_args.append("type=internal")
    _ovs_vsctl(interface_args)
    # Restore the saved service-chain settings onto the fresh port.
    sc_interface_args = ['set', 'port', dev]
    if sf_port_id:
        sc_interface_args.append('other_config:sf_port_id=%s' % sf_port_id)
    if sc_type:
        sc_interface_args.append('other_config:sc_type=%s' % sc_type)
    # NOTE(review): the restore command only runs when sf_port_id was found,
    # so a port that had sc_type but no sf_port_id silently loses its
    # sc_type -- confirm this is intended.
    if sf_port_id:
        _ovs_vsctl(sc_interface_args)
def create_evs_virtio_port_bind_numa(bridge, dev, numa_id, iface_id, mac,
                                     instance_id, internal=False):
    """(Re)plug *dev* into *bridge* as an EVS virtio port bound to a NUMA node.

    Identical to create_evs_virtio_port() except that the port is also
    annotated with other_config:numa_id.  Existing service-chain settings
    (other_config:sf_port_id / other_config:sc_type) are preserved across
    the delete/re-add cycle.

    :param numa_id: NUMA node id stored in other_config:numa_id
    """
    sc_type = None
    sf_port_id = None
    listing = str(_ovs_vsctl(['--', '--if-exists', 'list', 'port', dev]))
    if 'sf_port_id' in listing:
        other_config = str(_ovs_vsctl(
            ['--', '--columns=other_config', 'list', 'port', dev]))
        # Keep only the text between the other_config map's parentheses.
        other_config = other_config.split('(')[1].split(')')[0]
        match = re.search('.*sf_port_id="(.*?)".*', other_config,
                          re.M | re.X)
        if match:
            sf_port_id = match.group(1)
        match = re.search('.*sc_type=(.*?),.*', other_config, re.M | re.X)
        if match:
            sc_type = match.group(1)
    interface_args = ['--', '--if-exists', 'del-port', dev, '--',
                      'add-port', bridge, dev,
                      '--', 'set', 'port', dev,
                      'other_config:port_type=virtio',
                      'other_config:numa_id=%s' % numa_id,
                      '--', 'set', 'Interface', dev,
                      'external-ids:iface-id=%s' % iface_id,
                      'external-ids:iface-status=active',
                      'external-ids:attached-mac=%s' % mac,
                      'external-ids:vm-uuid=%s' % instance_id]
    if internal:
        interface_args.append("type=internal")
    _ovs_vsctl(interface_args)
    # Re-apply the saved service-chain settings.  NOTE(review): as in the
    # original code, sc_type is only restored when sf_port_id was also
    # present -- confirm an sc_type-only configuration cannot occur.
    if sf_port_id:
        restore_args = ['set', 'port', dev,
                        'other_config:sf_port_id=%s' % sf_port_id]
        if sc_type:
            restore_args.append('other_config:sc_type=%s' % sc_type)
        _ovs_vsctl(restore_args)
def bridge_exists(bridge_name):
    """Return True if the OVS bridge *bridge_name* exists, else False.

    ``ovs-vsctl br-exists`` exits with status 2 when the bridge is missing;
    that specific failure is translated into ``False``, while every other
    RuntimeError is re-raised by save_and_reraise_exception.
    """
    try:
        _ovs_vsctl(['br-exists', bridge_name])
    except RuntimeError as e:
        with excutils.save_and_reraise_exception() as ctxt:
            # Exit code 2 just means "no such bridge" -- suppress the re-raise.
            if 'Exit code: 2\n' in str(e):
                ctxt.reraise = False
                return False
    return True
def get_evs_port_ofport(port_name):
    """Return the OpenFlow port number of *port_name*.

    Returns None when ovs-vsctl produced no output.
    """
    output = _ovs_vsctl(["get", "Interface", port_name, "ofport"])
    if not output:
        return None
    return output[0].rstrip("\n\r")
def device_exists(device):
    """Return True when the ethernet device *device* is present in sysfs."""
    sysfs_path = '/sys/class/net/%s' % device
    return os.path.exists(sysfs_path)
def delete_evs_flows(bridge, ofport):
    """Remove every EVS flow on *bridge* that enters via *ofport*."""
    match = 'in_port=%s' % ofport
    _ovs_ofctl(['del-flows', bridge, match])
def delete_evs_port(bridge, port):
    """Delete *port* from *bridge*; a missing port is a no-op."""
    del_args = ['--', '--if-exists', 'del-port', bridge, port]
    _ovs_vsctl(del_args)
def delete_evs_bridge(bridge):
    """Delete the EVS bridge *bridge*; a missing bridge is a no-op."""
    del_args = ['--', '--if-exists', 'del-br', bridge]
    _ovs_vsctl(del_args)
def create_ivs_vif_port(dev, iface_id, mac, instance_id):
    """Attach *dev* to the IVS switch.

    NOTE(review): iface_id, mac and instance_id are accepted for interface
    compatibility with the OVS variant but are not passed to ivs-ctl.
    """
    utils.execute('ivs-ctl', 'add-port', dev, run_as_root=True)
def delete_ivs_vif_port(dev):
    """Detach *dev* from the IVS switch and remove the underlying link."""
    utils.execute('ivs-ctl', 'del-port', dev, run_as_root=True)
    utils.execute('ip', 'link', 'delete', dev, run_as_root=True)
def create_tap_dev(dev, mac_address=None):
    """Create a tap device named *dev* unless it already exists.

    Tries ``ip tuntap`` first and falls back to ``tunctl``.  Optionally
    assigns *mac_address*, then brings the link up.
    """
    if device_exists(dev):
        return
    try:
        # Preferred path: iproute2.
        utils.execute('ip', 'tuntap', 'add', dev, 'mode', 'tap',
                      run_as_root=True, check_exit_code=[0, 2, 254])
    except processutils.ProcessExecutionError:
        # Fallback for systems without 'ip tuntap' support.
        utils.execute('tunctl', '-b', '-t', dev, run_as_root=True)
    if mac_address:
        utils.execute('ip', 'link', 'set', dev, 'address', mac_address,
                      run_as_root=True, check_exit_code=[0, 2, 254])
    utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True,
                  check_exit_code=[0, 2, 254])
def delete_net_dev(dev):
    """Delete a network device only if it exists."""
    if not device_exists(dev):
        return
    try:
        utils.execute('ip', 'link', 'delete', dev, run_as_root=True,
                      check_exit_code=[0, 2, 254])
        LOG.debug("Net device removed: '%s'", dev)
    except processutils.ProcessExecutionError:
        # Log, then let save_and_reraise_exception propagate the failure.
        with excutils.save_and_reraise_exception():
            LOG.error(_("Failed removing net device: '%s'"), dev)
# Similar to compute virt layers, the Linux network node
# code uses a flexible driver model to support different ways
# of creating ethernet interfaces and attaching them to the network.
# In the case of a network host, these interfaces
# act as gateway/dhcp/vpn/etc. endpoints not VM interfaces.
interface_driver = None


def _get_interface_driver():
    """Lazily instantiate and cache the configured linuxnet interface driver."""
    global interface_driver
    if not interface_driver:
        driver_name = CONF.linuxnet_interface_driver
        interface_driver = importutils.import_object(driver_name)
    return interface_driver
def plug(network, mac_address, gateway=True):
    """Plug *network* through the configured interface driver."""
    driver = _get_interface_driver()
    return driver.plug(network, mac_address, gateway)
def unplug(network):
    """Unplug *network* through the configured interface driver."""
    driver = _get_interface_driver()
    return driver.unplug(network)
def get_dev(network):
    """Return the device name for *network* via the configured driver."""
    driver = _get_interface_driver()
    return driver.get_dev(network)
class LinuxNetInterfaceDriver(object):
    """Abstract base class for Linux network-host interface drivers.

    Concrete subclasses decide how gateway/dhcp/vpn endpoint devices are
    created, destroyed and named; every method here must be overridden.
    """

    def plug(self, network, mac_address):
        """Create Linux device, return device name."""
        raise NotImplementedError()

    def unplug(self, network):
        """Destroy Linux device, return device name."""
        raise NotImplementedError()

    def get_dev(self, network):
        """Get device name."""
        raise NotImplementedError()
| |
# Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
from openstackclient.identity.v2_0 import project
from openstackclient.tests import fakes
from openstackclient.tests.identity.v2_0 import fakes as identity_fakes
from openstackclient.tests.identity.v2_0 import test_identity
class TestProject(test_identity.TestIdentityv2):
    """Shared fixture for the identity v2 project command tests.

    Exposes the TenantManager mock as ``self.projects_mock`` and resets it
    between tests so call assertions start from a clean slate.
    """

    def setUp(self):
        super(TestProject, self).setUp()

        # Get a shortcut to the TenantManager Mock
        self.projects_mock = self.app.client_manager.identity.tenants
        self.projects_mock.reset_mock()
class TestProjectCreate(TestProject):
    """Tests for ``openstack project create`` (identity v2).

    The create() mock always returns the PROJECT fixture, so the displayed
    data is the same in every test regardless of the flags passed; only the
    kwargs sent to create() vary.
    """

    def setUp(self):
        super(TestProjectCreate, self).setUp()

        self.projects_mock.create.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.PROJECT),
            loaded=True,
        )

        # Get the command object to test
        self.cmd = project.CreateProject(self.app, None)

    def test_project_create_no_options(self):
        arglist = [
            identity_fakes.project_name,
        ]
        verifylist = [
            ('enable', False),
            ('disable', False),
            ('name', identity_fakes.project_name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        # DisplayCommandBase.take_action() returns two tuples
        columns, data = self.cmd.take_action(parsed_args)

        # Set expected values
        kwargs = {
            'description': None,
            'enabled': True,
        }
        self.projects_mock.create.assert_called_with(
            identity_fakes.project_name,
            **kwargs
        )

        collist = ('description', 'enabled', 'id', 'name')
        self.assertEqual(columns, collist)
        datalist = (
            identity_fakes.project_description,
            True,
            identity_fakes.project_id,
            identity_fakes.project_name,
        )
        self.assertEqual(data, datalist)

    def test_project_create_description(self):
        arglist = [
            '--description', 'new desc',
            identity_fakes.project_name,
        ]
        verifylist = [
            ('description', 'new desc'),
            ('name', identity_fakes.project_name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        # DisplayCommandBase.take_action() returns two tuples
        columns, data = self.cmd.take_action(parsed_args)

        # Set expected values
        kwargs = {
            'description': 'new desc',
            'enabled': True,
        }
        self.projects_mock.create.assert_called_with(
            identity_fakes.project_name,
            **kwargs
        )

        collist = ('description', 'enabled', 'id', 'name')
        self.assertEqual(columns, collist)
        datalist = (
            identity_fakes.project_description,
            True,
            identity_fakes.project_id,
            identity_fakes.project_name,
        )
        self.assertEqual(data, datalist)

    def test_project_create_enable(self):
        arglist = [
            '--enable',
            identity_fakes.project_name,
        ]
        verifylist = [
            ('enable', True),
            ('disable', False),
            ('name', identity_fakes.project_name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        # DisplayCommandBase.take_action() returns two tuples
        columns, data = self.cmd.take_action(parsed_args)

        # Set expected values
        kwargs = {
            'description': None,
            'enabled': True,
        }
        self.projects_mock.create.assert_called_with(
            identity_fakes.project_name,
            **kwargs
        )

        collist = ('description', 'enabled', 'id', 'name')
        self.assertEqual(columns, collist)
        datalist = (
            identity_fakes.project_description,
            True,
            identity_fakes.project_id,
            identity_fakes.project_name,
        )
        self.assertEqual(data, datalist)

    def test_project_create_disable(self):
        arglist = [
            '--disable',
            identity_fakes.project_name,
        ]
        verifylist = [
            ('enable', False),
            ('disable', True),
            ('name', identity_fakes.project_name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        # DisplayCommandBase.take_action() returns two tuples
        columns, data = self.cmd.take_action(parsed_args)

        # Set expected values
        kwargs = {
            'description': None,
            'enabled': False,
        }
        self.projects_mock.create.assert_called_with(
            identity_fakes.project_name,
            **kwargs
        )

        collist = ('description', 'enabled', 'id', 'name')
        self.assertEqual(columns, collist)
        # NOTE(review): datalist mirrors the PROJECT fixture (enabled=True),
        # not the requested --disable state -- the mock ignores kwargs.
        datalist = (
            identity_fakes.project_description,
            True,
            identity_fakes.project_id,
            identity_fakes.project_name,
        )
        self.assertEqual(data, datalist)
class TestProjectDelete(TestProject):
    """Tests for ``openstack project delete`` (identity v2)."""

    def setUp(self):
        super(TestProjectDelete, self).setUp()

        # utils.find_resource() resolves the project through .get()
        self.projects_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.PROJECT),
            loaded=True,
        )
        self.projects_mock.delete.return_value = None

        # Command under test
        self.cmd = project.DeleteProject(self.app, None)

    def test_project_delete_no_options(self):
        parsed_args = self.check_parser(
            self.cmd,
            [identity_fakes.project_id],
            [('project', identity_fakes.project_id)],
        )

        result = self.cmd.run(parsed_args)
        self.assertEqual(result, 0)
        self.projects_mock.delete.assert_called_with(
            identity_fakes.project_id,
        )
class TestProjectList(TestProject):
    """Tests for ``openstack project list`` (identity v2)."""

    def setUp(self):
        super(TestProjectList, self).setUp()

        self.projects_mock.list.return_value = [
            fakes.FakeResource(
                None,
                copy.deepcopy(identity_fakes.PROJECT),
                loaded=True,
            ),
        ]

        # Command under test
        self.cmd = project.ListProject(self.app, None)

    def test_project_list_no_options(self):
        parsed_args = self.check_parser(self.cmd, [], [])

        # take_action() returns (columns, data)
        columns, data = self.cmd.take_action(parsed_args)

        self.projects_mock.list.assert_called_with()
        self.assertEqual(columns, ('ID', 'Name'))
        expected = ((
            identity_fakes.project_id,
            identity_fakes.project_name,
        ), )
        self.assertEqual(tuple(data), expected)

    def test_project_list_long(self):
        parsed_args = self.check_parser(self.cmd, ['--long'], [('long', True)])

        # take_action() returns (columns, data)
        columns, data = self.cmd.take_action(parsed_args)

        self.projects_mock.list.assert_called_with()
        self.assertEqual(columns, ('ID', 'Name', 'Description', 'Enabled'))
        expected = ((
            identity_fakes.project_id,
            identity_fakes.project_name,
            identity_fakes.project_description,
            True,
        ), )
        self.assertEqual(tuple(data), expected)
class TestProjectSet(TestProject):
    """Tests for ``openstack project set`` (identity v2).

    get() resolves the project; update() is asserted with the kwargs the
    command is expected to build from the parsed flags.
    """

    def setUp(self):
        super(TestProjectSet, self).setUp()

        self.projects_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.PROJECT),
            loaded=True,
        )
        self.projects_mock.update.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.PROJECT),
            loaded=True,
        )

        # Get the command object to test
        self.cmd = project.SetProject(self.app, None)

    def test_project_set_no_options(self):
        # With no flags, run() succeeds without calling update().
        arglist = [
            identity_fakes.project_name,
        ]
        verifylist = [
            ('project', identity_fakes.project_name),
            ('enable', False),
            ('disable', False),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        result = self.cmd.run(parsed_args)
        self.assertEqual(result, 0)

    def test_project_set_name(self):
        arglist = [
            '--name', 'qwerty',
            identity_fakes.project_name,
        ]
        verifylist = [
            ('name', 'qwerty'),
            ('enable', False),
            ('disable', False),
            ('project', identity_fakes.project_name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        result = self.cmd.run(parsed_args)
        self.assertEqual(result, 0)

        # Set expected values
        kwargs = {
            'description': identity_fakes.project_description,
            'enabled': True,
            'tenant_name': 'qwerty',
        }
        self.projects_mock.update.assert_called_with(
            identity_fakes.project_id,
            **kwargs
        )

    def test_project_set_description(self):
        arglist = [
            '--description', 'new desc',
            identity_fakes.project_name,
        ]
        verifylist = [
            ('description', 'new desc'),
            ('enable', False),
            ('disable', False),
            ('project', identity_fakes.project_name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        result = self.cmd.run(parsed_args)
        self.assertEqual(result, 0)

        # Set expected values
        kwargs = {
            'description': 'new desc',
            'enabled': True,
            'tenant_name': identity_fakes.project_name,
        }
        self.projects_mock.update.assert_called_with(
            identity_fakes.project_id,
            **kwargs
        )

    def test_project_set_enable(self):
        arglist = [
            '--enable',
            identity_fakes.project_name,
        ]
        verifylist = [
            ('enable', True),
            ('disable', False),
            ('project', identity_fakes.project_name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        result = self.cmd.run(parsed_args)
        self.assertEqual(result, 0)

        # Set expected values
        kwargs = {
            'description': identity_fakes.project_description,
            'enabled': True,
            'tenant_name': identity_fakes.project_name,
        }
        self.projects_mock.update.assert_called_with(
            identity_fakes.project_id,
            **kwargs
        )

    def test_project_set_disable(self):
        arglist = [
            '--disable',
            identity_fakes.project_name,
        ]
        verifylist = [
            ('enable', False),
            ('disable', True),
            ('project', identity_fakes.project_name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        result = self.cmd.run(parsed_args)
        self.assertEqual(result, 0)

        # Set expected values
        kwargs = {
            'description': identity_fakes.project_description,
            'enabled': False,
            'tenant_name': identity_fakes.project_name,
        }
        self.projects_mock.update.assert_called_with(
            identity_fakes.project_id,
            **kwargs
        )
class TestProjectShow(TestProject):
    """Tests for ``openstack project show`` (identity v2)."""

    def setUp(self):
        super(TestProjectShow, self).setUp()

        self.projects_mock.get.return_value = fakes.FakeResource(
            None,
            copy.deepcopy(identity_fakes.PROJECT),
            loaded=True,
        )

        # Command under test
        self.cmd = project.ShowProject(self.app, None)

    def test_project_show(self):
        parsed_args = self.check_parser(
            self.cmd,
            [identity_fakes.project_id],
            [('project', identity_fakes.project_id)],
        )

        # take_action() returns (columns, data)
        columns, data = self.cmd.take_action(parsed_args)

        self.projects_mock.get.assert_called_with(
            identity_fakes.project_id,
        )
        self.assertEqual(columns, ('description', 'enabled', 'id', 'name'))
        expected = (
            identity_fakes.project_description,
            True,
            identity_fakes.project_id,
            identity_fakes.project_name,
        )
        self.assertEqual(data, expected)
| |
#
#
# Copyright (C) 2006, 2007, 2008, 2010, 2011, 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Functions to bootstrap a new cluster.
"""
import os
import os.path
import re
import logging
import time
from ganeti.cmdlib import cluster
import ganeti.rpc.node as rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf
from ganeti import serializer
from ganeti import hypervisor
from ganeti.storage import drbd
from ganeti.storage import filestorage
from ganeti import netutils
from ganeti import luxi
from ganeti import jstore
from ganeti import pathutils
from ganeti import runtime
from ganeti import vcluster
# ec_id for InitConfig's temporary reservation manager
_INITCONF_ECID = "initconfig-ecid"
#: After how many seconds daemon must be responsive
_DAEMON_READY_TIMEOUT = 10.0
def GenerateHmacKey(file_name):
  """Writes a new HMAC key.

  @type file_name: str
  @param file_name: Path to output file

  """
  # mode 0400: owner read-only; backup=True preserves any previous key file.
  utils.WriteFile(file_name, data="%s\n" % utils.GenerateSecret(), mode=0400,
                  backup=True)
# pylint: disable=R0913
def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_spice_cert,
                          new_confd_hmac_key, new_cds,
                          rapi_cert_pem=None, spice_cert_pem=None,
                          spice_cacert_pem=None, cds=None,
                          nodecert_file=pathutils.NODED_CERT_FILE,
                          rapicert_file=pathutils.RAPI_CERT_FILE,
                          spicecert_file=pathutils.SPICE_CERT_FILE,
                          spicecacert_file=pathutils.SPICE_CACERT_FILE,
                          hmackey_file=pathutils.CONFD_HMAC_KEY,
                          cds_file=pathutils.CLUSTER_DOMAIN_SECRET_FILE):
  """Updates the cluster certificates, keys and secrets.

  @type new_cluster_cert: bool
  @param new_cluster_cert: Whether to generate a new cluster certificate
  @type new_rapi_cert: bool
  @param new_rapi_cert: Whether to generate a new RAPI certificate
  @type new_spice_cert: bool
  @param new_spice_cert: Whether to generate a new SPICE certificate
  @type new_confd_hmac_key: bool
  @param new_confd_hmac_key: Whether to generate a new HMAC key
  @type new_cds: bool
  @param new_cds: Whether to generate a new cluster domain secret
  @type rapi_cert_pem: string
  @param rapi_cert_pem: New RAPI certificate in PEM format
  @type spice_cert_pem: string
  @param spice_cert_pem: New SPICE certificate in PEM format
  @type spice_cacert_pem: string
  @param spice_cacert_pem: Certificate of the CA that signed the SPICE
                           certificate, in PEM format
  @type cds: string
  @param cds: New cluster domain secret
  @type nodecert_file: string
  @param nodecert_file: optional override of the node cert file path
  @type rapicert_file: string
  @param rapicert_file: optional override of the rapi cert file path
  @type spicecert_file: string
  @param spicecert_file: optional override of the spice cert file path
  @type spicecacert_file: string
  @param spicecacert_file: optional override of the spice CA cert file path
  @type hmackey_file: string
  @param hmackey_file: optional override of the hmac key file path

  """
  # pylint: disable=R0913
  # noded SSL certificate
  utils.GenerateNewSslCert(
    new_cluster_cert, nodecert_file, 1,
    "Generating new cluster certificate at %s" % nodecert_file)

  # confd HMAC key: regenerate on request or when it is missing entirely.
  if new_confd_hmac_key or not os.path.exists(hmackey_file):
    logging.debug("Writing new confd HMAC key to %s", hmackey_file)
    GenerateHmacKey(hmackey_file)

  if rapi_cert_pem:
    # Assume rapi_pem contains a valid PEM-formatted certificate and key
    logging.debug("Writing RAPI certificate at %s", rapicert_file)
    utils.WriteFile(rapicert_file, data=rapi_cert_pem, backup=True)
  else:
    utils.GenerateNewSslCert(
      new_rapi_cert, rapicert_file, 1,
      "Generating new RAPI certificate at %s" % rapicert_file)

  # SPICE: an explicitly supplied PEM wins over regeneration.
  spice_cert_exists = os.path.exists(spicecert_file)
  spice_cacert_exists = os.path.exists(spicecacert_file)
  if spice_cert_pem:
    # spice_cert_pem implies also spice_cacert_pem
    logging.debug("Writing SPICE certificate at %s", spicecert_file)
    utils.WriteFile(spicecert_file, data=spice_cert_pem, backup=True)
    logging.debug("Writing SPICE CA certificate at %s", spicecacert_file)
    utils.WriteFile(spicecacert_file, data=spice_cacert_pem, backup=True)
  elif new_spice_cert or not spice_cert_exists:
    if spice_cert_exists:
      utils.CreateBackup(spicecert_file)
    if spice_cacert_exists:
      utils.CreateBackup(spicecacert_file)

    logging.debug("Generating new self-signed SPICE certificate at %s",
                  spicecert_file)
    (_, cert_pem) = utils.GenerateSelfSignedSslCert(spicecert_file, 1)

    # Self-signed certificate -> the public certificate is also the CA public
    # certificate
    logging.debug("Writing the public certificate to %s",
                  spicecert_file)
    utils.io.WriteFile(spicecacert_file, mode=0400, data=cert_pem)

  # Cluster domain secret
  if cds:
    logging.debug("Writing cluster domain secret to %s", cds_file)
    utils.WriteFile(cds_file, data=cds, backup=True)
  elif new_cds or not os.path.exists(cds_file):
    logging.debug("Generating new cluster domain secret at %s", cds_file)
    GenerateHmacKey(cds_file)
def _InitGanetiServerSetup(master_name):
  """Setup the necessary configuration for the initial node daemon.

  Generates the cluster node certificate and secrets, starts noded via
  daemon-util and waits until it answers RPC queries.

  @type master_name: str
  @param master_name: Name of the master node

  """
  # Generate cluster secrets (new node cert only; keep the rest).
  GenerateClusterCrypto(True, False, False, False, False)

  start_result = utils.RunCmd([pathutils.DAEMON_UTIL, "start",
                               constants.NODED])
  if start_result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (start_result.cmd, start_result.exit_code,
                              start_result.output))

  _WaitForNodeDaemon(master_name)
def _WaitForNodeDaemon(node_name):
  """Block until the node daemon on *node_name* answers a version RPC.

  Raises OpExecError after _DAEMON_READY_TIMEOUT seconds of retries.

  """
  def _CheckNodeDaemon():
    # Pylint bug <http://www.logilab.org/ticket/35642>
    # pylint: disable=E1101
    answer = rpc.BootstrapRunner().call_version([node_name])[node_name]
    if answer.fail_msg:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckNodeDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Node daemon on %s didn't answer queries within"
                             " %s seconds" % (node_name,
                                              _DAEMON_READY_TIMEOUT))
def _WaitForMasterDaemon():
  """Block until the master daemon answers a configuration query.

  Raises OpExecError after _DAEMON_READY_TIMEOUT seconds of retries.

  """
  def _CheckMasterDaemon():
    try:
      client = luxi.Client()
      (cluster_name, ) = client.QueryConfigValues(["cluster_name"])
    except Exception:
      # Any failure (connect, query) just means "not ready yet".
      raise utils.RetryAgain()

    logging.debug("Received cluster name %s from master", cluster_name)

  try:
    utils.Retry(_CheckMasterDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Master daemon didn't answer queries within"
                             " %s seconds" % _DAEMON_READY_TIMEOUT)
def _WaitForSshDaemon(hostname, port):
  """Block until sshd on *hostname*:*port* accepts TCP connections.

  Raises OpExecError after _DAEMON_READY_TIMEOUT seconds of retries.

  """
  family = ssconf.SimpleStore().GetPrimaryIPFamily()
  hostip = netutils.GetHostname(name=hostname, family=family).ip

  def _CheckSshDaemon():
    if not netutils.TcpPing(hostip, port, timeout=1.0,
                            live_port_needed=True):
      raise utils.RetryAgain()
    logging.debug("SSH daemon on %s:%s (IP address %s) has become"
                  " responsive", hostname, port, hostip)

  try:
    utils.Retry(_CheckSshDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("SSH daemon on %s:%s (IP address %s) didn't"
                             " become responsive within %s seconds" %
                             (hostname, port, hostip, _DAEMON_READY_TIMEOUT))
def _InitFileStorageDir(file_storage_dir):
  """Initialize if needed the file storage.

  @param file_storage_dir: the user-supplied value
  @return: either empty string (if file storage was disabled at build
      time) or the normalized path to the storage directory

  """
  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("File storage directory '%s' is not an absolute"
                               " path" % file_storage_dir, errors.ECODE_INVAL)

  if not os.path.exists(file_storage_dir):
    try:
      # 0750: owner rwx, group rx, no access for others.
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" % (file_storage_dir, err),
                                 errors.ECODE_ENVIRON)

  # The path may exist but be a regular file/symlink -- reject that too.
  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir,
                               errors.ECODE_ENVIRON)
  return file_storage_dir
def _PrepareFileBasedStorage(
    enabled_disk_templates, file_storage_dir,
    default_dir, file_disk_template, _storage_path_acceptance_fn,
    init_fn=_InitFileStorageDir, acceptance_fn=None):
  """Checks if a file-base storage type is enabled and inits the dir.

  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of enabled disk templates
  @type file_storage_dir: string
  @param file_storage_dir: the file storage directory
  @type default_dir: string
  @param default_dir: default file storage directory when C{file_storage_dir}
      is 'None'
  @type file_disk_template: string
  @param file_disk_template: a disk template whose storage type is 'ST_FILE',
      'ST_SHARED_FILE' or 'ST_GLUSTER'
  @type _storage_path_acceptance_fn: function
  @param _storage_path_acceptance_fn: checks whether the given file-based
      storage directory is acceptable
  @see: C{cluster.CheckFileBasedStoragePathVsEnabledDiskTemplates} for details
  @rtype: string
  @returns: the name of the actual file storage directory

  """
  assert (file_disk_template in utils.storage.GetDiskTemplatesOfStorageTypes(
            constants.ST_FILE, constants.ST_SHARED_FILE, constants.ST_GLUSTER
         ))

  if file_storage_dir is None:
    file_storage_dir = default_dir
  if not acceptance_fn:
    # Default: check the path against the allowed-paths file, accepting an
    # exact match.
    acceptance_fn = \
      lambda path: filestorage.CheckFileStoragePathAcceptance(
          path, exact_match_ok=True)

  # Warn (via logging.warning) about path/template mismatches.
  _storage_path_acceptance_fn(logging.warning, file_storage_dir,
                              enabled_disk_templates)

  file_storage_enabled = file_disk_template in enabled_disk_templates
  if file_storage_enabled:
    try:
      acceptance_fn(file_storage_dir)
    except errors.FileStoragePathError as e:
      raise errors.OpPrereqError(str(e))
    result_file_storage_dir = init_fn(file_storage_dir)
  else:
    # Template disabled: pass the (possibly defaulted) path through untouched.
    result_file_storage_dir = file_storage_dir
  return result_file_storage_dir
def _PrepareFileStorage(
    enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
    acceptance_fn=None):
  """Checks if file storage is enabled and inits the dir.

  @see: C{_PrepareFileBasedStorage}

  """
  default_dir = pathutils.DEFAULT_FILE_STORAGE_DIR
  check_fn = cluster.CheckFileStoragePathVsEnabledDiskTemplates
  return _PrepareFileBasedStorage(
      enabled_disk_templates, file_storage_dir,
      default_dir, constants.DT_FILE, check_fn,
      init_fn=init_fn, acceptance_fn=acceptance_fn)
def _PrepareSharedFileStorage(
    enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
    acceptance_fn=None):
  """Checks if shared file storage is enabled and inits the dir.

  @see: C{_PrepareFileBasedStorage}

  """
  default_dir = pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR
  check_fn = cluster.CheckSharedFileStoragePathVsEnabledDiskTemplates
  return _PrepareFileBasedStorage(
      enabled_disk_templates, file_storage_dir,
      default_dir, constants.DT_SHARED_FILE, check_fn,
      init_fn=init_fn, acceptance_fn=acceptance_fn)
def _PrepareGlusterStorage(
    enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
    acceptance_fn=None):
  """Checks if gluster storage is enabled and inits the dir.

  @see: C{_PrepareFileBasedStorage}

  """
  default_dir = pathutils.DEFAULT_GLUSTER_STORAGE_DIR
  check_fn = cluster.CheckGlusterStoragePathVsEnabledDiskTemplates
  return _PrepareFileBasedStorage(
      enabled_disk_templates, file_storage_dir,
      default_dir, constants.DT_GLUSTER, check_fn,
      init_fn=init_fn, acceptance_fn=acceptance_fn)
def _InitCheckEnabledDiskTemplates(enabled_disk_templates):
  """Checks the sanity of the enabled disk templates.

  Rejects an empty list and any template not in constants.DISK_TEMPLATES.

  """
  if not enabled_disk_templates:
    raise errors.OpPrereqError("Enabled disk templates list must contain at"
                               " least one member", errors.ECODE_INVAL)
  unknown_templates = \
    set(enabled_disk_templates) - constants.DISK_TEMPLATES
  if unknown_templates:
    raise errors.OpPrereqError("Enabled disk templates list contains invalid"
                               " entries: %s" % unknown_templates,
                               errors.ECODE_INVAL)
def _RestrictIpolicyToEnabledDiskTemplates(ipolicy, enabled_disk_templates):
  """Restricts the ipolicy's disk templates to the enabled ones.

  This function clears the ipolicy's list of allowed disk templates from the
  ones that are not enabled by the cluster.

  @type ipolicy: dict
  @param ipolicy: the instance policy
  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: the list of cluster-wide enabled disk
      templates

  """
  assert constants.IPOLICY_DTS in ipolicy
  allowed = set(ipolicy[constants.IPOLICY_DTS])
  enabled = set(enabled_disk_templates)
  # In-place update: drop every allowed template the cluster has not enabled.
  ipolicy[constants.IPOLICY_DTS] = list(allowed & enabled)
def _InitCheckDrbdHelper(drbd_helper, drbd_enabled):
  """Checks the DRBD usermode helper.

  @type drbd_helper: string
  @param drbd_helper: name of the DRBD usermode helper that the system should
      use
  @type drbd_enabled: bool
  @param drbd_enabled: whether the drbd disk template is enabled; when False
      the helper is not checked at all

  """
  if not drbd_enabled:
    return

  # A helper of None means "don't verify"; only an explicit value is checked
  # against what the kernel currently reports.
  if drbd_helper is not None:
    try:
      curr_helper = drbd.DRBD8.GetUsermodeHelper()
    except errors.BlockDeviceError, err:
      raise errors.OpPrereqError("Error while checking drbd helper"
                                 " (disable drbd with --enabled-disk-templates"
                                 " if you are not using drbd): %s" % str(err),
                                 errors.ECODE_ENVIRON)
    if drbd_helper != curr_helper:
      raise errors.OpPrereqError("Error: requiring %s as drbd helper but %s"
                                 " is the current helper" % (drbd_helper,
                                                             curr_helper),
                                 errors.ECODE_INVAL)
def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913, R0914
                master_netmask, master_netdev, file_storage_dir,
                shared_file_storage_dir, gluster_storage_dir,
                candidate_pool_size, secondary_ip=None,
                vg_name=None, beparams=None, nicparams=None, ndparams=None,
                hvparams=None, diskparams=None, enabled_hypervisors=None,
                modify_etc_hosts=True, modify_ssh_setup=True,
                maintain_node_health=False, drbd_helper=None, uid_pool=None,
                default_iallocator=None, default_iallocator_params=None,
                primary_ip_version=None, ipolicy=None,
                prealloc_wipe_disks=False, use_external_mip_script=False,
                hv_state=None, disk_state=None, enabled_disk_templates=None,
                install_image=None, zeroing_image=None, compression_tools=None,
                enabled_user_shutdown=False):
  """Initialise the cluster.

  Performs the one-time setup of a new cluster: environment sanity
  checks, storage preparation, parameter normalisation, creation of the
  initial configuration and (re)start of the cluster daemons.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size

  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of disk_templates to be used in this
    cluster

  @type enabled_user_shutdown: bool
  @param enabled_user_shutdown: whether user shutdown is enabled cluster
    wide

  """
  # TODO: complete the docstring
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised",
                               errors.ECODE_STATE)

  data_dir = vcluster.AddNodePrefix(pathutils.DATA_DIR)
  queue_dir = vcluster.AddNodePrefix(pathutils.QUEUE_DIR)
  archive_dir = vcluster.AddNodePrefix(pathutils.JOB_QUEUE_ARCHIVE_DIR)
  # Refuse to initialise on top of left-overs from a previous cluster.
  for ddir in [queue_dir, data_dir, archive_dir]:
    if os.path.isdir(ddir):
      for entry in os.listdir(ddir):
        if not os.path.isdir(os.path.join(ddir, entry)):
          raise errors.OpPrereqError(
            "%s contains non-directory enries like %s. Remove left-overs of an"
            " old cluster before initialising a new one" % (ddir, entry),
            errors.ECODE_STATE)

  if not enabled_hypervisors:
    raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
  if invalid_hvs:
    raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                               " entries: %s" % invalid_hvs,
                               errors.ECODE_INVAL)

  _InitCheckEnabledDiskTemplates(enabled_disk_templates)

  try:
    ipcls = netutils.IPAddress.GetClassFromIpVersion(primary_ip_version)
  except errors.ProgrammerError:
    raise errors.OpPrereqError("Invalid primary ip version: %d." %
                               primary_ip_version, errors.ECODE_INVAL)

  # The local host must resolve to a valid, non-loopback, owned address
  # of the requested IP family.
  hostname = netutils.GetHostname(family=ipcls.family)
  if not ipcls.IsValid(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d"
                               " address." % (hostname.ip, primary_ip_version),
                               errors.ECODE_INVAL)

  if ipcls.IsLoopback(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback"
                               " address. Please fix DNS or %s." %
                               (hostname.ip, pathutils.ETC_HOSTS),
                               errors.ECODE_ENVIRON)

  if not ipcls.Own(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host" %
                               hostname.ip, errors.ECODE_ENVIRON)

  clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)

  # The cluster IP must be free at this point.
  if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, timeout=5):
    raise errors.OpPrereqError("Cluster IP already active",
                               errors.ECODE_NOTUNIQUE)

  if not secondary_ip:
    if primary_ip_version == constants.IP6_VERSION:
      raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
                                 " IPv4 address must be given as secondary",
                                 errors.ECODE_INVAL)
    secondary_ip = hostname.ip

  if not netutils.IP4Address.IsValid(secondary_ip):
    raise errors.OpPrereqError("Secondary IP address (%s) has to be a valid"
                               " IPv4 address." % secondary_ip,
                               errors.ECODE_INVAL)

  if not netutils.IP4Address.Own(secondary_ip):
    raise errors.OpPrereqError("You gave %s as secondary IP,"
                               " but it does not belong to this host." %
                               secondary_ip, errors.ECODE_ENVIRON)

  if master_netmask is not None:
    if not ipcls.ValidateNetmask(master_netmask):
      raise errors.OpPrereqError("CIDR netmask (%s) not valid for IPv%s " %
                                 (master_netmask, primary_ip_version),
                                 errors.ECODE_INVAL)
  else:
    # Default to a single-host netmask for the chosen IP family.
    master_netmask = ipcls.iplen

  if vg_name:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus, errors.ECODE_INVAL)

  drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates
  _InitCheckDrbdHelper(drbd_helper, drbd_enabled)

  logging.debug("Stopping daemons (if any are running)")
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-all"])
  if result.failed:
    raise errors.OpExecError("Could not stop daemons, command %s"
                             " had exitcode %s and error '%s'" %
                             (result.cmd, result.exit_code, result.output))

  file_storage_dir = _PrepareFileStorage(enabled_disk_templates,
                                         file_storage_dir)
  shared_file_storage_dir = _PrepareSharedFileStorage(enabled_disk_templates,
                                                      shared_file_storage_dir)
  gluster_storage_dir = _PrepareGlusterStorage(enabled_disk_templates,
                                               gluster_storage_dir)

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
                               errors.ECODE_INVAL)

  if not nicparams.get('mode', None) == constants.NIC_MODE_OVS:
    # Do not do this check if mode=openvswitch, since the openvswitch is not
    # created yet
    result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
    if result.failed:
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                 (master_netdev,
                                  result.output.strip()), errors.ECODE_INVAL)

  dirs = [(pathutils.RUN_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  # Normalise and validate the various parameter dictionaries.
  objects.UpgradeBeParams(beparams)
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  objects.NIC.CheckParameterSyntax(nicparams)

  full_ipolicy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy)
  _RestrictIpolicyToEnabledDiskTemplates(full_ipolicy, enabled_disk_templates)

  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
  else:
    ndparams = dict(constants.NDC_DEFAULTS)

  # This is ugly, as we modify the dict itself
  # FIXME: Make utils.ForceDictType pure functional or write a wrapper
  # around it
  if hv_state:
    for hvname, hvs_data in hv_state.items():
      utils.ForceDictType(hvs_data, constants.HVSTS_PARAMETER_TYPES)
      hv_state[hvname] = objects.Cluster.SimpleFillHvState(hvs_data)
  else:
    hv_state = dict((hvname, constants.HVST_DEFAULTS)
                    for hvname in enabled_hypervisors)

  # FIXME: disk_state has no default values yet
  if disk_state:
    for storage, ds_data in disk_state.items():
      if storage not in constants.DS_VALID_TYPES:
        raise errors.OpPrereqError("Invalid storage type in disk state: %s" %
                                   storage, errors.ECODE_INVAL)
      for ds_name, state in ds_data.items():
        utils.ForceDictType(state, constants.DSS_PARAMETER_TYPES)
        ds_data[ds_name] = objects.Cluster.SimpleFillDiskState(state)

  # hvparams is a mapping of hypervisor->hvparams dict
  for hv_name, hv_params in hvparams.iteritems():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # diskparams is a mapping of disk-template->diskparams dict
  for template, dt_params in diskparams.items():
    param_keys = set(dt_params.keys())
    default_param_keys = set(constants.DISK_DT_DEFAULTS[template].keys())
    if not (param_keys <= default_param_keys):
      unknown_params = param_keys - default_param_keys
      raise errors.OpPrereqError("Invalid parameters for disk template %s:"
                                 " %s" % (template,
                                          utils.CommaJoin(unknown_params)),
                                 errors.ECODE_INVAL)
    utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
    if template == constants.DT_DRBD8 and vg_name is not None:
      # The default METAVG value is equal to the VG name set at init time,
      # if provided
      dt_params[constants.DRBD_DEFAULT_METAVG] = vg_name

  try:
    utils.VerifyDictOptions(diskparams, constants.DISK_DT_DEFAULTS)
  except errors.OpPrereqError, err:
    raise errors.OpPrereqError("While verify diskparam options: %s" % err,
                               errors.ECODE_INVAL)

  # set up ssh config and /etc/hosts
  rsa_sshkey = ""
  dsa_sshkey = ""
  if os.path.isfile(pathutils.SSH_HOST_RSA_PUB):
    sshline = utils.ReadFile(pathutils.SSH_HOST_RSA_PUB)
    rsa_sshkey = sshline.split(" ")[1]
  if os.path.isfile(pathutils.SSH_HOST_DSA_PUB):
    sshline = utils.ReadFile(pathutils.SSH_HOST_DSA_PUB)
    dsa_sshkey = sshline.split(" ")[1]
  if not rsa_sshkey and not dsa_sshkey:
    raise errors.OpPrereqError("Failed to find SSH public keys",
                               errors.ECODE_ENVIRON)

  if modify_etc_hosts:
    utils.AddHostToEtcHosts(hostname.name, hostname.ip)

  if modify_ssh_setup:
    ssh.InitSSHSetup()

  if default_iallocator is not None:
    alloc_script = utils.FindFile(default_iallocator,
                                  constants.IALLOCATOR_SEARCH_PATH,
                                  os.path.isfile)
    if alloc_script is None:
      raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                 " specified" % default_iallocator,
                                 errors.ECODE_INVAL)
  else:
    # default to htools
    if utils.FindFile(constants.IALLOC_HAIL,
                      constants.IALLOCATOR_SEARCH_PATH,
                      os.path.isfile):
      default_iallocator = constants.IALLOC_HAIL

  # check if we have all the users we need
  try:
    runtime.GetEnts()
  except errors.ConfigurationError, err:
    raise errors.OpPrereqError("Required system user/group missing: %s" %
                               err, errors.ECODE_ENVIRON)

  candidate_certs = {}

  now = time.time()

  if compression_tools is not None:
    cluster.CheckCompressionTools(compression_tools)

  # All data collectors start enabled with the default polling interval.
  initial_dc_config = dict(active=True,
                           interval=int(constants.MOND_TIME_INTERVAL * 1e6))
  data_collectors = dict(
      (name, initial_dc_config.copy())
      for name in constants.DATA_COLLECTOR_NAMES)

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=rsa_sshkey,
    dsahostkeypub=dsa_sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    tcpudp_port_pool=set(),
    master_ip=clustername.ip,
    master_netmask=master_netmask,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    shared_file_storage_dir=shared_file_storage_dir,
    gluster_storage_dir=gluster_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    beparams={constants.PP_DEFAULT: beparams},
    nicparams={constants.PP_DEFAULT: nicparams},
    ndparams=ndparams,
    hvparams=hvparams,
    diskparams=diskparams,
    candidate_pool_size=candidate_pool_size,
    modify_etc_hosts=modify_etc_hosts,
    modify_ssh_setup=modify_ssh_setup,
    uid_pool=uid_pool,
    ctime=now,
    mtime=now,
    maintain_node_health=maintain_node_health,
    data_collectors=data_collectors,
    drbd_usermode_helper=drbd_helper,
    default_iallocator=default_iallocator,
    default_iallocator_params=default_iallocator_params,
    primary_ip_family=ipcls.family,
    prealloc_wipe_disks=prealloc_wipe_disks,
    use_external_mip_script=use_external_mip_script,
    ipolicy=full_ipolicy,
    hv_state_static=hv_state,
    disk_state_static=disk_state,
    enabled_disk_templates=enabled_disk_templates,
    candidate_certs=candidate_certs,
    osparams={},
    osparams_private_cluster={},
    install_image=install_image,
    zeroing_image=zeroing_image,
    compression_tools=compression_tools,
    enabled_user_shutdown=enabled_user_shutdown,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    ctime=now, mtime=now,
                                    )
  InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
  cfg = config.ConfigWriter(offline=True)
  ssh.WriteKnownHostsFile(cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
  cfg.Update(cfg.GetClusterInfo(), logging.error)
  ssconf.WriteSsconfFiles(cfg.GetSsconfValues())

  master_uuid = cfg.GetMasterNode()
  if modify_ssh_setup:
    ssh.InitPubKeyFile(master_uuid)
  # set up the inter-node password and certificate
  _InitGanetiServerSetup(hostname.name)

  logging.debug("Starting daemons")
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-all"])
  if result.failed:
    raise errors.OpExecError("Could not start daemons, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForMasterDaemon()
def InitConfig(version, cluster_config, master_node_config,
               cfg_file=pathutils.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  """
  uuid_generator = config.TemporaryReservationManager()
  # Assign stable UUIDs to the cluster and the master node up front so
  # they can be cross-referenced below.
  cluster_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                _INITCONF_ECID)
  master_node_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                    _INITCONF_ECID)
  cluster_config.master_node = master_node_config.uuid
  nodes = {
    master_node_config.uuid: master_node_config,
    }
  # The master node is the only member of the initial node group.
  default_nodegroup = objects.NodeGroup(
    uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
    name=constants.INITIAL_NODE_GROUP_NAME,
    members=[master_node_config.uuid],
    diskparams={},
    )
  nodegroups = {
    default_nodegroup.uuid: default_nodegroup,
    }
  now = time.time()
  # Fresh cluster: no instances, networks, disks or filters yet.
  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodegroups=nodegroups,
                                   nodes=nodes,
                                   instances={},
                                   networks={},
                                   disks={},
                                   filters={},
                                   serial_no=1,
                                   ctime=now, mtime=now)
  utils.WriteFile(cfg_file,
                  data=serializer.Dump(config_data.ToDict()),
                  mode=0600)
def FinalizeClusterDestroy(master_uuid):
  """Execute the last steps of cluster destroy

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  @param master_uuid: UUID of the (soon to be former) master node

  """
  livelock = utils.livelock.LiveLock("bootstrap_destroy")
  cfg = config.GetConfig(None, livelock)
  modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
  runner = rpc.BootstrapRunner()

  master_name = cfg.GetNodeName(master_uuid)

  master_params = cfg.GetMasterNetworkParameters()
  master_params.uuid = master_uuid
  ems = cfg.GetUseExternalMipScript()
  # Teardown is best-effort: each failure is only logged as a warning so
  # that the remaining steps still get a chance to run.
  result = runner.call_node_deactivate_master_ip(master_name, master_params,
                                                 ems)

  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master IP: %s", msg)

  result = runner.call_node_stop_master(master_name)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master role: %s", msg)

  result = runner.call_node_leave_cluster(master_name, modify_ssh_setup)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not shutdown the node daemon and cleanup"
                    " the node: %s", msg)
def SetupNodeDaemon(opts, cluster_name, node, ssh_port):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param opts: command-line options; only C{debug}, C{verbose} and
    C{ssh_key_check} are read here
  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_port: the SSH port of the new node

  """
  data = {
    constants.NDS_CLUSTER_NAME: cluster_name,
    constants.NDS_NODE_DAEMON_CERTIFICATE:
      utils.ReadFile(pathutils.NODED_CERT_FILE),
    constants.NDS_SSCONF: ssconf.SimpleStore().ReadAll(),
    constants.NDS_START_NODE_DAEMON: True,
    }

  # The setup payload is piped over SSH into the node-daemon-setup helper
  # on the new node, which installs the files and starts ganeti-noded.
  ssh.RunSshCmdWithStdin(cluster_name, node, pathutils.NODE_DAEMON_SETUP,
                         ssh_port, data,
                         debug=opts.debug, verbose=opts.verbose,
                         use_cluster_key=True, ask_key=opts.ssh_key_check,
                         strict_host_check=opts.ssh_key_check,
                         ensure_version=True)

  _WaitForSshDaemon(node, ssh_port)
  _WaitForNodeDaemon(node)
def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  new master.

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes agreement
      (dangerous)

  @rtype: int
  @return: 0 on success, 1 if the new master role could not be fully
    activated

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_names = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This commands must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master, errors.ECODE_INVAL)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ("\n".join(mc_no_master)),
                               errors.ECODE_STATE)

  if not no_voting:
    # Ask the other nodes who they believe the master is; the top-voted
    # entry must agree with our view of the old master.
    vote_list = GatherMasterVotes(node_names)

    if vote_list:
      voted_master = vote_list[0][0]
      if voted_master is None:
        raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
                                   " not respond.", errors.ECODE_ENVIRON)
      elif voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   " of this node." %
                                   (old_master, voted_master),
                                   errors.ECODE_STATE)
  # end checks

  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  try:
    # Forcefully start WConfd so that we can access the configuration
    result = utils.RunCmd([pathutils.DAEMON_UTIL,
                           "start", constants.WCONFD, "--force-node",
                           "--no-voting", "--yes-do-it"])
    if result.failed:
      raise errors.OpPrereqError("Could not start the configuration daemon,"
                                 " command %s had exitcode %s and error %s" %
                                 (result.cmd, result.exit_code, result.output),
                                 errors.ECODE_NOENT)

    # instantiate a real config writer, as we now know we have the
    # configuration data
    livelock = utils.livelock.LiveLock("bootstrap_failover")
    cfg = config.GetConfig(None, livelock, accept_foreign=True)

    old_master_node = cfg.GetNodeInfoByName(old_master)
    if old_master_node is None:
      raise errors.OpPrereqError("Could not find old master node '%s' in"
                                 " cluster configuration." % old_master,
                                 errors.ECODE_NOENT)

    cluster_info = cfg.GetClusterInfo()
    new_master_node = cfg.GetNodeInfoByName(new_master)
    if new_master_node is None:
      raise errors.OpPrereqError("Could not find new master node '%s' in"
                                 " cluster configuration." % new_master,
                                 errors.ECODE_NOENT)

    cluster_info.master_node = new_master_node.uuid
    # this will also regenerate the ssconf files, since we updated the
    # cluster info
    cfg.Update(cluster_info, logging.error)

    # if cfg.Update worked, then it means the old master daemon won't be
    # able now to write its own config file (we rely on locking in both
    # backend.UploadFile() and ConfigWriter._Write(); hence the next
    # step is to kill the old master

    logging.info("Stopping the master daemon on node %s", old_master)

    runner = rpc.BootstrapRunner()
    master_params = cfg.GetMasterNetworkParameters()
    master_params.uuid = old_master_node.uuid
    ems = cfg.GetUseExternalMipScript()
    result = runner.call_node_deactivate_master_ip(old_master,
                                                   master_params, ems)

    msg = result.fail_msg
    if msg:
      logging.warning("Could not disable the master IP: %s", msg)

    result = runner.call_node_stop_master(old_master)
    msg = result.fail_msg
    if msg:
      logging.error("Could not disable the master role on the old master"
                    " %s, please disable manually: %s", old_master, msg)
  except errors.ConfigurationError, err:
    logging.error("Error while trying to set the new master: %s",
                  str(err))
    return 1
  finally:
    # stop WConfd again:
    result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop", constants.WCONFD])
    if result.failed:
      logging.error("Could not stop the configuration daemon,"
                    " command %s had exitcode %s and error %s",
                    result.cmd, result.exit_code, result.output)

  logging.info("Checking master IP non-reachability...")

  master_ip = sstore.GetMasterIP()
  total_timeout = 30

  # Here we have a phase where no master should be running
  def _check_ip():
    if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT):
      raise utils.RetryAgain()

  try:
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout)
  except utils.RetryTimeout:
    logging.warning("The master IP is still reachable after %s seconds,"
                    " continuing but activating the master on the current"
                    " node will probably fail", total_timeout)

  if jstore.CheckDrainFlag():
    logging.info("Undraining job queue")
    jstore.SetDrainFlag(False)

  logging.info("Starting the master daemons on the new master")

  result = rpc.BootstrapRunner().call_node_start_master_daemons(new_master,
                                                                no_voting)
  msg = result.fail_msg
  if msg:
    logging.error("Could not start the master role on the new master"
                  " %s, please check: %s", new_master, msg)
    rcode = 1

  logging.info("Master failed over from %s to %s", old_master, new_master)
  return rcode
def GetMaster():
  """Returns the current master node.

  This is a separate function in bootstrap since it's needed by
  gnt-cluster, and instead of importing directly ssconf, it's better
  to abstract it in bootstrap, where we do use ssconf in other
  functions too.

  """
  store = ssconf.SimpleStore()
  master_name, _myself = ssconf.GetMasterAndMyself(store)
  return master_name
def GatherMasterVotes(node_names):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since we know that (bugs aside)
  since we use the same source for configuration information for both
  backend and boostrap, we'll always vote for ourselves.

  @type node_names: list
  @param node_names: the list of nodes to query for master info; the current
    node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)

  """
  myself = netutils.Hostname.GetSysName()
  try:
    # NOTE: this intentionally mutates the caller's list, as documented
    node_names.remove(myself)
  except ValueError:
    pass
  if not node_names:
    # no nodes left (eventually after removing myself)
    return []
  results = rpc.BootstrapRunner().call_master_node_name(node_names)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_names))]

  # Tally one vote per responding node; failures are counted under None.
  votes = {}
  for node_name in results:
    nres = results[node_name]
    msg = nres.fail_msg
    if msg:
      logging.warning("Error contacting node %s: %s", node_name, msg)
      node = None
    else:
      node = nres.payload
    votes[node] = votes.get(node, 0) + 1

  vote_list = list(votes.items())
  # sort first on number of votes then on name, since we want None
  # sorted later if we have the half of the nodes not responding, and
  # half voting all for the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)
  return vote_list
| |
import time
from utils.data_utils import prepare_datasets
from utils import constants as Constants
from model import Model
import torch
import os
import json
from torch.utils.data import DataLoader
from utils.timer import Timer
from utils.logger import ModelLogger
from utils.eval_utils import AverageMeter
from utils.data_utils import sanitize_input, vectorize_input
class ModelHandler(object):
    """High level model_handler that trains/validates/tests the network,
    tracks and logs metrics.
    """

    def __init__(self, config):
        """Set up device, data loaders, metric trackers and the model.

        config: dict of run settings; keys read here include 'dir',
        'pretrained', 'cuda', 'cuda_id', 'batch_size', 'shuffle'.
        """
        self.logger = ModelLogger(config, dirname=config['dir'], pretrained=config['pretrained'])
        self.dirname = self.logger.dirname
        cuda = config['cuda']
        cuda_id = config['cuda_id']
        if not cuda:
            self.device = torch.device('cpu')
        else:
            # cuda_id < 0 means "any CUDA device"; otherwise pin to that GPU.
            self.device = torch.device('cuda' if cuda_id < 0 else 'cuda:%d' % cuda_id)

        datasets = prepare_datasets(config)
        train_set = datasets['train']
        dev_set = datasets['dev']
        test_set = datasets['test']

        # Evaluation Metrics:
        self._train_loss = AverageMeter()
        self._train_f1 = AverageMeter()
        self._train_em = AverageMeter()
        self._dev_f1 = AverageMeter()
        self._dev_em = AverageMeter()

        # collate_fn is identity: batches are sanitized/vectorized later in
        # _run_epoch rather than by the DataLoader.
        if train_set:
            self.train_loader = DataLoader(train_set, batch_size=config['batch_size'],
                                           shuffle=config['shuffle'], collate_fn=lambda x: x, pin_memory=True)
            self._n_train_batches = len(train_set) // config['batch_size']
        else:
            self.train_loader = None

        if dev_set:
            self.dev_loader = DataLoader(dev_set, batch_size=config['batch_size'],
                                         shuffle=False, collate_fn=lambda x: x, pin_memory=True)
            self._n_dev_batches = len(dev_set) // config['batch_size']
        else:
            self.dev_loader = None

        if test_set:
            self.test_loader = DataLoader(test_set, batch_size=config['batch_size'], shuffle=False,
                                          collate_fn=lambda x: x, pin_memory=True)
            self._n_test_batches = len(test_set) // config['batch_size']
            self._n_test_examples = len(test_set)
        else:
            self.test_loader = None

        self._n_train_examples = 0
        self.model = Model(config, train_set)
        self.model.network = self.model.network.to(self.device)
        # Use the model's (possibly restored) config from here on.
        self.config = self.model.config
        self.is_test = False

    def train(self):
        """Run the train/validate loop until the stop condition triggers.

        Starts with one validation-only epoch to establish the initial
        best F1/EM, then alternates training and validation epochs,
        checkpointing whenever dev F1 improves.
        """
        if self.train_loader is None or self.dev_loader is None:
            print("No training set or dev set specified -- skipped training.")
            return

        self.is_test = False
        timer = Timer("Train")
        self._epoch = self._best_epoch = 0

        # Epoch 0: evaluate the (possibly pretrained) model before training
        # so "best" starts from a real baseline.
        if self.dev_loader is not None:
            print("\n>>> Dev Epoch: [{} / {}]".format(self._epoch, self.config['max_epochs']))
            self._run_epoch(self.dev_loader, training=False, verbose=self.config['verbose'])
            timer.interval("Validation Epoch {}".format(self._epoch))
            format_str = "Validation Epoch {} -- F1: {:0.2f}, EM: {:0.2f} --"
            print(format_str.format(self._epoch, self._dev_f1.mean(), self._dev_em.mean()))
            self._best_f1 = self._dev_f1.mean()
            self._best_em = self._dev_em.mean()
            if self.config['save_params']:
                self.model.save(self.dirname)

        self._reset_metrics()

        while self._stop_condition(self._epoch):
            self._epoch += 1

            print("\n>>> Train Epoch: [{} / {}]".format(self._epoch, self.config['max_epochs']))
            self._run_epoch(self.train_loader, training=True, verbose=self.config['verbose'])
            train_epoch_time = timer.interval("Training Epoch {}".format(self._epoch))
            format_str = "Training Epoch {} -- Loss: {:0.4f}, F1: {:0.2f}, EM: {:0.2f} --"
            print(format_str.format(self._epoch, self._train_loss.mean(),
                                    self._train_f1.mean(), self._train_em.mean()))

            print("\n>>> Dev Epoch: [{} / {}]".format(self._epoch, self.config['max_epochs']))
            self._run_epoch(self.dev_loader, training=False, verbose=self.config['verbose'])
            timer.interval("Validation Epoch {}".format(self._epoch))
            format_str = "Validation Epoch {} -- F1: {:0.2f}, EM: {:0.2f} --"
            print(format_str.format(self._epoch, self._dev_f1.mean(), self._dev_em.mean()))

            if self._best_f1 <= self._dev_f1.mean():  # Can be one of loss, f1, or em.
                self._best_epoch = self._epoch
                self._best_f1 = self._dev_f1.mean()
                self._best_em = self._dev_em.mean()
                if self.config['save_params']:
                    self.model.save(self.dirname)
                print("!!! Updated: F1: {:0.2f}, EM: {:0.2f}".format(self._best_f1, self._best_em))

            self._reset_metrics()
            self.logger.log(self._train_loss.last, Constants._TRAIN_LOSS_EPOCH_LOG)
            self.logger.log(self._train_f1.last, Constants._TRAIN_F1_EPOCH_LOG)
            self.logger.log(self._train_em.last, Constants._TRAIN_EM_EPOCH_LOG)
            self.logger.log(self._dev_f1.last, Constants._DEV_F1_EPOCH_LOG)
            self.logger.log(self._dev_em.last, Constants._DEV_EM_EPOCH_LOG)
            self.logger.log(train_epoch_time, Constants._TRAIN_EPOCH_TIME_LOG)

        timer.finish()
        self.training_time = timer.total
        print("Finished Training: {}".format(self.dirname))
        print(self.summary())

    def test(self):
        """Evaluate on the test set and optionally dump predictions to JSON."""
        if self.test_loader is None:
            print("No testing set specified -- skipped testing.")
            return

        self.is_test = True
        self._reset_metrics()
        timer = Timer("Test")
        output = self._run_epoch(self.test_loader, training=False, verbose=0,
                                 out_predictions=self.config['out_predictions'])

        # 'id' comes back as a (dialog_id, turn_id) pair; flatten it.
        for ex in output:
            _id = ex['id']
            ex['id'] = _id[0]
            ex['turn_id'] = _id[1]

        if self.config['out_predictions']:
            output_file = os.path.join(self.dirname, Constants._PREDICTION_FILE)
            with open(output_file, 'w') as outfile:
                json.dump(output, outfile, indent=4)

        # Test metrics are accumulated in the dev meters (see _update_metrics).
        test_f1 = self._dev_f1.mean()
        test_em = self._dev_em.mean()
        timer.finish()
        print(self.report(self._n_test_batches, None, test_f1, test_em, mode='test'))
        self.logger.log([test_f1, test_em], Constants._TEST_EVAL_LOG)
        print("Finished Testing: {}".format(self.dirname))

    def _run_epoch(self, data_loader, training=True, verbose=10, out_predictions=False):
        """Iterate one pass over data_loader; returns prediction dicts
        when out_predictions is True, otherwise an empty list."""
        start_time = time.time()
        output = []
        for step, input_batch in enumerate(data_loader):
            input_batch = sanitize_input(input_batch, self.config, self.model.word_dict,
                                         self.model.feature_dict, training=training)
            x_batch = vectorize_input(input_batch, self.config, training=training, device=self.device)
            if not x_batch:
                continue  # When there are no target spans present in the batch

            res = self.model.predict(x_batch, update=training, out_predictions=out_predictions)
            loss = res['loss']
            f1 = res['f1']
            em = res['em']
            self._update_metrics(loss, f1, em, x_batch['batch_size'], training=training)

            if training:
                self._n_train_examples += x_batch['batch_size']

            if (verbose > 0) and (step % verbose == 0):
                mode = "train" if training else ("test" if self.is_test else "dev")
                print(self.report(step, loss, f1 * 100, em * 100, mode))
                print('used_time: {:0.2f}s'.format(time.time() - start_time))

            if out_predictions:
                # 'id' shadows the builtin -- renamed to ex_id.
                for ex_id, prediction, span in zip(input_batch['id'], res['predictions'], res['spans']):
                    output.append({'id': ex_id,
                                   'answer': prediction,
                                   'span_start': span[0],
                                   'span_end': span[1]})
        return output

    def report(self, step, loss, f1, em, mode='train'):
        """Format a one-line progress report for the given mode.

        Raises ValueError for an unknown mode.
        """
        if mode == "train":
            format_str = "[train-{}] step: [{} / {}] | exs = {} | loss = {:0.4f} | f1 = {:0.2f} | em = {:0.2f}"
            return format_str.format(self._epoch, step, self._n_train_batches, self._n_train_examples, loss, f1, em)
        elif mode == "dev":
            return "[predict-{}] step: [{} / {}] | f1 = {:0.2f} | em = {:0.2f}".format(
                self._epoch, step, self._n_dev_batches, f1, em)
        elif mode == "test":
            return "[test] | test_exs = {} | step: [{} / {}] | f1 = {:0.2f} | em = {:0.2f}".format(
                self._n_test_examples, step, self._n_test_batches, f1, em)
        else:
            # BUG FIX: the original used '%' on a '{}'-style format string,
            # which raised TypeError instead of the intended ValueError.
            raise ValueError('mode = {} not supported.'.format(mode))

    def summary(self):
        """Return a short multi-line summary of the best validation scores."""
        start = " <<<<<<<<<<<<<<<< MODEL SUMMARY >>>>>>>>>>>>>>>> "
        info = "Best epoch = {}\nDev F1 = {:0.2f}\nDev EM = {:0.2f}".format(
            self._best_epoch, self._best_f1, self._best_em)
        end = " <<<<<<<<<<<<<<<< MODEL SUMMARY >>>>>>>>>>>>>>>> "
        return "\n".join([start, info, end])

    def _update_metrics(self, loss, f1, em, batch_size, training=True):
        """Fold one batch's scores into the running meters.

        Non-training updates (dev AND test) share the dev meters.
        """
        if training:
            self._train_loss.update(loss)
            self._train_f1.update(f1 * 100, batch_size)
            self._train_em.update(em * 100, batch_size)
        else:
            self._dev_f1.update(f1 * 100, batch_size)
            self._dev_em.update(em * 100, batch_size)

    def _reset_metrics(self):
        """Clear all running meters (called between epochs)."""
        self._train_loss.reset()
        self._train_f1.reset()
        self._train_em.reset()
        self._dev_f1.reset()
        self._dev_em.reset()

    def _stop_condition(self, epoch):
        """
        Checks have not exceeded max epochs and has not gone 10 epochs without improvement.
        """
        no_improvement = epoch >= self._best_epoch + 10
        exceeded_max_epochs = epoch >= self.config['max_epochs']
        return not (exceeded_max_epochs or no_improvement)
| |
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
import os
import requests
import json
from urllib.parse import urljoin
from urllib.parse import quote
import electrum
from electrum import bitcoin
from electrum import keystore
from electrum.bitcoin import *
from electrum.mnemonic import Mnemonic
from electrum import version
from electrum.wallet import Multisig_Wallet, Deterministic_Wallet
from electrum.i18n import _
from electrum.plugins import BasePlugin, hook
from electrum.util import NotEnoughFunds
# signing_xpub is hardcoded so that the wallet can be restored from seed, without TrustedCoin's server
signing_xpub = "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL"
billing_xpub = "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU"
SEED_PREFIX = version.SEED_PREFIX_2FA
DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"It uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. To use this service, you will need a smartphone with "
"Google Authenticator installed."),
_("A small fee will be charged on each transaction that uses the "
"remote server. You may check and modify your billing preferences "
"once the installation is complete."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
_("The next step will generate the seed of your wallet. This seed will "
"NOT be saved in your computer, and it must be stored on paper. "
"To be safe from malware, you may want to do this on an offline "
"computer, and move your wallet later to an online computer."),
]
RESTORE_MSG = _("Enter the seed for your 2-factor wallet:")
class TrustedCoinException(Exception):
    """Error raised when a TrustedCoin API call fails.

    Carries the HTTP status code of the failed request; 0 means the
    failure did not come from an HTTP response.
    """

    def __init__(self, message, status_code=0):
        super(TrustedCoinException, self).__init__(message)
        self.status_code = status_code
class TrustedCoinCosignerClient(object):
    """Thin HTTP client for the TrustedCoin cosigning API."""

    def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/'):
        self.base_url = base_url
        self.debug = False
        self.user_agent = user_agent

    def send_request(self, method, relative_url, data=None, headers=None):
        """Send an HTTP request to the API and decode the response.

        :param method: HTTP verb, 'get' or 'post'
        :param relative_url: path relative to base_url
        :param data: query parameters (get) or JSON-encoded body (post)
        :param headers: optional extra HTTP headers to merge into the request
        Returns the parsed JSON object when the server answers with
        content-type application/json, otherwise the raw response text.
        Raises TrustedCoinException on any non-200 status.
        """
        # BUG FIX: transfer_credit passes extra headers, but this method used
        # to accept only (method, relative_url, data) and the call raised
        # TypeError.  `headers` is now an optional, backward-compatible kwarg.
        kwargs = {'headers': {}}
        if headers:
            kwargs['headers'].update(headers)
        if self.user_agent:
            kwargs['headers']['user-agent'] = self.user_agent
        if method == 'get' and data:
            kwargs['params'] = data
        elif method == 'post' and data:
            kwargs['data'] = json.dumps(data)
            kwargs['headers']['content-type'] = 'application/json'
        url = urljoin(self.base_url, relative_url)
        if self.debug:
            print('%s %s %s' % (method, url, data))
        response = requests.request(method, url, **kwargs)
        if self.debug:
            print(response.text)
        if response.status_code != 200:
            message = str(response.text)
            # Prefer the server-provided error message when it sent JSON.
            if response.headers.get('content-type') == 'application/json':
                r = response.json()
                if 'message' in r:
                    message = r['message']
            raise TrustedCoinException(message, response.status_code)
        if response.headers.get('content-type') == 'application/json':
            return response.json()
        else:
            return response.text

    def get_terms_of_service(self, billing_plan='electrum-per-tx-otp'):
        """
        Returns the TOS for the given billing plan as a plain/text unicode string.
        :param billing_plan: the plan to return the terms for
        """
        payload = {'billing_plan': billing_plan}
        return self.send_request('get', 'tos', payload)

    def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-per-tx-otp'):
        """
        Creates a new cosigner resource.
        :param xpubkey1: a bip32 extended public key (customarily the hot key)
        :param xpubkey2: a bip32 extended public key (customarily the cold key)
        :param email: a contact email
        :param billing_plan: the billing plan for the cosigner
        """
        payload = {
            'email': email,
            'xpubkey1': xpubkey1,
            'xpubkey2': xpubkey2,
            'billing_plan': billing_plan,
        }
        return self.send_request('post', 'cosigner', payload)

    def auth(self, id, otp):
        """
        Attempt to authenticate for a particular cosigner.
        :param id: the id of the cosigner
        :param otp: the one time password
        """
        payload = {'otp': otp}
        return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload)

    def get(self, id):
        """ Get billing info """
        return self.send_request('get', 'cosigner/%s' % quote(id))

    def get_challenge(self, id):
        """ Get challenge to reset Google Auth secret """
        return self.send_request('get', 'cosigner/%s/otp_secret' % quote(id))

    def reset_auth(self, id, challenge, signatures):
        """ Reset Google Auth secret """
        payload = {'challenge': challenge, 'signatures': signatures}
        return self.send_request('post', 'cosigner/%s/otp_secret' % quote(id), payload)

    def sign(self, id, transaction, otp):
        """
        Attempt to authenticate for a particular cosigner.
        :param id: the id of the cosigner
        :param transaction: the hex encoded [partially signed] compact transaction to sign
        :param otp: the one time password
        """
        payload = {
            'otp': otp,
            'transaction': transaction
        }
        return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload)

    def transfer_credit(self, id, recipient, otp, signature_callback):
        """
        Transfer a cosigner's credits to another cosigner.
        :param id: the id of the sending cosigner
        :param recipient: the id of the recipient cosigner
        :param otp: the one time password (of the sender)
        :param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig
        """
        payload = {
            'otp': otp,
            'recipient': recipient,
            # BUG FIX: `time` was used here without ever being imported,
            # raising NameError; it is now imported at module level.
            'timestamp': int(time.time()),
        }
        relative_url = 'cosigner/%s/transfer' % quote(id)
        full_url = urljoin(self.base_url, relative_url)
        headers = {
            'x-signature': signature_callback(full_url + '\n' + json.dumps(payload))
        }
        return self.send_request('post', relative_url, payload, headers=headers)
server = TrustedCoinCosignerClient(user_agent="Electrum/" + version.ELECTRUM_VERSION)
class Wallet_2fa(Multisig_Wallet):
    """2-of-3 multisig wallet where the third key is held by TrustedCoin."""

    wallet_type = '2fa'

    def __init__(self, storage):
        # 2-of-3: the user holds x1 (hot) and x2 (normally watching-only);
        # TrustedCoin holds x3 and cosigns on request.
        self.m, self.n = 2, 3
        Deterministic_Wallet.__init__(self, storage)
        self.is_billing = False
        # Populated asynchronously by TrustedCoinPlugin.request_billing_info.
        self.billing_info = None

    def can_sign_without_server(self):
        # True when the x2 keystore has private keys (2FA was disabled at
        # restore time), so the remote cosigner is unnecessary.
        return not self.keystores['x2/'].is_watching_only()

    def get_user_id(self):
        """Return (long_id, short_id) identifying this wallet to the server."""
        return get_user_id(self.storage)

    def get_max_amount(self, config, inputs, recipient, fee):
        """Compute the maximum sendable amount and the fee for given inputs,
        reserving the TrustedCoin service fee output when one is due."""
        from electrum.transaction import Transaction
        sendable = sum(map(lambda x:x['value'], inputs))
        for i in inputs:
            self.add_input_info(i)
        xf = self.extra_fee(config)
        _type, addr = recipient
        if xf and sendable >= xf:
            # Reserve the service fee as an extra output to the billing address.
            billing_address = self.billing_info['billing_address']
            sendable -= xf
            outputs = [(_type, addr, sendable),
                       (TYPE_ADDRESS, billing_address, xf)]
        else:
            outputs = [(_type, addr, sendable)]
        # Build a throwaway transaction just to estimate its size/fee.
        dummy_tx = Transaction.from_io(inputs, outputs)
        if fee is None:
            fee = self.estimate_fee(config, dummy_tx.estimated_size())
        amount = max(0, sendable - fee)
        return amount, fee

    def min_prepay(self):
        # NOTE(review): assumes price_per_tx has already been populated by
        # request_billing_info -- confirm callers only run after that.
        return min(self.price_per_tx.keys())

    def num_prepay(self, config):
        """Number of prepaid transactions chosen by the user (falls back to
        the smallest offered bundle when the configured value is invalid)."""
        default = self.min_prepay()
        n = config.get('trustedcoin_prepay', default)
        if n not in self.price_per_tx:
            n = default
        return n

    def extra_fee(self, config):
        """Return the TrustedCoin service fee due on the next transaction,
        or 0 when no fee applies.

        Returns 0 and kicks off an asynchronous billing-info request when
        billing info has not been fetched yet.
        """
        if self.can_sign_without_server():
            return 0
        if self.billing_info is None:
            self.plugin.start_request_thread(self)
            return 0
        if self.billing_info.get('tx_remaining'):
            # Prepaid transactions still available: nothing extra to pay.
            return 0
        if self.is_billing:
            # The current transaction *is* the billing payment.
            return 0
        n = self.num_prepay(config)
        price = int(self.price_per_tx[n])
        # Sanity cap on the server-quoted price.
        assert price <= 100000 * n
        return price

    def make_unsigned_transaction(self, coins, outputs, config, fixed_fee=None,
                                  change_addr=None, is_sweep=False):
        """Like Multisig_Wallet.make_unsigned_transaction, but appends the
        TrustedCoin fee output when one is due."""
        mk_tx = lambda o: Multisig_Wallet.make_unsigned_transaction(
            self, coins, o, config, fixed_fee, change_addr)
        fee = self.extra_fee(config) if not is_sweep else 0
        if fee:
            address = self.billing_info['billing_address']
            fee_output = (TYPE_ADDRESS, address, fee)
            try:
                tx = mk_tx(outputs + [fee_output])
            except NotEnoughFunds:
                # trustedcoin won't charge if the total inputs is
                # lower than their fee
                tx = mk_tx(outputs)
                if tx.input_value() >= fee:
                    # Funds were sufficient for the fee after all; the
                    # shortage was real, so re-raise.
                    raise
                self.print_error("not charging for this tx")
        else:
            tx = mk_tx(outputs)
        return tx

    def sign_transaction(self, tx, password):
        """Sign locally, then ask the TrustedCoin server for the cosignature.

        NOTE(review): self.auth_code is not initialized in __init__; it
        appears to be set externally (GUI OTP prompt) -- confirm.
        """
        Multisig_Wallet.sign_transaction(self, tx, password)
        if tx.is_complete():
            return
        if not self.auth_code:
            self.print_error("sign_transaction: no auth code")
            return
        long_user_id, short_id = self.get_user_id()
        tx_dict = tx.as_dict()
        raw_tx = tx_dict["hex"]
        r = server.sign(short_id, raw_tx, self.auth_code)
        if r:
            raw_tx = r.get('transaction')
            tx.update(raw_tx)
        self.print_error("twofactor: is complete", tx.is_complete())
        # reset billing_info
        self.billing_info = None
# Utility functions

def get_user_id(storage):
    """Derive the TrustedCoin user-id pair from the wallet's two xpubs.

    Returns (long_id, short_id): long_id is the sha256 of the two xpubs
    sorted and concatenated; short_id is the hex sha256 of long_id.
    """
    xpub_hot = storage.get('x1/')['xpub']
    xpub_cold = storage.get('x2/')['xpub']
    long_id = bitcoin.sha256(''.join(sorted((xpub_hot, xpub_cold))))
    short_id = hashlib.sha256(long_id).hexdigest()
    return long_id, short_id
def make_xpub(xpub, s):
    """Derive a child xpub from *xpub*, using the string *s* as the
    (non-standard) derivation material for a single CKD_pub step."""
    ver, _, _, _, chaincode, pubkey = deserialize_xpub(xpub)
    child_pubkey, child_chaincode = bitcoin._CKD_pub(pubkey, chaincode, s)
    return bitcoin.serialize_xpub(ver, child_chaincode, child_pubkey)
def make_billing_address(wallet, num):
    """Return the wallet's *num*-th billing address, derived locally from
    billing_xpub and the wallet's long user id."""
    long_id = wallet.get_user_id()[0]
    derived = make_xpub(billing_xpub, long_id)
    _, _, _, _, chaincode, pubkey = deserialize_xpub(derived)
    pubkey, chaincode = bitcoin.CKD_pub(pubkey, chaincode, num)
    return bitcoin.public_key_to_p2pkh(pubkey)
class TrustedCoinPlugin(BasePlugin):
    """Plugin wiring the 2FA wallet type into Electrum's wizard and hooks."""

    wallet_class = Wallet_2fa

    def __init__(self, parent, config, name):
        BasePlugin.__init__(self, parent, config, name)
        # Let the wallet class reach back to the plugin instance.
        self.wallet_class.plugin = self
        # Guards against spawning concurrent billing-info request threads.
        self.requesting = False

    @staticmethod
    def is_valid_seed(seed):
        # Only seeds carrying the 2FA prefix belong to this wallet type.
        return bitcoin.is_new_seed(seed, SEED_PREFIX)

    def is_available(self):
        return True

    def is_enabled(self):
        return True

    def can_user_disable(self):
        # The plugin is mandatory for 2FA wallets.
        return False

    @hook
    def get_tx_extra_fee(self, wallet, tx):
        """Return (address, amount) of the TrustedCoin fee output of tx, if any."""
        if type(wallet) != Wallet_2fa:
            return
        address = wallet.billing_info['billing_address']
        for _type, addr, amount in tx.outputs():
            if _type == TYPE_ADDRESS and addr == address:
                return address, amount

    def request_billing_info(self, wallet):
        """Fetch billing info from the server and cache it on the wallet.

        Verifies that the billing address the server reports matches the one
        we derive locally from billing_xpub before trusting it.
        """
        self.print_error("request billing info")
        billing_info = server.get(wallet.get_user_id()[1])
        billing_address = make_billing_address(wallet, billing_info['billing_index'])
        # Never pay to a billing address we cannot re-derive ourselves.
        assert billing_address == billing_info['billing_address']
        wallet.billing_info = billing_info
        wallet.price_per_tx = dict(billing_info['price_per_tx'])
        # NOTE(review): removes the single-tx prepay option; raises KeyError
        # if the server stops offering it -- confirm key type (int vs str).
        wallet.price_per_tx.pop(1)
        self.requesting = False
        return True

    def start_request_thread(self, wallet):
        """Fetch billing info on a daemon thread; at most one in flight."""
        from threading import Thread
        if self.requesting is False:
            self.requesting = True
            t = Thread(target=self.request_billing_info, args=(wallet,))
            t.setDaemon(True)
            t.start()
            return t

    def make_seed(self):
        # 2FA seeds use their own seed type so is_valid_seed can identify them.
        return Mnemonic('english').make_seed(seed_type='2fa', num_bits=128)

    @hook
    def do_clear(self, window):
        window.wallet.is_billing = False

    # ----- installation wizard entry points -----

    def show_disclaimer(self, wizard):
        wizard.set_icon(':icons/trustedcoin-wizard.png')
        wizard.stack = []
        wizard.confirm_dialog(title='Disclaimer', message='\n\n'.join(DISCLAIMER), run_next = lambda x: wizard.run('choose_seed'))

    def choose_seed(self, wizard):
        """Ask whether to create a new seed or restore from an existing one."""
        title = _('Create or restore')
        message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
        choices = [
            ('create_seed', _('Create a new seed')),
            ('restore_wallet', _('I already have a seed')),
        ]
        wizard.choice_dialog(title=title, message=message, choices=choices, run_next=wizard.run)

    def create_seed(self, wizard):
        seed = self.make_seed()
        f = lambda x: wizard.request_passphrase(seed, x)
        wizard.show_seed_dialog(run_next=f, seed_text=seed)

    def get_xkeys(self, seed, passphrase, derivation):
        """Derive an (xprv, xpub) pair at *derivation* from the mnemonic."""
        from electrum.mnemonic import Mnemonic
        from electrum.keystore import bip32_root, bip32_private_derivation
        bip32_seed = Mnemonic.mnemonic_to_seed(seed, passphrase)
        xprv, xpub = bip32_root(bip32_seed, 'standard')
        xprv, xpub = bip32_private_derivation(xprv, "m/", derivation)
        return xprv, xpub

    def xkeys_from_seed(self, seed, passphrase):
        """Return the user's two key pairs (xprv1, xpub1, xprv2, xpub2).

        Legacy wallets used one long (>= 24 word) seed split into two
        phrases; current wallets derive both pairs from a 12-word seed.
        """
        words = seed.split()
        n = len(words)
        # old version use long seed phrases
        if n >= 24:
            # Long (legacy) seeds never carried a passphrase.
            assert passphrase == ''
            xprv1, xpub1 = self.get_xkeys(' '.join(words[0:12]), '', "m/")
            xprv2, xpub2 = self.get_xkeys(' '.join(words[12:]), '', "m/")
        elif n==12:
            xprv1, xpub1 = self.get_xkeys(seed, passphrase, "m/0'/")
            xprv2, xpub2 = self.get_xkeys(seed, passphrase, "m/1'/")
        else:
            raise BaseException('unrecognized seed length')
        return xprv1, xpub1, xprv2, xpub2

    def create_keystore(self, wizard, seed, passphrase):
        # this overloads the wizard's method
        # x1 keeps its private key; x2 is stored as a watching-only xpub so
        # spending normally requires the server's cosignature.
        xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
        k1 = keystore.from_xprv(xprv1)
        k2 = keystore.from_xpub(xpub2)
        wizard.request_password(run_next=lambda pw, encrypt: self.on_password(wizard, pw, encrypt, k1, k2))

    def on_password(self, wizard, password, encrypt, k1, k2):
        """Persist both keystores, then prompt to go online for key creation."""
        k1.update_password(None, password)
        wizard.storage.set_password(password, encrypt)
        wizard.storage.put('x1/', k1.dump())
        wizard.storage.put('x2/', k2.dump())
        wizard.storage.write()
        msg = [
            _("Your wallet file is: %s.")%os.path.abspath(wizard.storage.path),
            _("You need to be online in order to complete the creation of "
              "your wallet. If you generated your seed on an offline "
              'computer, click on "%s" to close this window, move your '
              "wallet file to an online computer, and reopen it with "
              "Electrum.") % _('Cancel'),
            _('If you are online, click on "%s" to continue.') % _('Next')
        ]
        msg = '\n\n'.join(msg)
        wizard.stack = []
        wizard.confirm_dialog(title='', message=msg, run_next = lambda x: wizard.run('create_remote_key'))

    def restore_wallet(self, wizard):
        wizard.opt_bip39 = False
        wizard.opt_ext = True
        title = _("Restore two-factor Wallet")
        f = lambda seed, is_bip39, is_ext: wizard.run('on_restore_seed', seed, is_ext)
        wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)

    def on_restore_seed(self, wizard, seed, is_ext):
        # Ask for an extension passphrase only when the user requested one.
        f = lambda x: self.restore_choice(wizard, seed, x)
        wizard.passphrase_dialog(run_next=f) if is_ext else f('')

    def restore_choice(self, wizard, seed, passphrase):
        """Let the user keep 2FA or disable it (restoring both private keys)."""
        wizard.set_icon(':icons/trustedcoin-wizard.png')
        wizard.stack = []
        title = _('Restore 2FA wallet')
        msg = ' '.join([
            'You are going to restore a wallet protected with two-factor authentication.',
            'Do you want to keep using two-factor authentication with this wallet,',
            'or do you want to disable it, and have two master private keys in your wallet?'
        ])
        choices = [('keep', 'Keep'), ('disable', 'Disable')]
        f = lambda x: self.on_choice(wizard, seed, passphrase, x)
        wizard.choice_dialog(choices=choices, message=msg, title=title, run_next=f)

    def on_choice(self, wizard, seed, passphrase, x):
        if x == 'disable':
            f = lambda pw, encrypt: wizard.run('on_restore_pw', seed, passphrase, pw, encrypt)
            wizard.request_password(run_next=f)
        else:
            self.create_keystore(wizard, seed, passphrase)

    def on_restore_pw(self, wizard, seed, passphrase, password, encrypt):
        """Restore with 2FA disabled: keep both private keys locally and
        recreate the third (signing) xpub deterministically."""
        storage = wizard.storage
        xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
        k1 = keystore.from_xprv(xprv1)
        k2 = keystore.from_xprv(xprv2)
        k1.add_seed(seed)
        k1.update_password(None, password)
        k2.update_password(None, password)
        storage.put('x1/', k1.dump())
        storage.put('x2/', k2.dump())
        long_user_id, short_id = get_user_id(storage)
        # The server key is derivable from the user id, so no network needed.
        xpub3 = make_xpub(signing_xpub, long_user_id)
        k3 = keystore.from_xpub(xpub3)
        storage.put('x3/', k3.dump())
        storage.set_password(password, encrypt)
        wizard.wallet = Wallet_2fa(storage)
        wizard.create_addresses()

    def create_remote_key(self, wizard):
        """Register the wallet with the server and verify the returned key.

        NOTE(review): accept_terms_of_use / request_otp_dialog are not
        defined in this class -- presumably supplied by a GUI subclass.
        """
        email = self.accept_terms_of_use(wizard)
        xpub1 = wizard.storage.get('x1/')['xpub']
        xpub2 = wizard.storage.get('x2/')['xpub']
        # Generate third key deterministically.
        long_user_id, short_id = get_user_id(wizard.storage)
        xpub3 = make_xpub(signing_xpub, long_user_id)
        # secret must be sent by the server
        try:
            r = server.create(xpub1, xpub2, email)
        except socket.error:
            wizard.show_message('Server not reachable, aborting')
            return
        except TrustedCoinException as e:
            if e.status_code == 409:
                # 409: wallet already registered; proceed without a new secret.
                r = None
            else:
                wizard.show_message(str(e))
                return
        if r is None:
            otp_secret = None
        else:
            otp_secret = r.get('otp_secret')
            if not otp_secret:
                wizard.show_message(_('Error'))
                return
            _xpub3 = r['xpubkey_cosigner']
            _id = r['id']
            try:
                # The server's answer must match what we derived locally.
                assert _id == short_id, ("user id error", _id, short_id)
                assert xpub3 == _xpub3, ("xpub3 error", xpub3, _xpub3)
            except Exception as e:
                wizard.show_message(str(e))
                return
        self.check_otp(wizard, short_id, otp_secret, xpub3)

    def check_otp(self, wizard, short_id, otp_secret, xpub3):
        """Ask for an OTP, or start the Google Auth reset flow."""
        otp, reset = self.request_otp_dialog(wizard, short_id, otp_secret)
        if otp:
            self.do_auth(wizard, short_id, otp, xpub3)
        elif reset:
            wizard.opt_bip39 = False
            wizard.opt_ext = True
            f = lambda seed, is_bip39, is_ext: wizard.run('on_reset_seed', short_id, seed, is_ext, xpub3)
            wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)

    def on_reset_seed(self, wizard, short_id, seed, is_ext, xpub3):
        f = lambda passphrase: wizard.run('on_reset_auth', short_id, seed, passphrase, xpub3)
        wizard.passphrase_dialog(run_next=f) if is_ext else f('')

    def do_auth(self, wizard, short_id, otp, xpub3):
        """Validate the OTP with the server, then finalize the wallet."""
        try:
            server.auth(short_id, otp)
        except:
            wizard.show_message(_('Incorrect password'))
            return
        k3 = keystore.from_xpub(xpub3)
        wizard.storage.put('x3/', k3.dump())
        wizard.storage.put('use_trustedcoin', True)
        wizard.storage.write()
        wizard.wallet = Wallet_2fa(wizard.storage)
        wizard.run('create_addresses')

    def on_reset_auth(self, wizard, short_id, seed, passphrase, xpub3):
        """Prove seed ownership to the server to obtain a new OTP secret."""
        xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
        try:
            # The entered seed must reproduce the stored xpubs.
            assert xpub1 == wizard.storage.get('x1/')['xpub']
            assert xpub2 == wizard.storage.get('x2/')['xpub']
        except:
            wizard.show_message(_('Incorrect seed'))
            return
        r = server.get_challenge(short_id)
        challenge = r.get('challenge')
        message = 'TRUSTEDCOIN CHALLENGE: ' + challenge
        def f(xprv):
            # Sign the challenge with the key at path 0/0 of each xprv.
            _, _, _, _, c, k = deserialize_xprv(xprv)
            pk = bip32_private_key([0, 0], k, c)
            key = regenerate_key(pk)
            sig = key.sign_message(message, True)
            return base64.b64encode(sig).decode()
        signatures = [f(x) for x in [xprv1, xprv2]]
        r = server.reset_auth(short_id, challenge, signatures)
        new_secret = r.get('otp_secret')
        if not new_secret:
            wizard.show_message(_('Request rejected by server'))
            return
        self.check_otp(wizard, short_id, new_secret, xpub3)

    @hook
    def get_action(self, storage):
        """Tell the wizard which step a partially-created 2FA wallet needs."""
        if storage.get('wallet_type') != '2fa':
            return
        if not storage.get('x1/'):
            return self, 'show_disclaimer'
        if not storage.get('x2/'):
            return self, 'show_disclaimer'
        if not storage.get('x3/'):
            return self, 'create_remote_key'
| |
from django import forms
from django.conf import settings
from django.db import models
from django.db.models.fields import related
from django.utils import translation as translation_utils
from django.utils.translation.trans_real import to_language
from .hold import add_translation, make_key, save_translations
from .models import (Translation, PurifiedTranslation, LinkifiedTranslation,
NoLinksTranslation, NoLinksNoMarkupTranslation)
from .utils import to_language as mkt_to_language
from .widgets import TransInput, TransTextarea
class TranslatedField(models.ForeignKey):
    """
    A foreign key to the translations table.

    If require_locale=False, the fallback join will not use a locale. Instead,
    we will look for 1) a translation in the current locale and 2) fallback
    with any translation matching the foreign key.
    """
    # Several rows in the translations table (one per locale) share the id
    # this FK points at, so the target is deliberately not unique.
    requires_unique_target = False
    to = Translation

    def __init__(self, **kwargs):
        # to_field: The field on the related object that the relation is to.
        # Django wants to default to translations.autoid, but we need id.
        options = dict(null=True, to_field='id', unique=True, blank=True,
                       on_delete=models.SET_NULL)
        kwargs.update(options)
        # short controls the form widget: single-line input vs. textarea.
        self.short = kwargs.pop('short', True)
        self.require_locale = kwargs.pop('require_locale', True)
        # "to" is passed here from the migration framework; we ignore it
        # since it's the same for every instance.
        kwargs.pop('to', None)
        super(TranslatedField, self).__init__(self.to, **kwargs)

    def deconstruct(self):
        """Serialize the field for migrations, keeping our custom kwargs."""
        name, path, args, kwargs = super(TranslatedField, self).deconstruct()
        kwargs['to'] = self.to
        kwargs['short'] = self.short
        kwargs['require_locale'] = self.require_locale
        return (name, path, args, kwargs)

    @property
    def db_column(self):
        # Django wants to call the db_column ('%s_id' % self.name), but our
        # translations foreign keys aren't set up that way.
        return self._db_column if hasattr(self, '_db_column') else self.name

    @db_column.setter
    def db_column(self, value):
        # Django sets db_column=None to initialize it. I don't think anyone
        # would set the db_column otherwise.
        if value is not None:
            self._db_column = value

    def contribute_to_class(self, cls, name):
        """Add this Translation to ``cls._meta.translated_fields``."""
        super(TranslatedField, self).contribute_to_class(cls, name)

        # Add self to the list of translated fields.
        if hasattr(cls._meta, 'translated_fields'):
            cls._meta.translated_fields.append(self)
        else:
            cls._meta.translated_fields = [self]

        # Set up a unique related name. The + means it's hidden.
        self.rel.related_name = '%s_%s_set+' % (cls.__name__, name)

        # Replace the normal descriptor with our custom descriptor.
        setattr(cls, self.name, TranslationDescriptor(self))

    def formfield(self, **kw):
        """Build the form field, choosing the widget from self.short."""
        widget = TransInput if self.short else TransTextarea
        defaults = {'form_class': TransField, 'widget': widget}
        defaults.update(kw)
        return super(TranslatedField, self).formfield(**defaults)

    def validate(self, value, model_instance):
        # Skip ForeignKey.validate since that expects only one Translation when
        # doing .get(id=id)
        return models.Field.validate(self, value, model_instance)
class PurifiedField(TranslatedField):
    # TranslatedField variant whose rows live in PurifiedTranslation.
    to = PurifiedTranslation
class LinkifiedField(TranslatedField):
    # TranslatedField variant whose rows live in LinkifiedTranslation.
    to = LinkifiedTranslation
class NoLinksField(TranslatedField):
    # TranslatedField variant whose rows live in NoLinksTranslation.
    to = NoLinksTranslation
class NoLinksNoMarkupField(TranslatedField):
    # TranslatedField variant whose rows live in NoLinksNoMarkupTranslation.
    to = NoLinksNoMarkupTranslation
def switch(obj, new_model):
    """Switch between Translation and Purified/Linkified Translations.

    Builds a new_model instance by copying, from obj, every field that
    new_model declares.
    """
    attrs = {field.name: getattr(obj, field.name)
             for field in new_model._meta.fields}
    return new_model(**attrs)
def save_on_signal(obj, trans):
    """Connect signals so the translation gets saved during obj.save()."""
    signal = models.signals.pre_save

    def cb(sender, instance, **kw):
        # Only fire for the exact instance we registered for, then detach
        # so the translation is saved at most once.
        if instance is obj:
            # No autoid yet means the row was never inserted.
            is_new = trans.autoid is None
            trans.save(force_insert=is_new, force_update=not is_new)
            signal.disconnect(cb)
    # weak=False: cb is a local closure and would otherwise be
    # garbage-collected before the signal fires.
    signal.connect(cb, sender=obj.__class__, weak=False)
class TranslationDescriptor(related.ReverseSingleRelatedObjectDescriptor):
    """
    Descriptor that handles creating and updating Translations given strings.
    """

    def __init__(self, field):
        super(TranslationDescriptor, self).__init__(field)
        self.model = field.rel.to

    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self
        # If Django doesn't find find the value in the cache (which would only
        # happen if the field was set or accessed already), it does a db query
        # to follow the foreign key. We expect translations to be set by
        # queryset transforms, so doing a query is the wrong thing here.
        try:
            return getattr(instance, self.field.get_cache_name())
        except AttributeError:
            return None

    def __set__(self, instance, value):
        """Accept a string, a {locale: string} dict, or a Translation."""
        lang = translation_utils.get_language() or 'en-us'
        if isinstance(value, basestring):
            value = self.translation_from_string(instance, lang, value)
        elif hasattr(value, 'items'):
            value = self.translation_from_dict(instance, lang, value)

        # Don't let this be set to None, because Django will then blank out the
        # foreign key for this object. That's incorrect for translations.
        if value is not None:
            # We always get these back from the database as Translations, but
            # we may want them to be a more specific Purified/Linkified child
            # class.
            if not isinstance(value, self.model):
                value = switch(value, self.model)
            super(TranslationDescriptor, self).__set__(instance, value)
        elif getattr(instance, self.field.attname, None) is None:
            super(TranslationDescriptor, self).__set__(instance, None)

    def translation_from_string(self, instance, lang, string):
        """Create, save, and return a Translation from a string."""
        lang = lang.lower()
        try:
            trans = getattr(instance, self.field.name)
            trans_id = getattr(instance, self.field.attname)
            if trans is None and trans_id is not None:
                # This locale doesn't have a translation set, but there are
                # translations in another locale, so we have an id already.
                translation = self.model.new(string, lang, id=trans_id)
            elif to_language(trans.locale) == lang:
                # Replace the translation in the current language.
                trans.localized_string = string
                translation = trans
            else:
                # We already have a translation in a different language.
                translation = self.model.new(string, lang, id=trans.id)
        except AttributeError:
            # Create a brand new translation.
            translation = self.model.new(string, lang)
        # A new translation has been created and it might need to be saved.
        # This adds the translation to the queue of translation that need
        # to be saved for this instance.
        add_translation(make_key(instance), translation)
        return translation

    def translation_from_dict(self, instance, lang, dict_):
        """
        Create Translations from a {'locale': 'string'} mapping.

        If one of the locales matches lang, that Translation will be returned.
        """
        rv = None
        for locale, string in dict_.items():
            loc = mkt_to_language(locale)
            # Silently skip locales the site does not support.
            if loc not in settings.AMO_LANGUAGES:
                continue
            # The Translation is created and saved in here.
            trans = self.translation_from_string(instance, locale, string)
            # Set the Translation on the object because translation_from_string
            # doesn't expect Translations to be created but not attached.
            self.__set__(instance, trans)
            # If we're setting the current locale, set it to the object so
            # callers see the expected effect.
            if to_language(locale) == lang:
                rv = trans
        return rv
class _TransField(object):
    """Mixin giving a form field multi-locale awareness.

    Note: this file is Python 2 (``except ... , e`` syntax below).
    """

    def __init__(self, *args, **kwargs):
        # TranslationFormMixin will override self.default_locale on every field
        # using the same fallback system that the translation db queries use.
        self.default_locale = settings.LANGUAGE_CODE
        # Strip ModelChoiceField kwargs that CharField & co. don't accept.
        for k in ('queryset', 'to_field_name', 'limit_choices_to'):
            if k in kwargs:
                del kwargs[k]
        self.widget = kwargs.pop('widget', TransInput)
        super(_TransField, self).__init__(*args, **kwargs)

    def clean(self, value):
        """Validate a {locale: string} mapping; collect errors per locale."""
        errors = LocaleList()
        value = dict((k, v.strip() if v else v) for (k, v) in value.items())

        # Raise an exception if the default locale is required and not present
        # (the None entry below fails the `required` validation further down).
        if self.default_locale.lower() not in value:
            value[self.default_locale.lower()] = None

        # Now, loop through them and validate them separately.
        for locale, val in value.items():
            try:
                # Only the default locale can be required; all non-default
                # fields are automatically optional.
                if self.default_locale.lower() == locale:
                    super(_TransField, self).validate(val)
                super(_TransField, self).run_validators(val)
            except forms.ValidationError, e:
                errors.extend(e.messages, locale)

        if errors:
            raise LocaleValidationError(errors)

        return value

    def _has_changed(self, initial, data):
        # This used to be called on the field's widget and always returned
        # False!
        return False
class LocaleValidationError(forms.ValidationError):
    """ValidationError whose messages is a LocaleList of (locale, error)s.

    NOTE(review): super().__init__ is deliberately not called and the
    code/params arguments are ignored -- confirm callers rely only on
    .messages.
    """

    def __init__(self, messages, code=None, params=None):
        self.msgs = messages

    @property
    def messages(self):
        return self.msgs
class TransField(_TransField, forms.CharField):
    """
    A CharField subclass that can deal with multiple locales.

    Most validators are run over the data for each locale. The required
    validator is only run on the default_locale, which is hooked up to the
    instance with TranslationFormMixin.
    """

    @staticmethod
    def adapt(cls, opts=None):
        """Get a new TransField that subclasses cls instead of CharField.

        :param cls: the form field class to mix _TransField into.
        :param opts: optional class-dict entries for the new type.
        """
        # BUG FIX: `opts={}` was a shared mutable default argument; use None
        # as the default and substitute a fresh dict per call instead.
        return type('Trans%s' % cls.__name__, (_TransField, cls), opts or {})
# Subclass list so that isinstance(list) in Django works.
class LocaleList(dict):
    """
    List-like object that tags every element with a locale.

    Extending with ``(seq, locale)`` records that locale once per element,
    and iterating yields ``(locale, element)`` pairs. This is useful for
    validation error lists where we want to associate an error with a
    locale.
    """

    def __init__(self, seq=None, locale=None):
        self.seq = []
        self.locales = []
        if seq:
            assert seq and locale
            self.extend(seq, locale)

    def __iter__(self):
        for pair in self.zip():
            yield pair

    def extend(self, seq, locale):
        # Record the locale once for every appended element so the two
        # parallel lists stay the same length.
        for item in seq:
            self.seq.append(item)
            self.locales.append(locale)

    def __nonzero__(self):
        # Python 2 truthiness: non-empty when any element is held.
        return bool(self.seq)

    def __contains__(self, item):
        return item in self.seq

    def zip(self):
        return zip(self.locales, self.seq)
def save_signal(sender, instance, **kw):
    """
    Use this signal on a model to iterate through all the translations added
    to the hold queue and save them all. Hook this up to the pre_save signal
    on the model.
    """
    # Skip fixture loading (raw=True) -- translations are not queued there.
    if not kw.get('raw'):
        save_translations(make_key(instance))
| |
"""Test Home Assistant util methods."""
# pylint: disable=too-many-public-methods
import unittest
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from datetime import datetime, timedelta
from homeassistant import util
import homeassistant.util.dt as dt_util
class TestUtil(unittest.TestCase):
"""Test util methods."""
def test_sanitize_filename(self):
"""Test sanitize_filename."""
self.assertEqual("test", util.sanitize_filename("test"))
self.assertEqual("test", util.sanitize_filename("/test"))
self.assertEqual("test", util.sanitize_filename("..test"))
self.assertEqual("test", util.sanitize_filename("\\test"))
self.assertEqual("test", util.sanitize_filename("\\../test"))
def test_sanitize_path(self):
"""Test sanitize_path."""
self.assertEqual("test/path", util.sanitize_path("test/path"))
self.assertEqual("test/path", util.sanitize_path("~test/path"))
self.assertEqual("//test/path",
util.sanitize_path("~/../test/path"))
def test_slugify(self):
"""Test slugify."""
self.assertEqual("test", util.slugify("T-!@#$!#@$!$est"))
self.assertEqual("test_more", util.slugify("Test More"))
self.assertEqual("test_more", util.slugify("Test_(More)"))
def test_repr_helper(self):
"""Test repr_helper."""
self.assertEqual("A", util.repr_helper("A"))
self.assertEqual("5", util.repr_helper(5))
self.assertEqual("True", util.repr_helper(True))
self.assertEqual("test=1",
util.repr_helper({"test": 1}))
self.assertEqual("1986-07-09T12:00:00+00:00",
util.repr_helper(datetime(1986, 7, 9, 12, 0, 0)))
def test_convert(self):
"""Test convert."""
self.assertEqual(5, util.convert("5", int))
self.assertEqual(5.0, util.convert("5", float))
self.assertEqual(True, util.convert("True", bool))
self.assertEqual(1, util.convert("NOT A NUMBER", int, 1))
self.assertEqual(1, util.convert(None, int, 1))
self.assertEqual(1, util.convert(object, int, 1))
def test_ensure_unique_string(self):
"""Test ensure_unique_string."""
self.assertEqual(
"Beer_3",
util.ensure_unique_string("Beer", ["Beer", "Beer_2"]))
self.assertEqual(
"Beer",
util.ensure_unique_string("Beer", ["Wine", "Soda"]))
def test_ordered_enum(self):
"""Test the ordered enum class."""
class TestEnum(util.OrderedEnum):
"""Test enum that can be ordered."""
FIRST = 1
SECOND = 2
THIRD = 3
self.assertTrue(TestEnum.SECOND >= TestEnum.FIRST)
self.assertTrue(TestEnum.SECOND >= TestEnum.SECOND)
self.assertFalse(TestEnum.SECOND >= TestEnum.THIRD)
self.assertTrue(TestEnum.SECOND > TestEnum.FIRST)
self.assertFalse(TestEnum.SECOND > TestEnum.SECOND)
self.assertFalse(TestEnum.SECOND > TestEnum.THIRD)
self.assertFalse(TestEnum.SECOND <= TestEnum.FIRST)
self.assertTrue(TestEnum.SECOND <= TestEnum.SECOND)
self.assertTrue(TestEnum.SECOND <= TestEnum.THIRD)
self.assertFalse(TestEnum.SECOND < TestEnum.FIRST)
self.assertFalse(TestEnum.SECOND < TestEnum.SECOND)
self.assertTrue(TestEnum.SECOND < TestEnum.THIRD)
# Python will raise a TypeError if the <, <=, >, >= methods
# raise a NotImplemented error.
self.assertRaises(TypeError,
lambda x, y: x < y, TestEnum.FIRST, 1)
self.assertRaises(TypeError,
lambda x, y: x <= y, TestEnum.FIRST, 1)
self.assertRaises(TypeError,
lambda x, y: x > y, TestEnum.FIRST, 1)
self.assertRaises(TypeError,
lambda x, y: x >= y, TestEnum.FIRST, 1)
def test_ordered_set(self):
"""Test ordering of set."""
set1 = util.OrderedSet([1, 2, 3, 4])
set2 = util.OrderedSet([3, 4, 5])
self.assertEqual(4, len(set1))
self.assertEqual(3, len(set2))
self.assertIn(1, set1)
self.assertIn(2, set1)
self.assertIn(3, set1)
self.assertIn(4, set1)
self.assertNotIn(5, set1)
self.assertNotIn(1, set2)
self.assertNotIn(2, set2)
self.assertIn(3, set2)
self.assertIn(4, set2)
self.assertIn(5, set2)
set1.add(5)
self.assertIn(5, set1)
set1.discard(5)
self.assertNotIn(5, set1)
# Try again while key is not in
set1.discard(5)
self.assertNotIn(5, set1)
self.assertEqual([1, 2, 3, 4], list(set1))
self.assertEqual([4, 3, 2, 1], list(reversed(set1)))
self.assertEqual(1, set1.pop(False))
self.assertEqual([2, 3, 4], list(set1))
self.assertEqual(4, set1.pop())
self.assertEqual([2, 3], list(set1))
self.assertEqual('OrderedSet()', str(util.OrderedSet()))
self.assertEqual('OrderedSet([2, 3])', str(set1))
self.assertEqual(set1, util.OrderedSet([2, 3]))
self.assertNotEqual(set1, util.OrderedSet([3, 2]))
self.assertEqual(set1, set([2, 3]))
self.assertEqual(set1, {3, 2})
self.assertEqual(set1, [2, 3])
self.assertEqual(set1, [3, 2])
self.assertNotEqual(set1, {2})
set3 = util.OrderedSet(set1)
set3.update(set2)
self.assertEqual([3, 4, 5, 2], set3)
self.assertEqual([3, 4, 5, 2], set1 | set2)
self.assertEqual([3], set1 & set2)
self.assertEqual([2], set1 - set2)
set1.update([1, 2], [5, 6])
self.assertEqual([2, 3, 1, 5, 6], set1)
def test_throttle(self):
"""Test the add cooldown decorator."""
calls1 = []
calls2 = []
@util.Throttle(timedelta(seconds=4))
def test_throttle1():
calls1.append(1)
@util.Throttle(timedelta(seconds=4), timedelta(seconds=2))
def test_throttle2():
calls2.append(1)
now = dt_util.utcnow()
plus3 = now + timedelta(seconds=3)
plus5 = plus3 + timedelta(seconds=2)
# Call first time and ensure methods got called
test_throttle1()
test_throttle2()
self.assertEqual(1, len(calls1))
self.assertEqual(1, len(calls2))
# Call second time. Methods should not get called
test_throttle1()
test_throttle2()
self.assertEqual(1, len(calls1))
self.assertEqual(1, len(calls2))
# Call again, overriding throttle, only first one should fire
test_throttle1(no_throttle=True)
test_throttle2(no_throttle=True)
self.assertEqual(2, len(calls1))
self.assertEqual(1, len(calls2))
with patch('homeassistant.util.utcnow', return_value=plus3):
test_throttle1()
test_throttle2()
self.assertEqual(2, len(calls1))
self.assertEqual(1, len(calls2))
with patch('homeassistant.util.utcnow', return_value=plus5):
test_throttle1()
test_throttle2()
self.assertEqual(3, len(calls1))
self.assertEqual(2, len(calls2))
def test_throttle_per_instance(self):
    """Test that the throttle method is done per instance of a class."""
    class Throttled(object):
        """Helper class whose method is rate limited."""

        @util.Throttle(timedelta(seconds=1))
        def hello(self):
            """Rate-limited method under test."""
            return True

    # Two distinct instances each get their own throttle window,
    # so both first calls go through.
    self.assertTrue(Throttled().hello())
    self.assertTrue(Throttled().hello())
def test_throttle_on_method(self):
    """Test that throttle works when wrapping a method."""
    class Plain(object):
        """Helper class with an undecorated method."""

        def hello(self):
            """Plain method; throttling is applied after the fact."""
            return True

    instance = Plain()
    # Wrap the already-bound method in a throttle.
    limited = util.Throttle(timedelta(seconds=1))(instance.hello)
    self.assertTrue(limited())
    # A second call inside the window is suppressed and yields None.
    self.assertIsNone(limited())
def test_throttle_on_two_method(self):
    """Test that throttle works when wrapping two methods."""
    class DoubleThrottled(object):
        """Helper with two independently throttled methods."""

        @util.Throttle(timedelta(seconds=1))
        def hello(self):
            """First throttled method."""
            return True

        @util.Throttle(timedelta(seconds=1))
        def goodbye(self):
            """Second throttled method."""
            return True

    obj = DoubleThrottled()
    # Each method carries its own throttle state, so both first calls fire.
    self.assertTrue(obj.hello())
    self.assertTrue(obj.goodbye())
| |
"""Fixer for __metaclass__ = X -> (metaclass=X) methods.
The various forms of classef (inherits nothing, inherits once, inherints
many) don't parse the same in the CST so we look at ALL classes for
a __metaclass__ and if we find one normalize the inherits to all be
an arglist.
For one-liner classes ('class X: pass') there is no indent/dedent so
we normalize those into having a suite.
Moving the __metaclass__ into the classdef can also cause the class
body to be empty so there is some special casing for that as well.
This fixer also tries very hard to keep original indenting and spacing
in all those corner cases.
"""
# Author: Jack Diederich
# Local imports
from .. import fixer_base
from ..pygram import token
from ..fixer_util import Name, syms, Node, Leaf
def has_metaclass(parent):
    """Report whether *parent* contains a ``__metaclass__`` assignment.

    The node must not be modified.  Two layouts are possible:
    1) clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta')
    2) clsdef => simple_stmt => expr_stmt => Leaf('__meta')
    """
    for child in parent.children:
        # Layout 1: recurse into the first suite encountered.
        if child.type == syms.suite:
            return has_metaclass(child)
        if child.type != syms.simple_stmt or not child.children:
            continue
        expr = child.children[0]
        if expr.type != syms.expr_stmt or not expr.children:
            continue
        target = expr.children[0]
        if isinstance(target, Leaf) and target.value == '__metaclass__':
            return True
    return False
def fixup_parse_tree(cls_node):
    """Ensure *cls_node* has a suite node, adding one when missing.

    One-line classes ('class X: pass') don't get a suite in the parse
    tree, so all children after the colon are moved into a fresh suite
    node to normalize the tree for the rest of the fixer.
    """
    for node in cls_node.children:
        if node.type == syms.suite:
            # already in the preferred format, do nothing
            return

    # Oneliners have no suite node, so fake one up after the colon.
    for i, node in enumerate(cls_node.children):
        if node.type == token.COLON:
            break
    else:
        raise ValueError("No class suite and no ':'!")

    # Move everything after the colon into a new suite node.
    suite = Node(syms.suite, [])
    while cls_node.children[i+1:]:
        move_node = cls_node.children[i+1]
        suite.append_child(move_node.clone())
        move_node.remove()
    cls_node.append_child(suite)
    # NOTE: the original ended with a dead `node = suite` assignment;
    # the local never escaped this function, so it has been removed.
def fixup_simple_stmt(parent, i, stmt_node):
    """ if there is a semi-colon all the parts count as part of the same
    simple_stmt.  We just want the __metaclass__ part so we move
    everything after the semi-colon into its own simple_stmt node
    """
    # Locate the first semicolon; if there is none, the statement is
    # already standalone and nothing needs to happen.
    for semi_ind, node in enumerate(stmt_node.children):
        if node.type == token.SEMI:  # *sigh*
            break
    else:
        return

    node.remove()  # kill the semicolon
    new_expr = Node(syms.expr_stmt, [])
    new_stmt = Node(syms.simple_stmt, [new_expr])
    # Move everything after the (removed) semicolon into the new statement.
    while stmt_node.children[semi_ind:]:
        move_node = stmt_node.children[semi_ind]
        new_expr.append_child(move_node.clone())
        move_node.remove()
    parent.insert_child(i, new_stmt)
    # Carry the original leading prefix (whitespace/comments) over to the
    # relocated statement so source formatting is preserved.
    new_leaf1 = new_stmt.children[0].children[0]
    old_leaf1 = stmt_node.children[0].children[0]
    new_leaf1.set_prefix(old_leaf1.get_prefix())
def remove_trailing_newline(node):
    """Drop a trailing NEWLINE leaf from *node*, if present."""
    children = node.children
    if not children:
        return
    last = children[-1]
    if last.type == token.NEWLINE:
        last.remove()
def find_metas(cls_node):
    """Yield (suite, index, stmt) for each ``__metaclass__`` assignment.

    Each yielded statement has already been detached from any trailing
    semicolon group (fixup_simple_stmt) and its trailing NEWLINE.
    """
    # find the suite node (Mmm, sweet nodes)
    for node in cls_node.children:
        if node.type == syms.suite:
            break
    else:
        raise ValueError("No class suite!")

    # look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ]
    # Iterate over a snapshot, since fixup_simple_stmt may insert nodes
    # into the suite while we walk it.
    for i, simple_node in list(enumerate(node.children)):
        if simple_node.type == syms.simple_stmt and simple_node.children:
            expr_node = simple_node.children[0]
            if expr_node.type == syms.expr_stmt and expr_node.children:
                # Check if the expr_node is a simple assignment.
                left_node = expr_node.children[0]
                if isinstance(left_node, Leaf) and \
                        left_node.value == '__metaclass__':
                    # We found a assignment to __metaclass__.
                    fixup_simple_stmt(node, i, simple_node)
                    remove_trailing_newline(simple_node)
                    yield (node, i, simple_node)
def fixup_indent(suite):
    """ If an INDENT is followed by a thing with a prefix then nuke the prefix
    Otherwise we get in trouble when removing __metaclass__ at suite start
    """
    # Depth-first stack of the suite's children; reversed so pop()
    # visits nodes in source order.
    kids = suite.children[::-1]
    # find the first indent
    while kids:
        node = kids.pop()
        if node.type == token.INDENT:
            break

    # find the first Leaf after the INDENT (descending into non-leaf
    # children) and clear its prefix; DEDENT leaves are skipped.
    while kids:
        node = kids.pop()
        if isinstance(node, Leaf) and node.type != token.DEDENT:
            if node.prefix:
                node.set_prefix('')
            return
        else:
            kids.extend(node.children[::-1])
class FixMetaclass(fixer_base.BaseFix):
    # Matches every classdef; the metaclass detection happens in
    # transform() because the various classdef shapes parse differently.
    PATTERN = """
    classdef<any*>
    """

    def transform(self, node, results):
        """Move a ``__metaclass__ = X`` body assignment into the class
        header as a ``metaclass=X`` keyword argument."""
        if not has_metaclass(node):
            return node

        fixup_parse_tree(node)

        # find metaclasses, keep the last one
        last_metaclass = None
        for suite, i, stmt in find_metas(node):
            last_metaclass = stmt
            stmt.remove()

        text_type = node.children[0].type  # always Leaf(nnn, 'class')

        # figure out what kind of classdef we have
        if len(node.children) == 7:
            # Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite])
            #                  0        1       2    3        4    5    6
            if node.children[3].type == syms.arglist:
                arglist = node.children[3]
            # Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite])
            else:
                # Single base class: wrap it in an arglist so the
                # metaclass keyword can be appended uniformly.
                parent = node.children[3].clone()
                arglist = Node(syms.arglist, [parent])
                node.set_child(3, arglist)
        elif len(node.children) == 6:
            # Node(classdef, ['class', 'name', '(', ')', ':', suite])
            #                  0        1       2    3    4    5
            arglist = Node(syms.arglist, [])
            node.insert_child(3, arglist)
        elif len(node.children) == 4:
            # Node(classdef, ['class', 'name', ':', suite])
            #                  0        1       2    3
            # No parens at all: synthesize '(' arglist ')'.
            arglist = Node(syms.arglist, [])
            node.insert_child(2, Leaf(token.RPAR, ')'))
            node.insert_child(2, arglist)
            node.insert_child(2, Leaf(token.LPAR, '('))
        else:
            raise ValueError("Unexpected class definition")

        # now stick the metaclass in the arglist
        meta_txt = last_metaclass.children[0].children[0]
        meta_txt.value = 'metaclass'
        orig_meta_prefix = meta_txt.get_prefix()

        # Separate from existing bases with ", "; otherwise no prefix.
        if arglist.children:
            arglist.append_child(Leaf(token.COMMA, ','))
            meta_txt.set_prefix(' ')
        else:
            meta_txt.set_prefix('')

        # compact the expression "metaclass = Meta" -> "metaclass=Meta"
        expr_stmt = last_metaclass.children[0]
        assert expr_stmt.type == syms.expr_stmt
        expr_stmt.children[1].set_prefix('')
        expr_stmt.children[2].set_prefix('')

        arglist.append_child(last_metaclass)

        fixup_indent(suite)

        # check for empty suite
        if not suite.children:
            # one-liner that was just __metaclass_
            suite.remove()
            # NOTE(review): text_type is the token type of the 'class'
            # leaf, reused here for the synthesized 'pass' leaf.
            pass_leaf = Leaf(text_type, 'pass')
            pass_leaf.set_prefix(orig_meta_prefix)
            node.append_child(pass_leaf)
            node.append_child(Leaf(token.NEWLINE, '\n'))
        elif len(suite.children) > 1 and \
                (suite.children[-2].type == token.INDENT and
                 suite.children[-1].type == token.DEDENT):
            # there was only one line in the class body and it was __metaclass__
            pass_leaf = Leaf(text_type, 'pass')
            suite.insert_child(-1, pass_leaf)
            suite.insert_child(-1, Leaf(token.NEWLINE, '\n'))
| |
# -*- coding: utf-8 -*-
"""
Contains functions for the `Arrhenius equation
<https://en.wikipedia.org/wiki/Arrhenius_equation>`_
(:func:`arrhenius_equation`) and a convenience fitting routine
(:func:`fit_arrhenius_equation`).
"""
from .._util import get_backend
from ..util.regression import least_squares
from ..util.pyutil import defaultnamedtuple
from ..units import default_constants, default_units, format_string, patched_numpy
try:
import numpy as np
except ImportError:
np = None
def _fit_linearized(backtransfm, lin_x, lin_y, lin_yerr):
    """Weighted linear least-squares fit in transformed coordinates.

    Parameters
    ----------
    backtransfm : iterable of callables
        Each maps the linear parameter vector back to an
        original-space parameter.
    lin_x, lin_y : array_like
        Transformed abscissa/ordinate data of equal length.
    lin_yerr : array_like, scalar or None
        Uncertainties of ``lin_y``; a scalar means uniform weighting.

    Returns
    -------
    list
        The back-transformed parameters.

    Raises
    ------
    ValueError
        When the array lengths are inconsistent.
    """
    if len(lin_x) != len(lin_y):
        raise ValueError("k and T needs to be of equal length.")
    # Only sized (per-point) uncertainties need a length check; a scalar
    # lin_yerr (uniform weight, e.g. the 1 passed by _fit when kerr is
    # None) previously raised TypeError on len().
    if lin_yerr is not None and hasattr(lin_yerr, "__len__"):
        if len(lin_yerr) != len(lin_y):
            raise ValueError("kerr and T needs to be of equal length.")
    lin_p, lin_vcv, lin_r2 = least_squares(lin_x, lin_y, lin_yerr)
    return [cb(lin_p) for cb in backtransfm]
def _fit(T, k, kerr, func, lin_x, lin_y, backtransfm, linearized=False):
    """Fit *func* to (T, k) data, seeded by a linearized least-squares fit.

    Parameters
    ----------
    T, k : array_like
        Abscissa (temperatures) and ordinate (rate constants).
    kerr : array_like or None
        Uncertainties of ``k``; None means uniform weighting.
    func : callable
        Model handed to scipy.optimize.curve_fit as ``f(T, *params)``.
    lin_x, lin_y : callables of (T, k)
        Transformations into linearizing coordinates.
    backtransfm : iterable of callables
        Map the linear parameters back to the model parameters.
    linearized : bool
        When True, return the back-transformed linear parameters and
        skip the non-linear refinement.
    """
    _lin_y = lin_y(T, k)
    if kerr is None:
        lin_yerr = 1
    else:
        # Propagate kerr through the linearizing transform (symmetric
        # two-sided difference).  BUG FIX: lin_y takes (T, k) — the
        # perturbed rate constants must be the second argument (the
        # original called lin_y(k - kerr, T)).
        lin_yerr = (
            abs(lin_y(T, k - kerr) - _lin_y) + abs(lin_y(T, k + kerr) - _lin_y)
        ) / 2
    lopt = _fit_linearized(backtransfm, lin_x(T, k), _lin_y, lin_yerr)
    if linearized:
        return lopt
    from scipy.optimize import curve_fit

    # Non-linear refinement seeded with the linearized estimate.
    popt, pcov = curve_fit(func, T, k, lopt, kerr)
    return popt, pcov
def _get_R(constants=None, units=None):
if constants is None:
R = 8.314472
if units is not None:
J = units.Joule
K = units.Kelvin
mol = units.mol
R *= J / mol / K
else:
R = constants.molar_gas_constant.simplified
return R
def arrhenius_equation(A, Ea, T, constants=None, units=None, backend=None):
    """
    Returns the rate coefficient according to the Arrhenius equation

    Parameters
    ----------
    A: float with unit
        frequency factor
    Ea: float with unit
        activation energy
    T: float with unit
        temperature
    constants: object (optional, default: None)
        if None:
            T assumed to be in Kelvin, Ea in J/(K mol)
        else:
            attributes accessed: molar_gas_constant
            Tip: pass quantities.constants
    units: object (optional, default: None)
        attributes accessed: Joule, Kelvin and mol
    backend: module (optional)
        module with "exp", default: numpy, math
    """
    be = get_backend(backend)
    gas_constant = _get_R(constants, units)
    RT = gas_constant * T
    try:
        # Unit-aware quantities: align RT's units with Ea's.
        RT = RT.rescale(Ea.dimensionality)
    except AttributeError:
        # Plain floats: no rescaling needed.
        pass
    return A * be.exp(-Ea / RT)
def fit_arrhenius_equation(
    T, k, kerr=None, linearized=False, constants=None, units=None
):
    """Curve fitting of the Arrhenius equation to data points

    Parameters
    ----------
    T : array_like
        Temperatures.
    k : array_like
        Rate constants (same length as ``T``).
    kerr : array_like (optional)
        Uncertainties of ``k``.
    linearized : bool
        When True, return only the parameters of the linearized
        (ln k vs. 1/T) fit.
    constants, units : objects (optional)
        Forwarded to :func:`_get_R` for the gas constant.
    """
    # curve_fit invokes its model as f(T, *params) while
    # arrhenius_equation is declared as (A, Ea, T); adapt the argument
    # order here.  (Previously arrhenius_equation was passed directly,
    # scrambling the arguments during the non-linear refinement.)
    def _model(T, A, Ea):
        return arrhenius_equation(A, Ea, T)

    # ln k = ln A - (Ea/R)*(1/T): intercept p[0] -> A, slope p[1] -> -Ea/R.
    return _fit(
        T,
        k,
        kerr,
        _model,
        lambda T, k: 1 / T,
        lambda T, k: np.log(k),
        [lambda p: np.exp(p[0]), lambda p: -p[1] * _get_R(constants, units)],
        linearized=linearized,
    )
def _fit_arrhenius_equation(T, k, kerr=None, linearized=False):
    """Curve fitting of the Arrhenius equation to data points

    Parameters
    ----------
    T : array_like
        Temperatures.
    k : array_like
        Rate constants (same length as ``T``).
    kerr : array_like (optional)
        Uncertainties of ``k``.
    linearized : bool
        When True, return (A, Ea) from the linearized fit only.
    """
    if len(k) != len(T):
        raise ValueError("k and T needs to be of equal length.")
    from math import exp
    import numpy as np

    # Linear fit of ln k vs 1/T: slope = -Ea/R, intercept = ln A.
    p = np.polyfit(1 / T, np.log(k), 1)
    R = _get_R(constants=None, units=None)
    Ea = -R * p[0]
    A = exp(p[1])
    if linearized:
        return A, Ea
    from scipy.optimize import curve_fit

    # curve_fit calls the model as f(T, *params); adapt the (A, Ea, T)
    # signature of arrhenius_equation.  Uncertainties are passed as
    # ``sigma`` (standard deviations) — the original passed 1/kerr**2
    # weights into curve_fit's sigma parameter, which has the wrong
    # semantics.
    def _model(T, A, Ea):
        return arrhenius_equation(A, Ea, T)

    popt, pcov = curve_fit(_model, T, k, [A, Ea], sigma=kerr)
    return popt, pcov
class ArrheniusParam(defaultnamedtuple("ArrheniusParam", "A Ea ref", [None])):
    """Kinetic data in the form of an Arrhenius parameterisation

    Parameters
    ----------
    A: float
        preexponential prefactor (Arrhenius type eq.)
    Ea: float
        activation energy
    ref: object (default: None)
        arbitrary reference (e.g. string representing citation key)

    Examples
    --------
    >>> k = ArrheniusParam(1e13, 40e3)
    >>> '%.5g' % k(298.15)
    '9.8245e+05'

    """

    def html(self, fmt):
        """Return an HTML representation, formatting A and Ea with *fmt*.

        NOTE(review): the exponent is rendered as "(Ea)/(RT)" without an
        explicit minus sign, while evaluation uses exp(-Ea/RT) — confirm
        whether the sign is expected to be embedded by fmt().
        """
        return "%s exp((%s)/(RT))" % (fmt(self.A), fmt(self.Ea))

    def unicode(self, fmt):
        """Return a unicode representation (same layout and sign caveat
        as html())."""
        return "%s exp((%s)/(RT))" % (fmt(self.A), fmt(self.Ea))

    @classmethod
    def from_rateconst_at_T(
        cls, Ea, T_k, backend=None, constants=None, units=None, **kwargs
    ):
        """Constructs an instance from a known rate constant at a given temperature.

        Parameters
        ----------
        Ea : float
            Activation energy.
        T_k : tuple of two floats
            Temperature & rate constant.
        backend, constants, units : modules/objects (optional)
        **kwargs
            Forwarded to the constructor (e.g. ``ref``).
        """
        # k = A*exp(-Ea/R/T)  =>  A = k*exp(Ea/R/T)
        T, k = T_k
        R = _get_R(constants, units)
        if backend is None:
            from chempy.units import patched_numpy as backend
        return cls(k * backend.exp(Ea / R / T), Ea, **kwargs)

    @classmethod
    def from_fit_of_data(cls, T, k, kerr=None, **kwargs):
        """Construct an instance by fitting the Arrhenius equation to data.

        The covariance returned by the fit is discarded.
        """
        args, vcv = fit_arrhenius_equation(T, k, kerr)
        return cls(*args, **kwargs)

    def __call__(self, T, constants=None, units=None, backend=None):
        """Evaluates the arrhenius equation for a specified state

        Parameters
        ----------
        T: float
        constants: module (optional)
        units: module (optional)
        backend: module (default: math)

        See also
        --------
        chempy.arrhenius.arrhenius_equation : the function called here.
        """
        return arrhenius_equation(
            self.A, self.Ea, T, constants=constants, units=units, backend=backend
        )

    def Ea_over_R(self, constants, units, backend=None):
        """Return the activation energy divided by the gas constant R."""
        return self.Ea / _get_R(constants, units)

    def as_RateExpr(self, unique_keys=None, constants=None, units=None, backend=None):
        """Wrap these parameters in a MassAction(Arrhenius(...)) rate
        expression from chempy.kinetics.rates."""
        from .rates import Arrhenius, MassAction

        args = [self.A, self.Ea_over_R(constants, units)]
        return MassAction(Arrhenius(args, unique_keys))

    def format(self, precision, tex=False):
        """Format A and Ea as ((str_A, str_A_unit), (str_Ea, str_Ea_unit)).

        *precision* is forwarded to format_string; if that fails it is
        used as a str.format template and the unit fields become "-".
        NOTE(review): with a %-style template like "%.5g" the fallback
        ``precision.format(...)`` returns the template unchanged —
        confirm the intended fallback format.
        """
        try:
            str_A, str_A_unit = format_string(self.A, precision, tex)
            str_Ea, str_Ea_unit = format_string(self.Ea, precision, tex)
        except Exception:
            str_A, str_A_unit = precision.format(self.A), "-"
            str_Ea, str_Ea_unit = precision.format(self.Ea), "-"
        return (str_A, str_A_unit), (str_Ea, str_Ea_unit)

    def equation_as_string(self, precision, tex=False):
        """Return (equation string, unit-of-A string), optionally as TeX."""
        (str_A, str_A_unit), (str_Ea, str_Ea_unit) = self.format(precision, tex)
        if tex:
            return (
                r"{}\exp \left(-\frac{{{}}}{{RT}} \right)".format(
                    str_A, str_Ea + " " + str_Ea_unit
                ),
                str_A_unit,
            )
        else:
            return (
                "{}*exp(-{}/(R*T))".format(str_A, str_Ea + " " + str_Ea_unit),
                str_A_unit,
            )

    def __str__(self):
        # Equation and its unit, joined with a space, at %.5g precision.
        return " ".join(self.equation_as_string("%.5g"))
class ArrheniusParamWithUnits(ArrheniusParam):
    """Arrhenius parameterisation that defaults to unit-aware constants,
    units and numeric backend."""

    @classmethod
    def from_rateconst_at_T(cls, *args, **kwargs):
        """As :meth:`ArrheniusParam.from_rateconst_at_T`, with defaults
        for constants, units and backend filled in."""
        kwargs.setdefault("constants", default_constants)
        kwargs.setdefault("units", default_units)
        kwargs.setdefault("backend", patched_numpy)
        return super(ArrheniusParamWithUnits, cls).from_rateconst_at_T(*args, **kwargs)

    def __call__(
        self, state, constants=default_constants, units=default_units, backend=None
    ):
        """ See :func:`chempy.arrhenius.arrhenius_equation`. """
        return super(ArrheniusParamWithUnits, self).__call__(
            state, constants, units, backend
        )

    def as_RateExpr(
        self, unique_keys=None, constants=default_constants, units=default_units
    ):
        """As :meth:`ArrheniusParam.as_RateExpr`, with unit-aware defaults."""
        return super(ArrheniusParamWithUnits, self).as_RateExpr(
            unique_keys, constants, units
        )
| |
#!/usr/bin/python
# ----------------------------------------------------------------------------
# cocos2d "compile" plugin
#
# Copyright 2013 (C) Luis Parravicini
#
# License: MIT
# ----------------------------------------------------------------------------
'''
"compile" plugin for cocos command line tool
'''
__docformat__ = 'restructuredtext'
import multiprocessing
import cocos
from MultiLanguage import MultiLanguage
import cocos_project
import subprocess
import os
import re
import sys
import shutil
import platform
import json
import build_web
if sys.platform == 'win32':
import _winreg
class CCPluginCompile(cocos.CCPlugin):
    """
    compiles a project
    """

    # Name of the per-project build configuration file.
    BUILD_CONFIG_FILE = "build-cfg.json"
    # Legacy build-cfg.json keys for files copied during win32 builds.
    CFG_KEY_WIN32_COPY_FILES = "copy_files"
    CFG_KEY_WIN32_MUST_COPY_FILES = "must_copy_files"
    # Normalized resource-copy keys written by _update_build_cfg().
    CFG_KEY_COPY_RESOURCES = "copy_resources"
    CFG_KEY_MUST_COPY_RESOURCES = "must_copy_resources"
    # Output directory names: native builds vs script-project builds.
    OUTPUT_DIR_NATIVE = "bin"
    OUTPUT_DIR_SCRIPT_DEBUG = "simulator"
    OUTPUT_DIR_SCRIPT_RELEASE = "publish"
    # Platform folder name used for web builds of JS projects.
    WEB_PLATFORM_FOLDER_NAME = "html5"
    # Keys read from the cocos project configuration.
    PROJ_CFG_KEY_IOS_SIGN_ID = "ios_sign_id"
    PROJ_CFG_KEY_ENGINE_DIR = "engine_dir"
    # Suffix used by backup_dir()/reset_backup_dir().
    BACKUP_SUFFIX = "-backup"
    # Candidate locations (relative to the engine dir) of the JS
    # bindings scripts — see get_engine_js_dir().
    ENGINE_JS_DIRS = [
        "frameworks/js-bindings/bindings/script",
        "cocos/scripting/js-bindings/script"
    ]
    # Visual Studio year -> internal toolset version.
    VS_VERSION_MAP = {
        2012 : "11.0",
        2013 : "12.0",
        2015 : "14.0"
    }
@staticmethod
def plugin_name():
    """Return the sub-command name this plugin registers."""
    name = "compile"
    return name
@staticmethod
def brief_description():
    """Return the localized one-line description of this plugin."""
    description = MultiLanguage.get_string('COMPILE_BRIEF')
    return description
def _add_custom_options(self, parser):
    """Register the compile-specific command line arguments on *parser*.

    Arguments are grouped per target: generic, android, win32, web,
    iOS/Mac, iOS, lua/js script handling and lua encryption.
    """
    # NOTE: the original imported ArgumentParser and assembled a
    # "usage" string from plugin_category()/plugin_name(); neither was
    # ever used (argparse generates usage itself), so that dead code
    # has been removed.
    parser.add_argument("-m", "--mode", dest="mode", default='debug',
                        help=MultiLanguage.get_string('COMPILE_ARG_MODE'))
    parser.add_argument("-j", "--jobs", dest="jobs", type=int,
                        help=MultiLanguage.get_string('COMPILE_ARG_JOBS'))
    parser.add_argument("-o", "--output-dir", dest="output_dir",
                        help=MultiLanguage.get_string('COMPILE_ARG_OUTPUT'))

    group = parser.add_argument_group(MultiLanguage.get_string('COMPILE_ARG_GROUP_ANDROID'))
    group.add_argument("--ap", dest="android_platform",
                       help=MultiLanguage.get_string('COMPILE_ARG_AP'))
    group.add_argument("--ndk-mode", dest="ndk_mode",
                       help=MultiLanguage.get_string('COMPILE_ARG_NDK_MODE'))
    group.add_argument("--app-abi", dest="app_abi",
                       help=MultiLanguage.get_string('COMPILE_ARG_APP_ABI'))
    group.add_argument("--ndk-toolchain", dest="toolchain",
                       help=MultiLanguage.get_string('COMPILE_ARG_TOOLCHAIN'))
    group.add_argument("--ndk-cppflags", dest="cppflags",
                       help=MultiLanguage.get_string('COMPILE_ARG_CPPFLAGS'))
    group.add_argument("--android-studio", dest="use_studio", action="store_true",
                       help=MultiLanguage.get_string('COMPILE_ARG_STUDIO'))
    group.add_argument("--no-apk", dest="no_apk", action="store_true",
                       help=MultiLanguage.get_string('COMPILE_ARG_NO_APK'))

    group = parser.add_argument_group(MultiLanguage.get_string('COMPILE_ARG_GROUP_WIN'))
    group.add_argument("--vs", dest="vs_version", type=int,
                       help=MultiLanguage.get_string('COMPILE_ARG_VS'))

    group = parser.add_argument_group(MultiLanguage.get_string('COMPILE_ARG_GROUP_WEB'))
    group.add_argument("--source-map", dest="source_map", action="store_true",
                       help=MultiLanguage.get_string('COMPILE_ARG_SOURCE_MAP'))
    group.add_argument("--advanced", dest="advanced", action="store_true",
                       help=MultiLanguage.get_string('COMPILE_ARG_ADVANCE'))

    group = parser.add_argument_group(MultiLanguage.get_string('COMPILE_ARG_GROUP_IOS_MAC'))
    group.add_argument("-t", "--target", dest="target_name",
                       help=MultiLanguage.get_string('COMPILE_ARG_TARGET'))

    group = parser.add_argument_group(MultiLanguage.get_string('COMPILE_ARG_GROUP_IOS'))
    group.add_argument("--sign-identity", dest="sign_id",
                       help=MultiLanguage.get_string('COMPILE_ARG_IOS_SIGN'))

    group = parser.add_argument_group(MultiLanguage.get_string('COMPILE_ARG_GROUP_LUA_JS'))
    group.add_argument("--no-res", dest="no_res", action="store_true",
                       help=MultiLanguage.get_string('COMPILE_ARG_NO_RES'))
    group.add_argument("--compile-script", dest="compile_script", type=int, choices=[0, 1],
                       help=MultiLanguage.get_string('COMPILE_ARG_COMPILE_SCRIPT'))

    group = parser.add_argument_group(MultiLanguage.get_string('COMPILE_ARG_GROUP_LUA'))
    group.add_argument("--lua-encrypt", dest="lua_encrypt", action="store_true",
                       help=MultiLanguage.get_string('COMPILE_ARG_LUA_ENCRYPT'))
    group.add_argument("--lua-encrypt-key", dest="lua_encrypt_key",
                       help=MultiLanguage.get_string('COMPILE_ARG_LUA_ENCRYPT_KEY'))
    group.add_argument("--lua-encrypt-sign", dest="lua_encrypt_sign",
                       help=MultiLanguage.get_string('COMPILE_ARG_LUA_ENCRYPT_SIGN'))
def _check_custom_options(self, args):
    """Validate parsed CLI arguments and store them on the plugin.

    Populates build mode, android/NDK options, VS version, Xcode
    target, output dir, iOS sign id and (for lua projects only) the
    lua encryption options.  Raises CCPluginError on invalid values.
    """
    # get the mode parameter
    available_modes = [ 'release', 'debug' ]
    self._mode = self.check_param(args.mode, 'debug', available_modes,
                                  MultiLanguage.get_string('COMPILE_ERROR_WRONG_MODE_FMT',
                                                           available_modes))

    # android arguments; the ndk mode defaults to the overall build mode
    available_ndk_modes = [ 'release', 'debug', 'none' ]
    self._ndk_mode = self.check_param(args.ndk_mode, self._mode, available_ndk_modes,
                                      MultiLanguage.get_string('COMPILE_ERROR_WRONG_NDK_MODE_FMT',
                                                               available_ndk_modes))
    self._no_apk = args.no_apk

    # "--app-abi" separates ABIs with ':' on the command line; ndk-build
    # expects them space-separated.
    self.app_abi = None
    if args.app_abi:
        self.app_abi = " ".join(args.app_abi.split(":"))

    self.cppflags = None
    if args.cppflags:
        self.cppflags = args.cppflags

    self.ndk_toolchain = None
    if args.toolchain:
        self.ndk_toolchain = args.toolchain

    self.use_studio = args.use_studio

    # Win32 arguments
    self.vs_version = args.vs_version

    # iOS/Mac arguments
    self.xcode_target_name = None
    if args.target_name is not None:
        self.xcode_target_name = args.target_name

    # Scripts are compiled by default only for release builds.
    if args.compile_script is not None:
        self._compile_script = bool(args.compile_script)
    else:
        self._compile_script = (self._mode == "release")

    self._ap = args.android_platform

    # Parallel jobs default to the CPU count.
    if args.jobs is not None:
        self._jobs = args.jobs
    else:
        self._jobs = self.get_num_of_cpu()
    self._has_sourcemap = args.source_map
    self._web_advanced = args.advanced
    self._no_res = args.no_res

    # Output dir: default location, or the user-supplied path made absolute.
    if args.output_dir is None:
        self._output_dir = self._get_output_dir()
    else:
        if os.path.isabs(args.output_dir):
            self._output_dir = args.output_dir
        else:
            self._output_dir = os.path.abspath(args.output_dir)

    self._sign_id = args.sign_id

    # NOTE(review): the _lua_encrypt* attributes are only set for lua
    # projects; code reading them must guard on the project type first.
    if self._project._is_lua_project():
        self._lua_encrypt = args.lua_encrypt
        self._lua_encrypt_key = args.lua_encrypt_key
        self._lua_encrypt_sign = args.lua_encrypt_sign

    self.end_warning = ""
    self._gen_custom_step_args()
def check_param(self, value, default_value, available_values, error_msg, ignore_case=True):
    """Validate *value* against *available_values*.

    Returns *default_value* when value is None, the (possibly
    lower-cased) value when it is allowed, and raises CCPluginError
    with *error_msg* otherwise.
    """
    if value is None:
        return default_value

    if ignore_case:
        candidate = value.lower()
        allowed = [v.lower() for v in available_values]
    else:
        candidate = value
        allowed = available_values

    if candidate not in allowed:
        raise cocos.CCPluginError(error_msg, cocos.CCPluginError.ERROR_WRONG_ARGS)
    return candidate
def get_num_of_cpu(self):
try:
return multiprocessing.cpu_count()
except Exception:
print MultiLanguage.get_string('COMPILE_DETECT_CPU_FAILED')
return 1
def _get_output_dir(self):
    """Compute the default output directory for the current platform/mode."""
    project_dir = self._project.get_project_dir()
    cur_platform = self._platforms.get_current_platform()
    if not self._project._is_script_project():
        # Native projects build into bin/<mode>/<platform>.
        return os.path.join(project_dir, CCPluginCompile.OUTPUT_DIR_NATIVE, self._mode, cur_platform)
    # Script projects publish into simulator/ (debug) or publish/
    # (release); web builds of JS projects use the "html5" folder name.
    if self._project._is_js_project() and self._platforms.is_web_active():
        cur_platform = CCPluginCompile.WEB_PLATFORM_FOLDER_NAME
    if self._mode == 'debug':
        folder = CCPluginCompile.OUTPUT_DIR_SCRIPT_DEBUG
    else:
        folder = CCPluginCompile.OUTPUT_DIR_SCRIPT_RELEASE
    return os.path.join(project_dir, folder, cur_platform)
def _gen_custom_step_args(self):
    """Build the argument dict handed to custom build-step scripts."""
    step_args = {
        "project-path": self._project.get_project_dir(),
        "platform-project-path": self._platforms.project_path(),
        "build-mode": self._mode,
        "output-dir": self._output_dir
    }
    # Android steps additionally receive the ndk build mode.
    if self._platforms.is_android_active():
        step_args["ndk-build-mode"] = self._ndk_mode
    self._custom_step_args = step_args
def _build_cfg_path(self):
    """Return the directory holding build-cfg.json for the active platform."""
    cur_cfg = self._platforms.get_current_config()
    platform_proj_path = self._platforms.project_path()
    if self._platforms.is_win32_active():
        # win32 may override the location via build_cfg_path.
        if cur_cfg.build_cfg_path is not None:
            return os.path.join(self._project.get_project_dir(), cur_cfg.build_cfg_path)
        return platform_proj_path
    if self._platforms.is_ios_active():
        return os.path.join(platform_proj_path, "ios")
    if self._platforms.is_mac_active():
        return os.path.join(platform_proj_path, "mac")
    return platform_proj_path
def _update_build_cfg(self):
    """Migrate legacy copy-file keys in build-cfg.json to the current
    copy_resources/must_copy_resources format.

    The old file is kept next to the new one with a "-for-v0.1"
    suffix.  Only android and win32 builds use the legacy keys.
    """
    build_cfg_dir = self._build_cfg_path()
    cfg_file_path = os.path.join(build_cfg_dir, CCPluginCompile.BUILD_CONFIG_FILE)
    if not os.path.isfile(cfg_file_path):
        # No build-cfg.json present: nothing to migrate.
        return

    # Select the platform-specific legacy keys.
    key_of_copy = None
    key_of_must_copy = None
    if self._platforms.is_android_active():
        from build_android import AndroidBuilder
        key_of_copy = AndroidBuilder.CFG_KEY_COPY_TO_ASSETS
        key_of_must_copy = AndroidBuilder.CFG_KEY_MUST_COPY_TO_ASSERTS
    elif self._platforms.is_win32_active():
        key_of_copy = CCPluginCompile.CFG_KEY_WIN32_COPY_FILES
        key_of_must_copy = CCPluginCompile.CFG_KEY_WIN32_MUST_COPY_FILES

    if key_of_copy is None and key_of_must_copy is None:
        return

    try:
        outfile = None
        open_file = open(cfg_file_path)
        cfg_info = json.load(open_file)
        open_file.close()
        open_file = None
        changed = False
        # Rewrite each legacy key into the normalized from/to list.
        if key_of_copy is not None:
            if cfg_info.has_key(key_of_copy):
                src_list = cfg_info[key_of_copy]
                ret_list = self._convert_cfg_list(src_list, build_cfg_dir)
                cfg_info[CCPluginCompile.CFG_KEY_COPY_RESOURCES] = ret_list
                del cfg_info[key_of_copy]
                changed = True

        if key_of_must_copy is not None:
            if cfg_info.has_key(key_of_must_copy):
                src_list = cfg_info[key_of_must_copy]
                ret_list = self._convert_cfg_list(src_list, build_cfg_dir)
                cfg_info[CCPluginCompile.CFG_KEY_MUST_COPY_RESOURCES] = ret_list
                del cfg_info[key_of_must_copy]
                changed = True

        if changed:
            # backup the old-cfg
            split_list = os.path.splitext(CCPluginCompile.BUILD_CONFIG_FILE)
            file_name = split_list[0]
            ext_name = split_list[1]
            bak_name = file_name + "-for-v0.1" + ext_name
            bak_file_path = os.path.join(build_cfg_dir, bak_name)
            if os.path.exists(bak_file_path):
                os.remove(bak_file_path)
            os.rename(cfg_file_path, bak_file_path)

            # write the new data to file
            with open(cfg_file_path, 'w') as outfile:
                json.dump(cfg_info, outfile, sort_keys = True, indent = 4)
                outfile.close()
                outfile = None
    finally:
        # Best-effort cleanup should anything above raise.
        if open_file is not None:
            open_file.close()
        if outfile is not None:
            outfile.close()
def _convert_cfg_list(self, src_list, build_cfg_dir):
    """Convert legacy copy-file entries into {from, to} resource dicts."""
    converted = []
    for element in src_list:
        entry = {}
        if str(element).endswith("/"):
            # Trailing slash: copy the directory's contents to the root.
            entry["from"] = element[0:len(element)-1]
            entry["to"] = ""
        else:
            full_path = os.path.join(build_cfg_dir, element)
            # Files land in the root; directories are copied under
            # their own basename.
            if os.path.isfile(full_path):
                entry["to"] = ""
            else:
                entry["to"] = os.path.basename(element)
            entry["from"] = element
        converted.append(entry)
    return converted
def _is_debug_mode(self):
    """Return True when building in debug mode."""
    return 'debug' == self._mode
def _remove_file_with_ext(self, work_dir, ext):
    """Recursively delete files under *work_dir* whose extension equals *ext*."""
    for entry in os.listdir(work_dir):
        full_path = os.path.join(work_dir, entry)
        if os.path.isdir(full_path):
            # Descend into sub-directories.
            self._remove_file_with_ext(full_path, ext)
        elif os.path.isfile(full_path):
            if os.path.splitext(entry)[1] == ext:
                os.remove(full_path)
def compile_lua_scripts(self, src_dir, dst_dir, need_compile=None):
    """Byte-compile and/or encrypt the lua scripts of a lua project.

    Invokes the "cocos luacompile" sub-command on src_dir -> dst_dir,
    then deletes the plain .lua sources from dst_dir.  No-op for
    non-lua projects or when neither compiling nor encrypting.
    """
    if not self._project._is_lua_project():
        return

    # Default to the plugin-wide compile-script setting.
    if need_compile is None:
        need_compile = self._compile_script

    if not need_compile and not self._lua_encrypt:
        return

    # The cocos launcher sits next to the currently running script.
    cocos_cmd_path = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), "cocos")
    rm_ext = ".lua"
    compile_cmd = "\"%s\" luacompile -s \"%s\" -d \"%s\"" % (cocos_cmd_path, src_dir, dst_dir)

    # Encryption-only runs still go through luacompile, with
    # compilation disabled.
    if not need_compile:
        compile_cmd = "%s --disable-compile" % compile_cmd

    # Optional encryption flags: key (-k) and signature (-b).
    if self._lua_encrypt:
        add_para = ""
        if self._lua_encrypt_key is not None:
            add_para = "%s -k %s" % (add_para, self._lua_encrypt_key)

        if self._lua_encrypt_sign is not None:
            add_para = "%s -b %s" % (add_para, self._lua_encrypt_sign)

        compile_cmd = "%s -e %s" % (compile_cmd, add_para)

    # run compile command
    self._run_cmd(compile_cmd)

    # remove the source scripts
    self._remove_file_with_ext(dst_dir, rm_ext)
def compile_js_scripts(self, src_dir, dst_dir):
    """Compile the JS sources of a JS project and delete the originals."""
    if not self._project._is_js_project():
        return
    if not self._compile_script:
        return

    # The jscompile sub-command of the adjacent cocos launcher does the work.
    cocos_cmd_path = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), "cocos")
    compile_cmd = "\"%s\" jscompile -s \"%s\" -d \"%s\"" % (cocos_cmd_path, src_dir, dst_dir)
    self._run_cmd(compile_cmd)

    # The compiled output replaces the sources, so drop the .js files.
    self._remove_file_with_ext(dst_dir, ".js")
def add_warning_at_end(self, warning_str):
    """Queue *warning_str* to be shown after the build finishes."""
    if not warning_str:
        return
    self.end_warning = "%s\n%s" % (self.end_warning, warning_str)
def is_valid_path(self, p):
    """Return True when *p* is a non-None, existing filesystem path."""
    return (p is not None) and os.path.exists(p)
def build_android(self):
    """Build the android target: native code via ndk-build, then the APK.

    Honors --studio vs the eclipse project dir, the ndk mode, ABIs,
    toolchain and extra cpp flags; runs the pre/post NDK custom steps.
    """
    if not self._platforms.is_android_active():
        return

    project_dir = self._project.get_project_dir()
    build_mode = self._mode
    output_dir = self._output_dir

    # get the android project path
    # if both proj.android & proj.android-studio existed, select the project path by --studio argument
    # else, use the existed one.
    cfg_obj = self._platforms.get_current_config()
    proj_android_path = cfg_obj.proj_path
    proj_studio_path = cfg_obj.studio_path
    project_android_dir = None
    using_studio = False
    if self.is_valid_path(proj_android_path) and self.is_valid_path(proj_studio_path):
        if self.use_studio:
            project_android_dir = proj_studio_path
            using_studio = True
        else:
            project_android_dir = proj_android_path
            using_studio = False
    elif self.is_valid_path(proj_android_path):
        project_android_dir = proj_android_path
        using_studio = False
    elif self.is_valid_path(proj_studio_path):
        project_android_dir = proj_studio_path
        using_studio = True
    # NOTE(review): if neither path is valid, project_android_dir stays
    # None and is handed to AndroidBuilder below — confirm upstream
    # validation guarantees at least one valid project path.
    if using_studio:
        ide_name = 'Android Studio'
    else:
        ide_name = 'Eclipse'
    cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_ANDROID_PROJPATH_FMT', (ide_name, project_android_dir)))

    from build_android import AndroidBuilder
    builder = AndroidBuilder(self._verbose, project_android_dir,
                             self._no_res, self._project, using_studio)

    args_ndk_copy = self._custom_step_args.copy()
    target_platform = self._platforms.get_current_platform()

    # update the project with the android platform
    builder.update_project(self._ap)

    # Native code is built for native projects and for script projects
    # with native support, unless ndk mode is "none".
    if not self._project._is_script_project() or self._project._is_native_support():
        if self._ndk_mode != "none":
            # build native code
            cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILD_NATIVE'))
            ndk_build_param = [
                "-j%s" % self._jobs
            ]

            if self.app_abi:
                abi_param = "APP_ABI=\"%s\"" % self.app_abi
                ndk_build_param.append(abi_param)

            if self.ndk_toolchain:
                toolchain_param = "NDK_TOOLCHAIN=%s" % self.ndk_toolchain
                ndk_build_param.append(toolchain_param)

            self._project.invoke_custom_step_script(cocos_project.Project.CUSTOM_STEP_PRE_NDK_BUILD, target_platform, args_ndk_copy)

            # Extra cpp flags are appended to Application.mk for the
            # duration of the build and rolled back afterwards.
            modify_mk = False
            if using_studio:
                app_mk = os.path.join(project_android_dir, "app/jni/Application.mk")
            else:
                app_mk = os.path.join(project_android_dir, "jni/Application.mk")
            mk_content = None
            if self.cppflags and os.path.exists(app_mk):
                # record the content of Application.mk
                f = open(app_mk)
                mk_content = f.read()
                f.close()

                # Add cpp flags
                f = open(app_mk, "a")
                f.write("\nAPP_CPPFLAGS += %s" % self.cppflags)
                f.close()
                modify_mk = True

            try:
                builder.do_ndk_build(ndk_build_param, self._ndk_mode, self)
            except Exception as e:
                # Re-raise plugin errors unchanged; wrap anything else.
                if e.__class__.__name__ == 'CCPluginError':
                    raise e
                else:
                    raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_NDK_BUILD_FAILED'),
                                              cocos.CCPluginError.ERROR_BUILD_FAILED)
            finally:
                # roll-back the Application.mk
                if modify_mk:
                    f = open(app_mk, "w")
                    f.write(mk_content)
                    f.close()

            self._project.invoke_custom_step_script(cocos_project.Project.CUSTOM_STEP_POST_NDK_BUILD, target_platform, args_ndk_copy)

    # build apk
    if not self._no_apk:
        cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILD_APK'))
    # do_build_apk receives _no_apk itself and handles the no-apk case.
    self.apk_path = builder.do_build_apk(build_mode, self._no_apk, output_dir, self._custom_step_args, self)
    self.android_package, self.android_activity = builder.get_apk_info()

    cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILD_SUCCEED'))
def check_ios_mac_build_depends(self):
    """Verify the Xcode version and locate the .xcodeproj for iOS/Mac.

    Sets self.project_name and self.xcodeproj_name on success.

    Raises
    ------
    cocos.CCPluginError
        When Xcode is older than 5 or no .xcodeproj can be found.
    """
    version = cocos.get_xcode_version()
    # Compare the major version numerically: the previous string
    # comparison (version <= '5') wrongly rejected Xcode 10+ because
    # '10...' sorts before '5' lexicographically.
    try:
        major = int(version.split('.')[0])
    except (AttributeError, ValueError):
        major = 0
    if major < 5:
        message = MultiLanguage.get_string('COMPILE_ERROR_UPDATE_XCODE')
        raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_TOOLS_NOT_FOUND)

    cfg_obj = self._platforms.get_current_config()
    if cfg_obj.proj_file is not None:
        # Project file explicitly configured.
        xcodeproj_name = cfg_obj.proj_file
        name = os.path.basename(xcodeproj_name)
    else:
        # Otherwise search the platform project dir for a .xcodeproj.
        name, xcodeproj_name = self.checkFileByExtention(".xcodeproj", self._platforms.project_path())
    if not xcodeproj_name:
        message = MultiLanguage.get_string('COMPILE_ERROR_XCODEPROJ_NOT_FOUND')
        raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PARSE_FILE)

    self.project_name = name
    self.xcodeproj_name = xcodeproj_name
def _remove_res(self, target_path):
    """Delete resources listed under "remove_res" in build-cfg.json
    from *target_path*.

    Entries ending in '/' empty the directory but keep it; other
    entries remove the file or the whole directory.
    """
    build_cfg_dir = self._build_cfg_path()
    cfg_file = os.path.join(build_cfg_dir, CCPluginCompile.BUILD_CONFIG_FILE)
    if os.path.exists(cfg_file) and os.path.isfile(cfg_file):
        # have config file
        open_file = open(cfg_file)
        cfg_info = json.load(open_file)
        open_file.close()
        if cfg_info.has_key("remove_res"):
            remove_list = cfg_info["remove_res"]
            for f in remove_list:
                res = os.path.join(target_path, f)
                if os.path.isdir(res):
                    # is a directory
                    if f.endswith('/'):
                        # remove files & dirs in it
                        for sub_file in os.listdir(res):
                            sub_file_fullpath = os.path.join(res, sub_file)
                            if os.path.isfile(sub_file_fullpath):
                                os.remove(sub_file_fullpath)
                            elif os.path.isdir(sub_file_fullpath):
                                shutil.rmtree(sub_file_fullpath)
                    else:
                        # remove the dir
                        shutil.rmtree(res)
                elif os.path.isfile(res):
                    # is a file, remove it
                    os.remove(res)
def get_engine_dir(self):
    """Resolve the engine directory for the current project.

    An explicit PROJ_CFG_KEY_ENGINE_DIR setting (relative to the
    project dir) wins; otherwise the location is derived from the
    project type (js / lua / cpp).
    """
    configured = self._project.get_proj_config(CCPluginCompile.PROJ_CFG_KEY_ENGINE_DIR)
    proj_dir = self._project.get_project_dir()

    if configured is not None:
        # explicit setting is interpreted relative to the project dir
        return os.path.join(proj_dir, configured)

    if self._project._is_js_project():
        candidate = os.path.join(proj_dir, "frameworks", "cocos2d-x")
        # jsb in a cocos2d-x engine keeps the frameworks layout;
        # jsb in a cocos2d-js engine uses the project dir itself
        return candidate if os.path.isdir(candidate) else proj_dir

    if self._project._is_lua_project():
        return os.path.join(proj_dir, "frameworks", "cocos2d-x")

    return os.path.join(proj_dir, "cocos2d")
def backup_dir(self, dir_path):
    """Snapshot *dir_path* into '<dir_path><BACKUP_SUFFIX>'.

    Any stale backup left over from a previous run is discarded first
    so the copy always reflects the current contents.
    """
    target = dir_path + CCPluginCompile.BACKUP_SUFFIX
    if os.path.exists(target):
        shutil.rmtree(target)
    shutil.copytree(dir_path, target)
def reset_backup_dir(self, dir_path):
    """Restore *dir_path* from its '<BACKUP_SUFFIX>' snapshot.

    Inverse of backup_dir(): deletes the (possibly modified) directory
    and renames the backup back into place.
    """
    backup = dir_path + CCPluginCompile.BACKUP_SUFFIX
    if os.path.exists(dir_path):
        shutil.rmtree(dir_path)
    os.rename(backup, dir_path)
def get_engine_js_dir(self):
    """Locate the directory that holds the engine's JS scripts.

    Prefers a 'script' folder inside the project (JS already copied in);
    otherwise probes each candidate in ENGINE_JS_DIRS relative to the
    engine dir.  Returns None when nothing is found.
    """
    local_script_dir = os.path.join(self._project.get_project_dir(), "script")
    if os.path.isdir(local_script_dir):
        # JS scripts were already copied into the project dir
        return local_script_dir

    engine_root = self.get_engine_dir()
    for candidate in CCPluginCompile.ENGINE_JS_DIRS:
        js_dir = os.path.join(engine_root, candidate)
        if os.path.isdir(js_dir):
            return js_dir
    return None
def build_ios(self):
    """Build the iOS target with xcodebuild.

    Skipped unless the ios platform is active; must run on macOS.
    With a signing identity set, builds for the device SDK and packages
    an .ipa; otherwise builds for the simulator (i386).  For script
    projects the src/ (and engine JS) dirs are backed up, compiled in
    place, and restored in the ``finally`` block.

    Raises:
        cocos.CCPluginError: on a non-mac host, a missing/unparsable
            .xcodeproj, or a failed xcodebuild run.
    """
    if not self._platforms.is_ios_active():
        return

    if not cocos.os_is_mac():
        raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_BUILD_ON_MAC'),
                                  cocos.CCPluginError.ERROR_WRONG_ARGS)

    if self._sign_id is not None:
        cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_IOS_SIGN_FMT', self._sign_id))
        self.use_sdk = 'iphoneos'
    else:
        self.use_sdk = 'iphonesimulator'

    self.check_ios_mac_build_depends()

    ios_project_dir = self._platforms.project_path()
    output_dir = self._output_dir

    projectPath = os.path.join(ios_project_dir, self.xcodeproj_name)
    pbxprojectPath = os.path.join(projectPath, "project.pbxproj")

    # file() is Python-2-only and the handle was never closed;
    # use open() with a context manager instead.
    with open(pbxprojectPath) as f:
        contents = f.read()

    section = re.search(r"Begin PBXProject section.*End PBXProject section", contents, re.S)
    if section is None:
        message = MultiLanguage.get_string('COMPILE_ERROR_NO_IOS_TARGET')
        raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PARSE_FILE)

    targets = re.search(r"targets = (.*);", section.group(), re.S)
    if targets is None:
        message = MultiLanguage.get_string('COMPILE_ERROR_NO_IOS_TARGET')
        raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PARSE_FILE)

    # Pick the target: explicit CLI name > platform config > first
    # pbxproj target whose name looks iOS-flavored.
    targetName = None
    if self.xcode_target_name is not None:
        targetName = self.xcode_target_name
    else:
        cfg_obj = self._platforms.get_current_config()
        if cfg_obj.target_name is not None:
            targetName = cfg_obj.target_name
        else:
            names = re.split(r"\*", targets.group())
            for name in names:
                if "iOS" in name or "-mobile" in name:
                    targetName = str.strip(name)
                    break
    if targetName is None:
        message = MultiLanguage.get_string('COMPILE_ERROR_NO_IOS_TARGET')
        raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PARSE_FILE)

    # drop a stale .app bundle so the new build is not mixed with it
    if os.path.isdir(output_dir):
        target_app_dir = os.path.join(output_dir, "%s.app" % targetName)
        if os.path.isdir(target_app_dir):
            shutil.rmtree(target_app_dir)

    # is script project, check whether compile scripts or not
    need_reset_dir = False
    if self._project._is_script_project():
        script_src_dir = os.path.join(self._project.get_project_dir(), "src")

        if self._project._is_js_project() and self._compile_script:
            # backup the source scripts, then compile them in place so
            # xcodebuild bundles the compiled versions
            self.backup_dir(script_src_dir)
            self.compile_js_scripts(script_src_dir, script_src_dir)

            # js project need compile the js files in engine
            engine_js_dir = self.get_engine_js_dir()
            if engine_js_dir is not None:
                self.backup_dir(engine_js_dir)
                self.compile_js_scripts(engine_js_dir, engine_js_dir)
            need_reset_dir = True

        if self._project._is_lua_project() and self._lua_encrypt:
            # on iOS, only invoke luacompile when lua encrypt is specified
            self.backup_dir(script_src_dir)
            self.compile_lua_scripts(script_src_dir, script_src_dir, False)
            need_reset_dir = True

    try:
        cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILDING'))

        command = ' '.join([
            "xcodebuild",
            "-project",
            "\"%s\"" % projectPath,
            "-configuration",
            "%s" % 'Debug' if self._mode == 'debug' else 'Release',
            "-target",
            "\"%s\"" % targetName,
            "%s" % "-arch i386" if self.use_sdk == 'iphonesimulator' else '',
            "-sdk",
            "%s" % self.use_sdk,
            "CONFIGURATION_BUILD_DIR=\"%s\"" % (output_dir),
            "%s" % "VALID_ARCHS=\"i386\"" if self.use_sdk == 'iphonesimulator' else ''
        ])

        if self._sign_id is not None:
            command = "%s CODE_SIGN_IDENTITY=\"%s\"" % (command, self._sign_id)

        self._run_cmd(command)

        # remove intermediate static libraries from the output dir
        filelist = os.listdir(output_dir)
        for filename in filelist:
            name, extention = os.path.splitext(filename)
            if extention == '.a':
                filename = os.path.join(output_dir, filename)
                os.remove(filename)

        self._iosapp_path = os.path.join(output_dir, "%s.app" % targetName)
        if self._no_res:
            self._remove_res(self._iosapp_path)

        if self._sign_id is not None:
            # generate the ipa
            app_path = os.path.join(output_dir, "%s.app" % targetName)
            ipa_path = os.path.join(output_dir, "%s.ipa" % targetName)
            ipa_cmd = "xcrun -sdk %s PackageApplication -v \"%s\" -o \"%s\"" % (self.use_sdk, app_path, ipa_path)
            self._run_cmd(ipa_cmd)

        cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILD_SUCCEED'))
    except:
        raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_BUILD_FAILED'),
                                  cocos.CCPluginError.ERROR_BUILD_FAILED)
    finally:
        # restore the original (uncompiled) script dirs
        if need_reset_dir:
            script_src_dir = os.path.join(self._project.get_project_dir(), "src")
            self.reset_backup_dir(script_src_dir)

            if self._project._is_js_project():
                engine_js_dir = self.get_engine_js_dir()
                if engine_js_dir is not None:
                    self.reset_backup_dir(engine_js_dir)
def build_mac(self):
    """Build the Mac target with xcodebuild.

    Skipped unless the mac platform is active; must run on macOS.
    For script projects the src/ (and engine JS) dirs are backed up,
    compiled in place, and restored in the ``finally`` block.

    Raises:
        cocos.CCPluginError: on a non-mac host, a missing/unparsable
            .xcodeproj, or a failed xcodebuild run.
    """
    if not self._platforms.is_mac_active():
        return

    if not cocos.os_is_mac():
        raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_BUILD_ON_MAC'),
                                  cocos.CCPluginError.ERROR_WRONG_ARGS)

    self.check_ios_mac_build_depends()

    mac_project_dir = self._platforms.project_path()
    output_dir = self._output_dir

    projectPath = os.path.join(mac_project_dir, self.xcodeproj_name)
    pbxprojectPath = os.path.join(projectPath, "project.pbxproj")

    # file() is Python-2-only and the handle was never closed;
    # use open() with a context manager instead.
    with open(pbxprojectPath) as f:
        contents = f.read()

    section = re.search(
        r"Begin PBXProject section.*End PBXProject section",
        contents,
        re.S
    )
    if section is None:
        message = MultiLanguage.get_string('COMPILE_ERROR_NO_MAC_TARGET')
        raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PARSE_FILE)

    targets = re.search(r"targets = (.*);", section.group(), re.S)
    if targets is None:
        message = MultiLanguage.get_string('COMPILE_ERROR_NO_MAC_TARGET')
        raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PARSE_FILE)

    # Pick the target: explicit CLI name > platform config > first
    # pbxproj target whose name looks desktop-flavored.
    targetName = None
    if self.xcode_target_name is not None:
        targetName = self.xcode_target_name
    else:
        cfg_obj = self._platforms.get_current_config()
        if cfg_obj.target_name is not None:
            targetName = cfg_obj.target_name
        else:
            names = re.split(r"\*", targets.group())
            for name in names:
                if "Mac" in name or "-desktop" in name:
                    targetName = str.strip(name)
                    break
    if targetName is None:
        message = MultiLanguage.get_string('COMPILE_ERROR_NO_MAC_TARGET')
        raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PARSE_FILE)

    # drop a stale .app bundle so the new build is not mixed with it
    if os.path.isdir(output_dir):
        target_app_dir = os.path.join(output_dir, "%s.app" % targetName)
        if os.path.isdir(target_app_dir):
            shutil.rmtree(target_app_dir)

    # is script project, check whether compile scripts or not
    need_reset_dir = False
    if self._project._is_script_project():
        script_src_dir = os.path.join(self._project.get_project_dir(), "src")

        if self._project._is_js_project() and self._compile_script:
            # backup the source scripts, then compile them in place
            self.backup_dir(script_src_dir)
            self.compile_js_scripts(script_src_dir, script_src_dir)

            # js project need compile the js files in engine
            engine_js_dir = self.get_engine_js_dir()
            if engine_js_dir is not None:
                self.backup_dir(engine_js_dir)
                self.compile_js_scripts(engine_js_dir, engine_js_dir)
            need_reset_dir = True

        if self._project._is_lua_project() and (self._lua_encrypt or self._compile_script):
            # on Mac, luacompile runs when either encryption or script
            # compilation is requested (original comment said "iOS")
            self.backup_dir(script_src_dir)
            self.compile_lua_scripts(script_src_dir, script_src_dir)
            need_reset_dir = True

    try:
        cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILDING'))

        command = ' '.join([
            "xcodebuild",
            "-project",
            "\"%s\"" % projectPath,
            "-configuration",
            "%s" % 'Debug' if self._mode == 'debug' else 'Release',
            "-target",
            "\"%s\"" % targetName,
            "CONFIGURATION_BUILD_DIR=\"%s\"" % (output_dir)
        ])

        self._run_cmd(command)

        self.target_name = targetName

        # remove intermediate static libraries from the output dir
        filelist = os.listdir(output_dir)
        for filename in filelist:
            name, extention = os.path.splitext(filename)
            if extention == '.a':
                filename = os.path.join(output_dir, filename)
                os.remove(filename)

        self._macapp_path = os.path.join(output_dir, "%s.app" % targetName)
        if self._no_res:
            resource_path = os.path.join(self._macapp_path, "Contents", "Resources")
            self._remove_res(resource_path)

        cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILD_SUCCEED'))
    except:
        raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_BUILD_FAILED'),
                                  cocos.CCPluginError.ERROR_BUILD_FAILED)
    finally:
        # restore the original (uncompiled) script dirs
        if need_reset_dir:
            script_src_dir = os.path.join(self._project.get_project_dir(), "src")
            self.reset_backup_dir(script_src_dir)

            if self._project._is_js_project():
                engine_js_dir = self.get_engine_js_dir()
                if engine_js_dir is not None:
                    self.reset_backup_dir(engine_js_dir)
def _get_required_vs_version(self, proj_file):
# get the VS version required by the project
# file_obj = open(proj_file)
# pattern = re.compile(r"^# Visual Studio.+(\d{4})")
# num = None
# for line in file_obj:
# match = pattern.match(line)
# if match is not None:
# num = match.group(1)
# break
#
# if num is not None:
# if num == "2012":
# ret = "11.0"
# elif num == "2013":
# ret = "12.0"
# else:
# ret = None
# else:
# ret = None
if self._platforms.is_wp8_active() or self._platforms.is_wp8_1_active() or self._platforms.is_metro_active():
# WP8 project required VS 2013
return "12.0"
else:
# win32 project required VS 2012
return "11.0"
def _get_vs_path(self, require_version, specify_vs_ver=None):
    """Locate an installed Visual Studio through the Windows registry.

    Args:
        require_version: minimum VS version string, e.g. "11.0".
        specify_vs_ver: optional key into CCPluginCompile.VS_VERSION_MAP
            forcing one specific VS version.

    Returns:
        tuple (needUpgrade, find_Path): *find_Path* is the VS install
        path or None when nothing suitable was found; *needUpgrade* is
        True when the found VS is strictly newer than *require_version*
        (the caller then runs 'devenv /Upgrade' on the solution first).
    """
    # find the VS in register, if system is 64bit, should find vs in both 32bit & 64bit register
    if cocos.os_is_32bit_windows():
        reg_flag_list = [ _winreg.KEY_WOW64_32KEY ]
    else:
        reg_flag_list = [ _winreg.KEY_WOW64_64KEY, _winreg.KEY_WOW64_32KEY ]

    needUpgrade = False
    find_Path = None
    find_ver = 0
    version_pattern = re.compile(r'(\d+)\.(\d+)')
    for reg_flag in reg_flag_list:
        cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_FIND_IN_REG_FMT',
                           ("32bit" if reg_flag == _winreg.KEY_WOW64_32KEY else "64bit")))
        try:
            vs = _winreg.OpenKey(
                _winreg.HKEY_LOCAL_MACHINE,
                r"SOFTWARE\Microsoft\VisualStudio",
                0,
                _winreg.KEY_READ | reg_flag
            )
        except:
            # this registry view is unavailable; try the next one
            continue

        if specify_vs_ver is None:
            # find VS: enumerate all version sub-keys and keep the
            # newest installation satisfying require_version
            i = 0
            while True:
                # enum the keys in vs reg
                try:
                    version = _winreg.EnumKey(vs, i)
                except:
                    # EnumKey raises once the index is exhausted
                    break
                i += 1

                match = re.match(version_pattern, version)
                if match is None:
                    continue

                # find the vs which version >= required version
                try:
                    ver_float = float('%s.%s' % (match.group(1), match.group(2)))
                    if ver_float >= float(require_version):
                        key = _winreg.OpenKey(vs, r"SxS\VS7")
                        vsPath, type = _winreg.QueryValueEx(key, version)

                        if os.path.exists(vsPath):
                            if (find_Path is None) or (ver_float > find_ver):
                                find_Path = vsPath
                                find_ver = ver_float
                except:
                    # missing SxS\VS7 key/value for this version; skip it
                    pass
        else:
            # find specified VS
            version = CCPluginCompile.VS_VERSION_MAP[specify_vs_ver]
            try:
                key = _winreg.OpenKey(vs, r"SxS\VS7")
                vsPath, type = _winreg.QueryValueEx(key, version)
                if os.path.exists(vsPath):
                    find_Path = vsPath
                    find_ver = float(version)
            except:
                pass

        if find_Path is not None:
            # found in this registry view; no need to check the other
            break

    if find_ver > float(require_version):
        needUpgrade = True

    return (needUpgrade, find_Path)
def _get_msbuild_version(self):
    r"""Yield the sub-key names under HKLM\SOFTWARE\Microsoft\MSBuild.

    Generator over installed MSBuild version key names.

    Raises:
        cocos.CCPluginError: when the MSBuild registry key cannot be
            opened (MSBuild not installed).
    """
    try:
        reg_path = r'SOFTWARE\Microsoft\MSBuild'
        reg_key = _winreg.OpenKey(
            _winreg.HKEY_LOCAL_MACHINE,
            reg_path
        )
        index = 0
        while True:
            try:
                subkey_name = _winreg.EnumKey(reg_key, index)
            except:
                # EnumKey raises once the index is exhausted
                break
            yield subkey_name
            index += 1
    except WindowsError as e:
        message = MultiLanguage.get_string('COMPILE_ERROR_NO_MSBUILD')
        print(e)
        raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_TOOLS_NOT_FOUND)
def _get_newest_msbuild_version(self):
newest_version = None
newest_version_number = 0
version_pattern = re.compile('(\\d+)\\.(\\d+)')
for version in self._get_msbuild_version():
if version:
match = version_pattern.match(version)
if match:
version_number = int(match.group(1)) * 10 + int(match.group(2))
if version_number > newest_version_number:
newest_version_number = version_number
newest_version = version
return newest_version
def _get_msbuild_path(self, specify_vs_ver=None):
if specify_vs_ver is None:
use_ver = self._get_newest_msbuild_version()
else:
use_ver = CCPluginCompile.VS_VERSION_MAP[specify_vs_ver]
if use_ver:
try:
reg_path = r'SOFTWARE\Microsoft\MSBuild\ToolsVersions\%s' % use_ver
reg_key = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE,
reg_path
)
reg_value, reg_value_type = _winreg.QueryValueEx(reg_key, 'MSBuildToolsPath')
except:
reg_value = None
pass
return reg_value
else:
return None
def build_vs_project(self, sln_file, project_name, build_mode, specify_vs_ver=None):
    """Build *project_name* from *sln_file* with Visual Studio or MSBuild.

    Prefers devenv.com from the located VS installation (upgrading the
    solution first with '/Upgrade' when the found VS is newer than
    required); falls back to MSBuild.exe when devenv.com is absent.

    Args:
        sln_file: path to the .sln solution file.
        project_name: project inside the solution to build.
        build_mode: 'Debug' or 'Release'.
        specify_vs_ver: optional VS_VERSION_MAP key forcing a VS version.

    Raises:
        cocos.CCPluginError: when the required version cannot be
            determined, the specified VS version is unknown/too low, or
            no suitable VS/MSBuild installation is found.
    """
    # get the required VS version
    required_vs_version = self._get_required_vs_version(sln_file)
    if required_vs_version is None:
        raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_PARSE_SLN_FAILED'),
                                  cocos.CCPluginError.ERROR_PARSE_FILE)
    cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_REQUIRED_VS_FMT', required_vs_version))

    # check specified vs version
    if specify_vs_ver is not None:
        if specify_vs_ver in CCPluginCompile.VS_VERSION_MAP:
            ver_float = float(CCPluginCompile.VS_VERSION_MAP[specify_vs_ver])
            require_ver_float = float(required_vs_version)
            if ver_float < require_ver_float:
                raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_LOW_VS_VER'),
                                          cocos.CCPluginError.ERROR_WRONG_ARGS)
        else:
            raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_WRONG_VS_VER_FMT', specify_vs_ver),
                                      cocos.CCPluginError.ERROR_WRONG_ARGS)

    # get the correct available VS path
    needUpgrade, vsPath = self._get_vs_path(required_vs_version, specify_vs_ver)

    if vsPath is None:
        message = MultiLanguage.get_string('COMPILE_ERROR_VS_NOT_FOUND')
        raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_TOOLS_NOT_FOUND)
    cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_FIND_VS_PATH_FMT', vsPath))

    commandPath = os.path.join(vsPath, "Common7", "IDE", "devenv.com")
    if os.path.exists(commandPath):
        # upgrade projects
        if needUpgrade:
            commandUpgrade = ' '.join([
                "\"%s\"" % commandPath,
                "\"%s\"" % sln_file,
                "/Upgrade"
            ])
            self._run_cmd(commandUpgrade)

        # build the project
        commands = ' '.join([
            "\"%s\"" % commandPath,
            "\"%s\"" % sln_file,
            "/Build \"%s\"" % build_mode,
            "/Project \"%s\"" % project_name
        ])
        self._run_cmd(commands)
    else:
        # devenv.com missing (e.g. Express edition); fall back to MSBuild
        cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_DEVENV_NOT_FOUND'))
        msbuild_path = self._get_msbuild_path(specify_vs_ver)
        if msbuild_path:
            msbuild_path = os.path.join(msbuild_path, 'MSBuild.exe')
            cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_FIND_MSBUILD_FMT', msbuild_path))
            # fixed parallelism for MSBuild
            job_number = 2
            build_command = ' '.join([
                '\"%s\"' % msbuild_path,
                '\"%s\"' % sln_file,
                '/target:%s' % project_name,
                '/property:Configuration=%s' % build_mode,
                '/maxcpucount:%s' % job_number
            ])
            self._run_cmd(build_command)
        else:
            raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_MSDBUILD_NOT_FOUND'),
                                      cocos.CCPluginError.ERROR_TOOLS_NOT_FOUND)

    cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILD_SUCCEED'))
def build_win32(self):
    """Build the win32 target with Visual Studio and stage the output.

    Builds the solution via build_vs_project(), then copies the exe,
    the dlls, and the resources listed in the build-cfg json into the
    output dir.  Existing non-exe files in the output dir are cleared
    first.  Script projects get their scripts compiled into the output
    dir afterwards.

    Raises:
        cocos.CCPluginError: on a non-Windows host, a missing .sln /
            build dir / build-cfg file, or an incomplete platform config.
    """
    if not self._platforms.is_win32_active():
        return

    if not cocos.os_is_win32():
        raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_BUILD_ON_WIN'),
                                  cocos.CCPluginError.ERROR_WRONG_ARGS)

    win32_projectdir = self._platforms.project_path()
    output_dir = self._output_dir

    cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILDING'))

    # get the solution file & project name
    cfg_obj = self._platforms.get_current_config()
    if cfg_obj.sln_file is not None:
        sln_name = cfg_obj.sln_file
        if cfg_obj.project_name is None:
            # an explicit sln requires an explicit project name
            raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_CFG_NOT_FOUND_FMT',
                                      (cocos_project.Win32Config.KEY_PROJECT_NAME,
                                       cocos_project.Win32Config.KEY_SLN_FILE,
                                       cocos_project.Project.CONFIG)),
                                      cocos.CCPluginError.ERROR_WRONG_CONFIG)
        else:
            name = cfg_obj.project_name
    else:
        name, sln_name = self.checkFileByExtention(".sln", win32_projectdir)
        if not sln_name:
            message = MultiLanguage.get_string('COMPILE_ERROR_SLN_NOT_FOUND')
            raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PATH_NOT_FOUND)

    # build the project
    self.project_name = name
    projectPath = os.path.join(win32_projectdir, sln_name)
    build_mode = 'Debug' if self._is_debug_mode() else 'Release'
    self.build_vs_project(projectPath, self.project_name, build_mode, self.vs_version)

    # copy files
    build_folder_name = "%s.win32" % build_mode
    build_folder_path = os.path.join(win32_projectdir, build_folder_name)
    if not os.path.isdir(build_folder_path):
        message = MultiLanguage.get_string('COMPILE_ERROR_BUILD_PATH_NOT_FOUND_FMT', build_folder_path)
        raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PATH_NOT_FOUND)

    # remove the files in output dir (keep the exe files)
    if os.path.exists(output_dir):
        output_files = os.listdir(output_dir)
        for element in output_files:
            ele_full_path = os.path.join(output_dir, element)
            if os.path.isfile(ele_full_path):
                base_name, file_ext = os.path.splitext(element)
                if not file_ext == ".exe":
                    os.remove(ele_full_path)
            elif os.path.isdir(ele_full_path):
                shutil.rmtree(ele_full_path)

    # create output dir if it not existed
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if cfg_obj.exe_out_dir is None:
        exe_out_dir = build_folder_path
    else:
        exe_out_dir = os.path.join(build_folder_path, cfg_obj.exe_out_dir)

    # copy exe
    files = os.listdir(exe_out_dir)
    proj_exe_name = "%s.exe" % self.project_name
    for filename in files:
        if filename == proj_exe_name:
            file_path = os.path.join(exe_out_dir, filename)
            cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_COPYING_FMT', filename))
            shutil.copy(file_path, output_dir)
            break

    # copy dll
    files = os.listdir(build_folder_path)
    for filename in files:
        name, ext = os.path.splitext(filename)
        if ext == '.dll':
            file_path = os.path.join(build_folder_path, filename)
            cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_COPYING_FMT', filename))
            shutil.copy(file_path, output_dir)

    # copy lua files & res
    build_cfg_path = self._build_cfg_path()
    build_cfg = os.path.join(build_cfg_path, CCPluginCompile.BUILD_CONFIG_FILE)
    if not os.path.exists(build_cfg):
        message = MultiLanguage.get_string('COMPILE_ERROR_FILE_NOT_FOUND_FMT', build_cfg)
        raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PATH_NOT_FOUND)

    # close the handle deterministically (the original leaked it) and
    # use `in` instead of the Python-2-only dict.has_key()
    with open(build_cfg) as f:
        data = json.load(f)

    if CCPluginCompile.CFG_KEY_MUST_COPY_RESOURCES in data:
        if self._no_res:
            # --no-res still keeps the must-copy resources
            fileList = data[CCPluginCompile.CFG_KEY_MUST_COPY_RESOURCES]
        else:
            fileList = data[CCPluginCompile.CFG_KEY_COPY_RESOURCES] + data[CCPluginCompile.CFG_KEY_MUST_COPY_RESOURCES]
    else:
        fileList = data[CCPluginCompile.CFG_KEY_COPY_RESOURCES]

    for cfg in fileList:
        cocos.copy_files_with_config(cfg, build_cfg_path, output_dir)

    # check the project config & compile the script files
    if self._project._is_js_project():
        self.compile_js_scripts(output_dir, output_dir)

    if self._project._is_lua_project():
        self.compile_lua_scripts(output_dir, output_dir)

    self.run_root = output_dir
def build_web(self):
    """Publish the web project with the closure compiler (via ant).

    Skipped unless the web platform is active.  In debug mode the build
    is skipped entirely unless advanced compilation was requested.
    Generates build.xml, runs ant to produce game.min.js, rewrites the
    sourcemap paths to be publish-dir relative, strips engineDir /
    modules / jsList from project.json, removes the CCBoot.js script
    tag from index.html and points it at the minified bundle, then
    copies resources and (if different) mirrors everything into the
    output dir.
    """
    if not self._platforms.is_web_active():
        return

    project_dir = self._platforms.project_path()

    # store env for run
    cfg_obj = self._platforms.get_current_config()
    if cfg_obj.run_root_dir is not None:
        self.run_root = cfg_obj.run_root_dir
    else:
        self.run_root = project_dir

    if cfg_obj.sub_url is not None:
        self.sub_url = cfg_obj.sub_url
    else:
        self.sub_url = '/'

    output_dir = CCPluginCompile.OUTPUT_DIR_SCRIPT_RELEASE
    if self._is_debug_mode():
        output_dir = CCPluginCompile.OUTPUT_DIR_SCRIPT_DEBUG
        # debug builds run from source unless advanced compilation was
        # explicitly requested
        if not self._web_advanced:
            return

    self.sub_url = '%s%s/%s/' % (self.sub_url, output_dir, CCPluginCompile.WEB_PLATFORM_FOLDER_NAME)

    f = open(os.path.join(project_dir, "project.json"))
    project_json = json.load(f)
    f.close()
    engine_dir = os.path.join(project_json["engineDir"])
    realEngineDir = os.path.normpath(os.path.join(project_dir, engine_dir))
    publish_dir = os.path.normpath(os.path.join(project_dir, output_dir, CCPluginCompile.WEB_PLATFORM_FOLDER_NAME))

    # need to config in options of command
    buildOpt = {
        "outputFileName" : "game.min.js",
        "debug": "true" if self._is_debug_mode() else "false",
        "compilationLevel" : "advanced" if self._web_advanced else "simple",
        "sourceMapOpened" : True if self._has_sourcemap else False
    }

    # start from a clean publish dir
    if os.path.exists(publish_dir):
        shutil.rmtree(publish_dir)
    os.makedirs(publish_dir)

    # generate build.xml
    build_web.gen_buildxml(project_dir, project_json, publish_dir, buildOpt)

    outputJsPath = os.path.join(publish_dir, buildOpt["outputFileName"])
    if os.path.exists(outputJsPath) == True:
        os.remove(outputJsPath)

    # call closure compiler
    ant_root = cocos.check_environment_variable('ANT_ROOT')
    ant_path = os.path.join(ant_root, 'ant')
    self._run_cmd("%s -f %s" % (ant_path, os.path.join(publish_dir, 'build.xml')))

    # handle sourceMap: rewrite absolute project/engine paths to be
    # relative to the publish dir and normalize separators to '/'
    sourceMapPath = os.path.join(publish_dir, "sourcemap")
    if os.path.exists(sourceMapPath):
        smFile = open(sourceMapPath)
        try:
            smContent = smFile.read()
        finally:
            smFile.close()

        dir_to_replace = project_dir
        if cocos.os_is_win32():
            # the sourcemap contains escaped backslashes on windows
            dir_to_replace = project_dir.replace('\\', '\\\\')
        smContent = smContent.replace(dir_to_replace, os.path.relpath(project_dir, publish_dir))
        smContent = smContent.replace(realEngineDir, os.path.relpath(realEngineDir, publish_dir))
        smContent = smContent.replace('\\\\', '/')
        smContent = smContent.replace('\\', '/')

        smFile = open(sourceMapPath, "w")
        smFile.write(smContent)
        smFile.close()

    # handle project.json: drop keys only needed for source builds
    del project_json["engineDir"]
    del project_json["modules"]
    del project_json["jsList"]
    project_json_output_file = open(os.path.join(publish_dir, "project.json"), "w")
    project_json_output_file.write(json.dumps(project_json))
    project_json_output_file.close()

    # handle index.html: strip the CCBoot.js tag and point the main
    # script at the minified bundle
    indexHtmlFile = open(os.path.join(project_dir, "index.html"))
    try:
        indexContent = indexHtmlFile.read()
    finally:
        indexHtmlFile.close()
    reg1 = re.compile(r'<script\s+src\s*=\s*("|\')[^"\']*CCBoot\.js("|\')\s*><\/script>')
    indexContent = reg1.sub("", indexContent)
    mainJs = project_json.get("main") or "main.js"
    indexContent = indexContent.replace(mainJs, buildOpt["outputFileName"])
    indexHtmlOutputFile = open(os.path.join(publish_dir, "index.html"), "w")
    indexHtmlOutputFile.write(indexContent)
    indexHtmlOutputFile.close()

    # copy res dir
    if cfg_obj.copy_res is None:
        dst_dir = os.path.join(publish_dir, 'res')
        src_dir = os.path.join(project_dir, 'res')
        if os.path.exists(dst_dir):
            shutil.rmtree(dst_dir)
        shutil.copytree(src_dir, dst_dir)
    else:
        for cfg in cfg_obj.copy_res:
            cocos.copy_files_with_config(cfg, project_dir, publish_dir)

    # copy to the output directory if necessary
    pub_dir = os.path.normcase(publish_dir)
    out_dir = os.path.normcase(os.path.normpath(self._output_dir))
    if pub_dir != out_dir:
        cpy_cfg = {
            "from" : pub_dir,
            "to" : out_dir
        }
        cocos.copy_files_with_config(cpy_cfg, pub_dir, out_dir)
def build_linux(self):
    """Build the linux target with cmake + make and stage the output.

    Skipped unless the linux platform is active.  Determines the
    project name from the platform config or from ``set(APP_NAME ...)``
    in CMakeLists.txt, configures into a build dir, runs ``make -j``,
    then copies the resulting bin directory into the output dir.
    """
    if not self._platforms.is_linux_active():
        return
    # NOTE(review): building on non-linux hosts is deliberately not
    # rejected; the original host check was commented out upstream.

    project_dir = self._project.get_project_dir()
    cfg_obj = self._platforms.get_current_config()
    if cfg_obj.cmake_path is not None:
        cmakefile_dir = os.path.join(project_dir, cfg_obj.cmake_path)
    else:
        cmakefile_dir = project_dir
        if self._project._is_lua_project():
            cmakefile_dir = os.path.join(project_dir, 'frameworks')

    # get the project name
    if cfg_obj.project_name is not None:
        self.project_name = cfg_obj.project_name
    else:
        # parse APP_NAME out of CMakeLists.txt; use a context manager
        # so the file handle is closed (the original leaked it)
        with open(os.path.join(cmakefile_dir, 'CMakeLists.txt'), 'r') as f:
            for line in f:
                if "set(APP_NAME " in line:
                    self.project_name = re.search(r'APP_NAME ([^\)]+)\)', line).group(1)
                    break

    if cfg_obj.build_dir is not None:
        build_dir = os.path.join(project_dir, cfg_obj.build_dir)
    else:
        build_dir = os.path.join(project_dir, 'linux-build')
    if not os.path.exists(build_dir):
        os.makedirs(build_dir)

    with cocos.pushd(build_dir):
        build_mode = 'Debug' if self._is_debug_mode() else 'Release'
        debug_state = 'ON' if self._is_debug_mode() else 'OFF'
        self._run_cmd('cmake -DCMAKE_BUILD_TYPE=%s -DDEBUG_MODE=%s %s' % (build_mode, debug_state, os.path.relpath(cmakefile_dir, build_dir)))

    with cocos.pushd(build_dir):
        self._run_cmd('make -j%s' % self._jobs)

    # move file: replace the output dir with a fresh copy of bin/
    output_dir = self._output_dir
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    os.makedirs(output_dir)

    if cfg_obj.build_result_dir is not None:
        result_dir = os.path.join(build_dir, 'bin', cfg_obj.build_result_dir)
    else:
        result_dir = os.path.join(build_dir, 'bin')
    cocos.copy_files_in_dir(result_dir, output_dir)

    self.run_root = output_dir

    if self._no_res:
        res_dir = os.path.join(output_dir, "Resources")
        self._remove_res(res_dir)

    if self._project._is_script_project() and self._compile_script:
        # script compilation is not supported for the linux target
        cocos.Logging.warning(MultiLanguage.get_string('COMPILE_WARNING_NOT_SUPPORT_COMPILE_SCRIPT'))

    cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILD_SUCCEED'))
def get_wp8_product_id(self, manifest_file):
    """Extract the ProductID (braces stripped) from a WP8 manifest.

    Reads the ``ProductID`` attribute of the first ``<App>`` element.

    Raises:
        cocos.CCPluginError: when the manifest cannot be parsed or the
            expected element/attribute is missing.
    """
    # get the product id from manifest
    from xml.dom import minidom

    try:
        document = minidom.parse(manifest_file)
        app_node = document.documentElement.getElementsByTagName("App")[0]
        product_id = app_node.attributes["ProductID"].value.strip("{}")
    except:
        raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_MANIFEST_PARSE_FAILED_FMT', manifest_file),
                                  cocos.CCPluginError.ERROR_PARSE_FILE)
    return product_id
def build_wp8(self):
    """Build the Windows Phone 8 target and stage the .xap package.

    Resolves the solution & project name from the platform config (or
    by scanning for a .sln), builds via build_vs_project(), copies the
    generated '<project>_<mode>_x86.xap' into the output dir, and
    records the manifest ProductID for later deployment.
    """
    if not self._platforms.is_wp8_active():
        return

    proj_path = self._project.get_project_dir()
    sln_path = self._platforms.project_path()
    output_dir = self._output_dir

    cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILDING'))

    # get the solution file & project name
    cfg_obj = self._platforms.get_current_config()
    if cfg_obj.sln_file is not None:
        sln_name = cfg_obj.sln_file
        if cfg_obj.project_name is None:
            # an explicit sln requires an explicit project name
            raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_CFG_NOT_FOUND_FMT',
                                      (cocos_project.Win32Config.KEY_PROJECT_NAME,
                                       cocos_project.Win32Config.KEY_SLN_FILE,
                                       cocos_project.Project.CONFIG)),
                                      cocos.CCPluginError.ERROR_WRONG_CONFIG)
        else:
            name = cfg_obj.project_name
    else:
        name, sln_name = self.checkFileByExtention(".sln", sln_path)
        if not sln_name:
            message = MultiLanguage.get_string('COMPILE_ERROR_SLN_NOT_FOUND')
            raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PATH_NOT_FOUND)

    wp8_projectdir = cfg_obj.wp8_proj_path

    # build the project
    self.project_name = name
    projectPath = os.path.join(sln_path, sln_name)
    build_mode = 'Debug' if self._is_debug_mode() else 'Release'
    self.build_vs_project(projectPath, self.project_name, build_mode, self.vs_version)

    # copy files
    build_folder_path = os.path.join(wp8_projectdir, cfg_obj.build_folder_path, build_mode)
    if not os.path.isdir(build_folder_path):
        message = MultiLanguage.get_string('COMPILE_ERROR_BUILD_PATH_NOT_FOUND_FMT', build_folder_path)
        raise cocos.CCPluginError(message, cocos.CCPluginError.ERROR_PATH_NOT_FOUND)

    # create output dir if it not existed
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # copy xap
    files = os.listdir(build_folder_path)
    proj_xap_name = "%s_%s_x86.xap" % (self.project_name, build_mode)
    for filename in files:
        if filename == proj_xap_name:
            file_path = os.path.join(build_folder_path, filename)
            cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_COPYING_FMT', filename))
            shutil.copy(file_path, output_dir)
            break

    # get the manifest file path
    manifest_file = os.path.join(wp8_projectdir, cfg_obj.manifest_path)
    self.product_id = self.get_wp8_product_id(manifest_file)
    self.run_root = output_dir
    self.xap_file_name = proj_xap_name
def build_wp8_1(self):
    """Build the Windows Phone 8.1 flavor of the project solution.

    Resolves the .sln and project name from the platform config (or by
    scanning for a .sln file, in which case the '.WindowsPhone' suffix
    used by universal solutions is appended), then delegates the build
    to build_vs_project().
    """
    if not self._platforms.is_wp8_1_active():
        return

    wp8_1_projectdir = self._platforms.project_path()
    output_dir = self._output_dir  # kept for parity with the other build_* methods

    cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILDING'))

    # resolve the solution file & project name
    cfg_obj = self._platforms.get_current_config()
    if cfg_obj.sln_file is None:
        name, sln_name = self.checkFileByExtention(".sln", wp8_1_projectdir)
        if not sln_name:
            raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_SLN_NOT_FOUND'),
                                      cocos.CCPluginError.ERROR_PATH_NOT_FOUND)
        name = "%s.WindowsPhone" % name
    else:
        sln_name = cfg_obj.sln_file
        if cfg_obj.project_name is None:
            # an explicit sln requires an explicit project name
            raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_CFG_NOT_FOUND_FMT',
                                      (cocos_project.Win32Config.KEY_PROJECT_NAME,
                                       cocos_project.Win32Config.KEY_SLN_FILE,
                                       cocos_project.Project.CONFIG)),
                                      cocos.CCPluginError.ERROR_WRONG_CONFIG)
        name = cfg_obj.project_name

    # delegate the actual build
    self.project_name = name
    projectPath = os.path.join(wp8_1_projectdir, sln_name)
    build_mode = 'Debug' if self._is_debug_mode() else 'Release'
    self.build_vs_project(projectPath, self.project_name, build_mode, self.vs_version)
def build_metro(self):
    """Build the Windows (Metro) flavor of the project solution.

    Mirrors build_wp8_1() but appends the '.Windows' suffix when the
    project name is auto-detected from a scanned .sln, then delegates
    the build to build_vs_project().
    """
    if not self._platforms.is_metro_active():
        return

    metro_projectdir = self._platforms.project_path()
    output_dir = self._output_dir  # kept for parity with the other build_* methods

    cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILDING'))

    # resolve the solution file & project name
    cfg_obj = self._platforms.get_current_config()
    if cfg_obj.sln_file is None:
        name, sln_name = self.checkFileByExtention(".sln", metro_projectdir)
        if not sln_name:
            raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_SLN_NOT_FOUND'),
                                      cocos.CCPluginError.ERROR_PATH_NOT_FOUND)
        name = "%s.Windows" % name
    else:
        sln_name = cfg_obj.sln_file
        if cfg_obj.project_name is None:
            # an explicit sln requires an explicit project name
            raise cocos.CCPluginError(MultiLanguage.get_string('COMPILE_ERROR_CFG_NOT_FOUND_FMT',
                                      (cocos_project.Win32Config.KEY_PROJECT_NAME,
                                       cocos_project.Win32Config.KEY_SLN_FILE,
                                       cocos_project.Project.CONFIG)),
                                      cocos.CCPluginError.ERROR_WRONG_CONFIG)
        name = cfg_obj.project_name

    # delegate the actual build
    self.project_name = name
    projectPath = os.path.join(metro_projectdir, sln_name)
    build_mode = 'Debug' if self._is_debug_mode() else 'Release'
    self.build_vs_project(projectPath, self.project_name, build_mode, self.vs_version)
def checkFileByExtention(self, ext, path):
    """Find the first entry in *path* whose extension equals *ext*.

    Returns:
        tuple (basename, filename) for the first match in os.listdir()
        order, or (None, None) when nothing matches.
    """
    for entry in os.listdir(path):
        base, entry_ext = os.path.splitext(entry)
        if entry_ext == ext:
            return base, entry
    return (None, None)
def run(self, argv, dependencies):
    """Plugin entry point: parse args and build the active platform.

    Every build_* method returns immediately when its platform is not
    the active one, so calling them all builds exactly the selected
    target.  Custom pre/post-build step scripts are invoked around the
    build, and any accumulated end-of-run warning is logged last.
    """
    self.parse_args(argv)
    cocos.Logging.info(MultiLanguage.get_string('COMPILE_INFO_BUILD_MODE_FMT', self._mode))
    self._update_build_cfg()

    target_platform = self._platforms.get_current_platform()
    # copy so custom step scripts cannot mutate the shared args dict
    args_build_copy = self._custom_step_args.copy()

    language = self._project.get_language()
    action_str = 'compile_%s' % language
    target_str = 'compile_for_%s' % target_platform
    cocos.DataStatistic.stat_event('compile', action_str, target_str)

    # invoke the custom step: pre-build
    self._project.invoke_custom_step_script(cocos_project.Project.CUSTOM_STEP_PRE_BUILD, target_platform, args_build_copy)

    self.build_android()
    self.build_ios()
    self.build_mac()
    self.build_win32()
    self.build_web()
    self.build_linux()
    self.build_wp8()
    self.build_wp8_1()
    self.build_metro()

    # invoke the custom step: post-build
    self._project.invoke_custom_step_script(cocos_project.Project.CUSTOM_STEP_POST_BUILD, target_platform, args_build_copy)

    if len(self.end_warning) > 0:
        cocos.Logging.warning(self.end_warning)
| |
import mock
import pytest
from service_configuration_lib.cached_view import _EventHandler
from service_configuration_lib.cached_view import ConfigsFileWatcher
from service_configuration_lib.yaml_cached_view import YamlConfigsCachedView
def test_event_handler_create(mock_configs_file_watcher, mock_event):
    """An IN_CREATE event should try to add the created path to the cache."""
    cache = mock_configs_file_watcher
    _EventHandler(cache=cache).process_IN_CREATE(event=mock_event)
    cache._maybe_add_path_to_cache.assert_called_once_with(mock_event.pathname)
def test_event_handler_move(mock_configs_file_watcher, mock_event):
    """An IN_MOVED_TO event is treated like a creation of the target path."""
    cache = mock_configs_file_watcher
    _EventHandler(cache=cache).process_IN_MOVED_TO(event=mock_event)
    cache._maybe_add_path_to_cache.assert_called_once_with(mock_event.pathname)
def test_event_handler_delete(mock_configs_file_watcher, mock_event):
    """An IN_DELETE event should try to drop the path from the cache."""
    cache = mock_configs_file_watcher
    _EventHandler(cache=cache).process_IN_DELETE(event=mock_event)
    cache._maybe_remove_path_from_cache.assert_called_once_with(mock_event.pathname)
def test_event_handler_overflow(mock_configs_file_watcher, mock_event):
    """An inotify queue overflow must mark the watcher for reconfiguration."""
    cache = mock_configs_file_watcher
    _EventHandler(cache=cache).process_IN_Q_OVERFLOW(event=mock_event)
    assert cache._needs_reconfigure
@pytest.mark.parametrize('deleted_folder', ['/foo', '/foo/bar'])
def test_event_handler_delete_self(mock_configs_file_watcher, mock_event, deleted_folder):
    """Only deletion of the watched root ('/foo') forces a reconfigure."""
    cache = mock_configs_file_watcher
    mock_event.pathname = deleted_folder
    _EventHandler(cache=cache).process_IN_DELETE_SELF(event=mock_event)
    assert cache._needs_reconfigure == (deleted_folder == '/foo')
def test_exclude_filter_exclude_folders(configs_file_watcher):
    """Temporary sync artifacts are excluded; regular paths are kept."""
    is_excluded = configs_file_watcher._exclude_filter
    assert is_excluded('/foo/bar/.~tmp~')
    assert not is_excluded('/foo/bar/baz')
def test_exclude_filter_service_names_filtering(configs_file_watcher):
    """Only configured service names (including glob patterns) pass the filter."""
    configs_file_watcher._services_names = ['myservice', 'another_service', 'star_service*']
    is_excluded = configs_file_watcher._exclude_filter
    assert is_excluded('/foo/bar/baz')
    assert not is_excluded('/foo/myservice')
    assert not is_excluded('/foo/another_service')
    assert is_excluded('/foo/another_service2')
    assert not is_excluded('/foo/star_service2')
    assert not is_excluded('/foo/star_service')
    assert not is_excluded('/foo/star_services/autotuned_defaults')
def test_stopping_notifier(configs_file_watcher):
    """close() must stop the inotify notifier and drop the reference."""
    active_notifier = configs_file_watcher._notifier
    assert active_notifier is not None
    configs_file_watcher.close()
    assert configs_file_watcher._notifier is None
    active_notifier.stop.assert_called_once()
def test_service_name_and_config_from_path(configs_file_watcher):
    """A first-level config file maps to (service, config name, suffix)."""
    path = configs_file_watcher._configs_folder + '/new_service/config.json'
    parsed = configs_file_watcher._service_name_and_config_from_path(path)
    assert parsed == ('new_service', 'config', '.json')
def test_service_name_and_config_from_path_too_deep_config(configs_file_watcher):
    """Deeply nested config paths still resolve to the top-level service."""
    path = configs_file_watcher._configs_folder + '/new_service/1/2/3/config.json'
    parsed = configs_file_watcher._service_name_and_config_from_path(path)
    assert parsed.service_name == 'new_service'
    assert parsed.config_name == 'config'
    assert parsed.config_suffix == '.json'
def test_service_name_and_config_from_path_top_level_file(configs_file_watcher):
    """A file directly in the configs folder has no service name."""
    path = configs_file_watcher._configs_folder + '/config.json'
    parsed = configs_file_watcher._service_name_and_config_from_path(path)
    assert parsed.service_name is None
    assert parsed.config_name == 'config'
    assert parsed.config_suffix == '.json'
def test_service_name_and_config_from_path_with_configs_names(configs_file_watcher):
    """Paths whose config name matches no configured name are ignored."""
    configs_file_watcher._configs_names = ['config', 'new_config*']
    path = configs_file_watcher._configs_folder + '/service12/configX.json'
    assert configs_file_watcher._service_name_and_config_from_path(path) is None
def test_service_name_and_config_from_path_with_config_suffixes(configs_file_watcher):
    """Paths whose suffix is not in the configured suffix list are ignored."""
    configs_file_watcher._configs_suffixes = ['.yaml']
    path = configs_file_watcher._configs_folder + '/service12/config.json'
    assert configs_file_watcher._service_name_and_config_from_path(path) is None
def test_configs_file_watcher_process_events_with_limit(configs_file_watcher):
    """process_events(limit=N) handles at most N inotify batches per call."""
    notifier = configs_file_watcher._notifier
    notifier.check_events.side_effect = [True, True, True, True, False]
    notifier.process_events.side_effect = configs_file_watcher._process_inotify_event
    assert configs_file_watcher._processed_events_count == 0
    configs_file_watcher.process_events(limit=2)
    assert configs_file_watcher._processed_events_count == 2
    assert notifier.read_events.call_count == 2
    assert notifier.process_events.call_count == 2
def test_configs_file_watcher_process_events_with_limit_2nd_iteration(configs_file_watcher):
    """The processed-events counter accumulates across process_events() calls."""
    notifier = configs_file_watcher._notifier
    notifier.check_events.side_effect = [True, True, True, True, False]
    notifier.process_events.side_effect = configs_file_watcher._process_inotify_event
    configs_file_watcher.process_events(limit=2)
    configs_file_watcher.process_events(limit=2)
    assert configs_file_watcher._processed_events_count == 4
    assert notifier.read_events.call_count == 4
    assert notifier.process_events.call_count == 4
def test_configs_file_watcher_process_events_with_overflow_in_the_middle(configs_file_watcher):
    """An overflow-triggered setup() mid-run resets the counter and stops the pass."""
    notifier = configs_file_watcher._notifier
    notifier.check_events.side_effect = [True, True, True, True, False]
    configs_file_watcher._processed_events_count = 2
    notifier.process_events.side_effect = configs_file_watcher.setup
    configs_file_watcher.process_events(limit=3)
    assert configs_file_watcher._processed_events_count == 0  # overflow resets the counter
    assert notifier.read_events.call_count == 1
    assert notifier.process_events.call_count == 1
def test_configs_file_watcher_process_events(configs_file_watcher):
    """A normal pass reads and processes once, without re-running setup()."""
    notifier = configs_file_watcher._notifier
    notifier.check_events.side_effect = [True, False]
    assert notifier.read_events.call_count == 0
    assert notifier.process_events.call_count == 0
    configs_file_watcher.setup = mock.Mock()
    configs_file_watcher.process_events()
    assert notifier.read_events.call_count == 1
    assert notifier.process_events.call_count == 1
    assert configs_file_watcher.setup.call_count == 0
def test_configs_file_watcher_process_events_reconfigure(configs_file_watcher):
    """When reconfiguration is pending, process_events() re-runs setup()."""
    notifier = configs_file_watcher._notifier
    notifier.check_events.side_effect = [True, True, False]
    configs_file_watcher._needs_reconfigure = True
    assert notifier.read_events.call_count == 0
    assert notifier.process_events.call_count == 0
    configs_file_watcher.setup = mock.Mock()
    configs_file_watcher.process_events()
    assert notifier.read_events.call_count == 1
    assert notifier.process_events.call_count == 1
    assert configs_file_watcher.setup.call_count == 1
def test_wildcard_configs_names(
    tmpdir,
    mock_inotify_constants,
    mock_watch_manager,
    mock_notifier,
):
    """Glob patterns in configs_names pick up every matching config file."""
    tmpdir.join('foo', 'bar-dev.yaml').write('{baz: 42}', ensure=True)
    tmpdir.join('foo', 'bar-prod.yaml').write('{baz: 3939}', ensure=True)
    watcher = ConfigsFileWatcher(
        configs_view=YamlConfigsCachedView(),
        configs_names=['bar-*'],
        configs_suffixes=['.yaml'],
        configs_folder=tmpdir,
    )
    expected = {
        'foo': {
            'bar-dev': {'baz': 42},
            'bar-prod': {'baz': 3939},
        },
    }
    assert watcher.configs_view.configs == expected
| |
'''
Created on Feb 15, 2012
@author: Jeff
'''
import numpy
import numpyTransform
from scipy.spatial import cKDTree as KDTree
# from scipy.spatial import Delaunay
from scipy.spatial.distance import cdist
import scipy.optimize
import time
from math import pi
from MatlabFunctions import MatlabFmincon
import nlopt
import sys
class ICP(object):
    '''
    Iterative Closest Point (ICP) registration.

    Aligns a data point cloud to a model point cloud by repeatedly
    matching nearest neighbours and minimizing the distance between
    matched pairs.  Written for Python 2 (print statements, xrange).
    '''
    def __init__(self, modelPointCloud, dataPointCloud, **kwargs):
        '''
        modelPointCloud
            The model point cloud is the base to which the data point cloud will be matched
        dataPointCloud
            The data point cloud is transformed so that it matches the model point cloud

        Key Word Arguments:
        maxIterations
            maximum number of iterations to perform, default is 10
            TODO: in the future provide an option to also account for minimum acceptable error
        matchingMethod
            'kdtree'     Use a KD-Tree for nearest neighbor search {default}
            'bruteforce' Use brute force for nearest neighbor search
        minimizeMethod
            'point'      Use point to point minimization {default}
            'plane'      Use point to plane minimization
            'fmincon'    Use MATLAB's fmincon via MatlabFmincon bridge
            'custom'     Use the nlopt-based minimizer
        weightMethod
            function that takes indices into the modelPointCloud and returns the weight of those indices
            By default all points are weighted equally
        modelDownsampleFactor
            integer that represents uniform sampling of model point cloud
            1 is no resampling, 2 is every other point, 3 is every third point...
        dataDownsampleFactor
            integer that represents uniform sampling of data point cloud
            1 is no resampling, 2 is every other point, 3 is every third point...

        ICP Process is five steps
            1: Input Filter
            2: Match
            3: Outlier Filter
            4: Error Minimization
            5: Check if error is less than limits
                yes: we are done
                no: go back to step 2 with new transformation function
        '''
        self.startTime = time.time()
        # uniform downsampling of the model cloud: tile a boolean mask that
        # keeps every `factor`-th point (Python 2 integer division)
        if 'modelDownsampleFactor' in kwargs and int(kwargs['modelDownsampleFactor']) > 1:
            factor = int(kwargs['modelDownsampleFactor'])
            temp = numpy.zeros(factor, dtype=numpy.bool)
            temp[-1] = True
            modelDownSampleIndices = numpy.tile(temp, (modelPointCloud.shape[0] / factor) + 1)[:modelPointCloud.shape[0]]
        else:
            modelDownSampleIndices = numpy.ones(modelPointCloud.shape[0], dtype=numpy.bool)
        # same downsampling scheme for the data cloud
        if 'dataDownsampleFactor' in kwargs and int(kwargs['dataDownsampleFactor']) > 1:
            factor = int(kwargs['dataDownsampleFactor'])
            temp = numpy.zeros(factor, dtype=numpy.bool)
            temp[-1] = True
            dataDownSampleIndices = numpy.tile(temp, (dataPointCloud.shape[0] / factor) + 1)[:dataPointCloud.shape[0]]
        else:
            dataDownSampleIndices = numpy.ones(dataPointCloud.shape[0], dtype=numpy.bool)
        # TODO: uniform sampling of point clouds
        self.q = modelPointCloud[modelDownSampleIndices]  # model (fixed) cloud
        self.p = dataPointCloud[dataDownSampleIndices]  # data (moving) cloud
        self.matlab = None
        # get kwargs
        if 'maxIterations' in kwargs:
            self.K = int(kwargs['maxIterations'])
        else:
            self.K = 10
        # select the nearest-neighbour matching strategy; the KD-tree is
        # built once here and reused every iteration
        if 'matchingMethod' in kwargs:
            if kwargs['matchingMethod'] == 'bruteforce':
                self.matching = self.matchingBruteForce
            else:
                self.matching = self.matchingKDTree
                self.qKDTree = KDTree(self.q)
        else:
            self.matching = self.matchingKDTree
            self.qKDTree = KDTree(self.q)
        # select the error-minimization strategy
        if 'minimizeMethod' in kwargs:
            if kwargs['minimizeMethod'] == 'plane':  # point to plane
                self.minimize = self.minimizePlane
            elif kwargs['minimizeMethod'] == 'fmincon':
                self.minimize = self.minimizeMatlab
                self.matlab = MatlabFmincon()
            elif kwargs['minimizeMethod'] == 'custom':
                self.minimize = self.minimizeCustom
            else:  # point to point
                self.minimize = self.minimizePoint
        else:
            self.minimize = self.minimizePoint
        if 'weightMethod' in kwargs:
            self.weightMethod = kwargs['weightMethod']
        else:
            self.weightMethod = self.weightEqual
        # initialize translation and rotation matrix
        self.transformMatrix = numpy.matrix(numpy.identity(4))
        # initialize list of translations and rotation matrix for each iteration of ICP
        self.totalTransformMatrix = [numpy.matrix(numpy.identity(4))]
        self.pt = self.p.copy()  # transformed point cloud
        self.t = []  # array of times for each iteration of ICP
        self.err = []  # error for each iteration of ICP
        self.Np = self.p.shape[0]  # number of points in data cloud
        # preprocessing finish, log time
        self.t.append(time.time() - self.startTime)
        print 'Time for preprocessing:', self.t[-1]
    def __del__(self):
        # shut down the MATLAB bridge if one was started
        if self.matlab is not None:
            del self.matlab
    def runICP(self, **kwargs):
        """Run the ICP loop for self.K iterations.

        Returns (totalTransformMatrix, err, t): the cumulative transform
        after each iteration, the RMS error history, and timing data.
        """
        tStart = time.time()
        # keep copies of the 'global' bounds/initial guess so per-iteration
        # bounds can be recomputed without violating the global ones
        if 'x0' in kwargs:
            kwargs['initX0'] = kwargs['x0'].copy()
        if 'lb' in kwargs:
            kwargs['initLB'] = kwargs['lb'].copy()
        if 'ub' in kwargs:
            kwargs['initUB'] = kwargs['ub'].copy()
        # main ICP loop
        for k in xrange(self.K):
            t1 = time.time()
            minDistances, nearestNeighbor = self.matching(self.pt)
            # get indices of the points we are interested in
            p_idx = numpy.ones(self.p.shape[0], dtype=numpy.bool)  # since there are no edges we are interested in all the points
            q_idx = nearestNeighbor
            print '\tTime to calc min distance:', time.time() - t1
            # TODO: Input filtering
            #   reject some % of worst matches
            #   Multiresolution sampling
            # add error for first iteration
            if k == 0:
                t1 = time.time()
                self.err.append(numpy.sqrt(numpy.sum(minDistances ** 2) / minDistances.shape[0]))
                print '\tInitial RMS error: %f, Time to calc: %f' % (self.err[-1], time.time() - t1)
            # generate rotation matrix and translation
            t1 = time.time()
            weights = self.weightMethod(nearestNeighbor)
            # get current cumulative rotation/translation in independent variable values, this way we can change the iteration bounds so that the global bounds are not violated
            cummulativeX0 = numpy.zeros(9)
            rotMat, tx, ty, tz, sx, sy, sz = numpyTransform.decomposeMatrix(self.totalTransformMatrix[-1])
            rx, ry, rz = numpyTransform.rotationMat2Euler(rotMat)
            cummulativeX0[0] = rx
            cummulativeX0[1] = ry
            cummulativeX0[2] = rz
            cummulativeX0[3] = tx
            cummulativeX0[4] = ty
            cummulativeX0[5] = tz
            cummulativeX0[6] = sx
            cummulativeX0[7] = sy
            cummulativeX0[8] = sz
            R, T, S = self.minimize(self.q[q_idx], self.pt[p_idx], weights=weights, cummulativeX0=cummulativeX0, **kwargs)
            print '\tTime to calc new transformation:', time.time() - t1
            # create combined transformation matrix, apply this relative transformation to current transformation
            transformMatrix = numpy.matrix(numpy.identity(4))
            transformMatrix *= T
            transformMatrix *= R
            transformMatrix *= S
            self.totalTransformMatrix.append(self.totalTransformMatrix[-1] * transformMatrix)
            # apply last transformation
            t1 = time.time()
            self.pt = numpyTransform.transformPoints(self.totalTransformMatrix[-1], self.p)
            print '\tTime to applying transform to all points:', time.time() - t1
            # root mean of objective function
            t1 = time.time()
            self.err.append(self.rms_error(self.q[q_idx], self.pt[p_idx]))
            print '\tIteration %d RMS error: %f, Time to calc: %f' % (k + 1, self.err[-1], time.time() - t1)
            # TODO: add extrapolation
            # store time to get to this iteration
            self.t.append(time.time() - self.startTime)
            print 'Iteration %d took %7.3f seconds' % (k + 1, self.t[-1] - self.t[-2])
        print 'Total ICP run time:', time.time() - tStart
        return self.totalTransformMatrix, self.err, self.t
    def matchingKDTree(self, points):
        # nearest neighbour via the KD-tree built over self.q in __init__
        minDistances, nearestNeighborIndex = self.qKDTree.query(points)
        return minDistances, nearestNeighborIndex
    def matchingBruteForce(self, points):
        # O(n*m) nearest neighbour: full pairwise distance matrix
        nearestNeighborIndex = numpy.zeros(points.shape[0])
        distances = cdist(points, self.q)  # calculate all combination of point distances
        minDistances = distances.min(axis=1)
        for i in xrange(points.shape[0]):
            nearestNeighborIndex[i] = numpy.where(distances[i] == minDistances[i])[0][0]
        return minDistances, nearestNeighborIndex
    def minimizePoint(self, q, p, **kwargs):
        # Point-to-point minimization (weighted SVD / Kabsch-style).
        R = numpy.matrix(numpy.identity(4))
        T = numpy.matrix(numpy.identity(4))
        S = numpy.matrix(numpy.identity(4))
        if 'weights' in kwargs:
            weights = kwargs['weights']
        else:
            raise Warning('weights argument not supplied')
        # NOTE(review): this unconditional early return makes everything
        # below dead code AND returns a 2-tuple, while runICP unpacks
        # three values (R, T, S) — calling this minimizer as-is would
        # raise at the unpack site.  Looks like leftover debugging;
        # confirm before removing.
        return R, T
        # function [R,T] = eq_point(q,p,weights)
        m = p.shape[0]
        n = q.shape[0]
        # normalize weights
        weights = weights / weights.sum()
        # find data centroid and deviations from centroid
        q_bar = (numpy.mat(q.T) * numpy.mat(weights[:, numpy.newaxis])).getA().squeeze()
        q_mark = q - numpy.tile(q_bar, n).reshape((n, 3))
        # Apply weights
        q_mark = q_mark * numpy.repeat(weights, 3).reshape((weights.shape[0], 3))
        # find data centroid and deviations from centroid
        p_bar = (numpy.mat(p.T) * numpy.mat(weights[:, numpy.newaxis])).getA().squeeze()
        p_mark = p - numpy.tile(p_bar, m).reshape((m, 3))
        # Apply weights
        # p_mark = p_mark * numpy.repeat(weights, 3).reshape((weights.shape[0],3))
        N = (numpy.mat(p_mark).T * numpy.mat(q_mark)).getA()  # taking points of q in matched order
        [U, Ss, V] = numpy.linalg.svd(N);  # singular value decomposition
        V = (numpy.mat(V).H).getA()
        RMattemp = numpy.mat(V) * numpy.mat(U).T
        Ttemp = (numpy.mat(q_bar).T - RMattemp * numpy.mat(p_bar).T).getA().squeeze()
        R[:3, :3] = RMattemp.getA()
        T = numpyTransform.translation(Ttemp)
        return R, T, S
    def minimizeMatlab(self, modelPoints, dataPoints, **kwargs):
        """Minimize via MATLAB's fmincon through the MatlabFmincon bridge."""
        if 'x0' in kwargs:
            x0 = kwargs['x0']
        else:
            raise Exception('There are no variables to solve for')
        # check for initial settings and bounds so that we can calculate current settings and bounds
        if 'initX0' in kwargs:
            initX0 = kwargs['initX0']
        if 'initLB' in kwargs:
            initLB = kwargs['initLB']
        if 'initUB' in kwargs:
            initUB = kwargs['initUB']
        if 'cummulativeX0' in kwargs:
            cummulativeX0 = kwargs['cummulativeX0']
        # NOTE: I think this only works if x0/initX0 is all zeros
        ub = initUB - (cummulativeX0 - initX0)
        lb = initLB - (cummulativeX0 - initX0)
        # rounding errors can cause Bounds to be incorrect
        i = ub < x0
        if numpy.any(i):
            print 'upper bounds less than x0'
            ub[i] = x0[i] + 10 * numpy.spacing(x0[i])
        i = lb > x0
        if numpy.any(i):
            print 'lower bounds less than x0'
            lb[i] = x0[i] - 10 * numpy.spacing(x0[i])
        # if x0.shape[0] > 6 or ('scaleOnly' in kwargs and kwargs['scaleOnly']):
        #     raise Exception('Scaling is not currently supported it will screw things up. Need some way to control scaling bounds so that it stays in global scaling bounds')
        try:
            if 'scaleOnly' in kwargs:
                R, T, S = self.matlab.minimize(modelPoints, dataPoints, x0[-3:], lb[-3:], ub[-3:], scaleOnly=kwargs['scaleOnly'])
            elif 'scaleOnlyIso' in kwargs:
                R, T, S = self.matlab.minimize(modelPoints, dataPoints, x0[-1:], lb[-1:], ub[-1:], scaleOnlyIso=kwargs['scaleOnlyIso'])
            else:
                R, T, S = self.matlab.minimize(modelPoints, dataPoints, x0[:6], lb[:6], ub[:6])  # only rotation and translation
        except:
            # NOTE(review): bare except; if minimize() fails before
            # assignment, R/T/S are unbound and the return below raises
            # NameError — confirm intended error handling.
            sys.stderr.write('ERROR: Problem with matlab, closing matlab\n')
            del self.matlab
            self.matlab = None
        return R, T, S
    def minimizeCustom(self, p, q, **kwargs):
        """Minimize the mean point-pair distance with an nlopt global optimizer."""
        S = numpy.matrix(numpy.identity(4))
        # TODO: try using functions from the nlopt module
        def objectiveFunc(*args, **kwargs):
            # args[0]: parameter vector [rx, ry, rz, tx, ty, tz]
            # args[1]: gradient output array (nlopt convention)
            d = p
            m = q
            params = args[0]
            if args[1].size > 0:  # gradient
                args[1][:] = numpy.array([pi / 100, pi / 100, pi / 100, 0.01, 0.01, 0.01])  # arbitrary gradient
            # transform = numpy.matrix(numpy.identity(4))
            translate = numpyTransform.translation(params[3:6])
            rotx = numpyTransform.rotation(params[0], [1, 0, 0], N=4)
            roty = numpyTransform.rotation(params[1], [0, 1, 0], N=4)
            rotz = numpyTransform.rotation(params[2], [0, 0, 1], N=4)
            transform = translate * rotx * roty * rotz
            Dicp = numpyTransform.transformPoints(transform, d)
            # err = self.rms_error(m, Dicp)
            err = numpy.mean(numpy.sqrt(numpy.sum((m - Dicp) ** 2, axis=1)))
            # err = numpy.sqrt(numpy.sum((m - Dicp) ** 2, axis=1))
            return err
        x0 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        if 'optAlg' in kwargs:
            opt = nlopt.opt(kwargs['optAlg'], 6)
        else:
            opt = nlopt.opt(nlopt.GN_CRS2_LM, 6)
        opt.set_min_objective(objectiveFunc)
        opt.set_lower_bounds([-pi, -pi, -pi, -3.0, -3.0, -3.0])
        opt.set_upper_bounds([pi, pi, pi, 3.0, 3.0, 3.0])
        opt.set_maxeval(1500)
        params = opt.optimize(x0)
        # output = scipy.optimize.leastsq(objectiveFunc, x0, args=funcArgs)
        # params = output[0]
        # params = scipy.optimize.fmin(objectiveFunc, x0, args=funcArgs)
        # constraints = []
        # varBounds = [(-pi, pi), (-pi, pi), (-pi, pi), (-3.0, 3.0), (-3.0, 3.0), (-3.0, 3.0)]
        # params = scipy.optimize.fmin_slsqp(objectiveFunc, x0, eqcons=constraints, bounds=varBounds, args=funcArgs)
        # output = scipy.optimize.fmin_l_bfgs_b(objectiveFunc, x0, bounds=varBounds, args=funcArgs, approx_grad=True)
        # params = output[0]
        # print 'Min error:', output[1]
        # params = scipy.optimize.fmin_tnc(objectiveFunc, x0, bounds=varBounds, args=funcArgs, approx_grad=True)
        # params = scipy.optimize.fmin_slsqp(objectiveFunc, x0, eqcons=constraints, bounds=varBounds, args=funcArgs)
        # params = scipy.optimize.fmin_slsqp(objectiveFunc, x0, eqcons=constraints, bounds=varBounds, args=funcArgs)
        translate = numpyTransform.translation(params[3:6])
        rotx = numpyTransform.rotation(params[0], [1, 0, 0], N=4)
        roty = numpyTransform.rotation(params[1], [0, 1, 0], N=4)
        rotz = numpyTransform.rotation(params[2], [0, 0, 1], N=4)
        transform = translate * rotx * roty * rotz
        # NOTE(review): returns only (rotation, S) — a 2-tuple — while
        # runICP unpacks R, T, S, and the optimized `translate` is
        # discarded; probably should be `return rotx * roty * rotz,
        # translate, S`.  Confirm before changing.
        return rotx * roty * rotz, S
    def minimizePlane(self, p, q, **kwargs):
        # TODO: Actually fill out — currently returns identity transforms.
        # The commented MATLAB below is the point-to-plane reference
        # implementation this is meant to port.
        R = numpy.matrix(numpy.identity(4))
        T = numpy.matrix(numpy.identity(4))
        S = numpy.matrix(numpy.identity(4))
        # function [R,T] = eq_plane(q,p,n,weights)
        # n = n .* repmat(weights,3,1);
        #
        # c = cross(p,n);
        #
        # cn = vertcat(c,n);
        #
        # C = cn*transpose(cn);
        #
        # b = - [sum(sum((p-q).*repmat(cn(1,:),3,1).*n));
        #        sum(sum((p-q).*repmat(cn(2,:),3,1).*n));
        #        sum(sum((p-q).*repmat(cn(3,:),3,1).*n));
        #        sum(sum((p-q).*repmat(cn(4,:),3,1).*n));
        #        sum(sum((p-q).*repmat(cn(5,:),3,1).*n));
        #        sum(sum((p-q).*repmat(cn(6,:),3,1).*n))];
        #
        # X = C\b;
        #
        # cx = cos(X(1)); cy = cos(X(2)); cz = cos(X(3));
        # sx = sin(X(1)); sy = sin(X(2)); sz = sin(X(3));
        #
        # R = [cy*cz cz*sx*sy-cx*sz cx*cz*sy+sx*sz;
        #      cy*sz cx*cz+sx*sy*sz cx*sy*sz-cz*sx;
        #      -sy cy*sx cx*cy];
        #
        # T = X(4:6);
        return R, T, S
    def weightEqual(self, qIndices):
        # default weighting: every matched point contributes equally
        return numpy.ones(qIndices.shape[0])
    def rms_error(self, a, b):
        '''
        Determine the RMS error between two equally sized point clouds
        with point correspondence.
        NOTE: a and b need to have equal number of points
        '''
        if a.shape[0] != b.shape[0]:
            raise Exception('Input Point clouds a and b do not have the same number of points')
        distSq = numpy.sum((a - b) ** 2, axis=1)
        err = numpy.sqrt(numpy.mean(distSq))
        return err
def demo(*args, **kwargs):
    """Self-contained ICP demo: sample z=sin(x)*cos(y), apply a known
    rigid transform plus noise, recover it with ICP, and plot results.

    kwargs are forwarded to the ICP constructor (and 'optAlg' is also
    used for the plot title).
    """
    import math
    m = 80  # width of grid
    n = m ** 2  # number of points
    minVal = -2.0
    maxVal = 2.0
    delta = (maxVal - minVal) / (m - 1)
    X, Y = numpy.mgrid[minVal:maxVal + delta:delta, minVal:maxVal + delta:delta]
    X = X.flatten()
    Y = Y.flatten()
    Z = numpy.sin(X) * numpy.cos(Y)
    # Create the data point-matrix
    M = numpy.array([X, Y, Z]).T
    # Translation values (a.u.):
    Tx = 0.5
    Ty = -0.3
    Tz = 0.2
    # Translation vector
    T = numpyTransform.translation(Tx, Ty, Tz)
    S = numpyTransform.scaling(1.0, N=4)
    # Rotation values (rad.):
    rx = 0.3
    ry = -0.2
    rz = 0.05
    Rx = numpy.matrix([[1, 0, 0, 0],
                       [0, math.cos(rx), -math.sin(rx), 0],
                       [0, math.sin(rx), math.cos(rx), 0],
                       [0, 0, 0, 1]])
    Ry = numpy.matrix([[math.cos(ry), 0, math.sin(ry), 0],
                       [0, 1, 0, 0],
                       [-math.sin(ry), 0, math.cos(ry), 0],
                       [0, 0, 0, 1]])
    Rz = numpy.matrix([[math.cos(rz), -math.sin(rz), 0, 0],
                       [math.sin(rz), math.cos(rz), 0, 0],
                       [0, 0, 1, 0],
                       [0, 0, 0, 1]])
    # Rotation matrix
    R = Rx * Ry * Rz
    transformMat = numpy.matrix(numpy.identity(4))
    transformMat *= T
    transformMat *= R
    transformMat *= S
    # Transform data-matrix plus noise into model-matrix
    D = numpyTransform.transformPoints(transformMat, M)
    # Add noise to model and data
    M = M + 0.01 * numpy.random.randn(n, 3)
    D = D + 0.01 * numpy.random.randn(n, 3)
    # Run ICP (standard settings)
    initialGuess = numpy.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    lowerBounds = numpy.array([-pi, -pi, -pi, -100.0, -100.0, -100.0])
    upperBounds = numpy.array([pi, pi, pi, 100.0, 100.0, 100.0])
    icp = ICP(M, D, maxIterations=15, dataDownsampleFactor=1, minimizeMethod='fmincon', **kwargs)
    # icp = ICP(M, D, maxIterations=15, dataDownsampleFactor=1, minimizeMethod='point', **kwargs)
    transform, err, t = icp.runICP(x0=initialGuess, lb=lowerBounds, ub=upperBounds)
    # Transform data-matrix using ICP result
    Dicp = numpyTransform.transformPoints(transform[-1], D)
    # Plot model points blue and transformed points red
    # (the matplotlib branch is disabled; visvis is used below)
    if False:
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D
        fig = plt.figure()
        ax = fig.add_subplot(2, 2, 1, projection='3d')
        ax.scatter(M[:, 0], M[:, 1], M[:, 2], c='r', marker='o')
        ax.scatter(D[:, 0], D[:, 1], D[:, 2], c='b', marker='^')
        ax.set_xlabel('X Label')
        ax.set_ylabel('Y Label')
        ax.set_zlabel('Z Label')
        ax = fig.add_subplot(2, 2, 2, projection='3d')
        ax.scatter(M[:, 0], M[:, 1], M[:, 2], c='r', marker='o')
        ax.scatter(Dicp[:, 0], Dicp[:, 1], Dicp[:, 2], c='b', marker='^')
        ax.set_xlabel('X Label')
        ax.set_ylabel('Y Label')
        ax.set_zlabel('Z Label')
        ax = fig.add_subplot(2, 2, 3)
        ax.plot(t, err, 'x--')
        ax.set_xlabel('X Label')
        ax.set_ylabel('Y Label')
        plt.show()
    else:
        import visvis as vv
        app = vv.use()
        vv.figure()
        vv.subplot(2, 2, 1)
        vv.plot(M[:, 0], M[:, 1], M[:, 2], lc='b', ls='', ms='o')
        vv.plot(D[:, 0], D[:, 1], D[:, 2], lc='r', ls='', ms='x')
        vv.xlabel('[0,0,1] axis')
        vv.ylabel('[0,1,0] axis')
        vv.zlabel('[1,0,0] axis')
        vv.title('Red: z=sin(x)*cos(y), blue: transformed point cloud')
        # Plot the results
        vv.subplot(2, 2, 2)
        vv.plot(M[:, 0], M[:, 1], M[:, 2], lc='b', ls='', ms='o')
        vv.plot(Dicp[:, 0], Dicp[:, 1], Dicp[:, 2], lc='r', ls='', ms='x')
        vv.xlabel('[0,0,1] axis')
        vv.ylabel('[0,1,0] axis')
        vv.zlabel('[1,0,0] axis')
        vv.title('ICP result')
        # Plot RMS curve
        vv.subplot(2, 2, 3)
        vv.plot(t, err, ls='--', ms='x')
        vv.xlabel('time [s]')
        vv.ylabel('d_{RMS}')
        vv.title('KD-Tree matching')
        # the title is overwritten with the optimizer name when one was
        # explicitly requested
        if 'optAlg' in kwargs:
            opt2 = nlopt.opt(kwargs['optAlg'], 2)
            vv.title(opt2.get_algorithm_name())
            del opt2
        else:
            vv.title('KD-Tree matching')
        app.Run()
if __name__ == '__main__':
    # run the interactive ICP demonstration when executed as a script
    demo()
    # demo2()
| |
# util/_concurrency_py3k.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
import asyncio
import sys
from typing import Any
from typing import Callable
from typing import Coroutine
import greenlet
from . import compat
from .langhelpers import memoized_property
from .. import exc
# Feature-detect contextvars propagation: both the stdlib contextvars
# module (py3.7+) and a greenlet exposing gr_context are required;
# otherwise _copy_context stays None and no context is propagated.
if compat.py37:
    try:
        from contextvars import copy_context as _copy_context

        # If greenlet.gr_context is present in current version of greenlet,
        # it will be set with a copy of the current context on creation.
        # Refs: https://github.com/python-greenlet/greenlet/pull/198
        getattr(greenlet.greenlet, "gr_context")
    except (ImportError, AttributeError):
        _copy_context = None
else:
    _copy_context = None
def is_exit_exception(e):
    """Return True if ``e`` represents an "exit" condition.

    Anything outside the ``Exception`` hierarchy (e.g. KeyboardInterrupt,
    SystemExit, and on modern Pythons asyncio.CancelledError, which
    derives from BaseException) counts as an exit; asyncio's timeout and
    cancellation errors are treated the same way even where they are
    plain Exceptions.
    """
    if not isinstance(e, Exception):
        return True
    return isinstance(e, (asyncio.TimeoutError, asyncio.CancelledError))
# implementation based on snaury gist at
# https://gist.github.com/snaury/202bf4f22c41ca34e56297bae5f33fef
# Issue for context: https://github.com/python-greenlet/greenlet/issues/173
class _AsyncIoGreenlet(greenlet.greenlet):
    """Greenlet that remembers the 'driver' greenlet which spawned it.

    await_only()/await_fallback() switch back to ``driver`` to hand
    awaitables to the enclosing event loop.
    """
    def __init__(self, fn, driver):
        greenlet.greenlet.__init__(self, fn, driver)
        # the greenlet running the asyncio event loop (also the parent)
        self.driver = driver
        if _copy_context is not None:
            # propagate the caller's contextvars context into this greenlet
            self.gr_context = _copy_context()
def await_only(awaitable: Coroutine) -> Any:
    """Awaits an async function in a sync method.

    The sync method must be inside a :func:`greenlet_spawn` context;
    otherwise :class:`exc.MissingGreenlet` is raised.
    :func:`await_` calls cannot be nested.

    :param awaitable: The coroutine to call.
    """
    # this is called in the context greenlet while running fn
    current = greenlet.getcurrent()
    if not isinstance(current, _AsyncIoGreenlet):
        raise exc.MissingGreenlet(
            "greenlet_spawn has not been called; can't call await_() here. "
            "Was IO attempted in an unexpected place?"
        )
    # returns the control to the driver greenlet passing it
    # a coroutine to run. Once the awaitable is done, the driver greenlet
    # switches back to this greenlet with the result of awaitable that is
    # then returned to the caller (or raised as error)
    return current.driver.switch(awaitable)
def await_fallback(awaitable: Coroutine) -> Any:
    """Awaits an async function in a sync method.

    Like :func:`await_only`, but when called outside a
    :func:`greenlet_spawn` context it falls back to running the
    awaitable on a (non-running) event loop instead of raising.
    :func:`await_` calls cannot be nested.

    :param awaitable: The coroutine to call.
    """
    # this is called in the context greenlet while running fn
    current = greenlet.getcurrent()
    if not isinstance(current, _AsyncIoGreenlet):
        # not inside greenlet_spawn: only usable if no loop is running
        loop = get_event_loop()
        if loop.is_running():
            raise exc.MissingGreenlet(
                "greenlet_spawn has not been called and asyncio event "
                "loop is already running; can't call await_() here. "
                "Was IO attempted in an unexpected place?"
            )
        return loop.run_until_complete(awaitable)
    # hand the awaitable to the driver greenlet, as in await_only()
    return current.driver.switch(awaitable)
async def greenlet_spawn(
    fn: Callable, *args, _require_await=False, **kwargs
) -> Any:
    """Runs a sync function ``fn`` in a new greenlet.

    The sync function can then use :func:`await_` to wait for async
    functions.

    :param fn: The sync callable to call.
    :param \\*args: Positional arguments to pass to the ``fn`` callable.
    :param \\*\\*kwargs: Keyword arguments to pass to the ``fn`` callable.
    """
    context = _AsyncIoGreenlet(fn, greenlet.getcurrent())
    # runs the function synchronously in gl greenlet. If the execution
    # is interrupted by await_, context is not dead and result is a
    # coroutine to wait. If the context is dead the function has
    # returned, and its result can be returned.
    switch_occurred = False
    try:
        result = context.switch(*args, **kwargs)
        while not context.dead:
            switch_occurred = True
            try:
                # wait for a coroutine from await_ and then return its
                # result back to it.
                value = await result
            except BaseException:
                # this allows an exception to be raised within
                # the moderated greenlet so that it can continue
                # its expected flow.
                result = context.throw(*sys.exc_info())
            else:
                result = context.switch(value)
    finally:
        # clean up to avoid cycle resolution by gc
        del context.driver
    if _require_await and not switch_occurred:
        # fn never called await_(): with a sync DBAPI underneath this
        # signals a misconfiguration, so fail loudly
        raise exc.AwaitRequired(
            "The current operation required an async execution but none was "
            "detected. This will usually happen when using a non compatible "
            "DBAPI driver. Please ensure that an async DBAPI is used."
        )
    return result
class AsyncAdaptedLock:
    """A ``threading.Lock``-style context manager backed by an
    ``asyncio.Lock``, acquired through :func:`await_fallback` so it can
    be used from greenlet_spawn contexts.
    """
    @memoized_property
    def mutex(self):
        # there should not be a race here for coroutines creating the
        # new lock as we are not using await, so therefore no concurrency
        return asyncio.Lock()
    def __enter__(self):
        # await is used to acquire the lock only after the first calling
        # coroutine has created the mutex.
        await_fallback(self.mutex.acquire())
        return self
    def __exit__(self, *arg, **kw):
        self.mutex.release()
def _util_async_run_coroutine_function(fn, *args, **kwargs):
    """for test suite/ util only"""
    event_loop = get_event_loop()
    if event_loop.is_running():
        raise Exception(
            "for async run coroutine we expect that no greenlet or event "
            "loop is running when we start out"
        )
    return event_loop.run_until_complete(fn(*args, **kwargs))
def _util_async_run(fn, *args, **kwargs):
    """for test suite/ util only: run ``fn`` under greenlet_spawn."""
    loop = get_event_loop()
    if not loop.is_running():
        return loop.run_until_complete(greenlet_spawn(fn, *args, **kwargs))
    else:
        # allow for a wrapped test function to call another
        assert isinstance(greenlet.getcurrent(), _AsyncIoGreenlet)
        return fn(*args, **kwargs)
def get_event_loop():
    """vendor asyncio.get_event_loop() for python 3.7 and above.

    Python 3.10 deprecates get_event_loop() as a standalone.
    """
    if not compat.py37:
        return asyncio.get_event_loop()
    try:
        return asyncio.get_running_loop()
    except RuntimeError:
        # no running loop: fall back to the policy's loop
        return asyncio.get_event_loop_policy().get_event_loop()
| |
import logging
from statistics import pstdev, mean
from time import perf_counter
from types import MethodType
import math
import pytest
from plenum.common.constants import DOMAIN_LEDGER_ID, LedgerState
from plenum.common.perf_util import get_memory_usage, timeit
from plenum.test.delayers import cr_delay
from stp_core.loop.eventually import eventually
from plenum.common.types import HA
from stp_core.common.log import getlogger, Logger
from plenum.test.helper import sdk_send_random_and_check
from plenum.test.node_catchup.helper import waitNodeDataEquality, \
check_ledger_state
from plenum.test.pool_transactions.helper import \
disconnect_node_and_ensure_disconnected
from plenum.test.test_node import checkNodesConnected, TestNode
from plenum.test import waits
# noinspection PyUnresolvedReferences
from plenum.test.node_catchup.conftest import whitelist
@pytest.fixture
def logger():
    """Raise the root log level to WARNING for the test, then restore it."""
    log = getlogger()
    previous_level = log.getEffectiveLevel()
    log.root.setLevel(logging.WARNING)
    yield log
    log.root.setLevel(previous_level)
# autouse and inject before others in all tests
pytestmark = pytest.mark.usefixtures("logger")
txnCount = 5
# perf runs are open-ended: disable plenum's per-test time limit
TestRunningTimeLimitSec = math.inf
"""
Since these tests expect performance to be of certain level, they can fail and
for now should only be run when a perf check is required, like after a relevant
change in protocol, setting `SkipTests` to False will run tests in this
module
"""
SkipTests = True
# decorator applied to every perf test below
skipper = pytest.mark.skipif(SkipTests, reason='Perf optimisations not done')
@pytest.fixture(scope="module")
def disable_node_monitor_config(tconf):
    """Module-scoped fixture that flags view changes as disabled in tconf."""
    unsafe_flags = tconf.unsafe
    unsafe_flags.add('disable_view_change')
    return tconf
@pytest.fixture(scope="module")
def change_checkpoint_freq(tconf):
    """Module-scoped fixture lowering the checkpoint frequency to 3."""
    setattr(tconf, 'CHK_FREQ', 3)
@skipper
def test_node_load(looper, txnPoolNodeSet,
                   sdk_pool_handle,
                   sdk_wallet_client,
                   capsys):
    """Push many request batches through the pool, reporting per-batch time."""
    total_batches = 150
    batch_size = 25
    for batch_no in range(1, total_batches + 1):
        started = perf_counter()
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, batch_size)
        with capsys.disabled():
            print('{} executed {} client txns in {:.2f} seconds'.
                  format(batch_no, batch_size, perf_counter() - started))
@skipper
def test_node_load_consistent_time(tconf, change_checkpoint_freq,
                                   disable_node_monitor_config, looper,
                                   txnPoolNodeSet, capsys,
                                   sdk_pool_handle, sdk_wallet_client):
    """Send many batches and assert each batch's latency stays within
    ``tolerance_factor`` standard deviations of the running mean (after a
    warm-up period), while printing memory and per-method timing diagnostics.
    """
    # One of the reason memory grows is because spylog grows
    client_batches = 300
    txns_per_batch = 25
    time_log = []  # per-batch wall-clock durations collected so far
    warm_up_batches = 10  # batches ignored before enforcing the latency check
    tolerance_factor = 2  # allowed deviation, in standard deviations
    print_detailed_memory_usage = False
    from pympler import tracker
    tr = tracker.SummaryTracker()
    # Node methods whose call durations are recorded via the timeit() wrapper.
    node_methods_to_capture = [TestNode.executeBatch,
                               TestNode.recordAndPropagate,
                               TestNode.domainDynamicValidation,
                               TestNode.domainRequestApplication]
    times = {n.name: {meth.__name__: [] for meth in node_methods_to_capture}
             for n in txnPoolNodeSet}
    # Monkey-patch each captured method on each node so call durations are
    # appended to the matching list in ``times``.
    for node in txnPoolNodeSet:
        for meth in node_methods_to_capture:
            meth_name = meth.__name__
            patched = timeit(getattr(node, meth_name),
                             times[node.name][meth_name])
            setattr(node, meth_name, patched)
    for i in range(client_batches):
        s = perf_counter()
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, txns_per_batch)
        t = perf_counter() - s
        with capsys.disabled():
            print('{} executed {} client txns in {:.2f} seconds'.
                  format(i + 1, txns_per_batch, t))
            print('--------Memory Usage details start')
            for node in txnPoolNodeSet:
                # print(sys.getsizeof(node))
                print('---Node {}-----'.format(node))
                # print('Requests {}'.format(asizeof.asizeof(node.requests, detail=1)))
                print(
                    get_memory_usage(
                        node,
                        print_detailed_memory_usage,
                        get_only_non_empty=True))
                for r in node.replicas.values():
                    print('---Replica {}-----'.format(r))
                    print(
                        get_memory_usage(
                            r,
                            print_detailed_memory_usage,
                            get_only_non_empty=True))
            # if i % 3 == 0:
            #     tr.print_diff()
            print('--------Memory Usage details end')
            for node in txnPoolNodeSet:
                for meth in node_methods_to_capture:
                    ts = times[node.name][meth.__name__]
                    print('{} {} {} {}'.format(
                        node, meth.__name__, mean(ts), ts))
        # After warm-up: the latest batch time must not exceed the running
        # mean by more than ``tolerance_factor`` standard deviations.
        if len(time_log) >= warm_up_batches:
            m = mean(time_log)
            sd = tolerance_factor * pstdev(time_log)
            assert m > t or abs(t - m) <= sd, '{} {}'.format(abs(t - m), sd)
        time_log.append(t)
@skipper
def test_node_load_after_add(sdk_new_node_caught_up, txnPoolNodeSet,
                             looper, sdk_pool_handle,
                             sdk_wallet_client, capsys):
    """
    A node that restarts after some transactions should eventually get the
    transactions which happened while it was down
    :return:
    """
    # Fix: the module-level name ``logger`` is the pytest fixture *function*
    # defined above, not a Logger instance, so ``logger.debug(...)`` raised
    # AttributeError.  Obtain a real logger explicitly instead.
    log = getlogger()
    new_node = sdk_new_node_caught_up
    log.debug("Sending requests")
    # Here's where we apply some load
    client_batches = 300
    txns_per_batch = 25
    for i in range(client_batches):
        s = perf_counter()
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, txns_per_batch)
        with capsys.disabled():
            print('{} executed {} client txns in {:.2f} seconds'.
                  format(i + 1, txns_per_batch, perf_counter() - s))
    log.debug("Starting the stopped node, {}".format(new_node))
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 5)
    # The new node must converge to the same ledger data as the others.
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:4])
@skipper
def test_node_load_after_add_then_disconnect(sdk_new_node_caught_up, txnPoolNodeSet,
                                             tconf, looper, sdk_pool_handle,
                                             sdk_wallet_client,
                                             tdirWithPoolTxns, allPluginsPath,
                                             capsys):
    """
    A node that restarts after some transactions should eventually get the
    transactions which happened while it was down
    :return:
    """
    new_node = sdk_new_node_caught_up
    with capsys.disabled():
        print("Stopping node {} with pool ledger size {}".
              format(new_node, new_node.poolManager.txnSeqNo))
    # Take the freshly-added node offline before applying load.
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, new_node)
    looper.removeProdable(new_node)
    client_batches = 80
    txns_per_batch = 10
    for i in range(client_batches):
        s = perf_counter()
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, txns_per_batch)
        with capsys.disabled():
            print('{} executed {} client txns in {:.2f} seconds'.
                  format(i + 1, txns_per_batch, perf_counter() - s))
    with capsys.disabled():
        print("Starting the stopped node, {}".format(new_node))
    # Recreate the node object with the same identity/addresses and rejoin it.
    nodeHa, nodeCHa = HA(*new_node.nodestack.ha), HA(*new_node.clientstack.ha)
    new_node = TestNode(
        new_node.name,
        basedirpath=tdirWithPoolTxns,
        base_data_dir=tdirWithPoolTxns,
        config=tconf,
        ha=nodeHa,
        cliha=nodeCHa,
        pluginPaths=allPluginsPath)
    looper.add(new_node)
    txnPoolNodeSet[-1] = new_node
    # Delay catchup reply processing so LedgerState does not change
    delay_catchup_reply = 5
    new_node.nodeIbStasher.delay(cr_delay(delay_catchup_reply))
    looper.run(checkNodesConnected(txnPoolNodeSet))
    # Make sure ledger starts syncing (sufficient consistency proofs received)
    looper.run(eventually(check_ledger_state, new_node, DOMAIN_LEDGER_ID,
                          LedgerState.syncing, retryWait=.5, timeout=5))
    # Not accurate timeout but a conservative one
    timeout = waits.expectedPoolGetReadyTimeout(len(txnPoolNodeSet)) + \
              2 * delay_catchup_reply
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:4],
                         customTimeout=timeout)
    # The restarted node must also process brand-new requests correctly.
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 5)
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:4])
@skipper
def test_nodestack_contexts_are_discrete(txnPoolNodeSet):
    """Every node must own its own nodestack context (no sharing)."""
    first, second = txnPoolNodeSet[0], txnPoolNodeSet[1]
    assert first.nodestack.ctx != second.nodestack.ctx
    node_count = len(txnPoolNodeSet)
    contexts = set()
    raw_handles = set()
    for node in txnPoolNodeSet:
        contexts.add(node.nodestack.ctx)
        raw_handles.add(node.nodestack.ctx.underlying)
    # Uniqueness: one distinct context (and underlying handle) per node.
    assert len(contexts) == node_count
    assert len(raw_handles) == node_count
@skipper
def test_node_load_after_disconnect(looper, txnPoolNodeSet, tconf,
                                    allPluginsPath,
                                    tdirWithPoolTxns,
                                    sdk_pool_handle,
                                    sdk_wallet_client,
                                    capsys):
    """Disconnect the last node, apply load, then recreate and reconnect it."""
    nodes = txnPoolNodeSet
    x = nodes[-1]
    with capsys.disabled():
        print("Stopping node {} with pool ledger size {}".
              format(x, x.poolManager.txnSeqNo))
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, x)
    looper.removeProdable(x)
    client_batches = 80
    txns_per_batch = 10
    for i in range(client_batches):
        s = perf_counter()
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, txns_per_batch)
        with capsys.disabled():
            print('{} executed {} client txns in {:.2f} seconds'.
                  format(i + 1, txns_per_batch, perf_counter() - s))
    # Rebuild the node with its original identity and addresses, then rejoin.
    nodeHa, nodeCHa = HA(*x.nodestack.ha), HA(*x.clientstack.ha)
    newNode = TestNode(x.name, basedirpath=tdirWithPoolTxns, base_data_dir=tdirWithPoolTxns, config=tconf,
                       ha=nodeHa, cliha=nodeCHa, pluginPaths=allPluginsPath)
    looper.add(newNode)
    txnPoolNodeSet[-1] = newNode
    looper.run(checkNodesConnected(txnPoolNodeSet))
@skipper
def test_node_load_after_one_node_drops_all_msgs(
        looper,
        txnPoolNodeSet,
        sdk_pool_handle,
        sdk_wallet_client,
        capsys):
    """Load the pool while the last node silently drops every node message."""
    victim = txnPoolNodeSet[-1]
    with capsys.disabled():
        print("Patching node {}".format(victim))

    def handleOneNodeMsg(self, wrappedMsg):
        # Swallow the incoming node message without processing it.
        pass

    victim.handleOneNodeMsg = MethodType(handleOneNodeMsg, victim)
    total_batches = 120
    batch_size = 25
    for batch_no in range(1, total_batches + 1):
        started = perf_counter()
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, batch_size)
        with capsys.disabled():
            print('{} executed {} client txns in {:.2f} seconds'.
                  format(batch_no, batch_size, perf_counter() - started))
| |
import os
import shutil
import tempfile
from collections import namedtuple
import numpy as np
import pandas as pd
import pytest
import tensorflow as tf
from ludwig.constants import PROC_COLUMN, NAME
from ludwig.data.dataset_synthesizer import build_synthetic_dataset
from ludwig.data.preprocessing import preprocess_for_training
from ludwig.features.feature_utils import SEQUENCE_TYPES, compute_feature_hash
from ludwig.models.ecd import build_single_input, build_single_output
from tests.integration_tests.utils import category_feature
from tests.integration_tests.utils import date_feature
from tests.integration_tests.utils import image_feature
from tests.integration_tests.utils import numerical_feature
from tests.integration_tests.utils import sequence_feature
from tests.integration_tests.utils import set_feature
# Synthetic-data and model dimensions shared by all test cases below.
BATCH_SIZE = 32
HIDDEN_SIZE = 128
SEQ_SIZE = 10
RANDOM_SEED = 42
# Scratch directory for generated image features; recreated by each test run.
IMAGE_DIR = tempfile.mkdtemp()
# SyntheticData namedtuple structure:
# batch_size: Number of records to generate for a batch
# feature_generator: Ludwig synthetic generator class
# feature_generator_args: tuple of required positional arguments
# feature_generator_kwargs: dictionary of required keyword arguments
SyntheticData = namedtuple(
    'SyntheticData',
    'batch_size feature_generator feature_generator_args feature_generator_kwargs'
)
# TestCase namedtuple structure:
# syn_data: SyntheticData namedtuple of data to create
# XCoder_other_parms: dictionary for required encoder/decoder parameters
# regularizer_parm_names: list of regularizer keyword parameter names
TestCase = namedtuple('TestCase',
                      'syn_data XCoder_other_parms regularizer_parm_names')
#
# Regularization Encoder Tests
#
@pytest.mark.parametrize(
    'test_case',
    [
        # DenseEncoder
        TestCase(
            SyntheticData(BATCH_SIZE, numerical_feature, (), {}),
            {'num_layers': 2, 'encoder': 'dense',
             'preprocessing': {'normalization': 'zscore'}},
            ['activity_regularizer', 'weights_regularizer', 'bias_regularizer']
        ),
        # Image Encoders
        TestCase(
            SyntheticData(BATCH_SIZE, image_feature, (IMAGE_DIR,), {}),
            {'encoder': 'stacked_cnn'},
            [
                'conv_activity_regularizer', 'conv_weights_regularizer',
                'conv_bias_regularizer',
                'fc_activity_regularizer', 'fc_weights_regularizer',
                'fc_bias_regularizer',
            ]
        ),
        TestCase(
            SyntheticData(BATCH_SIZE, image_feature, (IMAGE_DIR,), {}),
            {'encoder': 'resnet'},
            [
                'activity_regularizer', 'weights_regularizer',
                'bias_regularizer',
            ]
        ),
        # Categorical encoder
        TestCase(
            SyntheticData(BATCH_SIZE, category_feature, (), {}),
            {'representation': 'dense'},
            [
                'embedding_regularizer',
            ]
        ),
        # Date encoder
        TestCase(
            SyntheticData(BATCH_SIZE, date_feature, (), {}),
            {},
            [
                'activity_regularizer', 'weights_regularizer',
                'bias_regularizer',
            ]
        ),
        # ParallelCNN Encoder
        TestCase(
            SyntheticData(BATCH_SIZE, sequence_feature, (), {}),
            {'encoder': 'parallel_cnn', 'cell_type': 'gru'},
            [
                'activity_regularizer', 'weights_regularizer',
                'bias_regularizer',
            ]
        ),
        # Set Encoder
        TestCase(
            SyntheticData(BATCH_SIZE, set_feature, (), {}),
            {},
            [
                'activity_regularizer', 'weights_regularizer',
                'bias_regularizer',
            ]
        ),
    ]
)
def test_encoder(test_case):
    """For each encoder, verify regularizers contribute loss terms:
    no regularizer yields 0, l1/l2 are positive, and l1 + l2 == l1_l2
    (with default regularizer settings).
    """
    # set up required directories for images if needed
    shutil.rmtree(IMAGE_DIR, ignore_errors=True)
    os.mkdir(IMAGE_DIR)
    # reproducible synthetic data set
    np.random.seed(RANDOM_SEED)
    tf.random.set_seed(RANDOM_SEED)
    # create synthetic data for the test
    features = [
        test_case.syn_data.feature_generator(
            *test_case.syn_data.feature_generator_args,
            **test_case.syn_data.feature_generator_kwargs
        )
    ]
    name = features[0][NAME]
    proc_column = compute_feature_hash(features[0])
    features[0][PROC_COLUMN] = proc_column
    data_generator = build_synthetic_dataset(BATCH_SIZE, features)
    data_list = list(data_generator)
    # first row of the generator output is the header; the rest is data
    raw_data = [x[0] for x in data_list[1:]]
    df = pd.DataFrame({data_list[0][0]: raw_data})
    # minimal config sufficient to create the input feature
    config = {'input_features': features, 'output_features': []}
    training_set, _, _, training_set_metadata = preprocess_for_training(
        config,
        training_set=df,
        skip_save_processed_input=True,
        random_seed=RANDOM_SEED
    )
    # run through each type of regularizer for the encoder
    regularizer_losses = []
    for regularizer in [None, 'l1', 'l2', 'l1_l2']:
        # start with clean slate and make reproducible
        tf.keras.backend.clear_session()
        np.random.seed(RANDOM_SEED)
        tf.random.set_seed(RANDOM_SEED)
        # setup kwarg for regularizer parms
        x_coder_kwargs = dict(
            zip(test_case.regularizer_parm_names,
                len(test_case.regularizer_parm_names) * [regularizer])
        )
        # combine other keyword parameters
        x_coder_kwargs.update(test_case.XCoder_other_parms)
        features[0].update(x_coder_kwargs)
        # shim code to support sequence/sequence like features
        if features[0]['type'] in SEQUENCE_TYPES.union({'category', 'set'}):
            features[0]['vocab'] = training_set_metadata[name][
                'idx2str']
            training_set.dataset[proc_column] = \
                training_set.dataset[proc_column].astype(np.int32)
        input_def_obj = build_single_input(features[0], None)
        inputs = training_set.dataset[proc_column]
        # make sure we are at least rank 2 tensor
        if len(inputs.shape) == 1:
            inputs = inputs.reshape(-1, 1)
        # special handling for image feature
        if features[0]['type'] == 'image':
            inputs = tf.cast(inputs, tf.float32) / 255
        # forward pass populates encoder_obj.losses with regularizer terms
        input_def_obj.encoder_obj(inputs)
        regularizer_loss = tf.reduce_sum(input_def_obj.encoder_obj.losses)
        regularizer_losses.append(regularizer_loss)
    # check loss regularization loss values
    # None should be zero
    assert regularizer_losses[0] == 0
    # l1, l2 and l1_l2 should be greater than zero
    assert np.all([t > 0.0 for t in regularizer_losses[1:]])
    # # using default setting l1 + l2 == l1_l2 losses
    assert np.isclose(
        regularizer_losses[1].numpy() + regularizer_losses[2].numpy(),
        regularizer_losses[3].numpy())
    # cleanup
    shutil.rmtree(IMAGE_DIR, ignore_errors=True)
#
# Regularization Decoder Tests
#
@pytest.mark.parametrize(
    'test_case',
    [
        # regressor decoder
        TestCase(
            SyntheticData(BATCH_SIZE, numerical_feature, (), {}),
            {
                'decoder': 'regressor',
                'loss': {'type': 'mean_squared_error'},
                'num_fc_layers': 5
            },
            ['activity_regularizer', 'weights_regularizer', 'bias_regularizer']
        ),
        # Tagger Decoder
        TestCase(
            SyntheticData(BATCH_SIZE, sequence_feature, (),
                          {'max_len': SEQ_SIZE}),
            {'decoder': 'tagger'},
            ['activity_regularizer', 'weights_regularizer', 'bias_regularizer']
        ),
        # Generator Decoder
        TestCase(
            SyntheticData(BATCH_SIZE, sequence_feature, (),
                          {'max_len': SEQ_SIZE}),
            {'decoder': 'generator', 'cell_type': 'gru'},
            ['activity_regularizer', 'weights_regularizer', 'bias_regularizer']
        ),
    ]
)
def test_decoder(test_case):
    """For each decoder, verify regularizers contribute loss terms:
    no regularizer yields 0, l1/l2 are positive, and l1 + l2 == l1_l2
    (with default regularizer settings).
    """
    # reproducible synthetic data set
    np.random.seed(RANDOM_SEED)
    tf.random.set_seed(RANDOM_SEED)
    # create synthetic data for the test
    features = [
        test_case.syn_data.feature_generator(
            *test_case.syn_data.feature_generator_args,
            **test_case.syn_data.feature_generator_kwargs
        )
    ]
    feature_name = features[0][NAME]
    proc_column = compute_feature_hash(features[0])
    features[0][PROC_COLUMN] = proc_column
    data_generator = build_synthetic_dataset(BATCH_SIZE, features)
    data_list = list(data_generator)
    # first row of the generator output is the header; the rest is data
    raw_data = [x[0] for x in data_list[1:]]
    df = pd.DataFrame({data_list[0][0]: raw_data})
    # create synthetic combiner layer
    combiner_outputs_rank2 = {
        'combiner_output': tf.random.normal(
            [BATCH_SIZE, HIDDEN_SIZE],
            dtype=tf.float32
        )
    }
    # rank-3 variant for sequence decoders, which also need the encoder
    # state and per-example sequence lengths
    combiner_outputs_rank3 = {
        'combiner_output': tf.random.normal(
            [BATCH_SIZE, SEQ_SIZE, HIDDEN_SIZE],
            dtype=tf.float32
        ),
        'encoder_output_state': tf.random.normal(
            [BATCH_SIZE, HIDDEN_SIZE],
            dtype=tf.float32
        ),
        'lengths': tf.convert_to_tensor(
            np.array(BATCH_SIZE * [SEQ_SIZE]),
            dtype=tf.int32
        )
    }
    # minimal config sufficient to create output feature
    config = {'input_features': [], 'output_features': features}
    training_set, _, _, training_set_metadata = preprocess_for_training(
        config,
        training_set=df,
        skip_save_processed_input=True,
        random_seed=RANDOM_SEED
    )
    # run through each type of regularizer
    regularizer_losses = []
    for regularizer in [None, 'l1', 'l2', 'l1_l2']:
        # start with clean slate and make reproducible
        tf.keras.backend.clear_session()
        np.random.seed(RANDOM_SEED)
        tf.random.set_seed(RANDOM_SEED)
        # setup kwarg for regularizer parms
        x_coder_kwargs = dict(
            zip(test_case.regularizer_parm_names,
                len(test_case.regularizer_parm_names) * [regularizer])
        )
        # combine other keyword parameters
        x_coder_kwargs.update(test_case.XCoder_other_parms)
        features[0].update(x_coder_kwargs)
        # sequence-like features need a class count and rank-3 combiner output
        if features[0]['type'] in SEQUENCE_TYPES:
            features[0]['num_classes'] = training_set_metadata[feature_name][
                'vocab_size'] + 1
            training_set.dataset[proc_column] = \
                training_set.dataset[proc_column].astype(np.int32)
            combiner_outputs = combiner_outputs_rank3
        else:
            combiner_outputs = combiner_outputs_rank2
        output_def_obj = build_single_output(features[0], None, None)
        targets = training_set.dataset[proc_column]
        # make sure targets are at least a rank 2 tensor
        if len(targets.shape) == 1:
            targets = targets.reshape(-1, 1)
        # forward pass populates decoder_obj.losses with regularizer terms
        output_def_obj(
            (
                (combiner_outputs, None),
                targets
            ),
            training=True,
            mask=None
        )
        regularizer_loss = tf.reduce_sum(output_def_obj.decoder_obj.losses)
        regularizer_losses.append(regularizer_loss)
    # check loss regularization loss values
    # None should be zero
    assert regularizer_losses[0] == 0
    # l1, l2 and l1_l2 should be greater than zero
    assert np.all([t > 0.0 for t in regularizer_losses[1:]])
    # # using default setting l1 + l2 == l1_l2 losses
    assert np.isclose(
        regularizer_losses[1].numpy() + regularizer_losses[2].numpy(),
        regularizer_losses[3].numpy())
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import re
import shutil
import tempfile
import threading
import unittest
import numpy as np
from tensorflow.core.framework import summary_pb2
from tensorflow.python import keras
from tensorflow.python.framework import test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import adam
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
# Small-problem dimensions shared by every test in this class.
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class KerasCallbacksTest(test.TestCase):
  def test_ModelCheckpoint(self):
    """Exercise ModelCheckpoint across monitor/mode combinations,
    save_best_only, an unknown metric, the `period` argument, and an
    invalid mode (which warns but does not raise)."""
    if h5py is None:
      return  # Skip test if models cannot be saved.
    with self.test_session():
      np.random.seed(1337)
      temp_dir = self.get_temp_dir()
      self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
      filepath = os.path.join(temp_dir, 'checkpoint.h5')
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      # case 1: auto mode, save every epoch
      monitor = 'val_loss'
      save_best_only = False
      mode = 'auto'
      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='rmsprop',
          metrics=['accuracy'])
      cbks = [
          keras.callbacks.ModelCheckpoint(
              filepath,
              monitor=monitor,
              save_best_only=save_best_only,
              mode=mode)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      assert os.path.exists(filepath)
      os.remove(filepath)
      # case 2: explicit min mode
      mode = 'min'
      cbks = [
          keras.callbacks.ModelCheckpoint(
              filepath,
              monitor=monitor,
              save_best_only=save_best_only,
              mode=mode)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      assert os.path.exists(filepath)
      os.remove(filepath)
      # case 3: explicit max mode on an accuracy-style metric
      mode = 'max'
      monitor = 'val_acc'
      cbks = [
          keras.callbacks.ModelCheckpoint(
              filepath,
              monitor=monitor,
              save_best_only=save_best_only,
              mode=mode)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      assert os.path.exists(filepath)
      os.remove(filepath)
      # case 4: only the best checkpoint is kept
      save_best_only = True
      cbks = [
          keras.callbacks.ModelCheckpoint(
              filepath,
              monitor=monitor,
              save_best_only=save_best_only,
              mode=mode)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      assert os.path.exists(filepath)
      os.remove(filepath)
      # Case: metric not available.
      cbks = [
          keras.callbacks.ModelCheckpoint(
              filepath,
              monitor='unknown',
              save_best_only=True)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      # File won't be written.
      assert not os.path.exists(filepath)
      # case 5: `period=2` saves only on every 2nd epoch
      save_best_only = False
      period = 2
      mode = 'auto'
      filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
      cbks = [
          keras.callbacks.ModelCheckpoint(
              filepath,
              monitor=monitor,
              save_best_only=save_best_only,
              mode=mode,
              period=period)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=4,
          verbose=1)
      # Only epochs 2 and 4 should have produced files.
      assert os.path.exists(filepath.format(epoch=2))
      assert os.path.exists(filepath.format(epoch=4))
      os.remove(filepath.format(epoch=2))
      os.remove(filepath.format(epoch=4))
      assert not os.path.exists(filepath.format(epoch=1))
      assert not os.path.exists(filepath.format(epoch=3))
      # Invalid use: this will raise a warning but not an Exception.
      keras.callbacks.ModelCheckpoint(
          filepath,
          monitor=monitor,
          save_best_only=save_best_only,
          mode='unknown')
  def test_EarlyStopping(self):
    """Smoke-test EarlyStopping across monitor/mode combinations,
    including unknown values (which fall back without raising)."""
    with self.test_session():
      np.random.seed(123)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='rmsprop',
          metrics=['accuracy'])
      # (mode, monitor) pairs; 'unknown' exercises the fallback path.
      cases = [
          ('max', 'val_acc'),
          ('min', 'val_loss'),
          ('auto', 'val_acc'),
          ('auto', 'loss'),
          ('unknown', 'unknown')
      ]
      for mode, monitor in cases:
        patience = 0
        cbks = [
            keras.callbacks.EarlyStopping(
                patience=patience, monitor=monitor, mode=mode)
        ]
        model.fit(
            x_train,
            y_train,
            batch_size=BATCH_SIZE,
            validation_data=(x_test, y_test),
            callbacks=cbks,
            epochs=5,
            verbose=0)
  def test_EarlyStopping_reuse(self):
    """A single EarlyStopping instance must reset its state between fits."""
    with self.test_session():
      np.random.seed(1337)
      patience = 3
      data = np.random.random((100, 1))
      labels = np.where(data > 0.5, 1, 0)
      model = keras.models.Sequential((keras.layers.Dense(
          1, input_dim=1, activation='relu'), keras.layers.Dense(
              1, activation='sigmoid'),))
      model.compile(
          optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
      weights = model.get_weights()
      stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) >= patience
      # This should allow training to go for at least `patience` epochs
      model.set_weights(weights)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) >= patience
  def test_EarlyStopping_with_baseline(self):
    """EarlyStopping with `baseline`: training stops after one epoch when
    the metric misses the baseline, but runs at least `patience` epochs
    when patience is given."""
    with self.test_session():
      np.random.seed(1337)
      baseline = 0.5
      (data, labels), _ = testing_utils.get_test_data(
          train_samples=100,
          test_samples=50,
          input_shape=(1,),
          num_classes=NUM_CLASSES)
      model = keras.models.Sequential((keras.layers.Dense(
          1, input_dim=1, activation='relu'), keras.layers.Dense(
              1, activation='sigmoid'),))
      model.compile(
          optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
      # No patience: a single sub-baseline epoch terminates training.
      stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                              baseline=baseline)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) == 1
      patience = 3
      stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                              patience=patience,
                                              baseline=baseline)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) >= patience
def test_RemoteMonitor(self):
if requests is None:
return
monitor = keras.callbacks.RemoteMonitor()
# This will raise a warning since the default address in unreachable:
monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
with self.test_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.2) < keras.backend.epsilon()
cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
  def test_ReduceLROnPlateau(self):
    """ReduceLROnPlateau: lr is cut by `factor` when val_loss fails to
    improve by `min_delta`, and left alone when improvements count."""
    with self.test_session():
      np.random.seed(1337)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      def make_model():
        # Fresh, deterministically initialised model for each case.
        np.random.seed(1337)
        model = keras.models.Sequential()
        model.add(
            keras.layers.Dense(
                NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
        model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
        model.compile(
            loss='categorical_crossentropy',
            optimizer=keras.optimizers.SGD(lr=0.1),
            metrics=['accuracy'])
        return model
      model = make_model()
      # This should reduce the LR after the first epoch (due to high epsilon).
      cbks = [
          keras.callbacks.ReduceLROnPlateau(
              monitor='val_loss',
              factor=0.1,
              min_delta=10,
              patience=1,
              cooldown=5)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=5,
          verbose=0)
      # 0.1 * factor 0.1 = 0.01 after a single reduction.
      self.assertAllClose(
          float(keras.backend.get_value(model.optimizer.lr)),
          0.01,
          atol=1e-4)
      model = make_model()
      # min_delta=0: ordinary improvements count, so the lr stays at 0.1.
      cbks = [
          keras.callbacks.ReduceLROnPlateau(
              monitor='val_loss',
              factor=0.1,
              min_delta=0,
              patience=1,
              cooldown=5)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=5,
          verbose=2)
      self.assertAllClose(
          float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)
def test_ReduceLROnPlateau_patience(self):
class DummyOptimizer(object):
def __init__(self):
self.lr = keras.backend.variable(1.0)
class DummyModel(object):
def __init__(self):
self.optimizer = DummyOptimizer()
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', patience=2)
reduce_on_plateau.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040]
lrs = []
for epoch in range(len(losses)):
reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))
# The learning rates should be 1.0 except the last one
for lr in lrs[:-1]:
self.assertEqual(lr, 1.0)
self.assertLess(lrs[-1], 1.0)
  def test_ReduceLROnPlateau_backwards_compatibility(self):
    """The deprecated `epsilon` kwarg warns and is mapped to `min_delta`."""
    with test.mock.patch.object(logging, 'warning') as mock_log:
      reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
    # A deprecation warning must have been logged...
    self.assertRegexpMatches(
        str(mock_log.call_args), '`epsilon` argument is deprecated')
    # ...and the value must live on as `min_delta`, not `epsilon`.
    self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
    self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
    self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
  def test_CSVLogger(self):
    """CSVLogger: the separator is honoured, append mode reuses the file,
    and reusing one logger object does not duplicate the header row."""
    with self.test_session():
      np.random.seed(1337)
      temp_dir = self.get_temp_dir()
      self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
      filepath = os.path.join(temp_dir, 'log.tsv')
      sep = '\t'
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      def make_model():
        # Fresh, deterministically initialised model for each case.
        np.random.seed(1337)
        model = keras.models.Sequential()
        model.add(
            keras.layers.Dense(
                NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
        model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
        model.compile(
            loss='categorical_crossentropy',
            optimizer=keras.optimizers.SGD(lr=0.1),
            metrics=['accuracy'])
        return model
      # case 1, create new file with defined separator
      model = make_model()
      cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      assert os.path.exists(filepath)
      with open(filepath) as csvfile:
        dialect = csv.Sniffer().sniff(csvfile.read())
      assert dialect.delimiter == sep
      del model
      del cbks
      # case 2, append data to existing file, skip header
      model = make_model()
      cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      # case 3, reuse of CSVLogger object
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      with open(filepath) as csvfile:
        output = ' '.join(csvfile.readlines())
        # Header written exactly once despite three fit() calls.
        assert len(re.findall('epoch', output)) == 1
      os.remove(filepath)
  def test_stop_training_csv(self):
    # Test that using the CSVLogger callback with the TerminateOnNaN callback
    # does not result in invalid CSVs.
    np.random.seed(1337)
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
    with self.test_session():
      fp = os.path.join(tmpdir, 'test.csv')
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
      model = keras.models.Sequential()
      for _ in range(5):
        model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
      model.compile(loss='mean_squared_error',
                    optimizer='rmsprop')
      def data_generator():
        # Yields real batches until three passes over the data are done,
        # then switches to all-NaN batches to trigger TerminateOnNaN.
        i = 0
        max_batch_index = len(x_train) // BATCH_SIZE
        tot = 0
        while 1:
          if tot > 3 * len(x_train):
            yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
                   np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
          else:
            yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
                   y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
            i += 1
            tot += 1
            i %= max_batch_index
      history = model.fit_generator(data_generator(),
                                    len(x_train) // BATCH_SIZE,
                                    validation_data=(x_test, y_test),
                                    callbacks=cbks,
                                    epochs=20)
      loss = history.history['loss']
      assert len(loss) > 1
      assert loss[-1] == np.inf or np.isnan(loss[-1])
      values = []
      with open(fp) as f:
        for x in csv.reader(f):
          # In windows, due to \r\n line ends we may end up reading empty lines
          # after each line. Skip empty lines.
          if x:
            values.append(x)
      assert 'nan' in values[-1], 'The last epoch was not logged.'
  def test_TerminateOnNaN(self):
    """TerminateOnNaN must stop training once the loss is non-finite.

    A huge constant kernel initializer (1e5 across 5 stacked layers) makes
    the MSE loss overflow to inf on the first epoch, so of the requested 20
    epochs only one should run.
    """
    with self.test_session():
      np.random.seed(1337)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)

      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      cbks = [keras.callbacks.TerminateOnNaN()]
      model = keras.models.Sequential()
      initializer = keras.initializers.Constant(value=1e5)
      for _ in range(5):
        model.add(
            keras.layers.Dense(
                2,
                input_dim=INPUT_DIM,
                activation='relu',
                kernel_initializer=initializer))
      model.add(keras.layers.Dense(NUM_CLASSES))
      model.compile(loss='mean_squared_error', optimizer='rmsprop')

      history = model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=20)
      loss = history.history['loss']
      # Exactly one epoch ran, and its loss overflowed to inf.
      assert len(loss) == 1
      assert loss[0] == np.inf
  def test_TensorBoard(self):
    """Smoke test for the TensorBoard callback on a Sequential model.

    With histograms, images and gradients enabled, the callback must survive
    fit() and fit_generator() in every combination of validation data, and
    leave the log directory in place afterwards.
    """
    np.random.seed(1337)

    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)

    def data_generator(train):
      # Infinite generator cycling over consecutive batches of either the
      # train or the test split.
      if train:
        max_batch_index = len(x_train) // BATCH_SIZE
      else:
        max_batch_index = len(x_test) // BATCH_SIZE
      i = 0
      while 1:
        if train:
          yield (x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
                 y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
        else:
          yield (x_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
                 y_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])
        i += 1
        i %= max_batch_index

    # case: Sequential
    with self.test_session():
      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      # non_trainable_weights: moving_variance, moving_mean
      model.add(keras.layers.BatchNormalization())
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='sgd',
          metrics=['accuracy'])
      tsb = keras.callbacks.TensorBoard(
          log_dir=temp_dir, histogram_freq=1, write_images=True,
          write_grads=True, batch_size=5)
      cbks = [tsb]

      # fit with validation data
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=3,
          verbose=0)

      # fit with validation data and accuracy
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)

      # fit generator with validation data
      model.fit_generator(
          data_generator(True),
          len(x_train),
          epochs=2,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          verbose=0)

      # fit generator without validation data
      model.fit_generator(
          data_generator(True),
          len(x_train),
          epochs=2,
          callbacks=cbks,
          verbose=0)

      # fit generator with validation data and accuracy
      model.fit_generator(
          data_generator(True),
          len(x_train),
          epochs=2,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          verbose=0)

      # fit generator without validation data and accuracy
      model.fit_generator(
          data_generator(True), len(x_train), epochs=2, callbacks=cbks)

      assert os.path.exists(temp_dir)
  def test_TensorBoard_histogram_freq_must_have_validation_data(self):
    """histogram_freq > 0 requires validation data.

    Both fit() and fit_generator() must raise ValueError when the
    TensorBoard callback asks for histograms but no validation data is
    supplied.
    """
    np.random.seed(1337)
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)

    with self.test_session():
      filepath = os.path.join(tmpdir, 'logs')

      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)

      def data_generator(train):
        # Infinite batch generator over the chosen split.
        if train:
          max_batch_index = len(x_train) // BATCH_SIZE
        else:
          max_batch_index = len(x_test) // BATCH_SIZE
        i = 0
        while 1:
          if train:
            # simulate multi-input/output models
            yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
                   y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
          else:
            yield (x_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
                   y_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
          i += 1
          i %= max_batch_index

      inp = keras.Input((INPUT_DIM,))
      hidden = keras.layers.Dense(2, activation='relu')(inp)
      hidden = keras.layers.Dropout(0.1)(hidden)
      output = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
      model = keras.models.Model(inputs=inp, outputs=output)
      model.compile(loss='categorical_crossentropy',
                    optimizer='sgd',
                    metrics=['accuracy'])

      # we must generate new callbacks for each test, as they aren't stateless
      def callbacks_factory(histogram_freq):
        return [keras.callbacks.TensorBoard(
            log_dir=filepath,
            histogram_freq=histogram_freq,
            write_images=True, write_grads=True,
            batch_size=5)]

      # fit w/o validation data should raise ValueError if histogram_freq > 0
      cbs = callbacks_factory(histogram_freq=1)
      with self.assertRaises(ValueError):
        model.fit(
            x_train, y_train, batch_size=BATCH_SIZE, callbacks=cbs, epochs=3)

      for cb in cbs:
        cb.on_train_end()

      # fit generator without validation data should raise ValueError if
      # histogram_freq > 0
      cbs = callbacks_factory(histogram_freq=1)
      with self.assertRaises(ValueError):
        model.fit_generator(
            data_generator(True), len(x_train), epochs=2, callbacks=cbs)

      for cb in cbs:
        cb.on_train_end()

      # Make sure file writer cache is clear to avoid failures during cleanup.
      writer_cache.FileWriterCache.clear()
  def test_TensorBoard_multi_input_output(self):
    """TensorBoard must handle models with multiple inputs and outputs.

    Exercises fit() and fit_generator(), with and without histograms, on a
    functional model with two inputs and two outputs.
    """
    np.random.seed(1337)
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)

    with self.test_session():
      filepath = os.path.join(tmpdir, 'logs')

      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)

      def data_generator(train):
        if train:
          max_batch_index = len(x_train) // BATCH_SIZE
        else:
          max_batch_index = len(x_test) // BATCH_SIZE
        i = 0
        while 1:
          if train:
            # simulate multi-input/output models
            yield ([x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
                   [y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
          else:
            yield ([x_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2,
                   [y_test[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]] * 2)
          i += 1
          i %= max_batch_index

      inp1 = keras.Input((INPUT_DIM,))
      inp2 = keras.Input((INPUT_DIM,))
      inp = keras.layers.add([inp1, inp2])
      hidden = keras.layers.Dense(2, activation='relu')(inp)
      hidden = keras.layers.Dropout(0.1)(hidden)
      output1 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
      output2 = keras.layers.Dense(NUM_CLASSES, activation='softmax')(hidden)
      model = keras.models.Model([inp1, inp2], [output1, output2])
      model.compile(loss='categorical_crossentropy',
                    optimizer='sgd',
                    metrics=['accuracy'])

      # we must generate new callbacks for each test, as they aren't stateless
      def callbacks_factory(histogram_freq):
        return [keras.callbacks.TensorBoard(log_dir=filepath,
                                            histogram_freq=histogram_freq,
                                            write_images=True, write_grads=True,
                                            batch_size=5)]

      # fit without validation data
      model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
                callbacks=callbacks_factory(histogram_freq=0), epochs=3)

      # fit with validation data and accuracy
      model.fit([x_train] * 2, [y_train] * 2, batch_size=BATCH_SIZE,
                validation_data=([x_test] * 2, [y_test] * 2),
                callbacks=callbacks_factory(histogram_freq=1), epochs=2)

      # fit generator without validation data
      model.fit_generator(data_generator(True), len(x_train), epochs=2,
                          callbacks=callbacks_factory(histogram_freq=0))

      # fit generator with validation data and accuracy
      model.fit_generator(data_generator(True), len(x_train), epochs=2,
                          validation_data=([x_test] * 2, [y_test] * 2),
                          callbacks=callbacks_factory(histogram_freq=1))

      assert os.path.isdir(filepath)
def test_Tensorboard_histogram_summaries_in_test_function(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
self.steps_seen = []
def add_summary(self, summary, global_step):
summary_obj = summary_pb2.Summary()
# ensure a valid Summary proto is being sent
if isinstance(summary, bytes):
summary_obj.ParseFromString(summary)
else:
assert isinstance(summary, summary_pb2.Summary)
summary_obj = summary
# keep track of steps seen for the merged_summary op,
# which contains the histogram summaries
if len(summary_obj.value) > 1:
self.steps_seen.append(global_step)
def flush(self):
pass
def close(self):
pass
def _init_writer(obj):
obj.writer = FileWriterStub(obj.log_dir)
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
with self.test_session():
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
# non_trainable_weights: moving_variance, moving_mean
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
keras.callbacks.TensorBoard._init_writer = _init_writer
tsb = keras.callbacks.TensorBoard(
log_dir=tmpdir,
histogram_freq=1,
write_images=True,
write_grads=True,
batch_size=5)
cbks = [tsb]
# fit with validation data
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=3,
verbose=0)
self.assertAllEqual(tsb.writer.steps_seen, [0, 0.5, 1, 1.5, 2, 2.5])
  def test_Tensorboard_histogram_summaries_with_generator(self):
    """histogram_freq > 0 with generator validation data needs validation_steps.

    fit_generator must work when validation_steps is supplied alongside a
    validation generator, and raise ValueError when it is missing.
    """
    np.random.seed(1337)
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)

    def generator():
      # One fixed random batch, yielded forever.
      x = np.random.randn(10, 100).astype(np.float32)
      y = np.random.randn(10, 10).astype(np.float32)
      while True:
        yield x, y

    with self.test_session():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(10, input_dim=100, activation='relu'))
      model.add(keras.layers.Dense(10, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='sgd',
          metrics=['accuracy'])
      tsb = keras.callbacks.TensorBoard(
          log_dir=tmpdir,
          histogram_freq=1,
          write_images=True,
          write_grads=True,
          batch_size=5)
      cbks = [tsb]

      # fit with validation generator
      model.fit_generator(
          generator(),
          steps_per_epoch=2,
          epochs=2,
          validation_data=generator(),
          validation_steps=2,
          callbacks=cbks,
          verbose=0)

      with self.assertRaises(ValueError):
        # fit with validation generator but no
        # validation_steps
        model.fit_generator(
            generator(),
            steps_per_epoch=2,
            epochs=2,
            validation_data=generator(),
            callbacks=cbks,
            verbose=0)

      self.assertTrue(os.path.exists(tmpdir))
  @unittest.skipIf(
      os.name == 'nt',
      'use_multiprocessing=True does not work on windows properly.')
  def test_LambdaCallback(self):
    """LambdaCallback's on_train_end hook must fire when training finishes.

    A helper thread blocks on an Event that only the on_train_end lambda
    sets; the thread can therefore only terminate if the hook ran.
    """
    with self.test_session():
      np.random.seed(1337)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='sgd',
          metrics=['accuracy'])

      # Start an arbitrary process that should run during model
      # training and be terminated after training has completed.
      e = threading.Event()

      def target():
        e.wait()

      t = threading.Thread(target=target)
      t.start()
      cleanup_callback = keras.callbacks.LambdaCallback(
          on_train_end=lambda logs: e.set())

      cbks = [cleanup_callback]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=5,
          verbose=0)
      t.join()
      # join() only returns promptly because on_train_end set the Event.
      assert not t.is_alive()
  def test_TensorBoard_with_ReduceLROnPlateau(self):
    """TensorBoard and ReduceLROnPlateau must coexist in one callback list
    without interfering with each other during fit()."""
    with self.test_session():
      temp_dir = self.get_temp_dir()
      self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)

      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])

      cbks = [
          keras.callbacks.ReduceLROnPlateau(
              monitor='val_loss', factor=0.5, patience=4, verbose=1),
          keras.callbacks.TensorBoard(log_dir=temp_dir)
      ]

      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)

      assert os.path.exists(temp_dir)
def test_Tensorboard_batch_logging(self):
class FileWriterStub(object):
def __init__(self, logdir, graph=None):
self.logdir = logdir
self.graph = graph
self.batches_logged = []
self.summary_values = []
self.summary_tags = []
def add_summary(self, summary, step):
self.summary_values.append(summary.value[0].simple_value)
self.summary_tags.append(summary.value[0].tag)
self.batches_logged.append(step)
def flush(self):
pass
def close(self):
pass
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
tb_cbk = keras.callbacks.TensorBoard(temp_dir)
tb_cbk.writer = FileWriterStub(temp_dir)
for batch in range(5):
tb_cbk.on_batch_end(batch, {'acc': np.float32(batch)})
self.assertEqual(tb_cbk.writer.batches_logged, [0, 1, 2, 3, 4])
self.assertEqual(tb_cbk.writer.summary_values, [0., 1., 2., 3., 4.])
self.assertEqual(tb_cbk.writer.summary_tags, ['batch_acc'] * 5)
  def test_Tensorboard_epoch_and_batch_logging(self):
    """Summaries from on_batch_end are tagged 'batch_*' and those from
    on_epoch_end 'epoch_*', so both can be logged side by side."""

    class FileWriterStub(object):
      """Writer stand-in that stores the last batch- and epoch-level
      summaries, discriminated by tag prefix."""

      def __init__(self, logdir, graph=None):
        self.logdir = logdir
        self.graph = graph

      def add_summary(self, summary, step):
        if 'batch_' in summary.value[0].tag:
          self.batch_summary = (step, summary)
        elif 'epoch_' in summary.value[0].tag:
          self.epoch_summary = (step, summary)

      def flush(self):
        pass

      def close(self):
        pass

    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

    tb_cbk = keras.callbacks.TensorBoard(temp_dir)
    tb_cbk.writer = FileWriterStub(temp_dir)

    tb_cbk.on_batch_end(0, {'acc': np.float32(5.0)})
    tb_cbk.on_epoch_end(0, {'acc': np.float32(10.0)})

    # Both summaries were written at step 0 with their respective values.
    batch_step, batch_summary = tb_cbk.writer.batch_summary
    self.assertEqual(batch_step, 0)
    self.assertEqual(batch_summary.value[0].simple_value, 5.0)

    epoch_step, epoch_summary = tb_cbk.writer.epoch_summary
    self.assertEqual(epoch_step, 0)
    self.assertEqual(epoch_summary.value[0].simple_value, 10.0)
  @test_util.run_in_graph_and_eager_modes
  def test_Tensorboard_eager(self):
    """TensorBoard callback must work under eager execution as well as graph
    mode (exercised via the run_in_graph_and_eager_modes decorator)."""
    with self.test_session():
      temp_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
      self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)

      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='binary_crossentropy',
          optimizer=adam.AdamOptimizer(0.01),
          metrics=['accuracy'])

      cbks = [keras.callbacks.TensorBoard(log_dir=temp_dir)]

      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)

      self.assertTrue(os.path.exists(temp_dir))
def test_RemoteMonitorWithJsonPayload(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with self.test_session():
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.np_utils.to_categorical(y_test)
y_train = keras.utils.np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]
with test.mock.patch.object(requests, 'post'):
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1)
# Run under the TensorFlow test runner when executed directly.
if __name__ == '__main__':
  test.main()
| |
"""Definitions for DSMR Reader sensors added to MQTT."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Callable, Final
from homeassistant.components.sensor import (
STATE_CLASS_MEASUREMENT,
STATE_CLASS_TOTAL_INCREASING,
SensorEntityDescription,
)
from homeassistant.const import (
CURRENCY_EURO,
DEVICE_CLASS_CURRENT,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_GAS,
DEVICE_CLASS_POWER,
DEVICE_CLASS_TIMESTAMP,
DEVICE_CLASS_VOLTAGE,
ELECTRIC_CURRENT_AMPERE,
ELECTRIC_POTENTIAL_VOLT,
ENERGY_KILO_WATT_HOUR,
POWER_KILO_WATT,
VOLUME_CUBIC_METERS,
)
# Derived price units: euros per kilowatt-hour and per cubic meter.
PRICE_EUR_KWH: Final = f"EUR/{ENERGY_KILO_WATT_HOUR}"
PRICE_EUR_M3: Final = f"EUR/{VOLUME_CUBIC_METERS}"
def dsmr_transform(value):
    """Transform DSMR version value to right format."""
    # The meter reports its version as a digit string (e.g. "50" -> 5.0);
    # any non-numeric payload is passed through unchanged.
    return float(value) / 10 if value.isdigit() else value
def tariff_transform(value):
    """Transform tariff from number to description."""
    # "1" is the low tariff; every other payload maps to high.
    return "low" if value == "1" else "high"
@dataclass
class DSMRReaderSensorEntityDescription(SensorEntityDescription):
    """Sensor entity description for DSMR Reader."""

    # Optional transform applied to the raw MQTT payload before it is used
    # as the sensor state (e.g. dsmr_transform, tariff_transform).
    state: Callable | None = None
# One entity description per DSMR Reader MQTT topic, grouped by topic prefix:
# live readings, gas consumption, day/month/year aggregates and meter stats.
# Fix over the original: the `dsmr/current-year/electricity2_returned` entry
# was named "Current year high tariff usage" (copy-paste from the usage
# sensor); it is a *returned* sensor, matching its current-month sibling.
SENSORS: tuple[DSMRReaderSensorEntityDescription, ...] = (
    DSMRReaderSensorEntityDescription(
        key="dsmr/reading/electricity_delivered_1",
        name="Low tariff usage",
        device_class=DEVICE_CLASS_ENERGY,
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/reading/electricity_returned_1",
        name="Low tariff returned",
        device_class=DEVICE_CLASS_ENERGY,
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/reading/electricity_delivered_2",
        name="High tariff usage",
        device_class=DEVICE_CLASS_ENERGY,
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/reading/electricity_returned_2",
        name="High tariff returned",
        device_class=DEVICE_CLASS_ENERGY,
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/reading/electricity_currently_delivered",
        name="Current power usage",
        device_class=DEVICE_CLASS_POWER,
        native_unit_of_measurement=POWER_KILO_WATT,
        state_class=STATE_CLASS_MEASUREMENT,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/reading/electricity_currently_returned",
        name="Current power return",
        device_class=DEVICE_CLASS_POWER,
        native_unit_of_measurement=POWER_KILO_WATT,
        state_class=STATE_CLASS_MEASUREMENT,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/reading/phase_currently_delivered_l1",
        name="Current power usage L1",
        entity_registry_enabled_default=False,
        device_class=DEVICE_CLASS_POWER,
        native_unit_of_measurement=POWER_KILO_WATT,
        state_class=STATE_CLASS_MEASUREMENT,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/reading/phase_currently_delivered_l2",
        name="Current power usage L2",
        entity_registry_enabled_default=False,
        device_class=DEVICE_CLASS_POWER,
        native_unit_of_measurement=POWER_KILO_WATT,
        state_class=STATE_CLASS_MEASUREMENT,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/reading/phase_currently_delivered_l3",
        name="Current power usage L3",
        entity_registry_enabled_default=False,
        device_class=DEVICE_CLASS_POWER,
        native_unit_of_measurement=POWER_KILO_WATT,
        state_class=STATE_CLASS_MEASUREMENT,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/reading/phase_currently_returned_l1",
        name="Current power return L1",
        entity_registry_enabled_default=False,
        device_class=DEVICE_CLASS_POWER,
        native_unit_of_measurement=POWER_KILO_WATT,
        state_class=STATE_CLASS_MEASUREMENT,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/reading/phase_currently_returned_l2",
        name="Current power return L2",
        entity_registry_enabled_default=False,
        device_class=DEVICE_CLASS_POWER,
        native_unit_of_measurement=POWER_KILO_WATT,
        state_class=STATE_CLASS_MEASUREMENT,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/reading/phase_currently_returned_l3",
        name="Current power return L3",
        entity_registry_enabled_default=False,
        device_class=DEVICE_CLASS_POWER,
        native_unit_of_measurement=POWER_KILO_WATT,
        state_class=STATE_CLASS_MEASUREMENT,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/reading/extra_device_delivered",
        name="Gas meter usage",
        entity_registry_enabled_default=False,
        icon="mdi:fire",
        native_unit_of_measurement=VOLUME_CUBIC_METERS,
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/reading/phase_voltage_l1",
        name="Current voltage L1",
        entity_registry_enabled_default=False,
        device_class=DEVICE_CLASS_VOLTAGE,
        native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
        state_class=STATE_CLASS_MEASUREMENT,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/reading/phase_voltage_l2",
        name="Current voltage L2",
        entity_registry_enabled_default=False,
        device_class=DEVICE_CLASS_VOLTAGE,
        native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
        state_class=STATE_CLASS_MEASUREMENT,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/reading/phase_voltage_l3",
        name="Current voltage L3",
        entity_registry_enabled_default=False,
        device_class=DEVICE_CLASS_VOLTAGE,
        native_unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,
        state_class=STATE_CLASS_MEASUREMENT,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/reading/phase_power_current_l1",
        name="Phase power current L1",
        entity_registry_enabled_default=False,
        device_class=DEVICE_CLASS_CURRENT,
        native_unit_of_measurement=ELECTRIC_CURRENT_AMPERE,
        state_class=STATE_CLASS_MEASUREMENT,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/reading/phase_power_current_l2",
        name="Phase power current L2",
        entity_registry_enabled_default=False,
        device_class=DEVICE_CLASS_CURRENT,
        native_unit_of_measurement=ELECTRIC_CURRENT_AMPERE,
        state_class=STATE_CLASS_MEASUREMENT,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/reading/phase_power_current_l3",
        name="Phase power current L3",
        entity_registry_enabled_default=False,
        device_class=DEVICE_CLASS_CURRENT,
        native_unit_of_measurement=ELECTRIC_CURRENT_AMPERE,
        state_class=STATE_CLASS_MEASUREMENT,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/reading/timestamp",
        name="Telegram timestamp",
        entity_registry_enabled_default=False,
        device_class=DEVICE_CLASS_TIMESTAMP,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/consumption/gas/delivered",
        name="Gas usage",
        device_class=DEVICE_CLASS_GAS,
        native_unit_of_measurement=VOLUME_CUBIC_METERS,
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/consumption/gas/currently_delivered",
        name="Current gas usage",
        device_class=DEVICE_CLASS_GAS,
        native_unit_of_measurement=VOLUME_CUBIC_METERS,
        state_class=STATE_CLASS_MEASUREMENT,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/consumption/gas/read_at",
        name="Gas meter read",
        entity_registry_enabled_default=False,
        device_class=DEVICE_CLASS_TIMESTAMP,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/day-consumption/electricity1",
        name="Low tariff usage",
        device_class=DEVICE_CLASS_ENERGY,
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/day-consumption/electricity2",
        name="High tariff usage",
        device_class=DEVICE_CLASS_ENERGY,
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/day-consumption/electricity1_returned",
        name="Low tariff return",
        device_class=DEVICE_CLASS_ENERGY,
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/day-consumption/electricity2_returned",
        name="High tariff return",
        device_class=DEVICE_CLASS_ENERGY,
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/day-consumption/electricity_merged",
        name="Power usage total",
        device_class=DEVICE_CLASS_ENERGY,
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/day-consumption/electricity_returned_merged",
        name="Power return total",
        device_class=DEVICE_CLASS_ENERGY,
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/day-consumption/electricity1_cost",
        name="Low tariff cost",
        icon="mdi:currency-eur",
        native_unit_of_measurement=CURRENCY_EURO,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/day-consumption/electricity2_cost",
        name="High tariff cost",
        icon="mdi:currency-eur",
        native_unit_of_measurement=CURRENCY_EURO,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/day-consumption/electricity_cost_merged",
        name="Power total cost",
        icon="mdi:currency-eur",
        native_unit_of_measurement=CURRENCY_EURO,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/day-consumption/gas",
        name="Gas usage",
        icon="mdi:counter",
        native_unit_of_measurement=VOLUME_CUBIC_METERS,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/day-consumption/gas_cost",
        name="Gas cost",
        icon="mdi:currency-eur",
        native_unit_of_measurement=CURRENCY_EURO,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/day-consumption/total_cost",
        name="Total cost",
        icon="mdi:currency-eur",
        native_unit_of_measurement=CURRENCY_EURO,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/day-consumption/energy_supplier_price_electricity_delivered_1",
        name="Low tariff delivered price",
        icon="mdi:currency-eur",
        native_unit_of_measurement=PRICE_EUR_KWH,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/day-consumption/energy_supplier_price_electricity_delivered_2",
        name="High tariff delivered price",
        icon="mdi:currency-eur",
        native_unit_of_measurement=PRICE_EUR_KWH,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/day-consumption/energy_supplier_price_electricity_returned_1",
        name="Low tariff returned price",
        icon="mdi:currency-eur",
        native_unit_of_measurement=PRICE_EUR_KWH,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/day-consumption/energy_supplier_price_electricity_returned_2",
        name="High tariff returned price",
        icon="mdi:currency-eur",
        native_unit_of_measurement=PRICE_EUR_KWH,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/day-consumption/energy_supplier_price_gas",
        name="Gas price",
        icon="mdi:currency-eur",
        native_unit_of_measurement=PRICE_EUR_M3,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/day-consumption/fixed_cost",
        name="Current day fixed cost",
        icon="mdi:currency-eur",
        native_unit_of_measurement=CURRENCY_EURO,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/meter-stats/dsmr_version",
        name="DSMR version",
        entity_registry_enabled_default=False,
        icon="mdi:alert-circle",
        state=dsmr_transform,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/meter-stats/electricity_tariff",
        name="Electricity tariff",
        icon="mdi:flash",
        state=tariff_transform,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/meter-stats/power_failure_count",
        name="Power failure count",
        entity_registry_enabled_default=False,
        icon="mdi:flash",
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/meter-stats/long_power_failure_count",
        name="Long power failure count",
        entity_registry_enabled_default=False,
        icon="mdi:flash",
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/meter-stats/voltage_sag_count_l1",
        name="Voltage sag L1",
        entity_registry_enabled_default=False,
        icon="mdi:flash",
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/meter-stats/voltage_sag_count_l2",
        name="Voltage sag L2",
        entity_registry_enabled_default=False,
        icon="mdi:flash",
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/meter-stats/voltage_sag_count_l3",
        name="Voltage sag L3",
        entity_registry_enabled_default=False,
        icon="mdi:flash",
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/meter-stats/voltage_swell_count_l1",
        name="Voltage swell L1",
        entity_registry_enabled_default=False,
        icon="mdi:flash",
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/meter-stats/voltage_swell_count_l2",
        name="Voltage swell L2",
        entity_registry_enabled_default=False,
        icon="mdi:flash",
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/meter-stats/voltage_swell_count_l3",
        name="Voltage swell L3",
        entity_registry_enabled_default=False,
        icon="mdi:flash",
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/meter-stats/rejected_telegrams",
        name="Rejected telegrams",
        entity_registry_enabled_default=False,
        icon="mdi:flash",
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-month/electricity1",
        name="Current month low tariff usage",
        device_class=DEVICE_CLASS_ENERGY,
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-month/electricity2",
        name="Current month high tariff usage",
        device_class=DEVICE_CLASS_ENERGY,
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-month/electricity1_returned",
        name="Current month low tariff returned",
        device_class=DEVICE_CLASS_ENERGY,
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-month/electricity2_returned",
        name="Current month high tariff returned",
        device_class=DEVICE_CLASS_ENERGY,
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-month/electricity_merged",
        name="Current month power usage total",
        device_class=DEVICE_CLASS_ENERGY,
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-month/electricity_returned_merged",
        name="Current month power return total",
        device_class=DEVICE_CLASS_ENERGY,
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-month/electricity1_cost",
        name="Current month low tariff cost",
        icon="mdi:currency-eur",
        native_unit_of_measurement=CURRENCY_EURO,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-month/electricity2_cost",
        name="Current month high tariff cost",
        icon="mdi:currency-eur",
        native_unit_of_measurement=CURRENCY_EURO,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-month/electricity_cost_merged",
        name="Current month power total cost",
        icon="mdi:currency-eur",
        native_unit_of_measurement=CURRENCY_EURO,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-month/gas",
        name="Current month gas usage",
        icon="mdi:counter",
        native_unit_of_measurement=VOLUME_CUBIC_METERS,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-month/gas_cost",
        name="Current month gas cost",
        icon="mdi:currency-eur",
        native_unit_of_measurement=CURRENCY_EURO,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-month/fixed_cost",
        name="Current month fixed cost",
        icon="mdi:currency-eur",
        native_unit_of_measurement=CURRENCY_EURO,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-month/total_cost",
        name="Current month total cost",
        icon="mdi:currency-eur",
        native_unit_of_measurement=CURRENCY_EURO,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-year/electricity1",
        name="Current year low tariff usage",
        device_class=DEVICE_CLASS_ENERGY,
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-year/electricity2",
        name="Current year high tariff usage",
        device_class=DEVICE_CLASS_ENERGY,
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-year/electricity1_returned",
        name="Current year low tariff returned",
        device_class=DEVICE_CLASS_ENERGY,
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-year/electricity2_returned",
        # Was "Current year high tariff usage" — copy-paste error; this is
        # the returned-energy sensor (cf. the current-month sibling).
        name="Current year high tariff returned",
        device_class=DEVICE_CLASS_ENERGY,
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-year/electricity_merged",
        name="Current year power usage total",
        device_class=DEVICE_CLASS_ENERGY,
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-year/electricity_returned_merged",
        name="Current year power returned total",
        device_class=DEVICE_CLASS_ENERGY,
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-year/electricity1_cost",
        name="Current year low tariff cost",
        icon="mdi:currency-eur",
        native_unit_of_measurement=CURRENCY_EURO,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-year/electricity2_cost",
        name="Current year high tariff cost",
        icon="mdi:currency-eur",
        native_unit_of_measurement=CURRENCY_EURO,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-year/electricity_cost_merged",
        name="Current year power total cost",
        icon="mdi:currency-eur",
        native_unit_of_measurement=CURRENCY_EURO,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-year/gas",
        name="Current year gas usage",
        icon="mdi:counter",
        native_unit_of_measurement=VOLUME_CUBIC_METERS,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-year/gas_cost",
        name="Current year gas cost",
        icon="mdi:currency-eur",
        native_unit_of_measurement=CURRENCY_EURO,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-year/fixed_cost",
        name="Current year fixed cost",
        icon="mdi:currency-eur",
        native_unit_of_measurement=CURRENCY_EURO,
    ),
    DSMRReaderSensorEntityDescription(
        key="dsmr/current-year/total_cost",
        name="Current year total cost",
        icon="mdi:currency-eur",
        native_unit_of_measurement=CURRENCY_EURO,
    ),
)
| |
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.contrib.auth import get_user_model
from model_mommy import mommy
from fabric_bolt.projects import models
from fabric_bolt.projects.util import get_fabfile_path, build_command, parse_task_details
User = get_user_model()
class BasicTests(TestCase):
    """Smoke tests for the projects app.

    Verifies that the CRUD views respond and that stage-level configurations
    override project-level ones when merged.
    """

    project_type = None
    project = None
    stage = None
    configuration = None
    task = None
    deployment = None

    def setUp(self):
        """Create and log in a superuser, then build the fixture objects."""
        password = 'mypassword'

        self.user = User.objects.create_superuser(email='myemail@test.com', password=password)

        # You'll need to log him in before you can send requests through the client
        self.client.login(email=self.user.email, password=password)

        self._create_project()

    def _create_project(self):
        """Create a bare-bones fixture graph: project type, project, stage,
        configuration, task and deployment.
        """
        # Bare bones project type
        project_type = models.ProjectType()
        project_type.name = 'Django'
        project_type.save()
        # Bug fix: Django's Model.save() returns None, so the previous
        # ``self.project_type = project_type.save()`` stored None instead of
        # the saved instance.
        self.project_type = project_type

        # Bare bones project
        project = models.Project()
        project.name = 'TEST_PROJECT'
        project.type = project_type
        project.description = 'TEST_DESCRIPTION'
        project.save()

        # Bare bones stage
        stage = models.Stage()
        stage.project = project
        stage.name = 'Production'
        stage.save()
        self.stage = stage

        # Bare bones configuration
        configuration = models.Configuration()
        configuration.project = project
        configuration.stage = stage
        configuration.key = 'KEY'
        configuration.value = 'VALUE'
        configuration.prompt_me_for_input = True
        configuration.save()
        self.configuration = configuration

        # Bare bones task
        task = models.Task()
        task.name = 'TASK_NAME'
        task.save()
        self.task = task

        # Bare bones deployment
        deployment = models.Deployment()
        deployment.user = self.user
        deployment.stage = stage
        deployment.comments = 'COMMENTS'
        deployment.output = 'OUTPUT'
        deployment.task = task
        deployment.save()
        self.deployment = deployment

        self.project = project

    def test_project_crud_urls(self):
        """Project CRUD views respond with 200 (or a 302 redirect)."""
        c = self.client

        result = c.get(reverse('projects_project_create'))
        self.assertIn(result.status_code, [200, 302])

        result = c.get(reverse('projects_project_view', args=(self.project.pk,)))
        self.assertIn(result.status_code, [200, 302])

        result = c.get(reverse('projects_project_update', args=(self.project.pk,)))
        self.assertIn(result.status_code, [200, 302])

        result = c.get(reverse('projects_project_delete', args=(self.project.pk,)))
        self.assertIn(result.status_code, [200, 302])

    def test_project_configuration_urls(self):
        """Configuration CRUD views respond with 200 (or a 302 redirect)."""
        c = self.client

        result = c.get(reverse('projects_configuration_create', args=(self.project.pk,)))
        self.assertIn(result.status_code, [200, 302])

        result = c.get(reverse('projects_configuration_stage_create', args=(self.project.pk, self.stage.pk)))
        self.assertIn(result.status_code, [200, 302])

        result = c.get(reverse('projects_configuration_update', args=(self.configuration.pk,)))
        self.assertIn(result.status_code, [200, 302])

        result = c.get(reverse('projects_configuration_delete', args=(self.configuration.pk,)))
        self.assertIn(result.status_code, [200, 302])

    def test_project_deployment_urls(self):
        """Deployment views respond with 200 (or a 302 redirect)."""
        c = self.client

        result = c.get(reverse('projects_deployment_create', args=(self.stage.pk, 'bootstrap')))
        self.assertIn(result.status_code, [200, 302])

        result = c.get(reverse('projects_deployment_detail', args=(self.deployment.pk,)))
        self.assertIn(result.status_code, [200, 302])

        # result = c.get(reverse('projects_deployment_output', args=(self.deployment.pk,)))
        # self.assertIn(result.status_code, [200, 302])

    def test_project_stage_urls(self):
        """Stage CRUD views respond with 200 (or a 302 redirect)."""
        c = self.client

        result = c.get(reverse('projects_stage_create', args=(self.project.pk, )))
        self.assertIn(result.status_code, [200, 302])

        result = c.get(reverse('projects_stage_update', args=(self.project.pk, self.stage.pk)))
        self.assertIn(result.status_code, [200, 302])

        result = c.get(reverse('projects_stage_view', args=(self.project.pk, self.stage.pk)))
        self.assertIn(result.status_code, [200, 302])

    def test_stage_configuration_cram_a_lam(self):
        """Let's make sure our configuration mashing together works as expected"""
        project_configs = [
            {'key': 'number1', 'value': '100'},
            {'key': 'number2', 'value': '200'},
            {'key': 'number3', 'value': '300'},
            {'key': 'number4', 'value': '400'},
        ]

        for config in project_configs:
            c = models.Configuration()
            c.project = self.project
            c.key = config['key']
            c.value = config['value']
            c.save()

        configurations_round_one = self.stage.get_configurations()

        # These should be what we're expecting
        self.assertEqual(configurations_round_one['number1'].get_value(), '100')
        self.assertEqual(configurations_round_one['number2'].get_value(), '200')
        self.assertEqual(configurations_round_one['number3'].get_value(), '300')
        self.assertEqual(configurations_round_one['number4'].get_value(), '400')

        stage_configs = [
            {'key': 'number2', 'value': '5'},
            {'key': 'number3', 'value': '4'},
            {'key': 'number4', 'value': '3'},
        ]

        for config in stage_configs:
            c = models.Configuration()
            c.project = self.project
            c.stage = self.stage
            c.key = config['key']
            c.value = config['value']
            c.save()

        configurations = self.stage.get_configurations()

        # The stage configs take the cake over project configs
        self.assertEqual(configurations['number1'].get_value(), '100')
        self.assertEqual(configurations['number2'].get_value(), '5')
        self.assertEqual(configurations['number3'].get_value(), '4')
        self.assertEqual(configurations['number4'].get_value(), '3')
class UtilTests(TestCase):
    """Tests for fabric command building (shell-injection hardening) and for
    parsing ``fab --display`` task output.
    """

    def test_build_command_injection(self):
        """Keys/values containing shell metacharacters must be escaped in the
        generated command, not interpreted.

        Fix: all literal backslashes in the expected strings are now written
        as ``\\\\``. The old mix of ``'\\='`` / ``'\\,'`` relied on invalid
        escape sequences, which raise DeprecationWarning/SyntaxWarning on
        modern Python; the runtime values are unchanged.
        """
        deployment = mommy.make(models.Deployment, task__name='test_env')

        configuration = mommy.make(models.Configuration, key='foo=bar -i /path/to/keyfile --set foo2', value='bar')
        deployment.stage.configuration_set.add(configuration)
        command = build_command(deployment, {})
        fabfile_path, active_loc = get_fabfile_path(deployment.stage.project)
        self.assertEqual(
            command,
            'fab test_env --set "foo\\=bar -i /path/to/keyfile --set foo2=bar" '
            '--abort-on-prompts --fabfile={}'.format(fabfile_path)
        )

        configuration = mommy.make(models.Configuration, key='dummy_key', value='dummy_value')
        deployment.stage.configuration_set.add(configuration)
        command = build_command(deployment, {})
        self.assertEqual(
            command,
            'fab test_env --set "foo\\=bar -i /path/to/keyfile --set foo2=bar,dummy_key=dummy_value" '
            '--abort-on-prompts --fabfile={}'.format(fabfile_path)
        )

        deployment.stage.configuration_set.clear()
        configuration = mommy.make(models.Configuration, key='dummy_key=test" | ls #', value='dummy_value')
        deployment.stage.configuration_set.add(configuration)
        command = build_command(deployment, {})
        self.assertEqual(
            command,
            'fab test_env --set "dummy_key\\=test\\" | ls #=dummy_value" '
            '--abort-on-prompts --fabfile={}'.format(fabfile_path)
        )

        deployment.stage.configuration_set.clear()
        configuration = mommy.make(models.Configuration, key='dummy_key', value='dummy_value,x=y')
        deployment.stage.configuration_set.add(configuration)
        command = build_command(deployment, {})
        self.assertEqual(
            command,
            'fab test_env --set "dummy_key=dummy_value\\,x\\=y" '
            '--abort-on-prompts --fabfile={}'.format(fabfile_path)
        )

        deployment.stage.configuration_set.clear()
        configuration = mommy.make(models.Configuration, key='dummy_key=blah,x', value='dummy_value')
        deployment.stage.configuration_set.add(configuration)
        command = build_command(deployment, {})
        self.assertEqual(
            command,
            'fab test_env --set "dummy_key\\=blah\\,x=dummy_value" '
            '--abort-on-prompts --fabfile={}'.format(fabfile_path)
        )

    def test_build_command_with_args(self):
        """Configurations flagged ``task_argument`` become task arguments."""
        deployment = mommy.make(models.Deployment, task__name='test_env')
        configuration = mommy.make(models.Configuration, key='arg', value='arg_value', task_argument=True)
        deployment.stage.configuration_set.add(configuration)
        command = build_command(deployment, {})
        fabfile_path, active_loc = get_fabfile_path(deployment.stage.project)
        self.assertEqual(
            command,
            'fab test_env:arg="arg_value" '
            '--abort-on-prompts --fabfile={}'.format(fabfile_path)
        )

    def test_parse_task_details(self):
        """parse_task_details returns (name, docstring-or-None, arguments)."""
        output = """Displaying detailed information for task 'test_env':
No docstring provided
Arguments: arg, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15, arg16, arg17, arg18, arg19, arg20, arg21, arg22, arg23, arg24, arg25, arg26, arg27, arg28, arg29, arg30
"""
        details = parse_task_details('test_env', output)
        self.assertEqual(len(details), 3)
        self.assertEqual(details[0], 'test_env')
        self.assertEqual(details[1], None)
        self.assertListEqual(details[2], ['arg', 'arg2', 'arg3', 'arg4', 'arg5', 'arg6', 'arg7', 'arg8', 'arg9', 'arg10', 'arg11', 'arg12', 'arg13', 'arg14', 'arg15', 'arg16', 'arg17', 'arg18', 'arg19', 'arg20', 'arg21', 'arg22', 'arg23', 'arg24', 'arg25', 'arg26', 'arg27', 'arg28', 'arg29', 'arg30'])

        output = """Displaying detailed information for task 'do_nothing':
Awesome docstring
Arguments: test='default'
"""
        details = parse_task_details('do_nothing', output)
        self.assertEqual(len(details), 3)
        self.assertEqual(details[0], 'do_nothing')
        self.assertEqual(details[1], 'Awesome docstring')
        self.assertEqual(len(details[2]), 1)
        # An argument with a default is returned as a (name, default) tuple.
        self.assertIsInstance(details[2][0], tuple)
        self.assertTupleEqual(details[2][0], ('test', 'default'))

        output = """Displaying detailed information for task 'do_nothing':
Awesome docstring
Arguments: test='default', test2
"""
        details = parse_task_details('do_nothing', output)
        self.assertEqual(len(details), 3)
        self.assertEqual(details[0], 'do_nothing')
        self.assertEqual(details[1], 'Awesome docstring')
        self.assertEqual(len(details[2]), 2)
        self.assertIsInstance(details[2][0], tuple)
        self.assertTupleEqual(details[2][0], ('test', 'default'))
        # A defaultless argument stays a plain string.
        self.assertIsInstance(details[2][1], str)
        self.assertEqual(details[2][1], 'test2')
| |
# Copyright 2020 Google, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This tests the Pub/Sub image processing sample
import os
import subprocess
import time
import uuid
from google.api_core.exceptions import NotFound
from google.cloud import pubsub_v1
from google.cloud import storage
from google.cloud.storage import Blob, notification
import pytest
# Short random suffix so concurrent test sessions don't collide on
# globally-named resources (image, service, buckets, topic).
SUFFIX = uuid.uuid4().hex[0:6]
PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
IMAGE_NAME = f"gcr.io/{PROJECT}/image-proc-{SUFFIX}"
CLOUD_RUN_SERVICE = f"image-proc-{SUFFIX}"
INPUT_BUCKET = f"image-proc-input-{SUFFIX}"
OUTPUT_BUCKET = f"image-proc-output-{SUFFIX}"
TOPIC = f"image_proc_{SUFFIX}"
@pytest.fixture
def container_image():
    """Build the sample's container image with Cloud Build; delete it on teardown."""
    # Build container image for Cloud Run deployment
    subprocess.check_call(
        [
            "gcloud",
            "builds",
            "submit",
            "--tag",
            IMAGE_NAME,
            "--project",
            PROJECT,
            "--quiet",
        ]
    )

    yield IMAGE_NAME

    # Delete container image
    subprocess.check_call(
        [
            "gcloud",
            "container",
            "images",
            "delete",
            IMAGE_NAME,
            "--quiet",
            "--project",
            PROJECT,
        ]
    )
@pytest.fixture
def deployed_service(container_image, output_bucket):
    """Deploy the built image to Cloud Run; delete the service on teardown."""
    # Deploy image to Cloud Run
    subprocess.check_call(
        [
            "gcloud",
            "run",
            "deploy",
            CLOUD_RUN_SERVICE,
            "--image",
            container_image,
            "--region=us-central1",
            "--project",
            PROJECT,
            "--platform=managed",
            # The service reads the destination bucket from this env var.
            "--set-env-vars",
            f"BLURRED_BUCKET_NAME={output_bucket.name}",
            "--no-allow-unauthenticated",
        ]
    )

    yield CLOUD_RUN_SERVICE

    subprocess.check_call(
        [
            "gcloud",
            "run",
            "services",
            "delete",
            CLOUD_RUN_SERVICE,
            "--platform=managed",
            "--region=us-central1",
            "--quiet",
            "--async",
            "--project",
            PROJECT,
        ]
    )
@pytest.fixture
def service_url(deployed_service):
    """Yield the HTTPS URL of the deployed Cloud Run service (as str)."""
    # Get the URL for the cloud run service
    service_url = subprocess.run(
        [
            "gcloud",
            "run",
            "--project",
            PROJECT,
            "services",
            "describe",
            deployed_service,
            "--platform=managed",
            "--region=us-central1",
            "--format=value(status.url)",
        ],
        stdout=subprocess.PIPE,
        check=True,
    ).stdout.strip()

    # stdout is bytes; decode to a str for callers.
    yield service_url.decode()
@pytest.fixture()
def pubsub_topic():
    """Create the Pub/Sub topic; best-effort delete on teardown."""
    publisher = pubsub_v1.PublisherClient()
    topic_path = publisher.topic_path(PROJECT, TOPIC)
    publisher.create_topic(request={"name": topic_path})

    yield TOPIC

    try:
        publisher.delete_topic(request={"topic": topic_path})
    except NotFound:
        print("Topic not found, it was either never created or was already deleted.")
@pytest.fixture(autouse=True)
def pubsub_subscription(pubsub_topic, service_url):
    """Create an authenticated push subscription to the Cloud Run service."""
    # Create pubsub push subscription to Cloud Run Service
    # Attach service account with Cloud Run Invoker role
    # See tutorial for details on setting up service-account:
    # https://cloud.google.com/run/docs/tutorials/pubsub
    publisher = pubsub_v1.PublisherClient()
    subscriber = pubsub_v1.SubscriberClient()
    subscription_id = f"{pubsub_topic}_sub"
    topic_path = publisher.topic_path(PROJECT, pubsub_topic)
    subscription_path = subscriber.subscription_path(PROJECT, subscription_id)
    push_config = pubsub_v1.types.PushConfig(
        push_endpoint=service_url,
        oidc_token=pubsub_v1.types.PushConfig.OidcToken(
            service_account_email=f"cloud-run-invoker@{PROJECT}.iam.gserviceaccount.com"
        ),
    )

    # wrapping in 'with' block automatically calls close on gRPC channel
    with subscriber:
        subscriber.create_subscription(
            request={
                "name": subscription_path,
                "topic": topic_path,
                "push_config": push_config,
            }
        )

    yield

    # The first client was closed by its 'with' block; make a fresh one for teardown.
    subscriber = pubsub_v1.SubscriberClient()
    # delete subscription
    with subscriber:
        try:
            subscriber.delete_subscription(request={"subscription": subscription_path})
        except NotFound:
            print(
                "Unable to delete - subscription either never created or already deleted."
            )
@pytest.fixture()
def input_bucket(pubsub_topic):
    """Create the input GCS bucket with a Pub/Sub notification; delete after."""
    # Create GCS Bucket
    storage_client = storage.Client()
    storage_client.create_bucket(INPUT_BUCKET)

    # Get input bucket
    input_bucket = storage_client.get_bucket(INPUT_BUCKET)

    # Create pub/sub notification on input_bucket so uploads publish to the topic.
    notification.BucketNotification(
        input_bucket,
        topic_name=pubsub_topic,
        topic_project=PROJECT,
        payload_format="JSON_API_V1",
    ).create()

    yield input_bucket

    # Delete GCS bucket (force=True also removes any remaining objects)
    input_bucket.delete(force=True)
@pytest.fixture()
def output_bucket(pubsub_topic):
    """Create the output GCS bucket; delete it (and its contents) after."""
    # Create GCS Bucket
    storage_client = storage.Client()
    storage_client.create_bucket(OUTPUT_BUCKET)

    # Get output bucket
    output_bucket = storage_client.get_bucket(OUTPUT_BUCKET)

    yield output_bucket

    # Delete GCS bucket
    output_bucket.delete(force=True)
def test_end_to_end(input_bucket, output_bucket):
    """Upload an image and verify a blurred copy appears in the output bucket.

    Fixes: the poll loop used an unused variable ``x`` (now ``_``) and left
    ``output_blobs`` bound only inside the loop; it is now initialized before
    polling so the final assertion is always well-defined.
    """
    # Upload image to the input bucket
    blob = Blob("zombie.jpg", input_bucket)
    blob.upload_from_filename("test-images/zombie.jpg", content_type="image/jpeg")

    # Give the notification -> Pub/Sub -> Cloud Run pipeline time to run.
    time.sleep(30)

    # Poll for the blurred image in the output bucket, bailing out early.
    output_blobs = []
    for _ in range(10):
        output_blobs = list(output_bucket.list_blobs())
        if output_blobs:
            break
        time.sleep(5)

    assert len(output_blobs) > 0
| |
# Copyright (C) 2018-2019 SignalFx, Inc. All rights reserved.
# Copyright (C) 2020 Splunk, Inc. All rights reserved.
import functools
from .metadata import MetricMetadata
import pyformance.registry
import re
import time
from pyformance.registry import (clear, count_calls, dump_metrics, # noqa
global_registry, meter_calls,
set_global_registry, time_calls)
class MetricsRegistry(pyformance.registry.MetricsRegistry):
    """A pyformance ``MetricsRegistry`` that also tracks dimensional
    metadata so metrics can be emitted to SignalFx with dimensions.
    """

    def __init__(self, clock=time):
        self.metadata = MetricMetadata()
        super(MetricsRegistry, self).__init__(clock=clock)

    def add(self, key, metric, **dims):
        """Register a pre-built metric instance under *key* with dimensions."""
        registered = self.metadata.register(key, **dims)
        return super(MetricsRegistry, self).add(registered, metric)

    def counter(self, key, **dims):
        """Return the counter for *key*, recording the given dimensions."""
        registered = self.metadata.register(key, **dims)
        return super(MetricsRegistry, self).counter(registered)

    def histogram(self, key, **dims):
        """Return the histogram for *key*, recording the given dimensions."""
        registered = self.metadata.register(key, **dims)
        return super(MetricsRegistry, self).histogram(registered)

    def gauge(self, key, gauge=None, default=float("nan"), **dims):
        """Return the gauge for *key*, recording the given dimensions."""
        registered = self.metadata.register(key, **dims)
        return super(MetricsRegistry, self).gauge(
            registered, gauge=gauge, default=default)

    def meter(self, key, **dims):
        """Return the meter for *key*, recording the given dimensions."""
        registered = self.metadata.register(key, **dims)
        return super(MetricsRegistry, self).meter(registered)

    def timer(self, key, **dims):
        """Return the timer for *key*, recording the given dimensions."""
        registered = self.metadata.register(key, **dims)
        return super(MetricsRegistry, self).timer(registered)

    def clear(self):  # noqa
        """Drop all registered metrics and their metadata."""
        self.metadata.clear()
        super(MetricsRegistry, self).clear()
# Install the dimensional MetricsRegistry as the process-wide default on
# import, so the module-level helpers below operate on it.
set_global_registry(MetricsRegistry())
class RegexRegistry(MetricsRegistry):
    """
    A dimensional registry that groups metric keys by regex.

    Every lookup key is reduced to the '/'-joined non-empty groups of all
    matches of *pattern*, so many distinct keys (e.g. REST API paths) can
    share one metric instead of each method getting its own.
    """

    def __init__(self, pattern=None, clock=time):
        super(RegexRegistry, self).__init__(clock)
        # With no pattern, '^$' matches nothing useful and keys collapse to ''.
        self.pattern = re.compile(pattern) if pattern is not None else re.compile('^$')

    def _get_key(self, key):
        # Join the truthy capture groups of every match with '/'.
        pieces = [
            group
            for match in self.pattern.finditer(key)
            for group in match.groups()
            if group
        ]
        return '/'.join(pieces)

    def timer(self, key, **dims):
        """Return a timer for the regex-grouped key, with dimensions."""
        return super(RegexRegistry, self).timer(self._get_key(key), **dims)

    def histogram(self, key, **dims):
        """Return a histogram for the regex-grouped key, with dimensions."""
        return super(RegexRegistry, self).histogram(self._get_key(key), **dims)

    def counter(self, key, **dims):
        """Return a counter for the regex-grouped key, with dimensions."""
        return super(RegexRegistry, self).counter(self._get_key(key), **dims)

    def gauge(self, key, gauge=None, default=float("nan"), **dims):
        """Return a gauge for the regex-grouped key, with dimensions."""
        return super(RegexRegistry, self).gauge(
            self._get_key(key), gauge=gauge, default=default, **dims)

    def meter(self, key, **dims):
        """Return a meter for the regex-grouped key, with dimensions."""
        return super(RegexRegistry, self).meter(self._get_key(key), **dims)
def counter(key, **dims):
    """Fetch/create a counter named *key* with *dims* on the global registry."""
    registry = global_registry()
    return registry.counter(key, **dims)
def histogram(key, **dims):
    """Fetch/create a histogram named *key* with *dims* on the global registry."""
    registry = global_registry()
    return registry.histogram(key, **dims)
def meter(key, **dims):
    """Fetch/create a meter named *key* with *dims* on the global registry."""
    registry = global_registry()
    return registry.meter(key, **dims)
def timer(key, **dims):
    """Fetch/create a timer named *key* with *dims* on the global registry."""
    registry = global_registry()
    return registry.timer(key, **dims)
def gauge(key, gauge=None, default=float("nan"), **dims):
    """Fetch/create a gauge named *key* with *dims* on the global registry."""
    registry = global_registry()
    return registry.gauge(key, gauge=gauge, default=default, **dims)
def count_calls_with_dims(**dims):
    """Decorator factory: count invocations of the wrapped function,
    tagging the counter with the given dimensions.
    """
    def decorate(fn):
        @functools.wraps(fn)
        def wrapped(*args, **kwargs):
            metric_name = "%s_calls" % pyformance.registry.get_qualname(fn)
            counter(metric_name, **dims).inc()
            return fn(*args, **kwargs)
        return wrapped
    return decorate
def meter_calls_with_dims(**dims):
    """Decorator factory: mark a meter each time the wrapped function runs,
    tagging the meter with the given dimensions.
    """
    def decorate(fn):
        @functools.wraps(fn)
        def wrapped(*args, **kwargs):
            metric_name = "%s_calls" % pyformance.registry.get_qualname(fn)
            meter(metric_name, **dims).mark()
            return fn(*args, **kwargs)
        return wrapped
    return decorate
# TODO: raise bug with pyformance on their implementation of hist_calls
# _histogram does not have an update method so use add instead
def hist_calls(fn):
    """
    Decorator to check the distribution of return values of a function.
    """
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        dist = histogram("%s_calls" % pyformance.registry.get_qualname(fn))
        result = fn(*args, **kwargs)
        # NOTE: type() rather than isinstance() — this also excludes bool
        # (a subclass of int); confirm that is intended before changing.
        if type(result) in (int, float):
            dist.add(result)
        return result
    return wrapped
def hist_calls_with_dims(**dims):
    """Decorator factory: record the distribution of a function's numeric
    return values, tagging the histogram with the given dimensions.
    """
    def decorate(fn):
        @functools.wraps(fn)
        def wrapped(*args, **kwargs):
            dist = histogram(
                "%s_calls" % pyformance.registry.get_qualname(fn), **dims)
            result = fn(*args, **kwargs)
            # type() (not isinstance) also excludes bool, a subclass of int.
            if type(result) in (int, float):
                dist.add(result)
            return result
        return wrapped
    return decorate
def time_calls_with_dims(**dims):
    """Decorator factory: time each call of the wrapped function,
    tagging the timer with the given dimensions.
    """
    def decorate(fn):
        @functools.wraps(fn)
        def wrapped(*args, **kwargs):
            qualname = pyformance.registry.get_qualname(fn)
            call_timer = timer("%s_calls" % qualname, **dims)
            with call_timer.time(fn=qualname):
                return fn(*args, **kwargs)
        return wrapped
    return decorate
| |
import pytest
import mock
from api.base.settings.defaults import API_BASE
from osf_tests.factories import (
PreprintFactory,
AuthUserFactory,
PreprintProviderFactory,
)
from website.util import permissions as osf_permissions
from reviews.permissions import GroupHelper
from api_tests.reviews.mixins.filter_mixins import ActionFilterMixin
class TestActionFilters(ActionFilterMixin):
    """Filtering of /users/me/actions/; the filter cases come from the mixin."""

    @pytest.fixture()
    def url(self):
        # Endpoint exercised by the inherited filter tests.
        return '/{}users/me/actions/'.format(API_BASE)

    @pytest.fixture()
    def expected_actions(self, all_actions, allowed_providers):
        actions = super(TestActionFilters, self).expected_actions(all_actions, allowed_providers)
        # Make one action's target node private so it should drop out of results.
        node = actions[0].target.node
        node.is_public = False
        node.save()
        # Only actions whose target node is public are expected back.
        return [a for a in actions if a.target.node.is_public]

    def test_no_permission(self, app, url, expected_actions):
        # Anonymous requests are rejected outright...
        res = app.get(url, expect_errors=True)
        assert res.status_code == 401

        # ...while an unrelated authenticated user gets an empty result set.
        some_rando = AuthUserFactory()
        res = app.get(url, auth=some_rando.auth)
        assert not res.json['data']
@pytest.mark.django_db
class TestActionCreate(object):
    """POST /actions/: permission checks, workflow conflicts, and every valid
    state transition for pre- and post-moderation providers.
    """

    def create_payload(self, reviewable_id=None, **attrs):
        """Build a JSON-API 'actions' payload; attach the preprint target
        relationship when *reviewable_id* is given.
        """
        payload = {
            'data': {
                'attributes': attrs,
                'relationships': {},
                'type': 'actions'
            }
        }
        if reviewable_id:
            payload['data']['relationships']['target'] = {
                'data': {
                    'type': 'preprints',
                    'id': reviewable_id
                }
            }
        return payload

    @pytest.fixture()
    def url(self):
        return '/{}actions/'.format(API_BASE)

    @pytest.fixture()
    def provider(self):
        # Pre-moderation: submissions need moderator approval before publication.
        return PreprintProviderFactory(reviews_workflow='pre-moderation')

    @pytest.fixture()
    def node_admin(self):
        return AuthUserFactory()

    @pytest.fixture()
    def preprint(self, node_admin, provider):
        # Unpublished preprint whose node is administered by node_admin.
        preprint = PreprintFactory(provider=provider, node__creator=node_admin, is_published=False)
        preprint.node.add_contributor(node_admin, permissions=[osf_permissions.ADMIN])
        return preprint

    @pytest.fixture()
    def moderator(self, provider):
        moderator = AuthUserFactory()
        moderator.groups.add(GroupHelper(provider).get_group('moderator'))
        return moderator

    @mock.patch('website.preprints.tasks.get_and_set_preprint_identifiers.si')
    def test_create_permissions(self, mock_ezid, app, url, preprint, node_admin, moderator):
        """Only node admins may submit; only this provider's moderators may accept."""
        assert preprint.reviews_state == 'initial'

        submit_payload = self.create_payload(preprint._id, trigger='submit')

        # Unauthorized user can't submit
        res = app.post_json_api(url, submit_payload, expect_errors=True)
        assert res.status_code == 401

        # A random user can't submit
        some_rando = AuthUserFactory()
        res = app.post_json_api(url, submit_payload, auth=some_rando.auth, expect_errors=True)
        assert res.status_code == 403

        # Node admin can submit
        res = app.post_json_api(url, submit_payload, auth=node_admin.auth)
        assert res.status_code == 201
        preprint.refresh_from_db()
        assert preprint.reviews_state == 'pending'
        assert not preprint.is_published

        accept_payload = self.create_payload(preprint._id, trigger='accept', comment='This is good.')

        # Unauthorized user can't accept
        res = app.post_json_api(url, accept_payload, expect_errors=True)
        assert res.status_code == 401

        # A random user can't accept
        res = app.post_json_api(url, accept_payload, auth=some_rando.auth, expect_errors=True)
        assert res.status_code == 403

        # Moderator from another provider can't accept
        another_moderator = AuthUserFactory()
        another_moderator.groups.add(GroupHelper(PreprintProviderFactory()).get_group('moderator'))
        res = app.post_json_api(url, accept_payload, auth=another_moderator.auth, expect_errors=True)
        assert res.status_code == 403

        # Node admin can't accept
        res = app.post_json_api(url, accept_payload, auth=node_admin.auth, expect_errors=True)
        assert res.status_code == 403

        # Still unchanged after all those tries
        preprint.refresh_from_db()
        assert preprint.reviews_state == 'pending'
        assert not preprint.is_published

        # Moderator can accept
        res = app.post_json_api(url, accept_payload, auth=moderator.auth)
        assert res.status_code == 201
        preprint.refresh_from_db()
        assert preprint.reviews_state == 'accepted'
        assert preprint.is_published

        # Check if "get_and_set_preprint_identifiers" is called once.
        assert mock_ezid.call_count == 1

    def test_cannot_create_actions_for_unmoderated_provider(self, app, url, preprint, provider, node_admin):
        """Submitting to a provider with no review workflow is a 409 conflict."""
        provider.reviews_workflow = None
        provider.save()
        submit_payload = self.create_payload(preprint._id, trigger='submit')
        res = app.post_json_api(url, submit_payload, auth=node_admin.auth, expect_errors=True)
        assert res.status_code == 409

    def test_bad_requests(self, app, url, preprint, provider, moderator):
        """Illegal (state, trigger) pairs give 409; bad trigger or missing target give 400."""
        # (current state, trigger) combinations that are invalid per workflow.
        invalid_transitions = {
            'post-moderation': [
                ('accepted', 'accept'),
                ('accepted', 'submit'),
                ('initial', 'accept'),
                ('initial', 'edit_comment'),
                ('initial', 'reject'),
                ('pending', 'submit'),
                ('rejected', 'reject'),
                ('rejected', 'submit'),
            ],
            'pre-moderation': [
                ('accepted', 'accept'),
                ('accepted', 'submit'),
                ('initial', 'accept'),
                ('initial', 'edit_comment'),
                ('initial', 'reject'),
                ('rejected', 'reject'),
            ]
        }
        for workflow, transitions in invalid_transitions.items():
            provider.reviews_workflow = workflow
            provider.save()
            for state, trigger in transitions:
                preprint.reviews_state = state
                preprint.save()
                bad_payload = self.create_payload(preprint._id, trigger=trigger)
                res = app.post_json_api(url, bad_payload, auth=moderator.auth, expect_errors=True)
                assert res.status_code == 409

        # test invalid trigger
        bad_payload = self.create_payload(preprint._id, trigger='badtriggerbad')
        res = app.post_json_api(url, bad_payload, auth=moderator.auth, expect_errors=True)
        assert res.status_code == 400

        # test target is required
        bad_payload = self.create_payload(trigger='accept')
        res = app.post_json_api(url, bad_payload, auth=moderator.auth, expect_errors=True)
        assert res.status_code == 400

    @mock.patch('website.preprints.tasks.get_and_set_preprint_identifiers.si')
    def test_valid_transitions(self, mock_ezid, app, url, preprint, provider, moderator):
        """Each legal (from, trigger, to) transition updates state, publication
        flags, identifier minting, and the transition timestamp as expected.
        """
        valid_transitions = {
            'post-moderation': [
                ('accepted', 'edit_comment', 'accepted'),
                ('accepted', 'reject', 'rejected'),
                ('initial', 'submit', 'pending'),
                ('pending', 'accept', 'accepted'),
                ('pending', 'edit_comment', 'pending'),
                ('pending', 'reject', 'rejected'),
                ('rejected', 'accept', 'accepted'),
                ('rejected', 'edit_comment', 'rejected'),
            ],
            'pre-moderation': [
                ('accepted', 'edit_comment', 'accepted'),
                ('accepted', 'reject', 'rejected'),
                ('initial', 'submit', 'pending'),
                ('pending', 'accept', 'accepted'),
                ('pending', 'edit_comment', 'pending'),
                ('pending', 'reject', 'rejected'),
                ('pending', 'submit', 'pending'),
                ('rejected', 'accept', 'accepted'),
                ('rejected', 'edit_comment', 'rejected'),
                ('rejected', 'submit', 'pending'),
            ],
        }
        for workflow, transitions in valid_transitions.items():
            provider.reviews_workflow = workflow
            provider.save()
            for from_state, trigger, to_state in transitions:
                # Reset the preprint to a known unpublished baseline.
                preprint.reviews_state = from_state
                preprint.is_published = False
                preprint.date_published = None
                preprint.date_last_transitioned = None
                preprint.save()

                payload = self.create_payload(preprint._id, trigger=trigger)
                res = app.post_json_api(url, payload, auth=moderator.auth)
                assert res.status_code == 201

                action = preprint.actions.order_by('-date_created').first()
                assert action.trigger == trigger

                preprint.refresh_from_db()
                assert preprint.reviews_state == to_state
                if preprint.in_public_reviews_state:
                    # Publication implies identifier minting was kicked off.
                    assert preprint.is_published
                    assert preprint.date_published == action.date_created
                    assert mock_ezid.called
                    mock_ezid.reset_mock()
                else:
                    assert not preprint.is_published
                    assert preprint.date_published is None
                    assert not mock_ezid.called

                if trigger == 'edit_comment':
                    # Editing a comment is not a state transition.
                    assert preprint.date_last_transitioned is None
                else:
                    assert preprint.date_last_transitioned == action.date_created
| |
"""SCons.Tool.mwcc
Tool-specific initialization for the Metrowerks CodeWarrior compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mwcc.py 5023 2010/06/14 22:05:46 scons"
import os
import os.path
import SCons.Util
def set_vars(env):
    """Set MWCW_VERSION, MWCW_VERSIONS, and some codewarrior environment vars

    MWCW_VERSIONS becomes the list of installed versions; MWCW_VERSION the
    version object chosen for building. A string MWCW_VERSION set during
    Environment construction selects a specific version, otherwise the
    latest installed one wins.

    Returns 1 if a usable version was configured, 0 otherwise.
    """
    desired = env.get('MWCW_VERSION', '')

    # Already resolved to a version object: nothing more to do.
    if isinstance(desired, MWVersion):
        return 1
    # Explicitly disabled by the user.
    elif desired is None:
        return 0

    versions = find_versions()
    version = None
    if desired:
        # Pick the installed version whose string form matches the request.
        for candidate in versions:
            if str(candidate) == desired:
                version = candidate
    elif versions:
        version = versions[-1]

    env['MWCW_VERSIONS'] = versions
    env['MWCW_VERSION'] = version
    if version is None:
        return 0

    env.PrependENVPath('PATH', version.clpath)
    env.PrependENVPath('PATH', version.dllpath)

    environ = env['ENV']
    environ['CWFolder'] = version.path
    environ['LM_LICENSE_FILE'] = version.license
    # CodeWarrior expects each search path prefixed with '+'.
    environ['MWCIncludes'] = os.pathsep.join('+%s' % inc for inc in version.includes)
    environ['MWLibraries'] = os.pathsep.join('+%s' % lib for lib in version.libs)
    return 1
def find_versions():
    """Return a list of MWVersion objects representing installed versions"""
    versions = []

    ### This function finds CodeWarrior by reading from the registry on
    ### Windows. Some other method needs to be implemented for other
    ### platforms, maybe something that calls env.WhereIs('mwcc')

    if SCons.Util.can_read_reg:
        try:
            HLM = SCons.Util.HKEY_LOCAL_MACHINE
            product = 'SOFTWARE\\Metrowerks\\CodeWarrior\\Product Versions'
            product_key = SCons.Util.RegOpenKeyEx(HLM, product)

            # Enumerate subkeys until RegEnumKey raises RegError (end of list).
            i = 0
            while True:
                name = product + '\\' + SCons.Util.RegEnumKey(product_key, i)
                name_key = SCons.Util.RegOpenKeyEx(HLM, name)

                try:
                    version = SCons.Util.RegQueryValueEx(name_key, 'VERSION')
                    path = SCons.Util.RegQueryValueEx(name_key, 'PATH')
                    mwv = MWVersion(version[0], path[0], 'Win32-X86')
                    versions.append(mwv)
                except SCons.Util.RegError:
                    # Subkey lacks VERSION/PATH values; skip it.
                    pass

                i = i + 1
        except SCons.Util.RegError:
            # Raised when the subkeys are exhausted, or when CodeWarrior
            # is not installed at all; either way we fall through.
            pass

    return versions
class MWVersion(object):
    """One installed Metrowerks CodeWarrior version.

    Collects every path scons must inject into the build environment:
    the command-line tools, their runtime DLL directory, the license
    file, and the MSL / platform-support trees used for both headers
    and libraries. The Metrowerks tools keep no configuration of their
    own, so all of this information is handed to them via environment
    variables.
    """

    def __init__(self, version, path, platform):
        self.version = version
        self.path = path
        self.platform = platform
        # Command-line compiler location and the DLL directory it needs.
        self.clpath = os.path.join(path, 'Other Metrowerks Tools',
                                   'Command Line Tools')
        self.dllpath = os.path.join(path, 'Bin')
        self.license = os.path.join(path, 'license.dat')
        ### These locations match a stock CodeWarrior for Windows
        ### install; other CodeWarrior variants may need different paths.
        msl_dir = os.path.join(path, 'MSL')
        support_dir = os.path.join(path, '%s Support' % platform)
        self.includes = [msl_dir, support_dir]
        self.libs = [msl_dir, support_dir]

    def __str__(self):
        return self.version
# Source-file suffixes routed to the C compile actions in generate().
CSuffixes = ['.c', '.C']
# Source-file suffixes routed to the C++ compile actions in generate().
CXXSuffixes = ['.cc', '.cpp', '.cxx', '.c++', '.C++']
def generate(env):
    """Add Builders and construction variables for the mwcc to an Environment."""
    import SCons.Defaults
    import SCons.Tool
    # Locate an installed CodeWarrior and prime env/ENV with its paths.
    set_vars(env)
    # Route C and C++ suffixes to the standard static/shared obj actions.
    static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
    for suffix in CSuffixes:
        static_obj.add_action(suffix, SCons.Defaults.CAction)
        shared_obj.add_action(suffix, SCons.Defaults.ShCAction)
    for suffix in CXXSuffixes:
        static_obj.add_action(suffix, SCons.Defaults.CXXAction)
        shared_obj.add_action(suffix, SCons.Defaults.ShCXXAction)
    # mwcc serves as both the C and C++ compiler; -nolink compiles only.
    env['CCCOMFLAGS'] = '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -nolink -o $TARGET $SOURCES'
    env['CC'] = 'mwcc'
    env['CCCOM'] = '$CC $CFLAGS $CCFLAGS $CCCOMFLAGS'
    env['CXX'] = 'mwcc'
    env['CXXCOM'] = '$CXX $CXXFLAGS $CCCOMFLAGS'
    # Shared-object builds reuse the same compiler and flag sets.
    env['SHCC'] = '$CC'
    env['SHCCFLAGS'] = '$CCFLAGS'
    env['SHCFLAGS'] = '$CFLAGS'
    env['SHCCCOM'] = '$SHCC $SHCFLAGS $SHCCFLAGS $CCCOMFLAGS'
    env['SHCXX'] = '$CXX'
    env['SHCXXFLAGS'] = '$CXXFLAGS'
    env['SHCXXCOM'] = '$SHCXX $SHCXXFLAGS $CCCOMFLAGS'
    env['CFILESUFFIX'] = '.c'
    env['CXXFILESUFFIX'] = '.cpp'
    # Flag spellings used when expanding $_CPPDEFFLAGS / $_CPPINCFLAGS.
    env['CPPDEFPREFIX'] = '-D'
    env['CPPDEFSUFFIX'] = ''
    env['INCPREFIX'] = '-I'
    env['INCSUFFIX'] = ''
    #env['PCH'] = ?
    #env['PCHSTOP'] = ?
def exists(env):
    # Tool availability probe: truthy when set_vars locates an installed
    # CodeWarrior (note it also primes MWCW_VERSION(S) in env as a side
    # effect, mirroring how generate() uses it).
    return set_vars(env)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| |
# This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Fetches album art.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from contextlib import closing
import os
import re
from tempfile import NamedTemporaryFile
import requests
from beets import plugins
from beets import importer
from beets import ui
from beets import util
from beets import config
from beets.util.artresizer import ArtResizer
try:
import itunes
HAVE_ITUNES = True
except ImportError:
HAVE_ITUNES = False
IMAGE_EXTENSIONS = ['png', 'jpg', 'jpeg']
CONTENT_TYPES = ('image/jpeg', 'image/png')
DOWNLOAD_EXTENSION = '.jpg'
def _logged_get(log, *args, **kwargs):
    """Like `requests.get`, but logs the effective URL to the specified
    `log` at the `DEBUG` level.
    Use the optional `message` parameter to specify what to log before
    the URL. By default, the string is "getting URL".
    Also sets the User-Agent header to indicate beets.
    """
    # Use some arguments with the `send` call but most with the
    # `Request` construction. This is a cheap, magic-filled way to
    # emulate `requests.get` or, more pertinently,
    # `requests.Session.request`.
    req_kwargs = kwargs
    send_kwargs = {}
    for arg in ('stream', 'verify', 'proxies', 'cert', 'timeout'):
        if arg in kwargs:
            send_kwargs[arg] = req_kwargs.pop(arg)
    # Our special logging message parameter: pop with a default instead
    # of the previous check-then-pop dance.
    message = kwargs.pop('message', 'getting URL')
    req = requests.Request('GET', *args, **req_kwargs)
    with requests.Session() as s:
        # Replace (not update) the default headers so only our UA is sent.
        s.headers = {'User-Agent': 'beets'}
        prepped = s.prepare_request(req)
        log.debug('{}: {}', message, prepped.url)
        return s.send(prepped, **send_kwargs)
class RequestMixin(object):
    """Adds a Requests wrapper to the class that uses the logger, which
    must be named `self._log` (host classes must set it before calling
    `request`).
    """
    def request(self, *args, **kwargs):
        """Issue a GET like `requests.get`, but log the effective URL
        with `self._log`. See also `_logged_get`.
        """
        return _logged_get(self._log, *args, **kwargs)
# ART SOURCES ################################################################
class ArtSource(RequestMixin):
    """Abstract base class for remote album-art providers.
    Subclasses override `get` to yield candidate image URLs.
    """
    def __init__(self, log):
        # Logger for this source; also consumed by RequestMixin.request.
        self._log = log
    def get(self, album):
        # Subclasses yield zero or more candidate art URLs for `album`.
        raise NotImplementedError()
class CoverArtArchive(ArtSource):
    """Cover Art Archive"""
    URL = 'http://coverartarchive.org/release/{mbid}/front'
    GROUP_URL = 'http://coverartarchive.org/release-group/{mbid}/front'

    def get(self, album):
        """Yield the release and release-group front-cover URLs built
        from the album's MusicBrainz IDs, skipping any ID that is unset.
        """
        candidates = ((album.mb_albumid, self.URL),
                      (album.mb_releasegroupid, self.GROUP_URL))
        for mbid, template in candidates:
            if mbid:
                yield template.format(mbid=mbid)
class Amazon(ArtSource):
    """Amazon product images, addressed by the album's ASIN."""
    URL = 'http://images.amazon.com/images/P/%s.%02i.LZZZZZZZ.jpg'
    INDICES = (1, 2)

    def get(self, album):
        """Yield one image URL per known Amazon image index, provided
        the album carries an ASIN.
        """
        if not album.asin:
            return
        for index in self.INDICES:
            yield self.URL % (album.asin, index)
class AlbumArtOrg(ArtSource):
    """AlbumArt.org scraper"""
    URL = 'http://www.albumart.org/index_detail.php'
    PAT = r'href\s*=\s*"([^>"]*)"[^>]*title\s*=\s*"View larger image"'

    def get(self, album):
        """Yield the art URL scraped from AlbumArt.org for the album's
        ASIN, when the page can be fetched and contains a match.
        """
        if not album.asin:
            return
        # Fetch the detail page for this ASIN from albumart.org.
        try:
            resp = self.request(self.URL, params={'asin': album.asin})
            self._log.debug(u'scraped art URL: {0}', resp.url)
        except requests.RequestException:
            self._log.debug(u'error scraping art page')
            return
        # Pull the "View larger image" link out of the page markup.
        match = re.search(self.PAT, resp.text)
        if not match:
            self._log.debug(u'no image found on page')
            return
        yield match.group(1)
class GoogleImages(ArtSource):
    """Google image search (via the deprecated AJAX search API)."""
    URL = 'https://ajax.googleapis.com/ajax/services/search/images'
    def get(self, album):
        """Return art URL from google.org given an album title and
        interpreter.
        """
        if not (album.albumartist and album.album):
            return
        search_string = (album.albumartist + ',' + album.album).encode('utf-8')
        response = self.request(self.URL, params={
            'v': '1.0',
            'q': search_string,
            'start': '0',
        })
        # Get results using JSON.
        try:
            results = response.json()
            data = results['responseData']
            data_info = data['results']
            for item in data_info:
                yield item['unescapedUrl']
        except (ValueError, KeyError, TypeError):
            # Narrowed from a bare `except`, which also swallowed
            # KeyboardInterrupt/SystemExit. ValueError covers malformed
            # JSON; KeyError/TypeError cover unexpected response shapes
            # (missing keys, or `responseData` being null).
            self._log.debug(u'error scraping art page')
            return
class ITunesStore(ArtSource):
    # Art from the iTunes Store.
    def get(self, album):
        """Return art URL from iTunes Store given an album title.
        """
        if not (album.albumartist and album.album):
            return
        search_string = (album.albumartist + ' ' + album.album).encode('utf-8')
        # Isolate bugs in the iTunes library while searching.
        try:
            results = itunes.search_album(search_string)
        except Exception as exc:
            self._log.debug('iTunes search failed: {0}', exc)
            return
        # Previously an empty result list raised IndexError inside the
        # generic handler above, so the "not found" message was never
        # reached; report the condition accurately instead.
        if not results:
            self._log.debug(u'album not found in iTunes Store')
            return
        itunes_album = results[0]
        # Fetch the artwork mapping once rather than twice.
        artwork = itunes_album.get_artwork()
        if artwork['100']:
            small_url = artwork['100']
            # The store serves larger renditions at a predictable URL.
            big_url = small_url.replace('100x100', '1200x1200')
            yield big_url
        else:
            self._log.debug(u'album has no artwork in iTunes Store')
class Wikipedia(ArtSource):
    # Art from Wikipedia (queried through DBpedia)
    DBPEDIA_URL = 'http://dbpedia.org/sparql'
    WIKIPEDIA_URL = 'http://en.wikipedia.org/w/api.php'
    SPARQL_QUERY = '''PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
                 PREFIX dbpprop: <http://dbpedia.org/property/>
                 PREFIX owl: <http://dbpedia.org/ontology/>
                 PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
                 PREFIX foaf: <http://xmlns.com/foaf/0.1/>
                 SELECT DISTINCT ?pageId ?coverFilename WHERE {{
                   ?subject owl:wikiPageID ?pageId .
                   ?subject dbpprop:name ?name .
                   ?subject rdfs:label ?label .
                   {{ ?subject dbpprop:artist ?artist }}
                   UNION
                   {{ ?subject owl:artist ?artist }}
                   {{ ?artist foaf:name "{artist}"@en }}
                   UNION
                   {{ ?artist dbpprop:name "{artist}"@en }}
                   ?subject rdf:type <http://dbpedia.org/ontology/Album> .
                   ?subject dbpprop:cover ?coverFilename .
                   FILTER ( regex(?name, "{album}", "i") )
                   }}
                 Limit 1'''
    def get(self, album):
        """Yield the cover-art URL for `album`, resolved in three steps:
        a DBpedia SPARQL query for the cover filename and page id, an
        optional Wikipedia `images` query to repair truncated filenames,
        and a final Wikipedia `imageinfo` query for the absolute URL.
        """
        if not (album.albumartist and album.album):
            return
        # Find the name of the cover art filename on DBpedia
        cover_filename, page_id = None, None
        dbpedia_response = self.request(
            self.DBPEDIA_URL,
            params={
                'format': 'application/sparql-results+json',
                'timeout': 2500,
                'query': self.SPARQL_QUERY.format(
                    artist=album.albumartist.title(), album=album.album)
            },
            headers={'content-type': 'application/json'},
        )
        try:
            data = dbpedia_response.json()
            results = data['results']['bindings']
            if results:
                cover_filename = 'File:' + results[0]['coverFilename']['value']
                page_id = results[0]['pageId']['value']
            else:
                self._log.debug('wikipedia: album not found on dbpedia')
        except (ValueError, KeyError, IndexError):
            self._log.debug('wikipedia: error scraping dbpedia response: {}',
                            dbpedia_response.text)
        # Ensure we have a filename before attempting to query wikipedia
        if not (cover_filename and page_id):
            return
        # DBPedia sometimes provides an incomplete cover_filename, indicated
        # by the filename having a space before the extension, e.g., 'foo .bar'
        # An additional Wikipedia call can help to find the real filename.
        # This may be removed once the DBPedia issue is resolved, see:
        # https://github.com/dbpedia/extraction-framework/issues/396
        if ' .' in cover_filename and \
           '.' not in cover_filename.split(' .')[-1]:
            self._log.debug(
                'wikipedia: dbpedia provided incomplete cover_filename'
            )
            lpart, rpart = cover_filename.rsplit(' .', 1)
            # Query all the images in the page
            wikipedia_response = self.request(
                self.WIKIPEDIA_URL,
                params={
                    'format': 'json',
                    'action': 'query',
                    'continue': '',
                    'prop': 'images',
                    'pageids': page_id,
                },
                headers={'content-type': 'application/json'},
            )
            # Try to see if one of the images on the pages matches our
            # incomplete cover_filename
            try:
                data = wikipedia_response.json()
                results = data['query']['pages'][page_id]['images']
                for result in results:
                    if re.match(re.escape(lpart) + r'.*?\.' + re.escape(rpart),
                                result['title']):
                        cover_filename = result['title']
                        break
            except (ValueError, KeyError):
                self._log.debug(
                    'wikipedia: failed to retrieve a cover_filename'
                )
                return
        # Find the absolute url of the cover art on Wikipedia
        wikipedia_response = self.request(
            self.WIKIPEDIA_URL,
            params={
                'format': 'json',
                'action': 'query',
                'continue': '',
                'prop': 'imageinfo',
                'iiprop': 'url',
                'titles': cover_filename.encode('utf-8'),
            },
            headers={'content-type': 'application/json'},
        )
        try:
            data = wikipedia_response.json()
            results = data['query']['pages']
            # NOTE: dict.iteritems is Python 2-only, consistent with the
            # rest of this module.
            for _, result in results.iteritems():
                image_url = result['imageinfo'][0]['url']
                yield image_url
        except (ValueError, KeyError, IndexError):
            self._log.debug('wikipedia: error scraping imageinfo')
            return
class FileSystem(ArtSource):
    """Art from the filesystem"""

    @staticmethod
    def filename_priority(filename, cover_names):
        """Sort order for image names.
        Return indexes of cover names found in the image filename. This
        means that images with lower-numbered and more keywords will have
        higher priority.
        """
        return [idx for (idx, x) in enumerate(cover_names) if x in filename]

    def get(self, path, cover_names, cautious):
        """Look for album art files in a specified directory.
        """
        if not os.path.isdir(path):
            return

        # Collect every regular file whose name carries an image extension.
        images = []
        for entry in os.listdir(path):
            for ext in IMAGE_EXTENSIONS:
                if entry.lower().endswith(b'.' + ext.encode('utf8')) and \
                   os.path.isfile(os.path.join(path, entry)):
                    images.append(entry)

        # Rank filenames containing the configured keywords first.
        images.sort(key=lambda name: self.filename_priority(name, cover_names))

        # A filename matching a keyword on a word/underscore boundary wins.
        cover_pat = br"(\b|_)({0})(\b|_)".format(b'|'.join(cover_names))
        for name in images:
            if re.search(cover_pat, os.path.splitext(name)[0], re.I):
                self._log.debug(u'using well-named art file {0}',
                                util.displayable_path(name))
                return os.path.join(path, name)

        # Fall back to any image in the folder.
        if images and not cautious:
            self._log.debug(u'using fallback art file {0}',
                            util.displayable_path(images[0]))
            return os.path.join(path, images[0])
# Try each source in turn.
# Complete list of available remote source names, in default priority order.
SOURCES_ALL = [u'coverart', u'itunes', u'amazon', u'albumart', u'google',
               u'wikipedia']
# Maps each configurable source name to its ArtSource implementation.
ART_SOURCES = {
    u'coverart': CoverArtArchive,
    u'itunes': ITunesStore,
    u'albumart': AlbumArtOrg,
    u'amazon': Amazon,
    u'google': GoogleImages,
    u'wikipedia': Wikipedia,
}
# PLUGIN LOGIC ###############################################################
class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
    """Beets plugin that fetches album art during import (two hooks:
    `fetch_art` finds candidates, `assign_art` places them) and via the
    manual `fetchart` CLI command, using local files plus the remote
    sources configured under `sources`.
    """
    def __init__(self):
        super(FetchArtPlugin, self).__init__()
        self.config.add({
            'auto': True,
            'minwidth': 0,
            'maxwidth': 0,
            'enforce_ratio': False,
            'remote_priority': False,
            'cautious': False,
            'cover_names': ['cover', 'front', 'art', 'album', 'folder'],
            'sources': ['coverart', 'itunes', 'amazon', 'albumart'],
        })
        # Holds paths to downloaded images between fetching them and
        # placing them in the filesystem.
        self.art_paths = {}
        self.minwidth = self.config['minwidth'].get(int)
        self.maxwidth = self.config['maxwidth'].get(int)
        self.enforce_ratio = self.config['enforce_ratio'].get(bool)
        if self.config['auto']:
            # Enable two import hooks when fetching is enabled.
            self.import_stages = [self.fetch_art]
            self.register_listener('import_task_files', self.assign_art)
        # Instantiate the configured remote sources (dropping iTunes when
        # the `itunes` library is unavailable) plus the local source.
        available_sources = list(SOURCES_ALL)
        if not HAVE_ITUNES and u'itunes' in available_sources:
            available_sources.remove(u'itunes')
        sources_name = plugins.sanitize_choices(
            self.config['sources'].as_str_seq(), available_sources)
        self.sources = [ART_SOURCES[s](self._log) for s in sources_name]
        self.fs_source = FileSystem(self._log)
    # Asynchronous; after music is added to the library.
    def fetch_art(self, session, task):
        """Find art for the album being imported."""
        if task.is_album:  # Only fetch art for full albums.
            if task.choice_flag == importer.action.ASIS:
                # For as-is imports, don't search Web sources for art.
                local = True
            elif task.choice_flag == importer.action.APPLY:
                # Search everywhere for art.
                local = False
            else:
                # For any other choices (e.g., TRACKS), do nothing.
                return
            path = self.art_for_album(task.album, task.paths, local)
            if path:
                self.art_paths[task] = path
    # Synchronous; after music files are put in place.
    def assign_art(self, session, task):
        """Place the discovered art in the filesystem."""
        if task in self.art_paths:
            path = self.art_paths.pop(task)
            album = task.album
            # If the import moves/deletes sources, the temp image can be
            # moved instead of copied, then pruned.
            src_removed = (config['import']['delete'].get(bool) or
                           config['import']['move'].get(bool))
            album.set_art(path, not src_removed)
            album.store()
            if src_removed:
                task.prune(path)
    # Manual album art fetching.
    def commands(self):
        cmd = ui.Subcommand('fetchart', help='download album art')
        cmd.parser.add_option('-f', '--force', dest='force',
                              action='store_true', default=False,
                              help='re-download art when already present')
        def func(lib, opts, args):
            self.batch_fetch_art(lib, lib.albums(ui.decargs(args)), opts.force)
        cmd.func = func
        return [cmd]
    # Utilities converted from functions to methods on logging overhaul
    def _fetch_image(self, url):
        """Downloads an image from a URL and checks whether it seems to
        actually be an image. If so, returns a path to the downloaded image.
        Otherwise, returns None.
        """
        try:
            with closing(self.request(url, stream=True,
                                      message='downloading image')) as resp:
                if 'Content-Type' not in resp.headers \
                   or resp.headers['Content-Type'] not in CONTENT_TYPES:
                    self._log.debug(
                        'not a supported image: {}',
                        resp.headers.get('Content-Type') or 'no content type',
                    )
                    return None
                # Generate a temporary file with the correct extension.
                with NamedTemporaryFile(suffix=DOWNLOAD_EXTENSION,
                                        delete=False) as fh:
                    for chunk in resp.iter_content(chunk_size=1024):
                        fh.write(chunk)
                self._log.debug(u'downloaded art to: {0}',
                                util.displayable_path(fh.name))
                return fh.name
        except (IOError, requests.RequestException, TypeError) as exc:
            # Handling TypeError works around a urllib3 bug:
            # https://github.com/shazow/urllib3/issues/556
            self._log.debug('error fetching art: {}', exc)
            return None
    def _is_valid_image_candidate(self, candidate):
        # Validate a candidate path against the configured `minwidth`
        # and `enforce_ratio` constraints, when a backend can measure it.
        if not candidate:
            return False
        if not (self.enforce_ratio or self.minwidth):
            return True
        # get_size returns None if no local imaging backend is available
        size = ArtResizer.shared.get_size(candidate)
        if not size:
            self._log.warning(u'could not verify size of image: please see '
                              u'documentation for dependencies. '
                              u'The configuration options `minwidth` and '
                              u'`enforce_ratio` may be violated.')
            return True
        return size and size[0] >= self.minwidth and \
            (not self.enforce_ratio or size[0] == size[1])
    def art_for_album(self, album, paths, local_only=False):
        """Given an Album object, returns a path to downloaded art for the
        album (or None if no art is found). If `maxwidth`, then images are
        resized to this maximum pixel size. If `local_only`, then only local
        image files from the filesystem are returned; no network requests
        are made.
        """
        out = None
        # Local art.
        cover_names = self.config['cover_names'].as_str_seq()
        cover_names = map(util.bytestring_path, cover_names)
        cautious = self.config['cautious'].get(bool)
        if paths:
            for path in paths:
                candidate = self.fs_source.get(path, cover_names, cautious)
                if self._is_valid_image_candidate(candidate):
                    out = candidate
                    self._log.debug('found local image {}', out)
                    break
        # Web art sources.
        remote_priority = self.config['remote_priority'].get(bool)
        if not local_only and (remote_priority or not out):
            for url in self._source_urls(album):
                if self.maxwidth:
                    url = ArtResizer.shared.proxy_url(self.maxwidth, url)
                candidate = self._fetch_image(url)
                if self._is_valid_image_candidate(candidate):
                    out = candidate
                    self._log.debug('using remote image {}', out)
                    break
        if self.maxwidth and out:
            out = ArtResizer.shared.resize(self.maxwidth, out)
        return out
    def batch_fetch_art(self, lib, albums, force):
        """Fetch album art for each of the albums. This implements the manual
        fetchart CLI command.
        """
        for album in albums:
            if album.artpath and not force:
                message = ui.colorize('text_highlight_minor', 'has album art')
            else:
                # In ordinary invocations, look for images on the
                # filesystem. When forcing, however, always go to the Web
                # sources.
                local_paths = None if force else [album.path]
                path = self.art_for_album(album, local_paths)
                if path:
                    album.set_art(path, False)
                    album.store()
                    message = ui.colorize('text_success', 'found album art')
                else:
                    message = ui.colorize('text_error', 'no art found')
            self._log.info(u'{0}: {1}', album, message)
    def _source_urls(self, album):
        """Generate possible source URLs for an album's art. The URLs are
        not guaranteed to work so they each need to be attempted in turn.
        This allows the main `art_for_album` function to abort iteration
        through this sequence early to avoid the cost of scraping when not
        necessary.
        """
        # Reverse mapping used only for log messages.
        source_names = {v: k for k, v in ART_SOURCES.items()}
        for source in self.sources:
            self._log.debug(
                'trying source {0} for album {1.albumartist} - {1.album}',
                source_names[type(source)],
                album,
            )
            urls = source.get(album)
            for url in urls:
                yield url
| |
# Optionally enable the psyco JIT specializer when it is installed;
# silently run without it otherwise.
try:
    import psyco
    psyco.full()
except ImportError: pass
__doc__ = """
The present module Generative Linear Gaussian Models (GLGM) implements in Python programming language
the following models based mainly on references nr.1 (see References below):
x(t+1) = Ax(t) + w(t) = Ax(t) + w0, w0 = N(0, Q)
y(t) = Cx(t) + v(t) = Cx(t) + v0, v0 = N(0, R)
where:
x is a (k,1) vector of latent factors or state or causes (hidden) variables
y is a (p,1) vector of observations
A is a (k,k) matrix of states transition
C is a (p,k) matrix of observation measurement or generative matrix
w is a (k,1) vector of state evolution White Gaussian Noise (WGN)
v is a (k,1) vector of observation evolution WGN
w and v are statistically independent of each other and of x and y
N stands for Gaussian or Normal probability density function (pdf)
In particular the module aims to implement the following:
- fa (Factor Analysis)
- ppca (Probabilistic Principal Component Analysis)
- pca (Principal Component Analysis)
- mog (Mixture of Gaussians)
- vq (Vector Quantization)
- k-means clustering
- mofa (Mixture of Factor Analyzers)
- ica (Independent Component Analysis)
References:
1 Unifying Review of Linear Gaussian Models
Roweis, Ghahramani
Neural Computation 1999 11:2, 305-345.
2 The EM algorithm for mixtures of factor analyzers
Ghahramani, Hinton
1997, Technical Report CRG-TR-96-1
Dept. of Computer Science, University of Toronto, Toronto, Canada, MSS 1A4
3 Max Welling's tutorials
(available @ http://www.ics.uci.edu/~welling/classnotes/classnotes.html)
4 Book of Johnson and Wichern
5 Book of Joreskog
6 Book of Duda, Hart
7 Book of MacKay
8 Book of Golub, Van Loan
9 Maximum Likelihood and Covariant Algorithms for Independent Component Analysis
MacKay
10 Solving inverse problems using an EM approach to density estimation
Ghahramani
11 Maximum likelihood and minimum classification error Factor Analysis for automatic speech recognition
Saul, Rahim
12 Finite mixture models
Geoffrey J. McLachlan, David Peel [book of]
13 Numerical recipes: the art of scientific computing
Press et al [book of]
14 Unsupervised Classification with Non-Gaussian Mixture Models Using ICA
Lee, Lewicki, Sejnowski
15 EM Algorithms for PCA and SPCA
Roweis
16 A tutorial on Hidden Markov Models and selected applications in speech recognition
Rabiner
17 Maximum Likelihood Estimation for Multivariate Mixture Observations of Markov Chains
Juang, Levinson, Sondhi
NB Matrix x and y have both shape given by the tuple ('variables nr','samples nr').
"""
from numpy import (array, arange, dot, inner, outer, vdot, cov,
diag, ones, eye, zeros, argmax, nanargmax,
mean, std, multiply, sum, product, sqrt,
log, abs, exp, power, hstack, vstack, append,
concatenate, pi, inf, amin, amax, empty,
tanh, any, isnan)
from scipy.linalg import (norm, inv, det, svd, solve, cholesky)
from numpy.random import (normal, randn, rand, multivariate_normal,
uniform)
class lm(object):
    """Base class of the generative linear Gaussian models.
    Holds the observation matrix y (shape (p, n): p variables, n samples)
    and drives the EM loop; subclasses supply the E step (Inference), the
    M step (Learning), logLikelihood and break_condition.
    """
    y, n, p = _default_values_ = None, 0, None
    cumulate = True
    # Set to True by InferandLearn once an EM run completes; the default
    # lets inference helpers test it without risking AttributeError.
    trained = False
    def __init__(self, y, *args, **kwargs):
        if not(hasattr(y, '__iter__')):
            raise AttributeError('Input has to be an iterable')
        self.y = y
        self.p, self.n = y.shape
    def __call__(self, **kw):
        return self.InferandLearn(**kw)
    def InferandLearn(self, max_iter_nr = 20, **kwargs):
        """
        This method implements and runs the EM algorithm, in order to:
        - learn model's parameters A, C, Q, R (learning or system identification)
        - estimate hidden states from observations (inference or filtering or smoothing)
        Inference and Learning, respectively in methods E and M, are overridden by subclasses.
        """
        if not isinstance(max_iter_nr, int):
            raise TypeError('The maximum number of iterations of the EM procedure must be an integer')
        if max_iter_nr <= 0:
            raise ValueError('The maximum number of iterations of the EM procedure must be positive')
        # Store each extra keyword under its own name. The previous code
        # did `self.kw = val`, which overwrote a single literal attribute
        # named "kw" instead of using the keyword's name. (.items() in
        # place of .iteritems() works on both Python 2 and 3.)
        for kw, val in kwargs.items():
            setattr(self, kw, val)
        E_EM, M_EM = self.Inference, self.Learning
        logLH, Break = self.logLikelihood, self.break_condition
        self.logLH_delta = kwargs.get('logLH_delta', None)
        for iter_nr in range(max_iter_nr):
            #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            # E step
            #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            E_EM()
            #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            # log-LH
            #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            logLH()
            #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            # M step
            #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            M_EM()
            #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            # Break condition of the for loop
            #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            if Break(): break
        self.iter_nr = iter_nr
        self.trained = True
    def next(self, **kwargs):
        """Calling InferandLearn method to run only one iteration"""
        self.InferandLearn(max_iter_nr = 1, **kwargs)
    def Inference(self):
        """The E(xpectation) step of EM algorithm"""
        raise NotImplementedError('Method Inference not implemented in base class')
    def Learning(self):
        """The M(aximization) step of EM algorithm"""
        raise NotImplementedError('Method Learning not implemented in base class')
    def break_condition(self):
        """A method verifying an ad-hoc condition to exit from EM iter."""
        raise NotImplementedError('Method break_condition not implemented in base class')
    def logLikelihood(self):
        """The logLikelihood method for the given model"""
        raise NotImplementedError('Method logLikelihood not implemented in base class')
    def mu_y(self):
        """Per-variable sample mean of y, shaped (p, 1)."""
        return mean(self.y, axis = 1).reshape(self.p, 1)
    def cov_obs(self, cov_bias = 1):
        """Sample covariance of the observations (biased by default)."""
        return cov(self.y, bias = cov_bias)
    def centered_input(self):
        """Observations with their per-variable mean removed."""
        return self.y - self.mu_y()
    def erase(self):
        """Reset observation-related state to class defaults."""
        self.y, self.n, self.p = lm._default_values_
class fa(lm):
    """
    Factor Analysis model of static data y.
    Model:
    A = 0 (because data are static in time)
    x = x0 = w0, w0 = N(0, Q)
    y = y0 = Cx + v0, v0 = N(0, R)
    then
    y ~ N(0, CQC.T + R)
    and in order to solve any model degeneracy
    Q = I
    R is diagonal
    finally
    y ~ N(0, C*C.T + R)
    x_y ~ N(beta*y, I-beta*C) useful for the Inference task
    beta = C.T(C*C.T + R)**-1
    C is also called the factor loadings matrix,
    R's diagonal elements are the uniquenesses,
    v the sensor noise.
    Hint: apply Template Design Pattern to scale-down from fa
    to spca (or ppca), pca and whitening sub-classes.
    Based on ref.1
    """
    k = None
    # Class-level default: inference helpers check self.trained and train
    # lazily; without this default they raised AttributeError on a fresh,
    # never-trained instance (lm only sets it at the end of a run).
    trained = False
    def __init__(self, y, k):
        super(fa, self).__init__(y)
        if not isinstance(k, int):
            raise TypeError('k (the number of latent factors) must be an integer')
        if k <= 0:
            raise ValueError('k (the number of latent factors) must be positive')
        if k > self.p:
            raise ValueError('k (the number of latent factors) must not be greater than p (the number of observables)')
        self.k = k
        self.initialize()
    def initialize_Q(self):
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Covariance matrix Q of hidden factors = I matrix
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        self.Q = eye(self.k)
    def initialize_C(self):
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Following MDP init settings
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        scale = product(self.yyTdiag) ** (1./self.p)
        assert scale > 0, "Input covariance matrix is singular"
        self.C = normal(0, sqrt(scale / self.k), size = (self.p, self.k))
    def initialize_R(self, with_WN = False):
        # Uniquenesses start at the observed per-variable variances,
        # optionally jittered with white noise.
        self.R = self.yyTdiag
        if with_WN: self.R += randn(self.p)
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # From pag.531 of ref.4
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        ## self.R = (1-.5*self.p/self.k)*self.yyTdiag
    def initializelogLH(self):
        # Log-likelihood bookkeeping: current value, last delta, the
        # constant term, the per-iteration history and the break flag.
        self.logLH = -inf
        self.deltalogLH = inf
        self.logLH_const = -.5 * self.p * log(2. * pi)
        self.logLH_tracks = []
        self.logLH__break = False
    def initialize(self):
        """Initialization step: C and other vars, get observed data covariance"""
        self.arangek, self.arangep = arange(self.k), arange(self.p)
        self.yyT = self.cov_obs()
        self.yyTdiag = self.yyT[self.arangep, self.arangep]
        self.initialize_C()
        self.initialize_R()
        self.initialize_Q()
        self.initializelogLH()
    def InferandLearn(self, max_iter_nr = 20, logLH_delta = 1e-3,
                      inferwithLemma = True, **kwargs):
        # Select the beta computation strategy once, then run the generic
        # EM loop from lm.
        # NOTE(review): extra **kwargs are accepted but not forwarded to
        # lm.InferandLearn — confirm this is intended.
        self.betaInferenceMethod = self.betaInferenceLemma \
            if inferwithLemma else self.betaInference
        super(fa, self).InferandLearn(max_iter_nr = max_iter_nr,
                                      logLH_delta = logLH_delta)
    def break_condition(self):
        # Stop when the log-likelihood improvement falls inside the
        # +/- logLH_delta band.
        if -self.logLH_delta < self.deltalogLH < self.logLH_delta:
            self.logLH__break = True
            return True
        return False
    def betaInferenceLemma(self):
        C, CT, Rinv = self.C, self.C.T, self.R ** -1
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Applying the matrix inversion lemma
        # See: - pag.334 in ref.1
        #      - aka Sherman-Morrison-Woodbury formula at pag.50
        #        in ref.8 (formula (2.1.4)
        #      - aka binomial inverse theorem at
        #        http://en.wikipedia.org/wiki/Binomialinverse_theorem
        #      - or derived from matrix blockwise inversion as in
        #        http://en.wikipedia.org/wiki/Invertible_matrix#Blockwiseinversion
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        beta_temp = multiply(CT, Rinv)
        beta = dot(beta_temp, C)
        beta[self.arangek, self.arangek] += 1.
        beta = -dot(C, dot(inv(beta), beta_temp))
        beta[self.arangep, self.arangep] += 1.
        self.logLH_temp = multiply(Rinv.reshape(self.p, 1), beta)
        self.beta = dot(beta_temp, beta)
    def betaInference(self):
        C, R = self.C, self.R
        CT = C.T
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Applying the classical method to invert beta
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        beta = dot(C, CT)
        beta[self.arangep, self.arangep] += R
        self.logLH_temp = beta  # NB: not inverted here, unlike the lemma path
        self.beta = dot(CT, inv(beta))
    def Inference(self):
        """
        E step of EM algorithm
        Inference of sufficient statistic E(x|y) (here x_y)
        NB Computing beta via the matrix inversion lemma,
        in place of apply ordinary formulas, does not
        bring performances improvements.
        TODO Test if betaInferenceLemma gives performances advantages
        for (very) high p.
        """
        self.betaInferenceMethod()
        self.V = - dot(self.beta, self.C)
        self.V[self.arangek, self.arangek] += 1.
    def Learning_R(self):
        """
        Learning R in M step of EM algorithm
        It is necessary to learn R separately in order to give
        sub-classes able to override the present method
        """
        self.R = self.yyTdiag - sum(multiply(self.C, self.delta), axis=1) / self.n
    def Learning(self):
        """
        M step of EM algorithm
        Computing delta and gamma (ref.1 at pag.335).
        Learning and updating model's parameters C and R
        """
        delta = dot(self.yyT, self.beta.T)
        self.gamma = self.n * (dot(self.beta, delta) + self.V)
        delta *= self.n
        self.delta = delta
        self.C = dot(self.delta, inv(self.gamma))
        self.Learning_R()
    def logLikelihood(self, mu = None):
        """
        Log-likelihood (LogLH)
        This the LogLH per sample, apart the constant self.logLH_const and
        factor .5 both affecting very sligthly the logLH values convergence
        to a (local, hopefully global) maximum.
        """
        logLH_old = self.logLH
        # `mu is not None`: the previous `mu != None` compares a numpy
        # array elementwise and raises "truth value is ambiguous".
        _yyT = self.yyT - dot(mu, mu.T) if mu is not None else self.yyT
        self.logLH = self.logLH_const - .5 * (-log(det(self.logLH_temp)) + \
                     vdot(_yyT, self.logLH_temp))
        self.deltalogLH = self.logLH - logLH_old
        self.logLH_tracks.append(self.logLH)
    def infer(self):
        # NOTE(review): this cannot currently work as written —
        # get_expected_latent requires an argument `z` and
        # infer_observed is commented out below. Confirm the intended
        # API before relying on it.
        return self.get_expected_latent(), self.infer_observed()
    def get_expected_latent(self, z):
        """E[x|z]: project (centered) observations z onto the factors."""
        if not(self.trained): self.InferandLearn()
        return dot(self.beta, z - self.mu_y())
    #def infer_observed(self, noised = False):
    #
    #    if not(self.trained): self.InferandLearn()
    #    inf_y = dot(self.C, self.y)) - self.mu_y()
    #    if noised:
    #        return inf_y + multivariate_normal(zeros(self.p),
    #                                           diag(self.R), self.n).T
    #    return inf_y
    def get_new_observed(self, input, noised = False):
        """input: nr. or latent samples corresponding to new observations in output"""
        if not(self.trained):
            self.InferandLearn()
        if isinstance(input, int):
            input = normal(size = (self.k, input))
        new_obs = dot(self.C, input) + self.mu_y()
        if noised:
            new_obs += multivariate_normal(zeros(self.p),
                                           diag(self.R), input.shape[1]).T
        return new_obs
class spca(fa):
    """
    Sensible (or Probabilistic) Principal Component Analysis. See ref.1 and 15
    """
    def initialize_R(self):
        # Spherical observation noise: R = const * I, with the constant
        # taken as the average observed per-variable variance.
        avg_var = mean(self.yyTdiag)
        self.R = avg_var * ones(self.p)
    def Learning_R(self):
        # M-step update of R, again constrained to const * I: average the
        # per-variable residual variances left unexplained by C.
        residual = self.yyTdiag - \
            sum(multiply(self.C, self.delta), axis=1) / self.n
        self.R = mean(residual) * ones(self.p)
class ppca(spca):
    """Alias for spca (Probabilistic PCA and Sensible PCA name the same model)."""
    pass
class pca(fa):
    """
    EM algorithm for Principal Component Analysis. See ref.1 and 15.
    The zero-observation-noise limit of factor analysis (R = 0).
    TODO: Check EM method in order to get results comparable with the svd's ones

    Fix: the original defined logLikelihood twice in a row; the duplicate
    definition has been removed (the second silently shadowed the first).
    """
    def __init__(self, y, k = None):
        """y: (p, n) data matrix; k: number of principal components."""
        # super(fa, self): deliberately skip fa.__init__ and run the
        # grandparent initializer -- pca validates k on its own terms.
        super(fa, self).__init__(y)
        if not isinstance(k, int):
            raise TypeError('k (the number of latent factors) must be an integer')
        if k <= 0:
            raise ValueError('k (the number of latent factors) must be positive')
        if k > self.p:
            raise ValueError('k (the number of latent factors) must not be greater than p (the number of observables)')
        self.k = k
        self.initialize()
    def initialize_C(self):
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Following MDP init settings: random loadings, unit-norm columns
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        scale = 1. #product(self.yyTdiag) ** (1./self.p)
        self.C = normal(0, sqrt(scale / self.k), size = (self.p, self.k))
        self.C = array([e / norm(e) for e in self.C.T]).T
    def initialize_R(self):
        # Observation noise covariance fixed to zero (pure PCA)
        self.R = zeros(self.p)
    def Inference(self):
        self.betaInference()
    def betaInference(self):
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Read note 10, pag.318, ref.1 about the formula of beta:
        # beta = (C^T C)^-1 C^T, the pseudo-inverse projection onto
        # the column space of C.
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        CT = self.C.T
        self.beta = dot(inv(dot(CT, self.C)), CT)
    def InferandLearn(self, max_iter_nr = 20, svd_on = True, **kwargs):
        """Fit by direct SVD (default) or by iterating the EM steps."""
        self.V = zeros((self.k, self.k))
        if svd_on:
            self.svd_on = True
            self.svd()
        else:
            self.svd_on = False
            lm.InferandLearn(self, max_iter_nr = max_iter_nr, **kwargs)
            #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            # According to ref.15: "...The columns of C will span the
            # space of the first k principal components. (To compute
            # the corresponding eigenvectors and eigenvalues explicitly,
            # the data can be projected into this-dimensional subspace
            # and an ordered orthogonal basis for the covariance in the
            # subspace can be constructed.)..."
            #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            self.projOrthogBasis()
    def projOrthogBasis(self):
        """Rotate the EM-learned C onto an ordered orthogonal basis (ref.15)."""
        xlatent = self.get_expected_latent(self.y)
        U, s, V = svd(xlatent-xlatent.mean(axis=1).reshape(self.k, 1),
                        full_matrices = False)
        self._variances = s
        self._scores = (s.reshape(self.k, 1) * V)
        self._loadings = U
        self.C = dot(self.C, U)
        self.trained = True
    def Learning(self):
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # M step of EM algorithm (zero-noise limit: no R update)
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        self.delta = dot(self.yyT, self.beta.T)
        self.gamma = dot(self.beta, self.delta)
        self.C = dot(self.delta, inv(self.gamma))
    # Zero-noise model: likelihood and R bookkeeping of fa do not apply.
    def logLikelihood(self): pass
    def Learning_R(self): pass
    def break_condition(self): pass #return self.lse() #based on Least Squares projection error
    def lse(self): pass
    def svd(self):
        """Finding Principal Components via SVD method"""
        U, s, V = svd(self.y-self.mu_y(), full_matrices = False)
        self._variances = s
        self._scores = (s[:self.k].reshape(self.k, 1) * V[:self.k,:])
        self._loadings = U
        self.C = U[:,:self.k]
        self.trained = True
    #def get_expected_latent(self): return dot(self.beta, self.y)
    #def get_latent(self): return dot(self.C.T, self.centered_input())
class whitening(pca):
    """PCA followed by rescaling each retained component to unit variance."""
    def InferandLearn(self, **kwargs):
        pca.InferandLearn(self, svd_on = True, **kwargs)
        # NOTE(review): self.VarSvd is not assigned anywhere in this file --
        # pca stores the singular values in self._variances; confirm VarSvd
        # is set by a superclass, otherwise this raises AttributeError.
        self.C /= sqrt(self.VarSvd[:self.k]).reshape(1, self.k)
class mixture(lm):
    """Abstract base for mixture models (mog, vq, kmeans, mofa)."""
    # class-level defaults: nr of components and centroid-prior type
    m, typePrior_mu = None, ''
    def initialize(self):
        """
        Initialization step
        - pi with random values sum up to unity
        - mu from a Uniform pdf in the range of min/max of input data
        or a Gaussian pdf with mean and var of input data
        - sigma with a random square matrix for each cluster
        """
        self.arangep = arange(self.p)
        # second-moment matrix of the observations and its diagonal
        self.yyT = self.cov_obs()
        self.yyTdiag = self.yyT[self.arangep, self.arangep]
        self.initialize_pi()
        self.initialize_mu()
        self.initialize_Resp()
        self.initialize_sigma()
        self.initializelogLH()
    def initialize_Resp(self):
        # random responsibilities, normalized per sample
        self.Resp = rand(self.m, self.n)
        self.normalizeResp()
    def Resppower(self): pass  # hook sharpened by vq/hardvq subclasses
    def normalizeResp(self):
        self.Resp /= self.Resp.sum(axis = 0)
    def initialize_pi(self):
        # random mixing weights summing to one
        self.pi = rand(self.m)
        self.pi /= self.pi.sum()
    def pi_clusters(self):
        # M-step update of the mixing weights
        self.pi = self.Resp.sum(axis = 1) / self.n
    def Normalprior_mu(self, scale = 1):
        """Normal prior on cluster's centroids"""
        self.mu = multivariate_normal(self.mu_y().ravel(), \
                        self.cov_obs() / scale, self.m).reshape(self.m, self.p)
    def Uniformprior_mu(self):
        """Uniform prior on cluster's centroids"""
        # NOTE(review): relies on Python 2 map() returning a list; the array
        # is built per-dimension (shape (p, m)) before the reshape to (m, p),
        # which interleaves coordinates -- verify a transpose is not intended.
        _uniform = lambda i, j: uniform(i, j, self.m)
        self.mu = array(map(_uniform, amin(self.y, axis=1), \
                        amax(self.y, axis=1))).reshape(self.m, self.p)
    def initialize_mu(self):
        """Following a Strategy DP"""
        if self.typePrior_mu == 'normal' or not(self.typePrior_mu):
            self.Normalprior_mu()
        if self.typePrior_mu == 'uniform':
            self.Uniformprior_mu()
    def initializelogLH(self):
        # log-likelihood bookkeeping: current value, last delta, history
        self.logLH = -inf
        self.deltalogLH = inf
        self.logLH_tracks = []
        self.logLH__break = False
        self.logLH_const = -.5 * self.p * log(2. * pi)
        # An help from already available Resp values
        self.LH_temp = empty((self.m, self.n))
    def CrossProdFactory(self, inv_sigma):
        """Curried method in order to speed up the processing"""
        def CrossProdinner(z):
            # quadratic form z^T inv_sigma z
            return dot(dot(z, inv_sigma), z.T)
        return CrossProdinner
    def Inference(self):
        """E step of EM algorithm"""
        for Resp_i, pi, mu, sigma, LH in zip(self.Resp, self.pi, self.mu,
                        self.sigma, self.LH_temp):
            yCent_i = self.y - mu.reshape(self.p, 1)
            try:
                #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                # There's no need to multiply const_i by (2*pi)**self.p
                #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                const_i = abs(det(sigma)) ** -.5
                #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                # Currying CrossProdFactory with inv(self.sigma[i])
                #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                _CrossProd = self.CrossProdFactory(inv(sigma))
                # In-place assignment keeps Resp_i/LH aliased to the stored
                # rows of self.Resp / self.LH_temp (Python 2 map -> list).
                Resp_i[:] = LH[:] = pi * const_i * \
                        exp(-.5 * array(map(_CrossProd, yCent_i.T)))
            #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            # If no data in the i-th cluster, fixing pi[i] to zero
            #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            except linalg.LinAlgError:
                # NOTE(review): this rebinds the local loop variable only;
                # self.pi[i] itself is NOT modified -- confirm intent.
                pi = 0.
                pass
        self.Resppower()
        self.normalizeResp()
    def Learning(self):
        """
        M step of EM algorithm
        Call learning sub-methods in the following order:
        I) self.mu_clusters()
        II) self.sigma_clusters()
        III) self.pi_clusters()
        NB It's mandatory to call self.mu_clusters() before
        self.sigma_clusters(), because the latter needs the
        terms just updated by self.mu_clusters.
        """
        self.mu_clusters()
        self.sigma_clusters()
        self.pi_clusters()
    def logLikelihood(self):
        logLH_old = self.logLH
        """
        LH = 0.
        LHComp = []
        for pi, mu, sigma in zip(self.pi, self.mu, self.sigma):
            const_i = const * abs(det(sigma)) ** -.5
            inv_sigma = inv(sigma)
            _mu = mu.reshape(self.p, 1)
            LHComp.append(pi * vdot(self.yyT - dot(_mu, _mu.T), .5 * inv_sigma))
        self.logLH = sum(LHComp)
        """
        # Per-sample log-likelihood from the LH_temp cache filled in the E step
        self.logLH = self.logLH_const + sum(map(log, self.LH_temp.sum(axis = 0))) / self.n
        self.deltalogLH = self.logLH - logLH_old
        self.logLH_tracks.append(self.logLH)
    def MAP(self):
        # maximum a posteriori component index for each sample
        return argmax(self.Resp, axis = 0)
    def GetExpectedLatent(self):
        return self.MAP()
    # No (WN) noise added
    def InferObs(self):
        # split the observations by their MAP cluster assignment
        mp = self.MAP()
        return [self.y[:, mp == i] for i in xrange(self.m)]
    # No (WN) noise added
    def GetNewObs(self, centered = False): pass
    def GetCompProb(self, obs):
        """Per-component (unnormalized) scores for a single observation."""
        CompProb = []
        for pi, mu, sigma in zip(self.pi, self.mu, self.sigma):
            try:
                # NOTE(review): `pi` here is the mixing weight (loop var),
                # not the math constant, so (2*pi)**p looks wrong; the
                # quadratic form is also not exponentiated. Verify formula.
                const_i = ((2*pi) ** self.p) * (abs(det(sigma)) ** -.5)
                _CrossProd = self.CrossProdFactory(inv(sigma))
                CompProb.append(pi * const_i * \
                        _CrossProd(obs - mu.reshape(self.p, 1)))
            except linalg.LinAlgError: CompProb.append(0.)
        return CompProb
    def entropy(self):
        raise NotImplementedError
class mog(mixture):
    """Mixture of Gaussians. Based mainly on ref.3, and also on ref.1, 6 and 7"""
    def __init__(self, y, m, typePrior_mu = ''):
        super(mog, self).__init__(y)
        if not isinstance(m, int):
            raise TypeError('m (the number of mixture components) must be an integer')
        if m <= 0:
            raise ValueError('m (the number of mixture components) must be positive')
        self.m = m
        self.typePrior_mu = typePrior_mu
        self.initialize()
    def initialize_sigma(self):
        """Allocate one p-by-p covariance per component, then fill them in."""
        self.sigma = empty(shape = (self.m, self.p, self.p))
        self.sigma_clusters()
    def mu_clusters(self):
        """Responsibility-weighted mean of the data for each component."""
        total_resp = self.Resp.sum(axis = 1).reshape(self.m, 1)
        self.mu = dot(self.Resp, self.y.T) / total_resp
    def sigma_clusters(self):
        """Responsibility-weighted covariance of each component (in place)."""
        for resp_row, centre, cov in zip(self.Resp, self.mu, self.sigma):
            centred = self.y - centre.reshape(self.p, 1)
            cov[:] = dot(resp_row * centred, centred.T) / resp_row.sum()
class vq(mog):
    """
    Soft Vector Quantization: a MoG whose responsibilities are sharpened by
    raising them to the power alfa before normalization.
    high alfa: small and separated clusters, approaching hard clustering or WTA rule
    alfa = 1: mog
    low alfa: smooth, fuzzy like, large and overlapping clusters
    """
    def InferandLearn(self, max_iter_nr = 100, alfa = 1): #entropy_delta = 1e-3
        self.alfa = alfa
        lm.InferandLearn(self, max_iter_nr = max_iter_nr)
    # NOTE(review): `powerer` is not defined in this chunk -- presumably an
    # elementwise-power helper defined elsewhere in the module; confirm.
    def Resppower(self): self.Resp = powerer(self.Resp, self.alfa)
class hardvq(vq):
    """alfa -> infinity limit: hard clustering / Winner-Take-All (WTA) rule."""
    def normalizeResp(self):
        # Responsibilities are already one-hot; nothing to normalize.
        pass
    def Resppower(self):
        """Replace soft responsibilities by a one-hot winner per sample."""
        winners = nanargmax(self.Resp, axis=0)
        onehot = zeros((self.m, self.n))
        onehot[winners, arange(self.n)] = 1.
        self.Resp = onehot
class hard2vq(hardvq):
    """WTA with all clusters kept equally probable (uniform, fixed pi)."""
    def initialize_pi(self):
        self.pi = ones(self.m, dtype = 'float') / self.m
    def pi_clusters(self):
        # pi stays uniform: skip the M-step update.
        pass
class kmeans(hard2vq):
    """WTA, uniform cluster priors, and covariances fixed to the identity."""
    def initialize_sigma(self):
        # Build a stack of m identity matrices of size p-by-p.
        eye_stack = zeros(shape = (self.m, self.p, self.p))
        eye_stack[:, self.arangep, self.arangep] = ones(self.p)
        self.sigma = eye_stack
    def sigma_clusters(self):
        # Covariances are fixed to the identity: no update.
        pass
class mofa(mixture):
    """
    Mixture of Factor Analyzers
    Based on ref.2, 3, 10 and 11.
    NB Formula (11) in ref.3, concerning the learning of the Uniquenesses
    of each Factor Analyzer, seems to be not correct when such covariance
    matrices have not be fixed equal a priori: in the last case replace
    the N at the denominator with the sum of Responsibilities for each
    Factor Analyzer (FA).
    """
    # class-level default: tuple of per-component latent dimensions
    k = None
    def __init__(self, y, m, k, commonUniq = False, typePrior_mu = 'normal'):
        """
        m: the nr of mixture FA components
        k: the (tuple of) nr of hidden factors for each FA component
        commonUniq: a boolean set to True if we want to learn a common
        Uniquenesses term for each FA component, as in ref.2
        (at regard see class method postprocess_R).
        typePrior_mu: set to 'normal' or 'uniform' defining the pdf used
        to initialize the self.mu terms.
        """
        super(mofa, self).__init__(y)
        if not isinstance(m, int):
            raise TypeError('m (the number of mixture components) must be an integer')
        if m <= 0:
            raise ValueError('m (the number of mixture components) must be positive')
        self.m = m
        if hasattr(k, '__iter__'):
            # NOTE(review): under Python 3 map() is lazy and this check
            # would never raise; the code relies on Python 2 semantics.
            try: map(int, k)
            except ValueError:
                raise ValueError('Specify integers for the iterable of latent factors nr')
            if len(k) < m:
                raise Exception('Specify as many latent factors as components nr')
            for ki in k:
                if ki <= 0:
                    raise Exception('Specify positive integers as latent factors nr')
        else:
            try: k = int(k)
            except ValueError:
                raise Exception('Specify an integer for the nr of latent factors')
            if k <= 0:
                raise Exception('Specify a positive integer for the nr of latent factors')
            k = [k] * m
        self.k = tuple(k)
        self.commonUniq = commonUniq
        self.typePrior_mu = typePrior_mu
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Instantiating the Mixture of Factor Analyzers
        # Giving None in input to fa class instances, because
        # superclass lm just got data y when given in input to mofa
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        self.fas = [fa(None, ki) for ki in self.k]
        self.initialize()
    def initialize_sigma(self):
        # Each component's full covariance is C C^T + diag(R) of its FA.
        # (NB the loop variable `fa` shadows the fa class inside the loop.)
        self.sigma = zeros(shape = (self.m, self.p, self.p))
        for sg, fa in zip(self.sigma, self.fas):
            sg[:] = dot(fa.C, fa.C.T) + diag(fa.R)
    def initializelogLH(self):
        self.logLH = -inf
        self.deltalogLH = inf
        self.logLH_tracks = []
        self.logLH__break = False
        # Each component FA keeps its own likelihood bookkeeping and beta
        for fa in self.fas:
            fa.initializelogLH()
            fa.betaInferenceLemma() #betaInference()
        self.logLH_const = -.5 * self.p * log(2. * pi)
        # An help from already available Resp values
        self.LH_temp = empty((self.m, self.n))
    def mu_clusters(self):
        """
        This function is by-passed because self.mu's are calculated in the
        coupled matrix equations in sigma-clusters, together to fa.C matrix.
        """
        pass
    def sigma_clusters(self):
        # Coupled E/M update per component: solves jointly for the FA
        # loadings C and the centroid mu via the block system below.
        sumResp_all = self.Resp.sum(axis = 1)
        for i, (k, mu, Resp, sumResp, cls) in enumerate(zip(self.k, \
                        self.mu, self.Resp, sumResp_all, self.fas)):
            y = self.y
            p, n = self.p, self.n
            _mu = mu.reshape(p, 1)
            cls.betaInferenceLemma() #betaInference()
            beta, C, R = cls.beta, cls.C, cls.R
            ######################################
            # E step (the following two rows)
            ######################################
            Exy = dot(beta, y - _mu)
            ExyBlock = vstack((Exy, ones((1, n))))
            RespExy = Resp * Exy
            sumRespExy = RespExy.sum(axis=1).reshape(k, 1)
            RespyExy = dot(Resp * y, Exy.T)
            RespyExyBlock = dot(Resp * y, ExyBlock.T)
            RespExxy = dot(RespExy, Exy.T) - sumResp * dot(beta, C)
            RespExxy[arange(k), arange(k)] += sumResp
            RespExxyBlock = vstack((hstack((RespExxy, sumRespExy)),
                            append(sumRespExy.T, sumResp)))
            ######################################
            # M step
            ######################################
            try:
                # [C | mu] solved in one shot from the block system
                Cmu = dot(RespyExyBlock, inv(RespExxyBlock))
                cls.C[:] = Cmu[:, :-1]
                mu[:] = Cmu[:, -1]
                cls.R[:] = diag(dot(Resp * y, y.T) - \
                        dot(Cmu, dot(Resp * ExyBlock, y.T)))
            except linalg.LinAlgError:
                # Singular block system: the component lost all its data
                print 'Mixture Component %d-th disappeared' % i
                self.pi[i] = 0.
        self.postprocess_R()
        # Refresh the full covariances from the updated FA parameters
        for sg, fa in zip(self.sigma, self.fas):
            sg[:] = dot(fa.C, fa.C.T) + diag(fa.R)
    def postprocess_R(self):
        """
        This function process all fa.R based on self.commonUniq settings,
        so weighting them by the sum of the component's Responsabilities,
        or taking them equal to a mean term.
        This difference emerges from what reported in the class doc about
        the distinct post-processing versions given in ref.2 and 3.
        """
        if not(self.commonUniq):
            for Resp, fa in zip(self.Resp, self.fas):
                fa.R[:] /= Resp.sum()
        else:
            Rmean = zeros(self.p)
            for fa in self.fas:
                Rmean += fa.R
            Rmean /= self.n
            for fa in self.fas:
                fa.R[:] = Rmean
    def logLikelihood_Old(self):
        """TODO: Review this!"""
        logLH_old = self.logLH
        LH = 0.
        # Sum the components' likelihoods weighted by the mixing weights
        for fa, pi, mu in zip(self.fas, self.pi, self.mu):
            fa.logLikelihood(mu = mu.reshape(self.p, 1))
            LH += exp(fa.logLH) * pi
        self.logLH = log(LH)
        self.deltalogLH = self.logLH - logLH_old
        self.logLH_tracks.append(self.logLH)
class icaMacKay(lm):
    """Online Independent Component Analysis. Based on ref.7, 9."""
    def __init__(self, y):
        super(icaMacKay, self).__init__(y)
        # square unmixing problem: as many sources as observed channels
        self.k = self.p
        self.initialize()
    def initialize(self):
        self.Q = self.R = None
        # random unmixing matrix; keep a copy of the starting point
        self.A = random.uniform(-1, 1, size = (self.p, self.p)) #rand(self.p, self.p) #
        self.A_start = empty((self.p, self.p))
        self.A_start[:] = self.A
    def nonlinear_map(self, z): return -tanh(z)
    def InferandLearn(self,
                    maxinner_iter_nr = inf,
                    maxouter_iter_nr = 10,
                    eta = 'adaptive',
                    bias_eta = 100.,
                    verbose = False):
        _nonlinear_map = self.nonlinear_map
        # 'adaptive': learning rate decays as 1/(i + bias_eta); otherwise
        # a small fixed rate is used (_switch selects between the two).
        if eta == 'adaptive':
            eta = 1. / bias_eta
            _switch = 1
        else: eta, _switch = .002, 0
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Iterations start
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        while True:
            for iter_nr in xrange(maxouter_iter_nr):
                for i, x in enumerate(self.y.T):
                    if i == maxinner_iter_nr: break
                    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    # Put x through a linear mapping
                    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    a = dot(self.A, x)
                    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    # Put a through a nonlinear map
                    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    z = _nonlinear_map(a)
                    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    # Put a back through A
                    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    xi = dot(self.A.T, a)
                    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    # Adjust the weights in accordance with
                    # If eta scales adaptively as 1/n, we have to add a term
                    # to n, otherwise the NaN values will appear in the first
                    # iterations of the algorithm.
                    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                    eta = (1 -_switch) * eta + _switch / (i + bias_eta)
                    self.A += eta * (self.A + outer(z, xi))
                # Diverged within this outer pass: re-draw A and keep going
                if any(isnan(self.A.ravel())):
                    if verbose:
                        print 'Got NaN at iter %d-th! Re-init unmix matrix A...' % (iter_nr + 1)
                    self.initialize()
            # Still NaN after all outer passes: restart the whole loop.
            # NOTE(review): self.max_iter_nr is never assigned in this class
            # (only maxouter_iter_nr exists locally) -- confirm it is set by
            # the lm superclass, otherwise this raises AttributeError.
            if any(isnan(self.A.ravel())):
                if verbose:
                    print 'Got NaN after %d iterations! Re-start and re-init unmix matrix A...' % self.max_iter_nr
                self.initialize()
                continue
            break
| |
import numpy as np
import utilities as b_utils
import initialization as b_init
# get filename and filepath for naming of corresponding files
[cf_path, cf_name] = b_utils.seperate_path_and_filename(__file__)
###############################################################################
####################### C O N F I G U R A T I O N     #########################
###############################################################################
''' Calculation parameters '''
# Number of Frames
N_T = 20
# Store current results every few steps
N_CALCS_PER_FRAME = 2
# Number of collision steps per transport step increases numerical stability
N_COLS_PER_TRANSPORT = 16
''' Specimen Parameters '''
# Number of Specimen
N_Sp = 2
# Mass of different species
MASS = np.ones(N_Sp, dtype=int)
# ALPHA[i,j] describes the probability of collisions between specimen i and j
ALPHA = np.ones((N_Sp, N_Sp), dtype=float)
ALPHA[:] = 8000
ALPHA /= N_COLS_PER_TRANSPORT
SPECIES_NAMES = ['Mixture_1',
                 'Mixture_2']
SPECIES_COLORS = ['blue',
                  'red']
''' Physical Parameters '''
# Number of calculated frames
# TODO: add physical units -> how many seconds?
#MAX_T = 200
# Maximum Value of Space grid  TODO: DX should be constant for DIM_X != 1
MAX_X = 1.0
# Maximum Value of Velocity-grid := V_OFFSET + MAX_V
MAX_V = 3.0
# Center of velocity grid
V_OFFSET = [0.0, 0.0]
''' Space Parameters '''
# Space Dimension
DIM_X = 1
# Number of Gridpoints in Space
N_X = np.ones(4, dtype=int)
N_X[0:3] = [201, 1, 1]    # [X, Y, Z] - gridpoints
N_X[3] = N_X[0:3].prod()
''' Velocity Parameters '''
# Velocity-Space Dimension
DIM_V = 2
# Number of (V-)Gridpoints per in V-Space
# TODO: This should be either an array,
#       or one should be automatically generated
# This should be the minimal N_V for all specimen
N_V = 4
''' INITIALIZATION PARAMETERS '''
# Number of different initializing densities
# -> Mainly used for shocks, TODO rename
N_Dens = 2
#### COLLISION INVARIANTS ####
# Correlate to physical quantities: Mass, Momentum, Energy.
# They are invariant under application of the collision operator.
# For entry [i,j]: i denotes the Density (as in DENSITY_SWITCH)
#                  j denotes the specimen
# RHO is a multiplicative factor applied on whole density
# Correlates to MASS
RHO = np.ones((N_Dens, N_Sp), dtype=float)
RHO[0, :] = [2, 4]
RHO[1, :] = [1, 2]
# DRIFT sets the mean velocity
# Correlates to MOMENTUM
DRIFT = np.zeros((N_Dens, N_Sp, DIM_V), dtype=float)
# TEMP sets the variance of velocities
# Correlates to ENERGY
TEMP = np.ones((N_Dens, N_Sp), dtype=float)
''' DENSITY_SWITCH '''
# DENSITY_SWITCH controls how to create the initial Density u[t=0, ...]
# If DENSITY_SWITCH is a np.array with DENSITY_SWITCH[i] == k,
#   Then u[t=0, x=i] will be initialized with of RHO[k],...
# If DENSITY_SWITCH is a string,
#   then it denotes the address of a .npy-file containing the initial density
# Construct Initial Distribution based on Collision Invariants
DENSITY_SWITCH = np.zeros(tuple(N_X[0:3]), dtype=int)
# TODO: This should get some testing first
# Fix: `== 1` instead of `is 1` -- identity comparison of ints relies on
# CPython's small-integer cache and emits a SyntaxWarning on Python >= 3.8.
assert DIM_X == 1
DENSITY_SWITCH[N_X[-1]//2:,:,:] = 1
DENSITY_SWITCH = DENSITY_SWITCH.reshape(N_X[-1])
# Read Initial Distribution from File
# uncomment to generate file again
#DENSITY_SWITCH = cf_path + '_' + cf_name + "_initial_density.npy"
''' Animation Parameters '''
# Control which Moments are animated and in what order.
# 'COMPLETE' (commented out) would replace the individual moment plots.
OUTPUT_MOMENTS = ['Mass',
                  'Mass Flow',
                  'Momentum',
                  'Momentum Flow',
                  'Energy',
                  'Energy Flow',
                  # 'COMPLETE'
                  ]
###############################################################################
######################  AUTOMATIC GENERATION OF GRIDS    ######################
###############################################################################
# TODO all of this should be more elegant
# TODO Major step for complex mixtures!
# Fix: isinstance() instead of `type(x) == T` for type tests.
if isinstance(N_V, int):
    # TODO: This only works for simple mixtures
    N_V = np.ones(N_Sp, dtype=int) * N_V
# TODO: This only works for DIM_X == 1
# Fix: `== 1` instead of `is 1` (identity test on an int literal).
assert DIM_X == 1
if isinstance(MAX_X, (float, int)):
    MAX_X = np.ones(DIM_X, dtype=float) * MAX_X
# Change Data types from lists to np.arrays, for simple indexing
# TODO do this more elegantly
N_V = np.array(N_V)
V_OFFSET = np.array(V_OFFSET)
#### Create Space Grid ####
X = b_init.get_space_array(DIM_X,
                           N_X,
                           MAX_X)
#### Create Velocity Grid ####
# Make V-index array - marks beginning for velocities of each species
spv_index = b_init.get_index_array_of_species(DIM_V,
                                              N_Sp,
                                              N_V)
# Create Velocity array
# contains all velocities of all species in a row
V = b_init.get_velocity_array(DIM_V,
                              N_Sp,
                              N_V,
                              MAX_V,
                              V_OFFSET,
                              spv_index)
# Generate Stepsizes
# TODO for DIM_X != 1 this might lead to errors
assert DIM_X == 1
DX = X[1, 0] - X[0, 0]    # Stepsize in X
# TODO change this into an assert, that checks for numerical stability
DT = 0.25*DX/MAX_V    # Stepsize in T
###############################################################################
#######################   ASSERT SIMPLE CONDITIONS    #########################
###############################################################################
# Calculation/Time parameters
assert type(N_T) is int
assert type(N_CALCS_PER_FRAME) is int
assert type(N_COLS_PER_TRANSPORT) is int
assert type(DT) == np.float64
# Specimen Parameters
assert type(N_Sp) is int
assert N_Sp >= 1
assert type(MASS) is np.ndarray
assert MASS.dtype == int
assert MASS.shape == (N_Sp,)
assert type(ALPHA) is np.ndarray
assert ALPHA.dtype == np.float64
assert ALPHA.shape == (N_Sp, N_Sp)
assert type(SPECIES_NAMES) is list
assert len(SPECIES_NAMES) == N_Sp
assert type(SPECIES_COLORS) is list
assert len(SPECIES_COLORS) == N_Sp
assert type(spv_index) is np.ndarray
assert spv_index.dtype == int
assert spv_index.shape == (N_Sp + 1,)
assert spv_index[-1] == np.sum(N_V**DIM_V)
# Physical Parameters
assert type(MAX_X) is np.ndarray
assert MAX_X.dtype == np.float64
assert MAX_X.shape == (DIM_X,)
assert type(MAX_V) is float
assert type(V_OFFSET) is np.ndarray
assert V_OFFSET.dtype == np.float64
assert V_OFFSET.shape == (DIM_V,)
# Space Parameters
assert type(DIM_X) is int
assert DIM_X in [1, 2, 3]
assert type(N_X) is np.ndarray
assert N_X.dtype == int
assert N_X.shape == (4,)
# Fix: `!=` instead of `is not` -- identity tests against int literals
# depend on CPython's small-int cache and warn on Python >= 3.8.
assert DIM_X != 1 or (N_X[0] != 1 and
                      N_X[1] == 1 and
                      N_X[2] == 1)
assert DIM_X != 2 or (N_X[0] != 1 and
                      N_X[1] != 1 and
                      N_X[2] == 1)
assert DIM_X != 3 or (N_X[0] != 1 and
                      N_X[1] != 1 and
                      N_X[2] != 1)
assert N_X[-1] == N_X[0:3].prod()
assert type(X) is np.ndarray
assert X.dtype == np.float64
assert X.shape == (N_X[-1], DIM_X)
assert type(DX) == np.float64
# Velocity Parameters
assert type(DIM_V) is int
assert DIM_V in [2, 3]
assert DIM_V >= DIM_X
assert type(N_V) is np.ndarray
assert N_V.dtype == int
assert N_V.shape == (N_Sp,)
assert type(V) is np.ndarray
assert V.dtype == np.float64
assert V.shape == (spv_index[-1], DIM_V)
# initialization parameters
assert type(N_Dens) is int
# Collision Invariants
assert type(RHO) is np.ndarray
assert RHO.dtype == np.float64
assert RHO.shape == (N_Dens, N_Sp)
assert type(DRIFT) is np.ndarray
assert DRIFT.dtype == np.float64
assert DRIFT.shape == (N_Dens, N_Sp, DIM_V)
assert type(TEMP) is np.ndarray
assert TEMP.dtype == np.float64
assert TEMP.shape == (N_Dens, N_Sp)
# DENSITY_SWITCH
if type(DENSITY_SWITCH) is np.ndarray:
    assert DENSITY_SWITCH.max() == N_Dens - 1
    assert DENSITY_SWITCH.shape == (N_X[-1],)
elif type(DENSITY_SWITCH) is str:
    # assert the dimensions from file match the control values.
    # Fix: the original compared against (N_X, ...) where N_X is a 4-element
    # array; tuple equality then compares array == int elementwise and
    # raises ValueError. The stored density spans the flattened space grid,
    # i.e. N_X[-1] positions.
    assert np.load(DENSITY_SWITCH).shape == (N_X[-1], np.sum(N_V**DIM_V))
# Animation Parameters
assert type(OUTPUT_MOMENTS) is list
assert set(OUTPUT_MOMENTS) <= set(['Mass',
                                   'Mass Flow',
                                   'Momentum',
                                   'Momentum Flow',
                                   'Energy',
                                   'Energy Flow',
                                   'COMPLETE'])
assert ('COMPLETE' not in OUTPUT_MOMENTS
        or OUTPUT_MOMENTS == ['COMPLETE'])
| |
#!/usr/bin/env python
# coding=utf-8
#
# Copyright 2015 Arn-O. See the LICENSE file at the top-level directory of this
# distribution and at
# https://github.com/Arn-O/py-kodi-remote-controller/blob/master/LICENSE.
'''
Kodi remote controller based on HTTP/TCP transport, JSON and using the (cmd) interface.
'''
import kodi_api
import en_api
import fancy_disp
import socket
import requests
import json
#from datetime import timedelta
from progressbar import *
import pickle
import time
import random
import cmd
import logging
import argparse
from sys import exit
# module-level logger, configured in get_pykodi_params() from -v flags
logger = logging.getLogger(__name__)
# global constants
BUFFER_SIZE = 1024          # TCP receive buffer size, in bytes
DISPLAY_NB_LINES = 10       # number of lines shown per page in listings
PROFILE_NAME = 'Kodi library'
# JSON-RPC id field names for albums and songs
ALBUM = 'albumid'
SONG = 'songid'
#TODO: add introspect
#TODO: display number of transactions calls in echonest API
# utility functions
def get_pykodi_params():
    '''Parse CLI args for the Kodi server (IP, port, transport, API keys).

    Returns a tuple (server_params, echonest_key, command).
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument("ip",
            help='IP of your Kodi server')
    parser.add_argument("--tcp",
            action="store_true",
            help='Use TCP transport')
    parser.add_argument("-p", "--port",
            type=int,
            default=9090,
            help='TCP or HTTP port of the Kodi server')
    parser.add_argument("-u", "--user",
            help='User for HTTP transport')
    parser.add_argument("-pw", "--password",
            help='Password for HTTP transport')
    parser.add_argument("-v", "--verbosity",
            action="count",
            help='Increase output verbosity')
    parser.add_argument("-enk", "--echonest-key",
            help='Echonest API key')
    parser.add_argument("-c", "--command",
            default=0,
            help='Execute command and quit.')
    args = parser.parse_args()
    server_params = {}
    server_params['tcp'] = args.tcp
    server_params['ip'] = args.ip
    server_params['port'] = args.port
    server_params['user'] = args.user
    server_params['password'] = args.password
    # Fix: with action="count" the verbosity can exceed 2 (-v -v -v); the
    # original `== 2` test then configured no logging at all. args.verbosity
    # is None when -v is absent, hence the `or 0` guard.
    verbosity = args.verbosity or 0
    if verbosity >= 2:
        logging.basicConfig(level=logging.DEBUG)
    elif verbosity == 1:
        logging.basicConfig(level=logging.INFO)
    logger.info('Kodi controller started in verbosity mode ...')
    logger.debug('... and even in high verbosity mode!')
    return server_params, args.echonest_key, args.command
# local files
def is_file(fname):
    '''Return False if the file does not exist (or cannot be opened)'''
    logger.debug('call function is_file')
    try:
        # Fix: the original opened the file and never closed it, leaking a
        # file handle on every call; the context manager closes it promptly.
        with open(fname):
            pass
    except IOError:
        return False
    return True
def is_library_files():
    '''Check if there are library local files'''
    logger.debug('call function is_library_files')
    # all() short-circuits exactly like the original chained `and`s
    ret = all(is_file(name) for name in ('albums.pickle', 'songs.pickle'))
    logger.info('library files check: %s', ret)
    return ret
def get_audio_library(obj):
    '''Manage lists for audio library, from a local file or the server'''
    logger.debug('call function get_audio_library')
    logger.debug('load albums library in memory')
    # prefer the cached pickle files; fall back to a full server fetch
    loader = get_audio_library_from_files if is_library_files() \
            else get_audio_library_from_server
    loader(obj)
def save_songs(songs):
    '''Save songs to local files'''
    logger.debug('call function save_songs')
    # Fix: context manager guarantees the handle is closed even when
    # pickle.dump raises (the original leaked it on error).
    with open('songs.pickle', 'wb') as f:
        pickle.dump(songs, f)
def save_albums(albums):
    '''Save albums to local files'''
    logger.debug('call function save_albums')
    # Fix: context manager guarantees the handle is closed even when
    # pickle.dump raises (the original leaked it on error).
    with open('albums.pickle', 'wb') as f:
        pickle.dump(albums, f)
def get_audio_library_from_files(obj):
    '''Load the library in memory from local files

    Populates obj.songs / obj.albums (dicts) and their counters.
    '''
    logger.debug('call function get_audio_library_from_files')
    # Fix: with-statements close both handles even if unpickling fails.
    with open('songs.pickle', 'rb') as f:
        obj.songs = pickle.load(f)
    obj.nb_songs = len(obj.songs)
    with open('albums.pickle', 'rb') as f:
        obj.albums = pickle.load(f)
    obj.nb_albums = len(obj.albums)
def get_audio_library_from_server(obj):
    '''Load the library in memory from the Kodi server

    Fetches all songs then all albums over JSON-RPC in paginated batches,
    fills obj.songs / obj.albums, and caches both to local pickle files.
    '''
    logger.debug('get_audio_library_from_server')
    print "Loading the Kodi server library, this may be very long"
    print
    # Loading songs
    # a dummy 1-song request is used only to read the total count
    songs_dummy = kodi_api.audiolibrary_get_songs(obj.kodi_params, 0, 1)
    nb_songs = songs_dummy['limits']['total']
    logger.debug('number of songs: %i', nb_songs)
    if nb_songs==0:
        logger.critical("Library seems to be empty.")
        exit()
    obj.nb_songs = nb_songs
    widgets = [
            'Songs: ', Percentage(),
            ' ', Bar(marker='#',left='[',right=']'),
            ' (', Counter(), ' in ' + str(nb_songs) + ') ',
            ETA()]
    pbar = ProgressBar(widgets=widgets, maxval=nb_songs)
    pbar.start()
    # paginate 20 songs per request (relies on Python 2 range() -> list)
    limits = range(0, nb_songs, 20)
    if not limits[-1] == nb_songs:
        limits.append(nb_songs)
    for start, end in zip(limits[:-1], limits[1:]):
        logger.info('Processing song %i to %i ...', start, end)
        pbar.update(start)
        # retry the same page until the API answers with a complete result
        while True:
            try:
                #TODO: use an API function
                command = {"jsonrpc": "2.0",
                        "method": "AudioLibrary.GetSongs",
                        "params": {
                            "properties": [
                                "title",
                                "artist",
                                "year",
                                "rating",
                                "playcount",
                                "musicbrainztrackid",
                                "genre"
                                ],
                            "limits": {
                                "start": start,
                                "end": end } },
                        "id": 1}
                ret = kodi_api.call_api(obj.kodi_params, command)
                for song in ret['result']['songs']:
                    obj.songs[song['songid']] = {}
                    obj.songs[song['songid']]['title'] = song['title']
                    if song['artist']:
                        obj.songs[song['songid']]['artist'] = song['artist'][0]
                    else:
                        obj.songs[song['songid']]['artist'] = ''
                    obj.songs[song['songid']]['year'] = song['year']
                    obj.songs[song['songid']]['rating'] = song['rating']
                    obj.songs[song['songid']]['playcount'] = song['playcount']
                    obj.songs[song['songid']][
                            'musicbrainztrackid'] = song['musicbrainztrackid']
                    obj.songs[song['songid']]['genre'] = song['genre']
                    # store the last update to echonest profile
                    obj.songs[song['songid']]['rating_en'] = 0
                    obj.songs[song['songid']]['playcount_en'] = 0
                break
            except KeyError:
                #TODO: improve error catching, limit to API errors
                logger.info('error when loading library, retry')
    pbar.finish()
    save_songs(obj.songs)
    # Loading albums (same scheme, 10 per page)
    albums_dummy = kodi_api.audiolibrary_get_albums(obj.kodi_params, 0, 1)
    nb_albums = albums_dummy['limits']['total']
    logger.debug('number of albums: %i', nb_albums)
    obj.nb_albums = nb_albums
    widgets = [
            'Albums: ', Percentage(),
            ' ', Bar(marker='#',left='[',right=']'),
            ' (', Counter(), ' in ' + str(nb_albums) + ') ',
            ETA()]
    pbar = ProgressBar(widgets=widgets, maxval=nb_albums)
    pbar.start()
    limits = range(0, nb_albums, 10)
    if not limits[-1] == nb_albums:
        limits.append(nb_albums)
    for start, end in zip(limits[:-1], limits[1:]):
        logger.info('Processing album %i to %i ...', start, end)
        pbar.update(start)
        while True:
            try:
                #TODO: use an API function
                command = {"jsonrpc": "2.0",
                        "method": "AudioLibrary.GetAlbums",
                        "params": {
                            "properties": [
                                "title",
                                "artist",
                                "year"],
                            "limits": {
                                "start": start,
                                "end": end } },
                        "id": 1}
                ret = kodi_api.call_api(obj.kodi_params, command)
                for album in ret['result']['albums']:
                    obj.albums[album['albumid']] = {}
                    obj.albums[album['albumid']]['title'] = album['title']
                    if album['artist']:
                        obj.albums[album['albumid']]['artist'] = album['artist'][0]
                    else:
                        obj.albums[album['albumid']]['artist'] = ''
                    obj.albums[album['albumid']]['year'] = album['year']
                break
            except KeyError:
                logger.info('error when loading library, retry')
    pbar.finish()
    save_albums(obj.albums)
    print
# parsers
def parse_single_int(line):
    '''Parse line for a single int; None when absent or not a number'''
    logger.debug('call function parse_single_int')
    args = str.split(line)
    ret_val = None
    try:
        ret_val = int(args[0])
    # Fix: int('abc') raises ValueError, which the original let propagate;
    # IndexError covers the empty-line case.
    except (IndexError, ValueError):
        pass
    return ret_val
def parse_get_int(line):
    '''Parse line for an integer; an empty line yields 0'''
    return int(line) if line else 0
def parse_get_limits(line):
    '''Parse line and return (start, end) display limits.

    An empty line starts at 0; the window spans DISPLAY_NB_LINES entries.
    '''
    start = int(line) if line else 0
    return (start, start + DISPLAY_NB_LINES)
def parse_get_string(line):
    '''Return the first whitespace-separated token of line'''
    return line.split()[0]
# other
def get_albums_search(search_string, albums):
    '''Return sorted album ids whose title or artist contains the string.

    The caller is expected to pass *search_string* already lower-cased.
    '''
    by_title = [album_id for album_id, album in albums.items()
                if search_string in album['title'].lower()]
    by_artist = [album_id for album_id, album in albums.items()
                 if search_string in album['artist'].lower()]
    logger.debug('search result by title: %s', by_title)
    logger.debug('search result by artist: %s', by_artist)
    # De-duplicate ids matched on both fields, then sort for display.
    return sorted(set(by_title + by_artist))
def get_songs_search(search_string, songs):
    '''Return sorted song ids whose title or artist contains the string.

    The caller is expected to pass *search_string* already lower-cased.
    '''
    by_title = [song_id for song_id, song in songs.items()
                if search_string in song['title'].lower()]
    by_artist = [song_id for song_id, song in songs.items()
                 if search_string in song['artist'].lower()]
    logger.debug('search result by title: %s', by_title)
    logger.debug('search result by artist: %s', by_artist)
    # De-duplicate ids matched on both fields, then sort for display.
    return sorted(set(by_title + by_artist))
def get_genre_search(search_string, songs):
    '''Return sorted song ids whose genre list matches the string exactly.

    An exact (case-insensitive) comparison is used on purpose: a
    substring test could not tell "Classic" apart from "Classic Rock".
    '''
    wanted = search_string.lower()
    matches = []
    for song_id, song in songs.items():
        for genre in song['genre']:
            if genre.lower() == wanted:
                matches.append(song_id)
    logger.debug('search result by genre: %s', matches)
    return sorted(matches)
def set_songs_sync(server_params, songs):
    '''Sync playcount and rating

    Pulls rating/playcount for every song from the Kodi server in
    batches of 20 and updates the local *songs* dict in place, counting
    how many of each field changed.  The updated dict is persisted with
    save_songs() and a summary is printed.
    '''
    logger.debug('call set_songs_sync')
    print
    print "Updating songs rating and playcount (could be long)"
    print
    nb_songs = len(songs)
    logger.debug('number of songs: %i', nb_songs)
    widgets = [
        'Songs: ', Percentage(),
        ' ', Bar(marker='#',left='[',right=']'),
        ' (', Counter(), ' in ' + str(nb_songs) + ') ',
        ETA()]
    pbar = ProgressBar(widgets=widgets, maxval=nb_songs)
    pbar.start()
    # Batch boundaries: 0, 20, 40, ... plus nb_songs as the final bound.
    # NOTE(review): if the library is empty, limits is [] and limits[-1]
    # raises IndexError -- confirm callers never reach here with 0 songs.
    limits = range(0, nb_songs, 20)
    nb_update_rating = 0
    nb_update_playcount = 0
    if not limits[-1] == nb_songs:
        limits.append(nb_songs)
    for start, end in zip(limits[:-1], limits[1:]):
        logger.info('Processing song %i to %i ...', start, end)
        pbar.update(start)
        # Retry the whole batch whenever the server reply misses the
        # expected keys.  NOTE(review): a persistent KeyError makes this
        # loop spin forever -- consider a retry cap.
        while True:
            #TODO: use an API function
            try:
                command = {"jsonrpc": "2.0",
                           "method": "AudioLibrary.GetSongs",
                           "params": {
                               "properties": [
                                   "rating",
                                   "playcount",
                               ],
                               "limits": {
                                   "start": start,
                                   "end": end } },
                           "id": 1}
                ret = kodi_api.call_api(server_params, command)
                for r_song in ret['result']['songs']:
                    if songs[r_song['songid']]['rating'] != r_song['rating']:
                        logger.info(
                            'updating rating for %s!',
                            r_song['songid'])
                        songs[r_song['songid']]['rating'] = r_song['rating']
                        nb_update_rating += 1
                    if songs[r_song['songid']]['playcount'] != r_song['playcount']:
                        logger.info(
                            'updating playcount for %s!',
                            r_song['songid'])
                        songs[r_song['songid']]['playcount'] = r_song['playcount']
                        nb_update_playcount += 1
                break
            except KeyError:
                logger.info('error when loading library, retry')
    pbar.finish()
    save_songs(songs)
    print
    print "%i song(s) rating updated" % nb_update_rating
    print "%i song(s) playcount updated" % nb_update_playcount
    print
def get_profile_delta(songs):
    '''Songs id with echonest rating and playcount not up-to-date

    A song is part of the delta as soon as either its rating or its
    playcount differs from the last value pushed to echonest
    (the *_en mirror fields).
    '''
    logger.debug('call get_profile_delta')
    delta = []
    for song_id, song in songs.items():
        if (song['rating'] != song['rating_en']
                or song['playcount'] != song['playcount_en']):
            delta.append(song_id)
    return delta
def echonest_sync(api_key, profile_id, songs):
    '''Sync songs with echonest tasteprofile

    Pushes rating/playcount updates to the echonest taste profile in
    batches of 30.  When the remote profile is empty every song is
    pushed; otherwise only the delta computed by get_profile_delta().
    The *_en mirror fields are updated in place and persisted with
    save_songs().
    '''
    logger.debug('call echonest_sync')
    #TODO: cache the profile ID
    #TODO: create routines for echonest API calls + HTTP Kodi calls
    en_info = en_api.echonest_info(api_key, profile_id)
    if en_info['total'] == 0:
        logger.info("no songs in tasteprofile, full sync")
        songs_id_delta = songs.keys()
    else:
        logger.info("limit sync to delta")
        songs_id_delta = get_profile_delta(songs)
    nb_songs_delta = len(songs_id_delta)
    print
    print "Sync songs to the tasteprofile (this can be very very long)"
    print
    logger.info('delta size: %i', nb_songs_delta)
    logger.debug('delta songs %s', songs_id_delta)
    widgets = [
        'Songs: ', Percentage(),
        ' ', Bar(marker='#',left='[',right=']'),
        ' (', Counter(), ' in ' + str(nb_songs_delta) + ') ',
        ETA()]
    pbar = ProgressBar(widgets=widgets, maxval=nb_songs_delta)
    pbar.start()
    # slicing
    # NOTE(review): when the delta is empty, limits is [] and limits[-1]
    # raises IndexError -- confirm this path cannot be reached.
    limits = range(0, nb_songs_delta, 30)
    if not limits[-1] == nb_songs_delta:
        limits.append(nb_songs_delta)
    for start, end in zip(limits[:-1], limits[1:]):
        logger.info('Processing song index from %i to %i ...', start, end)
        pbar.update(start)
        command = []
        songs_index_slice = range(start, end)
        for song_index in songs_index_slice:
            song_id = songs_id_delta[song_index]
            # Kodi ratings are 0-5; echonest expects 0-10.
            rating = songs[song_id]['rating'] * 2
            mb_song_id = 'musicbrainz:song:' + songs[song_id]['musicbrainztrackid']
            #TODO: use API function
            command.append({
                "action": 'update',
                "item": {
                    "item_id": str(song_id),
                    "song_id": mb_song_id,
                    "rating": rating,
                    "play_count": songs[song_id]['playcount']
                }
            })
            # Mirror the pushed values so the next delta skips this song.
            # NOTE(review): mirrors are set before the POST succeeds, so a
            # failed batch is not retried on the next sync.
            songs[song_id]['rating_en'] = songs[song_id]['rating']
            songs[song_id]['playcount_en'] = songs[song_id]['playcount']
        url = 'http://developer.echonest.com/api/v4/tasteprofile/update'
        headers = {'content-type': 'multipart/form-data'}
        payload = {
            'api_key': api_key,
            'id': profile_id,
            'data': json.dumps(command)}
        logger.debug('command: %s', command)
        r = requests.post(url, headers=headers, params=payload)
        if r.status_code == 200:
            logger.debug('return: %s', r.text)
        else:
            logger.error('return: %s', r.text)
        # Stay under the echonest rate limit (~2 calls/second).
        time.sleep(0.51)
    pbar.finish()
    save_songs(songs)
    print
def echonest_playlist(api_key, profile_id):
'''Create a premium static playlist'''
logger.debug('call echonest_playlist')
#TODO: split in API function + conversion of namespace
print
print "Requesting a playlist to echonest ..."
url = 'http://developer.echonest.com/api/v4/playlist/static'
payload = {"api_key": api_key,
"type": 'catalog',
"seed_catalog": profile_id,
"bucket": 'id:' + profile_id
}
r = requests.get(url, params=payload)
logger.debug('URL: %s', r.url)
logger.debug('return: %s', r.text)
ret = r.json()
en_songs = ret['response']['songs']
playlist = []
for en_song in en_songs:
en_id = en_song['foreign_ids'][0]['foreign_id']
kodi_id = en_id.replace(profile_id + ':song:', "")
playlist.append(int(kodi_id))
return playlist
def echonest_pl_seed(api_key, profile_id, song_id):
'''Create a premium static playlist seeded by a song'''
logger.debug('call echonest_pl_song')
#TODO: split in API function + conversion of namespace
print
print "Requesting a playlist to echonest ..."
url = 'http://developer.echonest.com/api/v4/playlist/static'
en_song_id = profile_id + ':song:' + str(song_id)
payload = {"api_key": api_key,
"type": 'catalog',
"seed_catalog": profile_id,
"song_id": en_song_id,
"bucket": 'id:' + profile_id
}
r = requests.get(url, params=payload)
logger.debug('URL: %s', r.url)
logger.debug('return: %s', r.text)
ret = r.json()
en_songs = ret['response']['songs']
playlist = []
for en_song in en_songs:
en_id = en_song['foreign_ids'][0]['foreign_id']
kodi_id = en_id.replace(profile_id + ':song:', "")
playlist.append(int(kodi_id))
return playlist
def get_profile_id(api_key):
    '''Get the echonest taste profile ID, creating the profile if needed.

    NOTE(review): this function is defined twice in this file with an
    identical body; this first definition is shadowed by the duplicate
    below and one of the two should be removed.
    '''
    #TODO: split in unit API functions
    logger.debug('call get_profile_id')
    url = 'http://developer.echonest.com/api/v4/tasteprofile/profile'
    payload = {
        'api_key': api_key,
        'name': PROFILE_NAME}
    r = requests.get(url, params=payload)
    # 400 means "no profile with that name": create one on the fly.
    if r.status_code == 400:
        logger.debug('no taste profile found')
        url = 'http://developer.echonest.com/api/v4/tasteprofile/create'
        headers = {'content-type': 'multipart/form-data'}
        payload = {
            'api_key': api_key,
            'name': PROFILE_NAME,
            'type': 'general'}
        r = requests.post(url, headers=headers, params=payload)
        ret = r.json()
        profile_id = ret['response']['id']
    else:
        logger.debug('taste profile found')
        ret = r.json()
        profile_id = ret['response']['catalog']['id']
    logger.debug('return: %s', r.text)
    logger.debug('profile id: %s', profile_id)
    return profile_id
def get_profile_id(api_key):
    '''Get the echonest taste profile ID, creating the profile if needed.

    NOTE(review): exact duplicate of the get_profile_id defined just
    above (which it shadows at import time) -- one copy should be
    deleted.
    '''
    #TODO: split in unit API functions
    logger.debug('call get_profile_id')
    url = 'http://developer.echonest.com/api/v4/tasteprofile/profile'
    payload = {
        'api_key': api_key,
        'name': PROFILE_NAME}
    r = requests.get(url, params=payload)
    # 400 means "no profile with that name": create one on the fly.
    if r.status_code == 400:
        logger.debug('no taste profile found')
        url = 'http://developer.echonest.com/api/v4/tasteprofile/create'
        headers = {'content-type': 'multipart/form-data'}
        payload = {
            'api_key': api_key,
            'name': PROFILE_NAME,
            'type': 'general'}
        r = requests.post(url, headers=headers, params=payload)
        ret = r.json()
        profile_id = ret['response']['id']
    else:
        logger.debug('taste profile found')
        ret = r.json()
        profile_id = ret['response']['catalog']['id']
    logger.debug('return: %s', r.text)
    logger.debug('profile id: %s', profile_id)
    return profile_id
def playback(kodi_params):
    '''Toggle playback: pause/resume an active player, else start one.'''
    logger.debug('call function playback')
    if not kodi_api.player_get_active(kodi_params):
        kodi_api.player_open(kodi_params)
    else:
        kodi_api.player_play_pause(kodi_params)
def playback_stop(kodi_params):
    '''Stop playback when a player is currently active (no-op otherwise).'''
    logger.debug('call function playback stop')
    if not kodi_api.player_get_active(kodi_params):
        return
    kodi_api.player_stop(kodi_params)
def populate_playlist(song_ids, kodi_params):
'''Create a playlist from an array of song_id'''
print
print "Populating the playlist... "
for song_id in song_ids:
kodi_api.playlist_add(SONG, song_id, kodi_params)
print " ... let's rock the house!"
# process return messages
class KodiRemote(cmd.Cmd):
def __init__(self,kodi_params=0,api_key=0,command=0):
# either the commandline options are parsed
if kodi_params == 0:
(self.kodi_params, self.api_key, self.command) = get_pykodi_params()
else:
# or the custom server arguments are taken
self.kodi_params=kodi_params
self.command=command
self.api_key=api_key
# initialize library description
self.nb_songs = 0
self.songs = {}
self.nb_albums = 0
self.albums = {}
# fill data
get_audio_library(self)
cmd.Cmd.__init__(self)
'''Subclass of the cmd class'''
def preloop(self):
''' Check if we skip command line and directly execute the passed command'''
if self.command!=0:
logger.info("Executing custom command")
self.onecmd(self.command)
#TODO find out how to detect errors.
quit()
else:
# customize prompt
sys_name = kodi_api.system_friendly_name(self.kodi_params)
self.prompt = "(" + sys_name + ") "
fancy_disp.smart_help()
# albums functions
def do_albums_random(self, line):
'''
Display a random selection of albums
Usage: albums_random
'''
logger.debug('call function do_albums_random')
albums_pos = random.sample(xrange(self.nb_albums), DISPLAY_NB_LINES)
fancy_disp.albums_index(albums_pos, self.albums)
def do_albums_page(self, line):
'''
Display a given page of the albums library
Usage: albums_page [page]
The page is optional, a random page is displayed without it.
'''
logger.debug('call function do_albums_page')
page_nb = parse_single_int(line)
if not page_nb:
logger.info('no page number provided')
page_nb = random.randrange(int(self.nb_albums / 10) + 1)
albums_pos = range(
(page_nb - 1) * DISPLAY_NB_LINES,
page_nb * DISPLAY_NB_LINES)
logger.debug('albums index range: %s', albums_pos)
# clean this conversion
album_ids = []
for album_pos in albums_pos:
album_ids.append(self.albums.keys()[album_pos])
logger.debug('albums id range: %s', album_ids)
fancy_disp.albums_index(album_ids, self.albums)
def do_albums_recent(self, line):
'''
Display recently added albums
Usage: albums_recent
'''
logger.debug('call function do_albums_recent')
albums_pos = range(
self.nb_albums + 1 - DISPLAY_NB_LINES,
self.nb_albums + 1)
fancy_disp.albums_index(albums_pos, self.albums)
def do_albums_search(self, line):
'''
Search into the albums
Usage: albums_search string
List all albums containing the string in the title or artist.
'''
logger.debug('call function do_albums_search')
search_string = line.lower()
#TODO: general refactor to album_ids (pos should not be used)
albums_pos = get_albums_search(search_string, self.albums)
fancy_disp.albums_index(albums_pos, self.albums)
# songs functions
def do_songs_page(self, line):
'''
Display a given page of the songs library
Usage: songss_page [page]
The page is optional, a random page is displayed without it.
'''
logger.debug('call function do_songs_page')
page_nb = parse_single_int(line)
if not page_nb:
logger.info('no page number provided')
page_nb = random.randrange(int(self.nb_songs / 10) + 1)
songs_pos = range(
(page_nb - 1) * DISPLAY_NB_LINES + 1,
page_nb * DISPLAY_NB_LINES + 1)
fancy_disp.songs_index(songs_pos, self.songs)
def do_songs_display(self, line):
'''
Display details for a given song
Usage songs_display id
Display all information about a given song like the playcount
or the rating.
'''
logger.debug('call function do_song_display')
song_id = parse_single_int(line)
fancy_disp.songs_details(song_id, self.songs)
def do_songs_search(self, line):
'''
Search into the songs
Usage: songs_search string
List all songs containing the string in the title or artist.
'''
logger.debug('call function do_songs_search')
search_string = line.lower()
songs_pos = get_songs_search(search_string, self.songs)
fancy_disp.songs_index(songs_pos, self.songs)
def do_songs_sync(self, line):
'''
Sync playcount and rating
Usage: songs_sync
Sync playcount and rating from the Kodi server to PyKodi.
'''
logger.debug('call function do_songs_sync')
set_songs_sync(self.kodi_params, self.songs)
# playlist functions
def do_playlist_show(self, line):
'''
Show the current audio playlist
Usage: playlist_show
'''
logger.debug('call function do_playlist_show')
if kodi_api.player_get_active(self.kodi_params):
properties = kodi_api.player_get_properties(self.kodi_params)
else:
properties = None
song_ids = kodi_api.playlist_get_items(self.kodi_params)
fancy_disp.playlist(properties, song_ids, self.songs)
def do_playlist_add(self, line):
'''
Add an album to the playlist
Usage: playlist_add [id]
Add the album id to the current playlist.
Use the albums function to find the id.
The id is optional, an album is randomly selected without it.
'''
logger.debug('call function do_playlist_add')
album_id = parse_single_int(line)
if not album_id:
logger.info('no album id provided')
album_id = random.randrange(self.nb_albums)
#TODO: disp function
print
print "Album %i will be added to the playlist" % album_id
print
kodi_api.playlist_add(ALBUM, album_id, self.kodi_params)
def do_playlist_clear(self, line):
'''
Clear the playlist
Usage: playlist_clear
Remove all items from the current playlist.
'''
logger.debug('call function do_playlist_clear')
kodi_api.playlist_clear(self.kodi_params)
def do_playlist_tasteprofile(self, line):
'''
Create a playlist from echonest taste profile
Usage: playlist_tasteprofile
Generate and play a new playlist based on
echonest taste profile. The current playlist
is removed before.
'''
logger.debug('call function do_playlist_tasteprofile')
profile_id = get_profile_id(self.api_key)
while True:
song_ids = echonest_playlist(self.api_key, profile_id)
fancy_disp.songs_index(song_ids, self.songs)
action = fancy_disp.validate_playlist()
if action <> 'r':
break
if action == 'p':
playback_stop(self.kodi_params)
kodi_api.playlist_clear(self.kodi_params)
populate_playlist(song_ids, self.kodi_params)
kodi_api.player_open(self.kodi_params)
print
def do_playlist_taste_seed(self, line):
'''
Create a playlist from echonest taste profile and seeded by a song
Usage: playlist_tasteprofile song_id
Generate and play a new playlist based on
echonest taste profile. The current playlist
is removed before.
'''
#TODO: function for a single logic and several pl methods
logger.debug('call function do_playlist_tasteprofile')
song_id = parse_single_int(line)
profile_id = get_profile_id(self.api_key)
while True:
song_ids = echonest_pl_seed(self.api_key, profile_id, song_id)
fancy_disp.songs_index(song_ids, self.songs)
action = fancy_disp.validate_playlist()
if action <> 'r':
break
if action == 'p':
playback_stop(self.kodi_params)
kodi_api.playlist_clear(self.kodi_params)
populate_playlist(song_ids, self.kodi_params)
kodi_api.player_open(self.kodi_params)
print
# play functions
def do_play_album(self, line):
'''
Play a single album
Usage: play_album [id]
Play the album behind the id.
Use the albums function to find the id.
The id is optional, an album is randomly selected without it.
'''
logger.debug('call function do_play_album')
album_id = parse_single_int(line)
if not album_id:
logger.info('no album id provided')
album_index = random.randrange(self.nb_albums)
logger.debug('random album index: %i', album_index)
album_id = self.albums.keys()[album_index]
kodi_api.playlist_clear(self.kodi_params)
kodi_api.playlist_add(ALBUM, album_id, self.kodi_params)
kodi_api.player_open(self.kodi_params)
print
fancy_disp.play_album(album_id, self.albums)
print
def do_add_album(self, line):
'''
Add a single album to the playlist
Usage: add_album [id]
Add the album behind the id.
Use the albums function to find the id.
The id is optional, an album is randomly selected without it.
'''
logger.debug('call function do_play_album')
album_id = parse_single_int(line)
if not album_id:
logger.info('no album id provided')
album_index = random.randrange(self.nb_albums)
logger.debug('random album index: %i', album_index)
album_id = self.albums.keys()[album_index]
kodi_api.playlist_add(ALBUM, album_id, self.kodi_params)
print
fancy_disp.add_album(album_id, self.albums)
print
def do_play_song(self, line):
'''
Play a single song
Usage: play_song [id]
Play the song behind the id.
Use the search functions to find the id.
The id is optional, a song is randomly selected without it.
'''
logger.debug('call function do_play_song')
song_id = parse_single_int(line)
if not song_id:
logger.info('no song id provided')
song_index = random.randrange(self.nb_songs)
logger.debug('random song index: %i', song_index)
album_id = self.albums.keys()[album_index]
kodi_api.playlist_clear(self.kodi_params)
kodi_api.playlist_add(SONG, song_id, self.kodi_params)
kodi_api.player_open(self.kodi_params)
print
fancy_disp.play_song(song_id, self.songs)
print
def do_add_song(self, line):
'''
Add a single song to the playlist
Usage: add_song [id]
Add the song behind the id.
Use the search functions to find the id.
The id is optional, a song is randomly selected without it.
'''
logger.debug('call function do_add_song')
song_id = parse_single_int(line)
if not song_id:
logger.info('no song id provided')
song_index = random.randrange(self.nb_songs)
logger.debug('random song index: %i', song_index)
album_id = self.albums.keys()[album_index]
kodi_api.playlist_add(SONG, song_id, self.kodi_params)
print
fancy_disp.add_song(song_id, self.songs)
print
def do_play_party(self, line):
'''
Start a big party!
Usage: play_party
'''
logger.debug('call function do_play_party')
kodi_api.player_open_party(self.kodi_params)
def do_play_pause(self, line):
'''
Switch to play or pause
Usage: play_pause
Switch to pause if playing, switch to play if in pause.
'''
logger.debug('call function do_play_pause')
playback(self.kodi_params)
def do_play_stop(self, line):
'''
Stop the music
Usage: play_stop
Stop the music and go home, I repeat, stop the music and go home.
'''
logger.debug('call function do_play_stop')
playback_stop(self.kodi_params)
def do_play_what(self, line):
'''
Detail status of what is currently played
Usage: play_what
'''
logger.debug('call function do_play_what')
item = kodi_api.player_get_item(self.kodi_params)
properties = kodi_api.player_get_properties(self.kodi_params)
items = kodi_api.playlist_get_items(self.kodi_params)
fancy_disp.now_playing(item, properties)
fancy_disp.next_playing(properties, items)
def do_play_favorite(self, line):
'''
Like the current song (in your echonest tasteprofile)
Usage: play_favorite
'''
logger.debug('call function do_play_favorite')
song_id = kodi_api.player_get_item(self.kodi_params)
profile_id = get_profile_id(self.api_key)
en_api.echonest_favorite(self.api_key, profile_id, song_id)
print
fancy_disp.favorite(song_id, self.songs)
print
def do_play_skip(self, line):
'''
Skip the current song
Usage: play_skip
'''
logger.debug('call function do_play_skip')
song_id = kodi_api.player_get_item(self.kodi_params)
profile_id = get_profile_id(self.api_key)
kodi_api.player_goto(self.kodi_params)
en_api.echonest_skip(self.api_key, profile_id, song_id)
print
fancy_disp.skip(song_id, self.songs)
print
def do_play_genre(self,line):
'''
Start playing songs from specific genre.
Usage: play_genre [genre]
The library is search for all songs with playlist is shuffled each time
'''
logger.debug('call function do_play_genre')
song_ids=get_genre_search(line, self.songs)
if len(song_ids) >= 1:
#Listening to the same sequence is bornig, so shuffle the list each time.
random.shuffle(song_ids)
#TODO check if result is empty and is really a list
kodi_api.playlist_clear(self.kodi_params)
#First add only one song and start playback
kodi_api.playlist_add(SONG, song_ids[0], self.kodi_params)
kodi_api.player_open(self.kodi_params)
#Adding the other songs takes very long
if len(song_ids)>=2:
populate_playlist(song_ids[1:-1],self.kodi_params)
else:
logger.info("Genre %s has only one song", line)
else:
logger.error("Genre %s has no songs", line)
# volume control
def do_volume(self,percent):
'''
Set volume in percent
Usage: volume 100
'''
logger.debug('call function do_volume')
#TODO percent might not be a number between 0 and 100
try:
kodi_api.player_volume(self.kodi_params,int(percent))
except:
logger.error('Volume must be between 0 and 100.')
# echonest functions
def do_echonest_sync(self, line):
'''
Sync play count and rating with echonest taste profile
Usage: echonest_sync
'''
logger.debug('call function do_echonest_sync')
profile_id = get_profile_id(self.api_key)
echonest_sync(self.api_key, profile_id, self.songs)
def do_echonest_info(self, line):
'''
Display info about the echonest taste profile.
Usage: echonest_info
'''
logger.debug('call function do_echonest_info')
profile_id = get_profile_id(self.api_key)
en_info = en_api.echonest_info(self.api_key, profile_id)
#TODO: create disp function
print
print en_info
print
def do_echonest_read(self, line):
'''
Display data for a given item.
Usage: echonest_read item_id
'''
logger.debug('call function do_echonest_info')
profile_id = get_profile_id(self.api_key)
item_id = parse_single_int(line)
song_data = en_api.echonest_read(self.api_key, profile_id, item_id)
print
fancy_disp.echonest_read(song_data)
print
def do_echonest_delete(self, line):
'''
Delete echonest taste profile.
Usage: echonest_delete
'''
logger.debug('call function do_echonest_delete')
profile_id = get_profile_id(self.api_key)
if fancy_disp.sure_delete_tasteprofile(self.api_key, profile_id):
#TODO: insert a validation prompt
en_api.echonest_delete(self.api_key, profile_id)
def do_debug_kavod(self, line):
'''
Special debug function for Kavod library.
Usage: debug_kavod album_id
'''
print
print "Special debug mode for Kavod library"
print
print "Length of albums array: %i" % len(self.albums)
print
print "List of IDs: %s" % self.albums.keys()
print
album_id = parse_single_int(line)
ret = kodi_api.audiolibrary_get_albums(
self.kodi_params,
album_id - 1,
album_id)
print ret
print
print "In the local library: %s - %s" % (
self.albums[album_id]['title'],
self.albums[album_id]['artist'])
print
def do_EOF(self, line):
'''Override end of file'''
logger.info('Bye!')
print 'Bye!'
return True
def main():
    '''Entry point: build the remote controller and run its command loop.'''
    controller = KodiRemote()
    controller.cmdloop()

if __name__ == '__main__':
    main()
| |
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Code for decoding protocol buffer primitives.
This code is very similar to encoder.py -- read the docs for that module first.
A "decoder" is a function with the signature:
Decode(buffer, pos, end, message, field_dict)
The arguments are:
buffer: The string containing the encoded message.
pos: The current position in the string.
end: The position in the string where the current message ends. May be
less than len(buffer) if we're reading a sub-message.
message: The message object into which we're parsing.
field_dict: message._fields (avoids a hashtable lookup).
The decoder reads the field and stores it into field_dict, returning the new
buffer position. A decoder for a repeated field may proactively decode all of
the elements of that field, if they appear consecutively.
Note that decoders may throw any of the following:
IndexError: Indicates a truncated message.
struct.error: Unpacking of a fixed-width field failed.
message.DecodeError: Other errors.
Decoders are expected to raise an exception if they are called with pos > end.
This allows callers to be lax about bounds checking: it's fine to read past
"end" as long as you are sure that someone else will notice and throw an
exception later on.
Something up the call stack is expected to catch IndexError and struct.error
and convert them to message.DecodeError.
Decoders are constructed using decoder constructors with the signature:
MakeDecoder(field_number, is_repeated, is_packed, key, new_default)
The arguments are:
field_number: The field number of the field we want to decode.
is_repeated: Is the field a repeated field? (bool)
is_packed: Is the field a packed field? (bool)
key: The key to use when looking up the field within field_dict.
(This is actually the FieldDescriptor but nothing in this
file should depend on that.)
new_default: A function which takes a message object as a parameter and
returns a new instance of the default value for this field.
(This is called for repeated fields and sub-messages, when an
instance does not already exist.)
As with encoders, we define a decoder constructor for every type of field.
Then, for every field of every message class we construct an actual decoder.
That decoder goes into a dict indexed by tag, so when we decode a message
we repeatedly read a tag, look up the corresponding decoder, and invoke it.
"""
__author__ = 'kenton@google.com (Kenton Varda)'
import struct
import six
if six.PY3:
long = int
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import message
# Non-finite float sentinels.  The literal 1e10000 overflows to the
# IEEE-754 "infinity" value; "float('inf')" would be clearer but did not
# work on Windows before Python 2.6, which this file still supports.
_POS_INF = 1e10000
_NEG_INF = -_POS_INF
_NAN = _POS_INF * 0
# This is not for optimization, but rather to avoid conflicts with local
# variables named "message".
_DecodeError = message.DecodeError
def _VarintDecoder(mask, result_type):
  """Return a decoder for a basic varint value (does not include tag).

  Decoded values are bitwise-anded with *mask* before being converted by
  *result_type*, e.g. to limit them to 32 bits.  The returned decoder
  takes no "end" parameter -- the caller is expected to do bounds
  checking after the fact (often that check can be deferred).  The
  decoder returns a (value, new_pos) pair.
  """

  def DecodeVarint(buffer, pos):
    result = 0
    shift = 0
    # A varint carries 7 payload bits per byte; ten bytes (shift 0..63)
    # is the most a 64-bit value may legally occupy.
    while shift < 64:
      b = six.indexbytes(buffer, pos)
      pos += 1
      result |= (b & 0x7f) << shift
      if not (b & 0x80):
        # Continuation bit clear: this was the last byte.
        return (result_type(result & mask), pos)
      shift += 7
    raise _DecodeError('Too many bytes when decoding varint.')
  return DecodeVarint
def _SignedVarintDecoder(bits, result_type):
  """Like _VarintDecoder() but decodes signed values.

  The decoded value is truncated to *bits* bits and then sign-extended,
  so the high bit of the truncated value becomes the sign.
  """
  signbit = 1 << (bits - 1)
  mask = (1 << bits) - 1

  def DecodeVarint(buffer, pos):
    result = 0
    shift = 0
    # At most ten bytes (shift 0..63) may encode a 64-bit varint.
    while shift < 64:
      b = six.indexbytes(buffer, pos)
      pos += 1
      result |= (b & 0x7f) << shift
      if not (b & 0x80):
        # Truncate, then sign-extend around the sign bit.
        result &= mask
        result = (result ^ signbit) - signbit
        return (result_type(result), pos)
      shift += 7
    raise _DecodeError('Too many bytes when decoding varint.')
  return DecodeVarint
# Concrete decoder instances.  (The original comment about forcing
# 64-bit values to "long" is historical: all four decoders now convert
# through int.)
_DecodeVarint = _VarintDecoder((1 << 64) - 1, int)
_DecodeSignedVarint = _SignedVarintDecoder(64, int)
# Use these versions for values which must be limited to 32 bits.
_DecodeVarint32 = _VarintDecoder((1 << 32) - 1, int)
_DecodeSignedVarint32 = _SignedVarintDecoder(32, int)
def ReadTag(buffer, pos):
  """Read a tag from the buffer and return a (tag_bytes, new_pos) tuple.

  The raw bytes of the tag are returned rather than the decoded value:
  the raw bytes can be used directly to look up the proper decoder, which
  trades a pure-Python varint decode for a C-level byte-string hash
  lookup.  In a low-level language decoding would be cheaper, but not in
  Python.
  """
  end = pos
  # A varint's final byte is the one with the continuation (0x80) bit
  # clear; advance past every continuation byte, then past that one.
  while six.indexbytes(buffer, end) & 0x80:
    end += 1
  end += 1
  return (buffer[pos:end], end)
# --------------------------------------------------------------------
def _SimpleDecoder(wire_type, decode_value):
  """Return a constructor for a decoder for fields of a particular type.

  Args:
      wire_type: The field's wire type.
      decode_value: A function which decodes an individual value, e.g.
        _DecodeVarint()
  """

  def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default):
    if is_packed:
      local_DecodeVarint = _DecodeVarint
      def DecodePackedField(buffer, pos, end, message, field_dict):
        value = field_dict.get(key)
        if value is None:
          value = field_dict.setdefault(key, new_default(message))
        # A packed field is length-delimited: read the byte length, then
        # decode elements until that endpoint is reached.
        (endpoint, pos) = local_DecodeVarint(buffer, pos)
        endpoint += pos
        if endpoint > end:
          raise _DecodeError('Truncated message.')
        while pos < endpoint:
          (element, pos) = decode_value(buffer, pos)
          value.append(element)
          if pos > endpoint:
            del value[-1]   # Discard corrupt value.
            raise _DecodeError('Packed element was truncated.')
        return pos
      return DecodePackedField
    elif is_repeated:
      tag_bytes = encoder.TagBytes(field_number, wire_type)
      tag_len = len(tag_bytes)
      def DecodeRepeatedField(buffer, pos, end, message, field_dict):
        value = field_dict.get(key)
        if value is None:
          value = field_dict.setdefault(key, new_default(message))
        while 1:
          (element, new_pos) = decode_value(buffer, pos)
          value.append(element)
          # Predict that the next tag is another copy of the same repeated
          # field.
          pos = new_pos + tag_len
          if buffer[new_pos:pos] != tag_bytes or new_pos >= end:
            # Prediction failed.  Return.
            if new_pos > end:
              raise _DecodeError('Truncated message.')
            return new_pos
      return DecodeRepeatedField
    else:
      def DecodeField(buffer, pos, end, message, field_dict):
        # Scalar field: the value is stored directly under key.
        (field_dict[key], pos) = decode_value(buffer, pos)
        if pos > end:
          del field_dict[key]  # Discard corrupt value.
          raise _DecodeError('Truncated message.')
        return pos
      return DecodeField
  return SpecificDecoder
def _ModifiedDecoder(wire_type, decode_value, modify_value):
  """Like _SimpleDecoder, but post-processes every decoded value.

  `modify_value` (usually ZigZagDecode, or bool) is applied to each raw
  value before it is stored.
  """
  # Delegating to _SimpleDecoder is marginally slower than duplicating its
  # body, but not enough to matter.
  def DecodeAndModify(buffer, pos):
    raw, next_pos = decode_value(buffer, pos)
    return (modify_value(raw), next_pos)
  return _SimpleDecoder(wire_type, DecodeAndModify)
def _StructPackDecoder(wire_type, format):
  """Return a constructor for a decoder for a fixed-width field.

  Args:
      wire_type: The field's wire type.
      format: The format string to pass to struct.unpack() (e.g. '<I').
  """
  width = struct.calcsize(format)
  unpack = struct.unpack

  # A struct.error escaping from here is converted to _DecodeError by the
  # caller up-stack, so no per-value try/except is needed.
  def DecodeFixedWidth(buffer, pos):
    next_pos = pos + width
    return (unpack(format, buffer[pos:next_pos])[0], next_pos)

  return _SimpleDecoder(wire_type, DecodeFixedWidth)
def _FloatDecoder():
  """Returns a decoder for a float field.

  This code works around a bug in struct.unpack for non-finite 32-bit
  floating-point values.
  """

  local_unpack = struct.unpack

  def InnerDecode(buffer, pos):
    """Decode one little-endian float32; special-case inf/-inf/NaN."""
    # We expect a 32-bit value in little-endian byte order.  Bit 1 is the sign
    # bit, bits 2-9 represent the exponent, and bits 10-32 are the significand.
    new_pos = pos + 4
    float_bytes = buffer[pos:new_pos]

    # If this value has all its exponent bits set, then it's non-finite.
    # In Python 2.4, struct.unpack will convert it to a finite 64-bit value.
    # To avoid that, we parse it specially.
    # NOTE: `in b'\x7F\xFF'` is a bytes *substring* test on the 1-byte slice,
    # i.e. top byte is 0x7F or 0xFF -- exponent high bits + sign.
    if (float_bytes[3:4] in b'\x7F\xFF' and float_bytes[2:3] >= b'\x80'):
      # If at least one significand bit is set...
      if float_bytes[0:3] != b'\x00\x00\x80':
        return (_NAN, new_pos)
      # If sign bit is set...
      if float_bytes[3:4] == b'\xFF':
        return (_NEG_INF, new_pos)
      return (_POS_INF, new_pos)

    # Note that we expect someone up-stack to catch struct.error and convert
    # it to _DecodeError -- this way we don't have to set up exception-
    # handling blocks every time we parse one value.
    result = local_unpack('<f', float_bytes)[0]
    return (result, new_pos)
  return _SimpleDecoder(wire_format.WIRETYPE_FIXED32, InnerDecode)
def _DoubleDecoder():
  """Returns a decoder for a double field.

  This code works around a bug in struct.unpack for not-a-number.
  """

  local_unpack = struct.unpack

  def InnerDecode(buffer, pos):
    """Decode one little-endian float64; special-case NaN."""
    # We expect a 64-bit value in little-endian byte order.  Bit 1 is the sign
    # bit, bits 2-12 represent the exponent, and bits 13-64 are the significand.
    new_pos = pos + 8
    double_bytes = buffer[pos:new_pos]

    # If this value has all its exponent bits set and at least one significand
    # bit set, it's not a number.  In Python 2.4, struct.unpack will treat it
    # as inf or -inf.  To avoid that, we treat it specially.
    # NOTE: `in b'\x7F\xFF'` is a bytes substring test on the 1-byte slice.
    if ((double_bytes[7:8] in b'\x7F\xFF')
        and (double_bytes[6:7] >= b'\xF0')
        and (double_bytes[0:7] != b'\x00\x00\x00\x00\x00\x00\xF0')):
      return (_NAN, new_pos)

    # Note that we expect someone up-stack to catch struct.error and convert
    # it to _DecodeError -- this way we don't have to set up exception-
    # handling blocks every time we parse one value.
    result = local_unpack('<d', double_bytes)[0]
    return (result, new_pos)
  return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode)
def EnumDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for an enum field.

  Values not present in the enum's values_by_number are routed into
  message._unknown_fields instead of the field itself.
  """
  enum_type = key.enum_type
  if is_packed:
    local_DecodeVarint = _DecodeVarint
    def DecodePackedField(buffer, pos, end, message, field_dict):
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      (endpoint, pos) = local_DecodeVarint(buffer, pos)
      endpoint += pos
      if endpoint > end:
        raise _DecodeError('Truncated message.')
      while pos < endpoint:
        value_start_pos = pos
        (element, pos) = _DecodeSignedVarint32(buffer, pos)
        if element in enum_type.values_by_number:
          value.append(element)
        else:
          # Unknown enum value: preserve raw bytes as an unknown field.
          if not message._unknown_fields:
            message._unknown_fields = []
          tag_bytes = encoder.TagBytes(field_number,
                                       wire_format.WIRETYPE_VARINT)
          message._unknown_fields.append(
              (tag_bytes, buffer[value_start_pos:pos]))
        if pos > endpoint:
          # Roll back whichever container the last element landed in.
          if element in enum_type.values_by_number:
            del value[-1]   # Discard corrupt value.
          else:
            del message._unknown_fields[-1]
          raise _DecodeError('Packed element was truncated.')
      return pos
    return DecodePackedField
  elif is_repeated:
    tag_bytes = encoder.TagBytes(field_number, wire_format.WIRETYPE_VARINT)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        (element, new_pos) = _DecodeSignedVarint32(buffer, pos)
        if element in enum_type.values_by_number:
          value.append(element)
        else:
          if not message._unknown_fields:
            message._unknown_fields = []
          message._unknown_fields.append(
              (tag_bytes, buffer[pos:new_pos]))
        # Predict that the next tag is another copy of the same repeated
        # field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos >= end:
          # Prediction failed.  Return.
          if new_pos > end:
            raise _DecodeError('Truncated message.')
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      value_start_pos = pos
      (enum_value, pos) = _DecodeSignedVarint32(buffer, pos)
      if pos > end:
        raise _DecodeError('Truncated message.')
      if enum_value in enum_type.values_by_number:
        field_dict[key] = enum_value
      else:
        if not message._unknown_fields:
          message._unknown_fields = []
        tag_bytes = encoder.TagBytes(field_number,
                                     wire_format.WIRETYPE_VARINT)
        message._unknown_fields.append(
            (tag_bytes, buffer[value_start_pos:pos]))
      return pos
    return DecodeField
# --------------------------------------------------------------------
# Concrete decoder constructors for each scalar protobuf type.
Int32Decoder = _SimpleDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeSignedVarint32)

Int64Decoder = _SimpleDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeSignedVarint)

UInt32Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint32)
UInt64Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint)

# sint32/sint64 use zig-zag encoding, so the raw varint is post-processed.
SInt32Decoder = _ModifiedDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeVarint32, wire_format.ZigZagDecode)
SInt64Decoder = _ModifiedDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeVarint, wire_format.ZigZagDecode)

# Note that Python conveniently guarantees that when using the '<' prefix on
# formats, they will also have the same size across all platforms (as opposed
# to without the prefix, where their sizes depend on the C compiler's basic
# type sizes).
Fixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<I')
Fixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<Q')
SFixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<i')
SFixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<q')
FloatDecoder = _FloatDecoder()
DoubleDecoder = _DoubleDecoder()

# bool is a varint whose decoded int is coerced through bool().
BoolDecoder = _ModifiedDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeVarint, bool)
def StringDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a string field.

  Values are length-delimited UTF-8; decode errors are re-raised with the
  field's full name added for context.
  """
  local_DecodeVarint = _DecodeVarint
  local_unicode = six.text_type

  def _ConvertToUnicode(byte_str):
    try:
      return local_unicode(byte_str, 'utf-8')
    except UnicodeDecodeError as e:
      # add more information to the error message and re-raise it.
      e.reason = '%s in field: %s' % (e, key.full_name)
      raise

  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_LENGTH_DELIMITED)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        (size, pos) = local_DecodeVarint(buffer, pos)
        new_pos = pos + size
        if new_pos > end:
          raise _DecodeError('Truncated string.')
        value.append(_ConvertToUnicode(buffer[pos:new_pos]))
        # Predict that the next tag is another copy of the same repeated field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed.  Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      (size, pos) = local_DecodeVarint(buffer, pos)
      new_pos = pos + size
      if new_pos > end:
        raise _DecodeError('Truncated string.')
      field_dict[key] = _ConvertToUnicode(buffer[pos:new_pos])
      return new_pos
    return DecodeField
def BytesDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a bytes field.

  Like StringDecoder but stores the raw byte slice without UTF-8 decoding.
  """
  local_DecodeVarint = _DecodeVarint

  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_LENGTH_DELIMITED)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        (size, pos) = local_DecodeVarint(buffer, pos)
        new_pos = pos + size
        if new_pos > end:
          raise _DecodeError('Truncated string.')
        value.append(buffer[pos:new_pos])
        # Predict that the next tag is another copy of the same repeated field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed.  Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      (size, pos) = local_DecodeVarint(buffer, pos)
      new_pos = pos + size
      if new_pos > end:
        raise _DecodeError('Truncated string.')
      field_dict[key] = buffer[pos:new_pos]
      return new_pos
    return DecodeField
def GroupDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a group field.

  A group is a sub-message delimited by START_GROUP / END_GROUP tags rather
  than by a length prefix.  Groups cannot be packed.
  """

  end_tag_bytes = encoder.TagBytes(field_number,
                                   wire_format.WIRETYPE_END_GROUP)
  end_tag_len = len(end_tag_bytes)

  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_START_GROUP)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      # Fetch (or create) the repeated container once; the original code
      # redundantly re-fetched it from field_dict on every loop iteration.
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        # Read sub-message.
        pos = value.add()._InternalParse(buffer, pos, end)
        # Read end tag.
        new_pos = pos + end_tag_len
        if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
          raise _DecodeError('Missing group end tag.')
        # Predict that the next tag is another copy of the same repeated field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed.  Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      # Read sub-message.
      pos = value._InternalParse(buffer, pos, end)
      # Read end tag.
      new_pos = pos + end_tag_len
      if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
        raise _DecodeError('Missing group end tag.')
      return new_pos
    return DecodeField
def MessageDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a message field.

  Sub-messages are length-delimited; each is parsed in place via
  _InternalParse and must consume exactly its declared length.
  """
  local_DecodeVarint = _DecodeVarint

  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_LENGTH_DELIMITED)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        # Read length.
        (size, pos) = local_DecodeVarint(buffer, pos)
        new_pos = pos + size
        if new_pos > end:
          raise _DecodeError('Truncated message.')
        # Read sub-message.
        if value.add()._InternalParse(buffer, pos, new_pos) != new_pos:
          # The only reason _InternalParse would return early is if it
          # encountered an end-group tag.
          raise _DecodeError('Unexpected end-group tag.')
        # Predict that the next tag is another copy of the same repeated field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed.  Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      # Read length.
      (size, pos) = local_DecodeVarint(buffer, pos)
      new_pos = pos + size
      if new_pos > end:
        raise _DecodeError('Truncated message.')
      # Read sub-message.
      if value._InternalParse(buffer, pos, new_pos) != new_pos:
        # The only reason _InternalParse would return early is if it encountered
        # an end-group tag.
        raise _DecodeError('Unexpected end-group tag.')
      return new_pos
    return DecodeField
# --------------------------------------------------------------------
MESSAGE_SET_ITEM_TAG = encoder.TagBytes(1, wire_format.WIRETYPE_START_GROUP)

def MessageSetItemDecoder(descriptor):
  """Returns a decoder for a MessageSet item.

  The parameter is the message Descriptor.

  The message set message looks like this:
    message MessageSet {
      repeated group Item = 1 {
        required int32 type_id = 2;
        required string message = 3;
      }
    }
  """

  type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT)
  message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)
  item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP)

  # Bind frequently-called helpers to locals for speed inside the loop.
  local_ReadTag = ReadTag
  local_DecodeVarint = _DecodeVarint
  local_SkipField = SkipField

  def DecodeItem(buffer, pos, end, message, field_dict):
    """Parse one MessageSet item (group with type_id + message fields)."""
    message_set_item_start = pos
    type_id = -1
    message_start = -1
    message_end = -1

    # Technically, type_id and message can appear in any order, so we need
    # a little loop here.
    while 1:
      (tag_bytes, pos) = local_ReadTag(buffer, pos)
      if tag_bytes == type_id_tag_bytes:
        (type_id, pos) = local_DecodeVarint(buffer, pos)
      elif tag_bytes == message_tag_bytes:
        (size, message_start) = local_DecodeVarint(buffer, pos)
        pos = message_end = message_start + size
      elif tag_bytes == item_end_tag_bytes:
        break
      else:
        # FIX: use the hoisted local binding; the original called the
        # module-level SkipField, leaving local_SkipField unused and
        # defeating the intended lookup optimization.
        pos = local_SkipField(buffer, pos, end, tag_bytes)
        if pos == -1:
          raise _DecodeError('Missing group end tag.')
        if pos > end:
          raise _DecodeError('Truncated message.')

    if type_id == -1:
      raise _DecodeError('MessageSet item missing type_id.')
    if message_start == -1:
      raise _DecodeError('MessageSet item missing message.')

    extension = message.Extensions._FindExtensionByNumber(type_id)
    if extension is not None:
      value = field_dict.get(extension)
      if value is None:
        value = field_dict.setdefault(
            extension, extension.message_type._concrete_class())
      if value._InternalParse(buffer, message_start, message_end) != message_end:
        # The only reason _InternalParse would return early is if it encountered
        # an end-group tag.
        raise _DecodeError('Unexpected end-group tag.')
    else:
      # Unknown type_id: preserve the whole item as an unknown field.
      if not message._unknown_fields:
        message._unknown_fields = []
      message._unknown_fields.append((MESSAGE_SET_ITEM_TAG,
                                      buffer[message_set_item_start:pos]))

    return pos

  return DecodeItem
# --------------------------------------------------------------------
def MapDecoder(field_descriptor, new_default, is_message_map):
  """Returns a decoder for a map field.

  Each entry arrives as a length-delimited sub-message with `key` and
  `value` fields; a single scratch sub-message is reused for every entry.
  """

  key = field_descriptor
  tag_bytes = encoder.TagBytes(field_descriptor.number,
                               wire_format.WIRETYPE_LENGTH_DELIMITED)
  tag_len = len(tag_bytes)
  local_DecodeVarint = _DecodeVarint
  # Can't read _concrete_class yet; might not be initialized.
  message_type = field_descriptor.message_type

  def DecodeMap(buffer, pos, end, message, field_dict):
    submsg = message_type._concrete_class()
    value = field_dict.get(key)
    if value is None:
      value = field_dict.setdefault(key, new_default(message))
    while 1:
      # Read length.
      (size, pos) = local_DecodeVarint(buffer, pos)
      new_pos = pos + size
      if new_pos > end:
        raise _DecodeError('Truncated message.')
      # Read sub-message.
      submsg.Clear()
      if submsg._InternalParse(buffer, pos, new_pos) != new_pos:
        # The only reason _InternalParse would return early is if it
        # encountered an end-group tag.
        raise _DecodeError('Unexpected end-group tag.')

      if is_message_map:
        # Message values are merged so repeated entries accumulate.
        value[submsg.key].MergeFrom(submsg.value)
      else:
        value[submsg.key] = submsg.value

      # Predict that the next tag is another copy of the same repeated field.
      pos = new_pos + tag_len
      if buffer[new_pos:pos] != tag_bytes or new_pos == end:
        # Prediction failed.  Return.
        return new_pos

  return DecodeMap
# --------------------------------------------------------------------
# Optimization is not as heavy here because calls to SkipField() are rare,
# except for handling end-group tags.
def _SkipVarint(buffer, pos, end):
"""Skip a varint value. Returns the new position."""
# Previously ord(buffer[pos]) raised IndexError when pos is out of range.
# With this code, ord(b'') raises TypeError. Both are handled in
# python_message.py to generate a 'Truncated message' error.
while ord(buffer[pos:pos+1]) & 0x80:
pos += 1
pos += 1
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _SkipFixed64(buffer, pos, end):
"""Skip a fixed64 value. Returns the new position."""
pos += 8
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _SkipLengthDelimited(buffer, pos, end):
  """Skip a length-delimited value; return the position just past it."""
  size, pos = _DecodeVarint(buffer, pos)
  new_pos = pos + size
  if new_pos > end:
    raise _DecodeError('Truncated message.')
  return new_pos
def _SkipGroup(buffer, pos, end):
  """Skip sub-group.  Returns the new position."""
  # Read tag/field pairs until SkipField reports the matching end-group tag
  # (it returns -1 via _EndGroup); mutual recursion with SkipField handles
  # nested groups.
  while 1:
    (tag_bytes, pos) = ReadTag(buffer, pos)
    new_pos = SkipField(buffer, pos, end, tag_bytes)
    if new_pos == -1:
      return pos
    pos = new_pos
def _EndGroup(buffer, pos, end):
  """Skipping an END_GROUP tag returns -1 to tell the parent loop to break."""
  # Sentinel consumed by _SkipGroup / the message parse loop; no bytes read.
  return -1
def _SkipFixed32(buffer, pos, end):
"""Skip a fixed32 value. Returns the new position."""
pos += 4
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _RaiseInvalidWireType(buffer, pos, end):
  """Skip function for unknown wire types.  Raises an exception."""
  # Wire types 6 and 7 are unassigned; any tag carrying them is malformed.
  raise _DecodeError('Tag had invalid wire type.')
def _FieldSkipper():
  """Constructs the SkipField function."""

  # Dispatch table indexed by wire type (0..7); the two trailing entries
  # cover the unassigned wire types 6 and 7.
  WIRETYPE_TO_SKIPPER = [
      _SkipVarint,
      _SkipFixed64,
      _SkipLengthDelimited,
      _SkipGroup,
      _EndGroup,
      _SkipFixed32,
      _RaiseInvalidWireType,
      _RaiseInvalidWireType,
      ]

  wiretype_mask = wire_format.TAG_TYPE_MASK

  def SkipField(buffer, pos, end, tag_bytes):
    """Skips a field with the specified tag.

    |pos| should point to the byte immediately after the tag.

    Returns:
        The new position (after the tag value), or -1 if the tag is an end-group
        tag (in which case the calling loop should break).
    """
    # The wire type is always in the first byte since varints are little-endian.
    wire_type = ord(tag_bytes[0:1]) & wiretype_mask
    return WIRETYPE_TO_SKIPPER[wire_type](buffer, pos, end)

  return SkipField

# Module-level skipper shared by all decoders above.
SkipField = _FieldSkipper()
| |
"""
coding: utf-8
Vanishing Gradients
We will demonstrate the difference between using sigmoid and ReLU nonlinearities in a simple
neural network with two hidden layers. This notebook is built off of a minimal net demo done
by Andrej Karpathy for CS 231n, which you can check out here:
http://cs231n.github.io/neural-networks-case-study/
"""
# Setup
import os
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (10.0, 8.0)  # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'

# All figures are written into plot_dir; plot_n is a global counter used by
# save_plot() to number the files sequentially.
plot_dir = 'plots'
plot_n = 0
try:
    os.makedirs(plot_dir)
except FileExistsError:
    pass
def save_plot(name):
    """Save the current matplotlib figure to plots/, with a sequential
    counter and the global `rubric` string embedded in the filename."""
    global plot_n
    filename = '%03d-%s-%s.png' % (plot_n, rubric, name)
    path = os.path.join(plot_dir, filename)
    print('Saving "%s"' % path)
    plt.savefig(path)
    plot_n += 1
# generate random data -- not linearly separable
# K interleaved spiral arms of N points each, in 2-D.
np.random.seed(1)
N = 80  # number of points per class
D = 2  # dimensionality
K = 4  # number of classes
N_EPOCHS = 50000
d_theta = 2 * np.pi / K  # angular offset between consecutive arms
S = 1.5  # sweep of each arm, in units of pi
R = 0.6  # base noise scale; per-point angular jitter is R / K
delta = R / K
print('N=%d D=%d K=%d N_EPOCHS=%d' % (N, D, K, N_EPOCHS))
rubric = 'N=%d-K=%d-S=%.1f-R=%.1f' % (N, K, S, R)
X = np.zeros((N * K, D))
num_train_examples = X.shape[0]
y = np.zeros(N * K, dtype='uint8')
for j in range(K):
    ix = range(N * j, N * (j + 1))
    r = np.sqrt(np.linspace(0.0, 1, N))  # radius
    t = np.linspace(j * d_theta, j * d_theta + S * np.pi, N) + np.random.randn(N) * delta  # theta
    X[ix] = np.c_[r * np.sin(t), r * np.cos(t)]
    y[ix] = j
fig = plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)
plt.xlim([-1, 1])
plt.ylim([-1, 1])
# NOTE(review): no artists carry labels here, so legend() draws nothing and
# emits a warning -- presumably harmless; confirm or drop.
plt.legend()
save_plot('input')
# The sigmoid function "squashes" inputs to lie between 0 and 1. Unfortunately, this means that for
# inputs with sigmoid output close to 0 or 1, the gradient with respect to those inputs are close to
# zero. This leads to the phenomenon of vanishing gradients, where gradients drop close to zero, and
# the net does not learn well.
#
# On the other hand, the relu function (max(0, x)) does not saturate with input size. Plot these
# functions to gain intuition.
def sigmoid(x):
    """Logistic sigmoid 1 / (1 + e^-x), applied elementwise."""
    return 1 / (1 + np.exp(-x))
def sigmoid_grad(x):
    """Sigmoid derivative expressed in terms of the sigmoid OUTPUT x: x*(1-x)."""
    return x * (1 - x)
def relu(x):
    """Rectified linear unit: elementwise max(0, x)."""
    return np.maximum(0, x)
# Let's try and see now how the two kinds of nonlinearities change deep neural net training in
# practice. Below, we build a very simple neural net with three layers (two hidden layers), for
# which you can swap out ReLU/ sigmoid nonlinearities.
def three_layer_net(NONLINEARITY, X, y, model, step_size, reg):
    """Train a three-layer net (two hidden layers) with vanilla gradient
    descent, using either ReLU or sigmoid nonlinearities.

    Args:
        NONLINEARITY: 'RELU' or 'SIGM' -- which activation to use.
        X: (num_examples, D) input data.
        y: (num_examples,) integer class labels.
        model: dict holding weights 'W1','W2','W3' and biases 'b1','b2','b3'
            (and hidden sizes 'h','h2'); parameters are updated IN PLACE.
        step_size: gradient-descent learning rate.
        reg: L2 regularization strength.

    Returns:
        (plot_array_1, plot_array_2, W1, W2, W3, b1, b2, b3): per-epoch
        gradient-magnitude heuristics for the first- and second-hidden-layer
        weights, followed by the trained parameters.

    Raises:
        ValueError: if NONLINEARITY is not 'RELU' or 'SIGM'.
    """
    # FIX: validate up front -- previously an unknown value fell through the
    # if/elif chain and crashed later with a NameError on `hidden_layer`.
    if NONLINEARITY not in ('RELU', 'SIGM'):
        raise ValueError("NONLINEARITY must be 'RELU' or 'SIGM', got %r"
                         % (NONLINEARITY,))

    # parameter initialization (updates below are in place via +=)
    W1 = model['W1']
    W2 = model['W2']
    W3 = model['W3']
    b1 = model['b1']
    b2 = model['b2']
    b3 = model['b3']

    num_examples = X.shape[0]
    plot_array_1 = []  # per-epoch |dW1| heuristic
    plot_array_2 = []  # per-epoch |dW2| heuristic
    for i in range(N_EPOCHS):
        # FORWARD PROP
        if NONLINEARITY == 'RELU':
            hidden_layer = relu(np.dot(X, W1) + b1)
            hidden_layer2 = relu(np.dot(hidden_layer, W2) + b2)
        else:  # 'SIGM'
            hidden_layer = sigmoid(np.dot(X, W1) + b1)
            hidden_layer2 = sigmoid(np.dot(hidden_layer, W2) + b2)
        scores = np.dot(hidden_layer2, W3) + b3

        # softmax probabilities, [N x K]
        exp_scores = np.exp(scores)
        probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)

        # compute the loss: average cross-entropy loss and regularization
        # probs[range(num_examples), y] -> 1d vector v[i] = probs[i, y[i]]
        correct_logprobs = -np.log(probs[range(num_examples), y])
        data_loss = np.sum(correct_logprobs) / num_examples
        reg_loss = (0.5 * reg * np.sum(W1 * W1) + 0.5 * reg * np.sum(W2 * W2)
                    + 0.5 * reg * np.sum(W3 * W3))
        loss = data_loss + reg_loss
        if i % 1000 == 0:
            print("iteration %d: loss %f" % (i, loss))

        # gradient on scores (softmax + cross-entropy combined)
        dscores = probs
        dscores[range(num_examples), y] -= 1
        dscores /= num_examples

        # BACKPROP
        dW3 = (hidden_layer2.T).dot(dscores)
        db3 = np.sum(dscores, axis=0, keepdims=True)
        if NONLINEARITY == 'RELU':
            # ReLU gradient: zero where the forward activation was <= 0
            dhidden2 = np.dot(dscores, W3.T)
            dhidden2[hidden_layer2 <= 0] = 0
            dW2 = np.dot(hidden_layer.T, dhidden2)
            plot_array_2.append(np.sum(np.abs(dW2)) / np.sum(np.abs(dW2.shape)))
            db2 = np.sum(dhidden2, axis=0)
            dhidden = np.dot(dhidden2, W2.T)
            dhidden[hidden_layer <= 0] = 0
        else:  # 'SIGM'
            dhidden2 = dscores.dot(W3.T) * sigmoid_grad(hidden_layer2)
            dW2 = (hidden_layer.T).dot(dhidden2)
            plot_array_2.append(np.sum(np.abs(dW2)) / np.sum(np.abs(dW2.shape)))
            db2 = np.sum(dhidden2, axis=0)
            dhidden = dhidden2.dot(W2.T) * sigmoid_grad(hidden_layer)
        dW1 = np.dot(X.T, dhidden)
        plot_array_1.append(np.sum(np.abs(dW1)) / np.sum(np.abs(dW1.shape)))
        db1 = np.sum(dhidden, axis=0)

        # add regularization gradient
        dW3 += reg * W3
        dW2 += reg * W2
        dW1 += reg * W1

        # parameter update (in place, so `model` sees the new values)
        W1 += -step_size * dW1
        b1 += -step_size * db1
        W2 += -step_size * dW2
        b2 += -step_size * db2
        W3 += -step_size * dW3
        b3 += -step_size * db3

    # evaluate training set accuracy
    if NONLINEARITY == 'RELU':
        hidden_layer = relu(np.dot(X, W1) + b1)
        hidden_layer2 = relu(np.dot(hidden_layer, W2) + b2)
    else:
        hidden_layer = sigmoid(np.dot(X, W1) + b1)
        hidden_layer2 = sigmoid(np.dot(hidden_layer, W2) + b2)
    scores = np.dot(hidden_layer2, W3) + b3
    predicted_class = np.argmax(scores, axis=1)
    print('training accuracy: %.2f' % (np.mean(predicted_class == y)))
    return plot_array_1, plot_array_2, W1, W2, W3, b1, b2, b3
# #### Train net with sigmoid nonlinearity first
# Initialize toy model, train sigmoid net
# N = 100 # number of points per class
# D = 2 # dimensionality
# K = 3 # number of classes
# Initialize a toy model (small random weights, zero biases) and train the
# sigmoid net.
h = 50
h2 = 50
num_train_examples = X.shape[0]
model = {}
model['h'] = h  # size of hidden layer 1
model['h2'] = h2  # size of hidden layer 2
model['W1'] = 0.1 * np.random.randn(D, h)
model['b1'] = np.zeros((1, h))
model['W2'] = 0.1 * np.random.randn(h, h2)
model['b2'] = np.zeros((1, h2))
model['W3'] = 0.1 * np.random.randn(h2, K)
model['b3'] = np.zeros((1, K))
(sigm_array_1, sigm_array_2, s_W1, s_W2, s_W3, s_b1, s_b2, s_b3
 ) = three_layer_net('SIGM', X, y, model, step_size=1e-1, reg=1e-3)
# #### Now train net with ReLU nonlinearity
# In[33]:
# Re-initialize the model with fresh random weights, then train the ReLU net
# under identical hyper-parameters for a fair comparison.
model = {}
model['h'] = h  # size of hidden layer 1
model['h2'] = h2  # size of hidden layer 2
model['W1'] = 0.1 * np.random.randn(D, h)
model['b1'] = np.zeros((1, h))
model['W2'] = 0.1 * np.random.randn(h, h2)
model['b2'] = np.zeros((1, h2))
model['W3'] = 0.1 * np.random.randn(h2, K)
model['b3'] = np.zeros((1, K))
(relu_array_1, relu_array_2, r_W1, r_W2, r_W3, r_b1, r_b2, r_b3
 ) = three_layer_net('RELU', X, y, model, step_size=1e-1, reg=1e-3)
# # The Vanishing Gradient Issue
# We can use the sum of the magnitude of gradients for the weights between hidden layers as a cheap
# heuristic to measure speed of learning (you can also use the magnitude of gradients for each
# neuron in the hidden layer here). Intuitively, when the magnitude of the gradients of the weight
# vectors or of each neuron are large, the net is learning faster. (NOTE: For our net, each hidden
# layer has the same number of neurons. If you want to play around with this, make sure to adjust
# the heuristic to account for the number of neurons in the layer).
# In[34]:
# Per-epoch gradient-magnitude curves for the sigmoid net.
fig = plt.figure()
plt.plot(np.array(sigm_array_1))
plt.plot(np.array(sigm_array_2))
plt.title('Sum of magnitudes of gradients -- SIGM weights')
plt.legend(("sigm first layer", "sigm second layer"))
save_plot('gradients.SIGM.weights')
# Same curves for the ReLU net.
fig = plt.figure()
plt.plot(np.array(relu_array_1))
plt.plot(np.array(relu_array_2))
plt.title('Sum of magnitudes of gradients -- ReLU weights')
plt.legend(("relu first layer", "relu second layer"))
save_plot('gradients.ReLU.weights')
# Overlaying the two plots to compare (ReLU dotted, sigmoid solid).
fig = plt.figure()
plt.plot(np.array(relu_array_1), ls=':')
plt.plot(np.array(relu_array_2), ls=':')
plt.plot(np.array(sigm_array_1))
plt.plot(np.array(sigm_array_2))
plt.title('Sum of magnitudes of gradients -- hidden layer neurons')
plt.legend(("relu first layer", "relu second layer", "sigm first layer", "sigm second layer"))
save_plot('gradients.hidden.layer')
# #### Feel free to play around with this notebook to gain intuition. Things you might want to try:
#
# - Adding additional layers to the nets and seeing how early layers continue to train slowly for the sigmoid net
# - Experiment with hyper-parameter tuning for the nets -- changing regularization and gradient descent step size
# - Experiment with different nonlinearities -- Leaky ReLU, Maxout. How quickly do different layers learn now?
#
#
# We can see how well each classifier does in terms of distinguishing the toy data classes. As
# expected, since the ReLU net trains faster, for a set number of epochs it performs better compared
# to the sigmoid net.
# In[40]:
# plot the classifiers- SIGMOID
# NOTE(review): this rebinds `h` (previously hidden-layer size 50) as the
# mesh step -- it works because the nets are already trained, but the
# shadowing is confusing.
h = 0.02
margin = 0.2
x_min, x_max = X[:, 0].min() - margin, X[:, 0].max() + margin
y_min, y_max = X[:, 1].min() - margin, X[:, 1].max() + margin
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))
# Forward pass of the trained sigmoid net over the whole mesh.
Z = np.dot(sigmoid(np.dot(sigmoid(np.dot(np.c_[xx.ravel(), yy.ravel()], s_W1)
                                  + s_b1), s_W2) + s_b2), s_W3) + s_b3
Z = np.argmax(Z, axis=1)
Z = Z.reshape(xx.shape)
fig = plt.figure()
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
# NOTE(review): no labeled artists, so legend() draws nothing (warning only).
plt.legend()
save_plot('classify.SIGM')
# plot the classifiers-- RELU
h = 0.02
x_min, x_max = X[:, 0].min() - margin, X[:, 0].max() + margin
y_min, y_max = X[:, 1].min() - margin, X[:, 1].max() + margin
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))
# Forward pass of the trained ReLU net over the whole mesh.
Z = np.dot(relu(np.dot(relu(np.dot(np.c_[xx.ravel(), yy.ravel()], r_W1)
                            + r_b1), r_W2) + r_b2), r_W3) + r_b3
Z = np.argmax(Z, axis=1)
Z = Z.reshape(xx.shape)
fig = plt.figure()
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.legend()
save_plot('classify.ReLU')
| |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Step size manipulations based on gradient history
Created 2020-01
@author: zhoujia, alfoa
"""
#for future compatibility with Python 3--------------------------------------------------------------
from __future__ import division, print_function, unicode_literals, absolute_import
#End compatibility block for Python 3----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
import abc
import numpy as np
from scipy.optimize import minpack2
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from utils import utils, InputData, InputTypes, mathUtils, randomUtils
from .StepManipulator import StepManipulator
from . import NoConstraintResolutionFound, NoMoreStepsNeeded
#Internal Modules End--------------------------------------------------------------------------------
class ConjugateGradient(StepManipulator):
  """
    Changes step size depending on history of gradients
  """
  # entries the optimizer must supply to this step manipulator each iteration
  requiredInformation = ['gradientHist', 'prevStepSize']
  # entries used when available
  optionalInformation = ['recommend']

  ##########################
  # Initialization Methods #
  ##########################
  @classmethod
  def getInputSpecification(cls):
    """
      Method to get a reference to a class that specifies the input data for class cls.
      @ In, cls, the class for which we are retrieving the specification
      @ Out, specs, InputData.ParameterInput, class to use for specifying input of cls.
    """
    # No inputs added beyond the StepManipulator base spec.
    # NOTE(review): handleInput reads <growthFactor>/<shrinkFactor>;
    # presumably the parent spec declares them -- confirm, otherwise those
    # nodes can never appear in valid input.
    specs = super(ConjugateGradient, cls).getInputSpecification()
    return specs
  @classmethod
  def getSolutionExportVariableNames(cls):
    """
      Compiles a list of acceptable SolutionExport variable options.
      @ In, None
      @ Out, ok, list(str), list of acceptable variable names
    """
    ok = super(ConjugateGradient, cls).getSolutionExportVariableNames()
    # extend the parent's options with the line-search task indicator
    ok['CG_task'] = 'for ConjugateGradient, current task of line search. FD suggests continuing the search, and CONV indicates the line search converged and will pivot.'
    return ok
def __init__(self):
"""
Constructor.
@ In, None
@ Out, None
"""
StepManipulator.__init__(self)
## Instance Variable Initialization
# public
self.needsAccessToAcceptance = True # if True, then this stepManip may need to modify opt point acceptance criteria
# _protected
self._persistence = None # consecutive line search converges until acceptance
# __private
# additional methods
self._minRotationAngle = 2.0 # how close to perpendicular should we try rotating towards?
self._numRandomPerp = 10 # how many random perpendiculars should we try rotating towards?
def handleInput(self, specs):
"""
Read input specs
@ In, specs, InputData.ParameterInput, parameter specs interpreted
@ Out, None
"""
StepManipulator.handleInput(self, specs)
growth = specs.findFirst('growthFactor')
if growth is not None:
self._growth = growth.value
shrink = specs.findFirst('shrinkFactor')
if shrink is not None:
self._shrink = shrink.value
def initialize(self, optVars, persistence=None, **kwargs):
"""
initializes this object
@ In, optVars, list(str), list of optimization variables (e.g. input space)
@ In, persistence, integer, optional, successive converges required to consider total convergence
@ In, kwargs, dict, additional unused arguments
@ Out, None
"""
StepManipulator.initialize(self, optVars, **kwargs)
self._persistence = persistence
###############
# Run Methods #
###############
def initialStepSize(self, numOptVars=None, scaling=1.0, **kwargs):
"""
Provides an initial step size
@ In, numOptVars, int, number of optimization variables
@ In, scaling, float, optional, scaling factor
"""
return mathUtils.hyperdiagonal(np.ones(numOptVars) * scaling) * self._initialStepScaling
def step(self, prevOpt, gradientHist=None, prevStepSize=None, objVar=None, **kwargs):
"""
calculates the step size and direction to take
@ In, prevOpt, dict, previous opt point
@ In, gradientHist, deque, optional, if not given then none available; list of gradient dictionaries with 0 being oldest; versors
@ In, prevStepSize, deque, optional, if not given then none available; list of float step sizes
@ In, recommend, str, optional, override to 'grow' or 'shrink' step size
@ In, kwargs, dict, keyword-based specifics as required by individual step sizers
@ Out, newOpt, dict, new opt point
@ Out, stepSize, float, new step size
"""
# Conjugate Gradient does line searches along consecutive gradient estimations
## with gradient estimations updated by more than just local estimation.
## For conjugate gradient notations, see https://en.wikipedia.org/wiki/iNonlinear_conjugate_gradient_method
## For line search notations, see github.com/scipy/scipy/blob/master/scipy/optimize/minpack2/dcsrch.f
# We start from an opt point, then find the gradient direction.
# from there we line search for the best point along that grad direction
# During this time, we store the original opt point from were we found the gradient,
# as well as the best point found so far along the line search. -> optPointHist
# In the grad hist, we store only gradients around best-in-line points historically
# In the step hist, we store line search information
lastStepInfo = prevStepSize[-1]['info']
if lastStepInfo is None:
# this MUST MEAN that this is the very very first step in this algorithm
# note that we use binary strings because that's what scipy gives us
lastStepInfo = {'task': b'START',
'fortranParams': {'iSave': np.zeros((2,), dtype=np.intc),
'dSave': np.zeros((13,), dtype=float),
}
}
lineSearchTask = lastStepInfo['task']
# get the gradient
curGradMag = gradientHist[-1][0]
curGrad = np.array(list(gradientHist[-1][1][v] for v in self._optVars)) * curGradMag
# get an array of the current optimal point
curPoint = np.array(list(prevOpt[v] for v in self._optVars))
# get the current opt point objective value
curObjVal = prevOpt[objVar]
# if we're starting a new line search because we found a minimum along the previous line search
## NOTE this only gets called the first time ever for each trajectory, because of how we start
## new line searches under the task == 'CONVERGE' switch below
if lineSearchTask == b'START':
lastStepInfo = self._startLineSearch(lastStepInfo, curPoint, curObjVal, curGrad, curGradMag)
else: # if lineSearchTask is anything except "start"
# store some indicative information about the gradient (scalar product of current gradient and
# the search vector, also "derPhi" in literature)
lastStepInfo['line']['objDerivative'] = np.dot(curGrad, lastStepInfo['searchVector']) # derPhi1
# update the line search information, and get the next task (and step size if relevant)
stepSize, task = self._lineSearchStep(lastStepInfo, curObjVal)
# take actions depending on the task
if task.startswith(b'FG'):
# continue line search
lastStepInfo['prev task'] = 'FG'
elif task.startswith(b'CONV'):
# local minimum reached, so pivot into new line search
lastStepInfo['persistence'] = 0
lastStepInfo['prev task'] = 'CONV'
lastStepInfo = self._startLineSearch(lastStepInfo, curPoint, curObjVal, curGrad, curGradMag)
stepSize, task = self._lineSearchStep(lastStepInfo, curObjVal)
elif task.startswith((b'WARN', b'ERROR')):
if task.startswith(b'WARN'):
lastStepInfo['prev task'] = 'WARN'
msg = task[9:].decode().lower()
print('ConjugateGradient WARNING: "{}"'.format(msg))
elif task.startswith(b'ERROR'):
lastStepInfo['prev task'] = 'ERROR'
print('ConjugateGradient ERROR: Not able to continue line search!')
lastStepInfo['persistence'] += 1
if lastStepInfo['persistence'] >= self._persistence:
raise NoMoreStepsNeeded
else:
self.raiseAnError(RuntimeError, 'Unrecognized "task" return from scipy.optimize.minpack2: "{}"'.format(task))
lastStepInfo['stepSize'] = stepSize
lastStepInfo['task'] = task
currentPivot = lastStepInfo['pivot']['point']
newPivot = currentPivot - stepSize * lastStepInfo['pivot']['gradient']
newOpt = dict((var, newPivot[v]) for v, var in enumerate(self._optVars))
return newOpt, stepSize, lastStepInfo
def fixConstraintViolations(self, proposed, previous, fixInfo):
"""
Given constraint violations, update the desired optimal point to consider.
@ In, proposed, dict, proposed new optimal point
@ In, previous, dict, previous optimal point
@ In, fixInfo, dict, contains record of progress in fixing search including but not limited to angles, perpendiculars, counters, and step sizes
@ Out, proposed, new proposed point
@ Out, stepSize, float, new step size taken
@ Out, fixInfo, dict, updated fixing info
"""
# TODO this is copied from GradientHistory; it should be updated for the ConjugateGradient when
# we know how we want to do this
# DESIGN
# While not okay:
# 1. See if cutting the step will fix it.
# 2. If not, try rotating towards a random perpendicular. Repeat 1.
# 3. If not, try a new random perpendicular. Repeat 1. Repeat N times.
# TODO should this be specific to step manipulators, or something else?
# TODO updating opt point in place! Is this safe?
minStepSize = fixInfo['minStepSize']
stepVector = dict((var, proposed[var] - previous[var]) for var in self._optVars)
stepDistance, stepDirection, _ = mathUtils.calculateMagnitudeAndVersor(list(stepVector.values()))
if 'originalStepSize' not in fixInfo:
fixInfo['originalStepSize'] = stepDistance
if 'perpDir' in fixInfo:
perpDir = fixInfo['perpDir']
# if not done cutting step, start cutting
if stepDistance > minStepSize:
# cut step again
stepSize = 0.5 * stepDistance # TODO user option?
for v, var in enumerate(stepVector):
proposed[var] = previous[var] + stepSize * stepDirection[v]
print(' ... cutting step ...') # norm step to {}, new norm opt {}'.format(stepSize, proposed))
return proposed, stepSize, fixInfo
else:
### rotate vector and restore full step size
stepSize = fixInfo['originalStepSize']
# store original direction
if 'originalDirection' not in fixInfo:
fixInfo['originalDirection'] = np.atleast_1d(stepDirection)
# if this isn't the first time, check if there's angle left to rotate through; reset if not
if 'perpDir' in fixInfo:
ang = mathUtils.angleBetweenVectors(stepDirection, fixInfo['perpDir'])
print(' ... trying angle:', ang)
if ang < self._minRotationAngle:
del fixInfo['perpDir']
if 'perpDir' not in fixInfo:
# find perpendicular vector
perp = randomUtils.randomPerpendicularVector(fixInfo['originalDirection'])
# NOTE we could return to point format, but no reason to
# normalize perpendicular to versor and resize
rotations = fixInfo.get('numRotations', 0)
if rotations > self._numRandomPerp:
raise NoConstraintResolutionFound
_, perpDir, _ = mathUtils.calculateMagnitudeAndVersor(perp)
fixInfo['perpDir'] = perpDir
fixInfo['numRotations'] = rotations + 1
# END fixing perpendicular direction
# rotate vector halfway towards perpendicular
perpDir = fixInfo['perpDir']
# rotate towards selected perpendicular
splitVector = {} # vector that evenly divides stepDirection and perp
for v, var in enumerate(self._optVars):
splitVector[var] = stepDirection[v] + perpDir[v]
#splitVector[var] = - stepDirection[v] + perpDir[v]
_, splitDir, _ = mathUtils.calculateMagnitudeAndVersor(list(splitVector.values()))
for v, var in enumerate(self._optVars):
proposed[var] = previous[var] + stepSize * splitDir[v]
print(' ... rotating step ...') #ed norm direction to {}, new norm opt {}'.format(splitDir, proposed))
return proposed, stepSize, fixInfo
def needDenormalized(self):
"""
Determines if this algorithm needs denormalized input spaces
@ In, None
@ Out, needDenormalized, bool, True if normalizing should NOT be performed
"""
return True
def updateSolutionExport(self, stepHistory):
"""
Prints information to the solution export.
@ In, stepHistory, list, (magnitude, versor, info) for each step entry
@ Out, info, dict, realization of data to go in the solutionExport object
"""
lastStepInfo = stepHistory[-1]['info']
if lastStepInfo is not None:
task = lastStepInfo['prev task']
info = {'CG_task': task,
}
else:
info = {'CG_task': 'START'}
return info
def trajIsFollowing(self, traj, opt, info, dataObject, followers, tolerance):
"""
Determines if the current trajectory is following another trajectory.
@ In, traj, int, integer identifier for trajectory that needs to be checked
@ In, opt, dict, DENORMALIZED most recent optimal point for trajectory
@ In, info, dict, additional information about optimal point
@ In, dataObject, DataObject.DataSet, data collected through optimization so far (SolutionExport)
@ In, followers, list(int), trajectories that are following traj currently
@ In, tolerance, float, termination distance (in scaled space)
@ Out, found, int, trajectory that traj is following (or None)
"""
if followers is None:
followers = []
# we define a trajectory as following if its current opt point is sufficiently near other opt
# points from other trajectories
matchDict = dict((var, opt[var]) for var in self._optVars)
# only look in accepted points #TODO would there be value in looking at others?
matchDict['CG_task'] = 'CONV'
# only look at other trajectories that this trajectory hasn't killed
noMatchDict = {'trajID': [traj] + followers}
_, found = dataObject.realization(matchDict=matchDict, noMatchDict=noMatchDict, tol=tolerance)
if found is not None:
return found['trajID']
return None
###################
# Utility Methods #
###################
def _polakRibierePowellStep(self, prevGrad, curGrad, gradDotProduct, searchVector):
"""
Update the search vector (magnitude and direction) for conjugate gradient
@ In, prevGrad, np.array, previous gradient in order of sampled variables
@ In, curGrad, np.array, current gradient in order of sampled variables
@ In, gradDorProduct, float, scalar product of the current grad with itself
@ In, searchVector, np.array, ongoing search vector (not unit vector)
@ Out, searchVectorMag, float, magnitude of new search vector
@ Out, searchVector, dict, search vector
"""
deltaGradient = curGrad - prevGrad
gain = max(0, np.dot(deltaGradient, curGrad) / gradDotProduct)
searchVector = -curGrad + gain * searchVector
searchVectorMag = mathUtils.calculateMultivectorMagnitude(searchVector)
return searchVectorMag, searchVector
def modifyAcceptance(self, oldPoint, oldVal, newPoint, newVal):
"""
Allows modification of acceptance criteria.
Note this is only called if self.needsAccessToAcceptance is True.
@ In, oldPoint, dict, old opt point
@ In, oldVal, float, old objective function value
@ In, newPoint, dict, new opt point
@ In, newVal, float, new objective function value
@ In, info, dict, identifying information about evaluation
@ Out, accept, boolean, whether we store the point
"""
# Because in ConjugateGradient we use all the line search information,
## we "accept" all points from the Optimizer's standpoint, and allow
## the step manipulator to use the information.
return 'accepted'
def _startLineSearch(self, lastStepInfo, curPoint, curObjVal, curGrad, curGradMag):
"""
Begins a new line search.
@ In, lastStepInfo, dict, information about the last step taken
@ In, curPoint, dict, current most-recent collected potential opt point
@ In, curObjVal, float, objective value at curPoint
@ In, curGrad, dict, magnitude-and-vector gradient estimate
@ In, curGradMag, float, magnitude of curGrad
@ Out, lastStepInfo, dict, modified with new line search information
"""
# use the previous pivots to update the conjugate gradient
## first the objective value
## then the conjugate gradient
# since we've accepted a pivot, we need to store the old pivot and set up the new one
## first grab the savable params
pivot = lastStepInfo.pop('pivot', None)
if pivot is None:
# ONLY RUN ONCE per trajectory! First time ever initialization of line step search
# use the current gradient to back-guess the would-be previous objective value
prevObjVal = curObjVal + curGradMag / 2 # oldOldFVal
# magnitude of the search vector first time is just the gradient magnitude
searchVectorMag = curGradMag
# search direction at the start is the opposite direction of the initial gradient
## note this is not great naming
searchVector = curGrad * -1 # pk
gradDotProduct = np.dot(curGrad, curGrad) # delta_k
else:
# LITERALLY every time except the first for each traj
lastStepInfo['previous pivot'] = pivot
prevObjVal = lastStepInfo['previous pivot']['objVal'] # oldFVal
prevGrad = lastStepInfo['previous pivot']['gradient']
gradDotProduct = lastStepInfo['gradDotProduct']
searchVectorMag, searchVector = self._polakRibierePowellStep(prevGrad, curGrad, gradDotProduct, lastStepInfo['searchVector'])
pivotObjDerivative = np.dot(searchVector, curGrad) # derPhi_0
stepSize = min(1.0, 1.01 * 2 * (curObjVal - prevObjVal) / pivotObjDerivative)
# comments are the literature equivalents for each variable name
lastStepInfo.update({'pivot': {'point': curPoint, # x_0
'objVal': curObjVal, # phi_0
'gradient': curGrad, # gf_k
'objDerivative': pivotObjDerivative, # derPhi_0
}, # w.r.t. pivot vals
'line': {'point': curPoint, # x_k
'objDerivative': pivotObjDerivative, # derPhi_1
}, # w.r.t. line search
'gradDotProduct': gradDotProduct, # delta_k
'searchVector': searchVector, # p_k
'searchVectorMag': searchVectorMag, # gNorm
'stepSize': stepSize, # alpha
'task': b'START',
'persistence': 0,
})
return lastStepInfo
def _lineSearchStep(self, lastStepInfo, curObjVal):
"""
Determine the next action to take in the line search process
@ In, lastStepInfo, dict, dictionary of past and present relevant information
@ In, curObjVal, float, current objective value obtained during line search
@ Out, stepSize, float, new suggested step size
@ Out, task, binary string, task of line search
"""
# determine next task in line search
stepSize = lastStepInfo['stepSize']
lineObjDerivative = lastStepInfo['line']['objDerivative']
task = lastStepInfo['task']
iSave = lastStepInfo['fortranParams']['iSave']
dSave = lastStepInfo['fortranParams']['dSave']
stepSize, _, _, task = minpack2.dcsrch(stepSize, curObjVal, lineObjDerivative,
ftol=1e-4, gtol=0.4, xtol=1e-14,
task=task, stpmin=1e-100, stpmax=1e100,
isave=iSave, dsave=dSave)
return stepSize, task
| |
import os
import socket
import json
import docker
import time
import re
import traceback
from threading import Thread
from colorama import Fore, Style
from toolset.utils.output_helper import log
from toolset.utils.database_helper import test_database
from psutil import virtual_memory
# total memory limit allocated for the test container: 95% of host RAM,
# leaving headroom for the toolset and the OS itself
mem_limit = int(round(virtual_memory().total * .95))
class DockerHelper:
    '''
    Wraps the docker SDK to build images and run/stop containers on the
    server, database, and client hosts used by a benchmark run.
    '''

    def __init__(self, benchmarker=None):
        '''
        Creates one docker client per machine role, using the daemon
        addresses configured on the benchmarker.
        '''
        self.benchmarker = benchmarker
        self.client = docker.DockerClient(
            base_url=self.benchmarker.config.client_docker_host)
        self.server = docker.DockerClient(
            base_url=self.benchmarker.config.server_docker_host)
        self.database = docker.DockerClient(
            base_url=self.benchmarker.config.database_docker_host)

    def __build(self, base_url, path, build_log_file, log_prefix, dockerfile,
                tag, buildargs=None):
        '''
        Builds docker containers using docker-py low-level api.
        Streams build output to build_log_file and raises on build errors
        or builds exceeding 60 minutes.
        '''
        # avoid a shared mutable default argument; None means "no build args"
        if buildargs is None:
            buildargs = {}
        self.benchmarker.time_logger.mark_build_start()
        with open(build_log_file, 'w') as build_log:
            try:
                client = docker.APIClient(base_url=base_url)
                output = client.build(
                    path=path,
                    dockerfile=dockerfile,
                    tag=tag,
                    forcerm=True,
                    timeout=3600,
                    pull=True,
                    buildargs=buildargs
                )
                buffer = ""
                for token in output:
                    # NOTE(review): token.keys()[0] is Python-2-only (py3 dict
                    # views are not indexable) and tokens arrive as bytes under
                    # py3 -- confirm before porting
                    if token.startswith('{"stream":'):
                        token = json.loads(token)
                        token = token[token.keys()[0]].encode('utf-8')
                        buffer += token
                    elif token.startswith('{"errorDetail":'):
                        token = json.loads(token)
                        raise Exception(token['errorDetail']['message'])
                    # flush complete lines to the build log as they arrive
                    while "\n" in buffer:
                        index = buffer.index("\n")
                        line = buffer[:index]
                        buffer = buffer[index + 1:]
                        log(line,
                            prefix=log_prefix,
                            file=build_log,
                            color=Fore.WHITE + Style.BRIGHT \
                                if re.match(r'^Step \d+\/\d+', line) else '')
                    # Kill docker builds if they exceed 60 mins. This will only
                    # catch builds that are still printing output.
                    if self.benchmarker.time_logger.time_since_start() > 3600:
                        log("Build time exceeded 60 minutes",
                            prefix=log_prefix,
                            file=build_log,
                            color=Fore.RED)
                        raise Exception
                if buffer:
                    log(buffer,
                        prefix=log_prefix,
                        file=build_log,
                        color=Fore.WHITE + Style.BRIGHT \
                            if re.match(r'^Step \d+\/\d+', buffer) else '')
            except Exception:
                tb = traceback.format_exc()
                log("Docker build failed; terminating",
                    prefix=log_prefix,
                    file=build_log,
                    color=Fore.RED)
                log(tb, prefix=log_prefix, file=build_log)
                self.benchmarker.time_logger.log_build_end(
                    log_prefix=log_prefix, file=build_log)
                raise
        self.benchmarker.time_logger.log_build_end(
            log_prefix=log_prefix, file=build_log)

    def clean(self):
        '''
        Cleans all the docker images from the system
        '''
        self.server.images.prune()
        for image in self.server.images.list():
            if len(image.tags) > 0:
                # 'techempower/tfb.test.gemini:0.1' -> 'techempower/tfb.test.gemini'
                image_tag = image.tags[0].split(':')[0]
                if image_tag != 'techempower/tfb' and 'techempower' in image_tag:
                    self.server.images.remove(image.id, force=True)
        self.server.images.prune()
        self.database.images.prune()
        for image in self.database.images.list():
            if len(image.tags) > 0:
                # 'techempower/tfb.test.gemini:0.1' -> 'techempower/tfb.test.gemini'
                image_tag = image.tags[0].split(':')[0]
                if image_tag != 'techempower/tfb' and 'techempower' in image_tag:
                    self.database.images.remove(image.id, force=True)
        self.database.images.prune()

    def build(self, test, build_log_dir=os.devnull):
        '''
        Builds the test docker containers.
        Returns 0 on success, 1 on failure.
        '''
        log_prefix = "%s: " % test.name

        # Build the test image
        test_docker_file = '%s.dockerfile' % test.name
        if hasattr(test, 'dockerfile'):
            test_docker_file = test.dockerfile
        test_database = ''
        if hasattr(test, 'database'):
            test_database = test.database
        build_log_file = build_log_dir
        # fix: compare by value, not identity -- 'is not' only worked when the
        # exact os.devnull object was passed through as the default
        if build_log_dir != os.devnull:
            build_log_file = os.path.join(
                build_log_dir,
                "%s.log" % test_docker_file.replace(".dockerfile", "").lower())
        try:
            self.__build(
                base_url=self.benchmarker.config.server_docker_host,
                build_log_file=build_log_file,
                log_prefix=log_prefix,
                path=test.directory,
                dockerfile=test_docker_file,
                buildargs=({
                    'BENCHMARK_ENV':
                    self.benchmarker.config.results_environment,
                    'TFB_TEST_NAME': test.name,
                    'TFB_TEST_DATABASE': test_database
                }),
                tag="techempower/tfb.test.%s" % test.name)
        except Exception:
            return 1
        return 0

    def run(self, test, run_log_dir):
        '''
        Run the given Docker container(s).
        Returns the started container, or None on failure.
        '''
        log_prefix = "%s: " % test.name
        container = None
        try:

            def watch_container(docker_container, docker_file):
                # stream container logs into a per-test log file
                with open(
                        os.path.join(
                            run_log_dir, "%s.log" % docker_file.replace(
                                ".dockerfile", "").lower()), 'w') as run_log:
                    for line in docker_container.logs(stream=True):
                        log(line, prefix=log_prefix, file=run_log)

            extra_hosts = None
            name = "tfb-server"
            if self.benchmarker.config.network is None:
                # without a user-defined network, resolve hostnames manually
                extra_hosts = {
                    socket.gethostname():
                    str(self.benchmarker.config.server_host),
                    'tfb-server':
                    str(self.benchmarker.config.server_host),
                    'tfb-database':
                    str(self.benchmarker.config.database_host)
                }
                name = None
            sysctl = {'net.core.somaxconn': 65535}
            ulimit = [{
                'name': 'nofile',
                'hard': 200000,
                'soft': 200000
            }, {
                'name': 'rtprio',
                'hard': 99,
                'soft': 99
            }]
            docker_cmd = ''
            if hasattr(test, 'docker_cmd'):
                docker_cmd = test.docker_cmd

            # Expose ports in debugging mode
            ports = {}
            if self.benchmarker.config.mode == "debug":
                ports = {test.port: test.port}

            container = self.server.containers.run(
                "techempower/tfb.test.%s" % test.name,
                name=name,
                command=docker_cmd,
                network=self.benchmarker.config.network,
                network_mode=self.benchmarker.config.network_mode,
                ports=ports,
                stderr=True,
                detach=True,
                init=True,
                extra_hosts=extra_hosts,
                privileged=True,
                ulimits=ulimit,
                mem_limit=mem_limit,
                sysctls=sysctl,
                remove=True,
                log_config={'type': None})

            watch_thread = Thread(
                target=watch_container,
                args=(
                    container,
                    "%s.dockerfile" % test.name,
                ))
            watch_thread.daemon = True
            watch_thread.start()

        except Exception:
            with open(
                    os.path.join(run_log_dir, "%s.log" % test.name.lower()),
                    'w') as run_log:
                tb = traceback.format_exc()
                log("Running docker container: %s.dockerfile failed" %
                    test.name,
                    prefix=log_prefix,
                    file=run_log)
                log(tb, prefix=log_prefix, file=run_log)

        return container

    @staticmethod
    def __stop_container(container):
        '''
        Kills a single container; ignores containers already gone.
        '''
        try:
            container.kill()
            time.sleep(2)
        except:
            # container has already been killed
            pass

    @staticmethod
    def __stop_all(docker_client):
        '''
        Kills every running techempower test container on the given client
        (leaving the toolset's own tfb:latest container alone).
        '''
        for container in docker_client.containers.list():
            if len(container.image.tags) > 0 \
                    and 'techempower' in container.image.tags[0] \
                    and 'tfb:latest' not in container.image.tags[0]:
                DockerHelper.__stop_container(container)

    def stop(self, containers=None):
        '''
        Attempts to stop a container or list of containers.
        If no containers are passed, stops all running containers.
        '''
        is_multi_setup = self.benchmarker.config.server_docker_host != \
            self.benchmarker.config.database_docker_host
        if containers:
            if not isinstance(containers, list):
                containers = [containers]
            for container in containers:
                DockerHelper.__stop_container(container)
        else:
            DockerHelper.__stop_all(self.server)
            if is_multi_setup:
                DockerHelper.__stop_all(self.database)
                DockerHelper.__stop_all(self.client)
        self.database.containers.prune()
        if is_multi_setup:
            # Then we're on a 3 machine set up
            self.server.containers.prune()
            self.client.containers.prune()

    def build_databases(self):
        '''
        Builds all the databases necessary to run the list of benchmarker tests
        '''
        built = []
        for test in self.benchmarker.tests:
            db = test.database.lower()
            if db not in built and db != "none":
                image_name = "techempower/%s:latest" % db
                log_prefix = image_name + ": "
                database_dir = os.path.join(self.benchmarker.config.db_root,
                                            db)
                docker_file = "%s.dockerfile" % db
                self.__build(
                    base_url=self.benchmarker.config.database_docker_host,
                    path=database_dir,
                    dockerfile=docker_file,
                    log_prefix=log_prefix,
                    build_log_file=os.devnull,
                    tag="techempower/%s" % db)
                built.append(db)

    def start_database(self, database):
        '''
        Sets up a container for the given database and port, and starts said docker
        container.
        '''
        image_name = "techempower/%s:latest" % database
        log_prefix = image_name + ": "
        sysctl = {
            'net.core.somaxconn': 65535,
            'kernel.sem': "250 32000 256 512"
        }
        ulimit = [{'name': 'nofile', 'hard': 65535, 'soft': 65535}]
        container = self.database.containers.run(
            "techempower/%s" % database,
            name="tfb-database",
            network=self.benchmarker.config.network,
            network_mode=self.benchmarker.config.network_mode,
            detach=True,
            ulimits=ulimit,
            sysctls=sysctl,
            remove=True,
            log_config={'type': None})
        # Sleep until the database accepts connections
        slept = 0
        max_sleep = 60
        database_ready = False
        while not database_ready and slept < max_sleep:
            time.sleep(1)
            slept += 1
            database_ready = test_database(self.benchmarker.config, database)
        if not database_ready:
            log("Database was not ready after startup", prefix=log_prefix)
        return container

    def build_wrk(self):
        '''
        Builds the techempower/tfb.wrk container
        '''
        self.__build(
            base_url=self.benchmarker.config.client_docker_host,
            path=self.benchmarker.config.wrk_root,
            dockerfile="wrk.dockerfile",
            log_prefix="wrk: ",
            build_log_file=os.devnull,
            tag="techempower/tfb.wrk")

    def test_client_connection(self, url):
        '''
        Tests that the app server at the given url responds successfully to a
        request.
        '''
        try:
            self.client.containers.run(
                'techempower/tfb.wrk',
                'curl --fail --max-time 5 %s' % url,
                remove=True,
                log_config={'type': None},
                network=self.benchmarker.config.network,
                network_mode=self.benchmarker.config.network_mode)
        except Exception:
            return False
        return True

    def server_container_exists(self, container_id_or_name):
        '''
        Returns True if the container still exists on the server.
        '''
        try:
            self.server.containers.get(container_id_or_name)
            return True
        except:
            return False

    def benchmark(self, script, variables, raw_file):
        '''
        Runs the given remote_script on the wrk container on the client machine.
        Blocks until the benchmark container exits, streaming its output
        into raw_file.
        '''

        def watch_container(container):
            with open(raw_file, 'w') as benchmark_file:
                for line in container.logs(stream=True):
                    log(line, file=benchmark_file)

        sysctl = {'net.core.somaxconn': 65535}
        ulimit = [{'name': 'nofile', 'hard': 65535, 'soft': 65535}]
        watch_container(
            self.client.containers.run(
                "techempower/tfb.wrk",
                "/bin/bash /%s" % script,
                environment=variables,
                network=self.benchmarker.config.network,
                network_mode=self.benchmarker.config.network_mode,
                detach=True,
                stderr=True,
                ulimits=ulimit,
                sysctls=sysctl,
                remove=True,
                log_config={'type': None}))
| |
'''
@author: Frank
'''
import readline
import os
import sys
import shlex
import hashlib
import optparse
import termcolor
import pydoc
import time
import urllib3
import zstacklib.utils.log as log
#comment out next line to print detail zstack cli http command to screen.
log.configure_log('/var/log/zstack/zstack-cli', log_to_console=False)
import apibinding.inventory as inventory
import apibinding.api as api
import zstacklib.utils.jsonobject as jsonobject
import zstacklib.utils.filedb as filedb
import zstackcli.parse_config as parse_config
import zstackcli.deploy_config as deploy_config
import zstackcli.read_config as read_config
# shorthand aliases for colored terminal output helpers
cld = termcolor.colored
cprint = termcolor.cprint
text_doc = pydoc.TextDoc()
# filesystem layout of per-user CLI state: command history, cached API
# results, and the login session token
CLI_LIB_FOLDER = os.path.expanduser('~/.zstack/cli')
CLI_HISTORY = '%s/command_history' % CLI_LIB_FOLDER
CLI_RESULT_HISTORY_FOLDER = '%s/result_history' % CLI_LIB_FOLDER
CLI_RESULT_HISTORY_KEY = '%s/result_key' % CLI_RESULT_HISTORY_FOLDER
# NOTE(review): 'RESSULT' typo kept -- other modules may reference this name
CLI_RESSULT_FILE = '%s/result' % CLI_RESULT_HISTORY_FOLDER
SESSION_FILE = '%s/session' % CLI_LIB_FOLDER
# retention caps for history files
CLI_MAX_CMD_HISTORY = 1000
CLI_MAX_RESULT_HISTORY = 1000
prompt = '>>>'
# parameters treated as query-control keys (not query conditions) by Query APIs
query_param_keys = \
    ['conditions', 'count', 'limit', 'start', 'timeout', \
     'replyWithCount', 'sortBy', 'sortDirection', 'fields']
class CliError(Exception):
    '''Raised for user-facing CLI failures such as unknown API messages or malformed parameters.'''
class Cli(object):
'''
classdocs
'''
msg_creator = {}
LOGIN_MESSAGE_NAME = 'APILogInByAccountMsg'
LOGOUT_MESSAGE_NAME = 'APILogOutMsg'
LOGIN_BY_USER_NAME = 'APILogInByUserMsg'
CREATE_ACCOUNT_NAME = 'APICreateAccountMsg'
CREATE_USER_NAME = 'APICreateUserMsg'
ACCOUNT_RESET_PASSWORD_NAME = 'APIUpdateAccountMsg'
USER_RESET_PASSWORD_NAME = 'APIUpdateUserMsg'
@staticmethod
def register_message_creator(apiname, func):
Cli.msg_creator[apiname] = func
    def usage(self):
        '''Print the interactive-shell banner with basic usage hints.'''
        print '''
  ZStack command line tool
  Type "help" for more information
  Type Tab key for auto-completion
  Type "quit" or "exit" or Ctrl-d to exit
  '''
def print_error(self, err):
print '\033[91m' + err + '\033[0m'
def complete(self, pattern, index):
'''
pattern is current input. index is current matched number of list.
complete will be kept calling, until it return None.
'''
def prepare_primitive_fields_words(apiname, separator='=', prefix=''):
if not prefix:
api_map_name = inventory.queryMessageInventoryMap[apiname].__name__
else:
api_map_name = apiname
query_pri_fields = eval('inventory.%s().PRIMITIVE_FIELDS' % api_map_name)
query_pri_fields = ['%s' % field for field in query_pri_fields]
temp_fields = list(query_pri_fields)
query_pri_fields = []
for field in temp_fields:
if prefix:
query_pri_fields.append('%s%s%s' % (prefix, field, separator))
else:
query_pri_fields.append('%s%s' % (field, separator))
self.words.extend(query_pri_fields)
def prepare_expanded_fields_words(apiname, separator='.', prefix=''):
if not prefix:
api_map_name = inventory.queryMessageInventoryMap[apiname].__name__
else:
api_map_name = apiname
query_ext_fields = eval('inventory.%s().EXPANDED_FIELDS' % api_map_name)
query_ext_fields = ['%s' % field for field in query_ext_fields]
temp_fields = list(query_ext_fields)
query_ext_fields = []
for field in temp_fields:
if prefix:
query_ext_fields.append('%s%s%s' % (prefix, field, separator))
else:
query_ext_fields.append('%s%s' % (field, separator))
self.words.extend(query_ext_fields)
if 'conditions=' in self.words:
self.words.remove('conditions=')
def prepare_query_words(apiname, prefix=''):
prepare_primitive_fields_words(apiname, '=', prefix)
prepare_expanded_fields_words(apiname, '.', prefix)
def prepare_fields_words(apiname, current_fields=[]):
prepare_primitive_fields_words(apiname, ',')
for field in current_fields:
new_field = '%s,' % field
if new_field in self.words:
self.words.remove(new_field)
def prepare_words():
currtext = readline.get_line_buffer()
apiname = currtext.split()[0]
if apiname in self.words_db:
self.is_cmd = False
self.words = ['%s=' % field for field in self.api_class_params['API%sMsg' % apiname]]
if apiname.startswith('Query'):
real_api_name = 'API%sMsg' % apiname
prepare_query_words(real_api_name)
if not ('UserTag' in apiname or 'SystemTag' in apiname):
self.words.append('__systemTag__=')
self.words.append('__userTag__=')
else:
self.is_cmd = True
self.words = self.words_db
if not self.words:
return None
prepare_words()
if not self.curr_pattern or pattern.lower() != self.curr_pattern.lower():
#self.matching_words = [w for w in self.words if w.lower().startswith(pattern.lower())]
if self.is_cmd:
self.matching_words = ['%s ' % w for w in self.words if pattern.lower() in w.lower()]
else:
#need to auto complete expanded fields.
if '.' in pattern:
currtext = readline.get_line_buffer()
fields_objects = pattern.split('.')
head_field = fields_objects[0]
fields_num = len(fields_objects)
apiname = currtext.split()[0]
new_api_name = 'API%sMsg' % apiname
if inventory.queryMessageInventoryMap.has_key(new_api_name):
api_obj_name = inventory.queryMessageInventoryMap[new_api_name].__name__
query_ext_fields = eval('inventory.%s().EXPANDED_FIELDS' % api_obj_name)
if head_field in query_ext_fields:
current_obj_name = eval('inventory.%s().QUERY_OBJECT_MAP["%s"]' % (api_obj_name, head_field))
for i in range(0, fields_num):
if i == fields_num - 2:
break
next_field = fields_objects[i + 1]
query_ext_fields = eval('inventory.%s().EXPANDED_FIELDS' % current_obj_name)
if next_field in query_ext_fields:
current_obj_name = eval('inventory.%s().QUERY_OBJECT_MAP["%s"]' % (current_obj_name, next_field))
else:
current_obj_name = None
else:
current_obj_name = None
else:
current_obj_name = None
if current_obj_name:
self.words = []
pattern_prefix = '.'.join(fields_objects[:-1])
prepare_query_words(current_obj_name, '%s.' % pattern_prefix)
currtext = readline.get_line_buffer()
last_field = currtext.split()[-1]
if not currtext.endswith(' ') and last_field.startswith('fields='):
apiname = currtext.split()[0]
new_api_name = 'API%sMsg' % apiname
api_map_name = inventory.queryMessageInventoryMap[new_api_name].__name__
self.words = []
fields = last_field.split('=')[1]
prepare_fields_words(new_api_name, fields.split(','))
self.matching_words = [w for w in self.words if pattern.lower() in w.lower()]
self.curr_pattern = pattern
try:
return self.matching_words[index]
except IndexError:
return None
def do_command(self, line):
def check_session(apiname):
if not self.session_uuid and apiname not in [self.LOGIN_MESSAGE_NAME, self.LOGIN_BY_USER_NAME]:
self.print_error('''Please login before running any API message
example: %sLogInByAccount accountName=admin password=your_super_secure_admin_password''' % prompt)
return False
return True
def is_api_param_a_list(apiname, param):
optional_list = eval('isinstance(inventory.%s().%s, \
inventory.OptionalList)' % (apiname, param))
not_none_list = eval('isinstance(inventory.%s().%s, \
inventory.NotNoneList)' % (apiname, param))
if optional_list or not_none_list:
return True
def build_params():
def eval_string(key, value_string):
try:
return eval(value_string)
except Exception as e:
err_msg = """
Parse command parameters error:
eval '%s' error for: '%s'
the right format is like: "[{'KEY':'VALUE'}, {'KEY':['VALUE1', 'VALUE2']}]"
""" % (value_string, key)
self.print_error(err_msg)
raise e
pairs = shlex.split(line)
if pairs[0] in self.cli_cmd:
cmd = pairs[0]
if len(pairs) > 1:
return cmd, pairs[1:]
else:
return cmd, None
apiname = 'API%sMsg' % pairs[0]
if apiname not in inventory.api_names:
raise CliError('"%s" is not an API message' % apiname)
#'=' will be used for more meanings than 'equal' in Query API
if apiname.startswith('APIQuery'):
return apiname, pairs[1:]
all_params = {}
for param_str in pairs[1:]:
params = param_str.split('=', 1)
if len(params) != 2:
raise CliError('Invalid parameter[%s], the parameter must be split by "="' % param_str)
if apiname == 'APIAddSecurityGroupRuleMsg' and params[0] == 'rules':
all_params[params[0]] = eval(params[1])
elif apiname == 'APIAttachNetworkServiceToL3NetworkMsg' and params[0] == 'networkServices':
all_params[params[0]] = eval_string(params[0], params[1])
elif apiname == 'APICreatePolicyMsg' and params[0] == 'statements':
all_params[params[0]] = eval_string(params[0], params[1])
elif is_api_param_a_list(apiname, params[0]):
all_params[params[0]] = params[1].split(',')
else:
all_params[params[0]] = params[1]
return (apiname, all_params)
def generate_query_params(apiname, params):
'''
Query params will include conditions expression, which includes ops:
=, !=, >, <, >=, <=, ?=, !?=, ~=, !~=
?= means 'in'
!?= means 'not in'
~= means 'like'
!~= means 'not like'
=null means 'is null'
!=null means 'is not null'
'''
null = 'null'
eq = '='
gt = '>'
lt = '<'
nt = '!'
lk = '~'
qs = '?'
ps = '+'
ms = '-'
perc = '%'
underscore = '_'
conditions = []
new_params = {}
for param in params:
if eq in param:
key,value = param.split(eq, 1)
if not key in query_param_keys:
if key.endswith(nt):
if value != null:
conditions.append({'name':key[:-1], \
'op':'!=', 'value': value})
else:
conditions.append({'name':key[:-1], \
'op':'is not null', 'value': ''})
elif key.endswith(gt):
conditions.append({'name':key[:-1], \
'op':'>=', 'value': value})
elif key.endswith(lt):
conditions.append({'name':key[:-1], \
'op':'<=', 'value': value})
elif key.endswith('%s%s' % (nt, qs)):
conditions.append({'name':key[:-2], \
'op':'not in', 'value': value})
elif key.endswith(qs):
conditions.append({'name':key[:-1], \
'op':'in', 'value': value})
elif key.endswith('%s%s' % (nt, lk)):
#will help to add pattern %, if user not input
if not perc in value and not underscore in value:
value = '%s%s%s' % (perc, value, perc)
conditions.append({'name':key[:-2], \
'op':'not like', 'value': value})
elif key.endswith(lk):
#will help to add pattern %, if user not input
if not perc in value and not underscore in value:
value = '%s%s%s' % (perc, value, perc)
conditions.append({'name':key[:-1], \
'op':'like', 'value': value})
else:
if value != null:
conditions.append({'name':key, \
'op':eq, 'value': value})
else:
conditions.append({'name':key, \
'op':'is null', 'value': ''})
elif key == 'conditions':
conditions.extend(value)
elif key == 'fields':
#remove the last ','
if value.endswith(','):
value = value[:-1]
new_params[key] = value.split(',')
else:
if is_api_param_a_list(apiname, key):
new_params[key] = value.split(',')
else:
new_params[key] = value
elif gt in param:
key,value = param.split(gt, 1)
conditions.append({'name':key, \
'op':gt, 'value': value})
elif lt in param:
key,value = param.split(lt, 1)
conditions.append({'name':key, \
'op':lt, 'value': value})
new_params['conditions'] = conditions
return new_params
def create_msg(apiname, params):
creator = self.msg_creator.get(apiname)
if creator:
return creator(apiname, params)
if apiname.startswith('APIQuery'):
params = generate_query_params(apiname, params)
msg = eval('inventory.%s()' % apiname)
for key in params.keys():
value = params[key]
setattr(msg, key, value)
return msg
def set_session_to_api(msg):
session = inventory.Session()
session.uuid = self.session_uuid
msg.session = session
(apiname, all_params) = build_params()
if apiname in self.cli_cmd:
#self.write_more(apiname, None)
self.cli_cmd_func[apiname](all_params)
return
if not check_session(apiname):
raise CliError("No session uuid defined")
msg = create_msg(apiname, all_params)
set_session_to_api(msg)
try:
if apiname in [self.LOGIN_MESSAGE_NAME, self.LOGIN_BY_USER_NAME, self.CREATE_ACCOUNT_NAME, self.CREATE_USER_NAME, self.ACCOUNT_RESET_PASSWORD_NAME, self.USER_RESET_PASSWORD_NAME]:
if not msg.password:
raise CliError('"password" must be specified')
msg.password = hashlib.sha512(msg.password).hexdigest()
if apiname == self.LOGOUT_MESSAGE_NAME:
if not msg.sessionUuid:
setattr(msg, 'sessionUuid', self.session_uuid)
start_time = time.time()
(name, event) = self.api.async_call_wait_for_complete(msg, fail_soon=True)
end_time = time.time()
if apiname in [self.LOGIN_MESSAGE_NAME, self.LOGIN_BY_USER_NAME]:
self.session_uuid = event.inventory.uuid
open(SESSION_FILE, 'w').write(self.session_uuid)
result = jsonobject.dumps(event, True)
print '%s\n' % result
#print 'Time costing: %fs' % (end_time - start_time)
self.write_more(line, result)
except urllib3.exceptions.MaxRetryError as urlerr:
self.print_error('Is %s reachable? Please make sure the management node is running.' % self.api.api_url)
self.print_error(str(urlerr))
raise ("Server: %s is not reachable" % self.hostname)
except Exception as e:
self.print_error(str(e))
self.write_more(line, str(e), False)
raise e
def main(self, cmd = None):
    """Run the CLI: one-shot if `cmd` is given, otherwise an interactive REPL.

    In one-shot mode the process exits with 0 on success, 1 on CliError
    and 3 on any other exception.  In interactive mode errors are printed
    and the loop continues; Ctrl-D (EOF) saves readline history and exits.
    """
    if not cmd:
        # Interactive session: show usage banner first.
        self.usage()
    exit_code = 0
    while True:
        try:
            if cmd:
                self.do_command(cmd)
            else:
                line = raw_input(prompt)
                if line:
                    self.do_command(line)
        except CliError as clierr:
            self.print_error(str(clierr))
            exit_code = 1
        except (EOFError):
            # Ctrl-D: persist command history on interpreter exit, then quit.
            print ''
            import atexit
            if not os.path.exists(os.path.dirname(CLI_HISTORY)):
                os.system('mkdir -p %s' % os.path.dirname(CLI_HISTORY))
            atexit.register(readline.write_history_file, CLI_HISTORY)
            sys.exit(1)
        except (KeyboardInterrupt):
            # Ctrl-C: just abandon the current line and re-prompt.
            print ''
        except Exception as e:
            exit_code = 3
            self.print_error(str(e))
        if cmd:
            # One-shot mode: never loop.
            sys.exit(exit_code)
def build_api_parameters(self):
    """Populate self.api_class_params with the user-settable field names
    of every API message, used by tab completion.

    The 'session' field is excluded because it is injected automatically
    when the message is sent.
    """
    for api_name in inventory.api_names:
        message = eval("inventory.%s()" % api_name)
        field_names = [field for field in message.__dict__.keys()
                       if field != 'session']
        self.api_class_params[api_name] = field_names
def _parse_api_name(self, api_names):
'''
Remove API pattern 'API' and appendix 'MSG'
'''
short_api_name = []
for api in api_names:
if api.endswith('Msg'):
short_api_name.append(api[3:-3])
short_api_name.sort()
return short_api_name
def completer_print(self, substitution, matches, longest_match_length) :
    """readline display hook: print completion matches grouped and colorized.

    Matches are bucketed by their suffix: '.' (expandable query-condition
    fields), '=' (query parameters vs. primitive conditions) and plain
    words; the currently typed pattern is highlighted inside each match.
    """
    def print_match(columes, new_matches, max_match_length):
        # Lay the matches out in `columes` columns, padding each entry to
        # max_match_length, and highlight the typed pattern in reverse bold.
        cur_col = 1
        for match in new_matches:
            if cur_col == columes:
                end_sign = '\n'
                cur_col = 1
            else:
                end_sign = ' ' * (max_match_length - len(match))
                cur_col += 1
            try:
                index = match.lower().index(self.curr_pattern.lower())
            except Exception as e:
                # Should not happen: every match was selected because it
                # contains curr_pattern.
                print "can't find pattern: %s in match: %s" % (self.curr_pattern, match)
                print e
                raise e
            cprint(match[0:index], end='')
            cprint(match[index:(len(self.curr_pattern) + index)], attrs=['bold', 'reverse'], end='')
            cprint(match[(len(self.curr_pattern) + index):], end=end_sign)
    def print_bold():
        # Bucket the matches, compute the column layout from the terminal
        # width, then print each bucket under its own heading.
        max_match_length = 0
        matches_dot = []
        matches_eq_cond = []
        matches_eq_param = []
        matches_ot = []
        currtext = readline.get_line_buffer()
        apiname = currtext.split()[0]
        if apiname.startswith('Query'):
            query_cmd = True
        else:
            query_cmd = False
        for match in matches:
            if len(match) > max_match_length:
                max_match_length = len(match)
            if match.endswith('.'):
                matches_dot.append(match)
            elif match.endswith('='):
                # for/else: if no known query parameter key prefixes the
                # match, treat it as a primitive query condition.
                for key in query_param_keys:
                    if query_cmd and match.startswith(key):
                        matches_eq_param.append(match)
                        break
                else:
                    matches_eq_cond.append(match)
            else:
                matches_ot.append(match)
        max_match_length += 2
        try:
            # Terminal width via `stty size`; fall back to 80 columns.
            term_width = int(os.popen('stty size', 'r').read().split()[1])
        except:
            term_width = 80
        columes = term_width/max_match_length
        if columes == 0:
            columes = 1
        if matches_dot:
            if query_cmd:
                cprint('[Query Conditions:]', attrs=['bold'], end='\n')
            print_match(columes, matches_dot, max_match_length)
            print '\n'
        if matches_eq_cond:
            #cprint('[Primitive Query Conditions:]', attrs=['bold'], end='\n')
            print_match(columes, matches_eq_cond, max_match_length)
            print '\n'
        if matches_eq_param:
            if query_cmd:
                cprint('[Parameters:]', attrs=['bold'], end='\n')
            print_match(columes, matches_eq_param, max_match_length)
            print '\n'
        if matches_ot:
            print_match(columes, matches_ot, max_match_length)
            print '\n'
    print ''
    print_bold()
    print ''
    # Re-echo the prompt and the partially typed line after the listing.
    cprint('%s%s' % (prompt, readline.get_line_buffer()), end='')
    #readline.redisplay()
def write_more(self, cmd, result, success=True):
    """Record a command and its result in the bounded history ring.

    The ring holds at most CLI_MAX_RESULT_HISTORY entries keyed 1..N in
    the filedb; start_key tracks the most recent slot and last_key the
    oldest.  The result body is written to a side file named
    CLI_RESSULT_FILE + slot number.
    """
    if self.hd.get(self.start_key):
        start_value = int(self.hd.get(self.start_key))
    else:
        start_value = 0
    if self.hd.get(self.last_key):
        last_value = int(self.hd.get(self.last_key))
    else:
        last_value = 0
    # Advance the head slot, wrapping to 1 once the ring is full and
    # pushing the tail (oldest entry) forward when it would be overwritten.
    if last_value <= start_value:
        if start_value < CLI_MAX_RESULT_HISTORY:
            start_value += 1
        else:
            start_value = 1
            last_value = 2
    else:
        if last_value < CLI_MAX_RESULT_HISTORY:
            start_value += 1
            last_value += 1
        else:
            start_value += 1
            last_value = 1
    self.hd.set(self.start_key, start_value)
    self.hd.set(self.last_key, last_value)
    #filedb might leave more than 1 same key item.
    while self.hd.get(str(start_value)):
        self.hd.rem(str(start_value))
    # NOTE(review): constant name 'CLI_RESSULT_FILE' is misspelled at its
    # definition site; kept as-is here.
    result_file = '%s%d' % (CLI_RESSULT_FILE, start_value)
    open(result_file, 'w').write(result)
    self.hd.set(str(start_value), [cmd, success])
def read_more(self, num=None, need_print=True, full_info=True):
    '''
    Show one stored command result, or list all stored commands.

    num: history number as displayed by 'more' (string); when None, a
    paged index of all stored commands is shown instead.
    need_print will indicate whether print the command result to screen.
    full_info will indicate whether return command and params information
    when return command results.

    Returns [command, output] for a found entry, otherwise None.
    '''
    start_value = self.hd.get(self.start_key)
    last_value = self.hd.get(self.last_key)
    more_usage_list = [text_doc.bold('Usage:'), text_doc.bold('\t%smore NUM\t #show the No. NUM Command result' % prompt), text_doc.bold('\t%smore\t\t #show all available NUM and Command. The failure command will be marked with "!" before it.' % prompt)]
    more_usage = '\n'.join(more_usage_list)
    if not start_value:
        print 'No command history to display.'
        return
    if num:
        if num.isdigit():
            if int(num) > CLI_MAX_CMD_HISTORY:
                print 'Not find result for number: %s' % num
                print 'Max number is: %s ' % str(CLI_MAX_RESULT_HISTORY)
                cprint(more_usage, attrs=['bold'], end='\n')
                return
            # Translate the displayed number (1 == most recent) into a
            # ring slot, wrapping around the history size.
            key = start_value - int(num) + 1
            if key <= 0:
                key += CLI_MAX_RESULT_HISTORY
            #print key
            result_list = self.hd.get(str(key))
            result_file = '%s%d' % (CLI_RESSULT_FILE, key)
            result = open(result_file, 'r').read()
            if result_list:
                output = 'Command: \n\t%s\nResult:\n%s' % \
                    (result_list[0], result)
                if need_print:
                    pydoc.pager(output)
                if full_info:
                    return [result_list[0], output]
                else:
                    return [result_list[0], result]
    else:
        # No number given: page an index of all stored commands, newest
        # first, marking failed commands with a bold '!'.
        more_list = []
        explamation = text_doc.bold('!')
        if start_value < last_value:
            # Ring has wrapped: walk all slots.
            for i in range(CLI_MAX_RESULT_HISTORY):
                if start_value - i > 0:
                    key = start_value - i
                else:
                    key = start_value - i + CLI_MAX_RESULT_HISTORY
                cmd_result = self.hd.get(str(key))
                cmd_result_list = cmd_result[0].split()
                cmd = text_doc.bold(cmd_result_list[0])
                if len(cmd_result_list) > 1:
                    cmd = cmd + ' ' + ' '.join(cmd_result_list[1:])
                # NOTE(review): write_more stores [cmd, success] (2 items),
                # so `len(cmd_result) <= 2` is always true and the '!'
                # failure mark looks unreachable; index 1, not 2, holds the
                # success flag -- verify against filedb.FileDB.get().
                if len(cmd_result) <= 2 or cmd_result[2]:
                    more_list.append('[%s]\t %s' % (str(i + 1), cmd))
                else:
                    more_list.append('[%s] %s\t %s' % (str(i + 1), \
                        explamation, cmd))
        else:
            # Ring not yet full: only slots 1..start_value exist.
            for i in range(start_value):
                cmd_result = self.hd.get(str(start_value - i))
                cmd_result_list = cmd_result[0].split()
                cmd = text_doc.bold(cmd_result_list[0])
                if len(cmd_result_list) > 1:
                    cmd = cmd + ' ' + ' '.join(cmd_result_list[1:])
                if len(cmd_result) <= 2 or cmd_result[2]:
                    more_list.append('[%s]\t %s' % (str(i + 1), \
                        cmd))
                else:
                    more_list.append('[%s] %s\t %s' % (str(i + 1), \
                        explamation, cmd))
        more_result = '\n'.join(more_list)
        header = text_doc.bold('[NUM]\tCOMMAND')
        more_result = '%s\n%s\n%s' % (header, '-' * 48, more_result)
        more_result = '%s\n%s' % (more_result, more_usage)
        pydoc.pager(more_result)
        return
    # Fall-through: num was given but not a digit, or no stored entry.
    print 'Not find result for number: %s' % num
    cprint(more_usage, attrs=['bold'], end='\n')
def save_json_to_file(self, all_params):
def write_to_file(output, file_name, num):
file_name = os.path.abspath(file_name)
open(file_name, 'w').write(output)
print "Saved command: %s result to file: %s" % (str(num), file_name)
if not all_params:
self.show_help()
return
nums = all_params[0].split(',')
if len(all_params) > 1:
file_folder = all_params[1]
if len(nums) > 1 and not os.path.isdir(file_folder):
print "%s must be a folder, to save more than 1 command" % file_folder
return
else:
file_folder = None
if len(all_params) > 2:
json_only = all_params[2]
else:
json_only = False
for num in nums:
return_result = self.read_more(num, False, not json_only)
if not return_result:
print "cannot find related command result to save"
return
cmd, output = return_result
if not file_folder:
new_file_folder = '%s-%s.json' % (cmd.split()[0], num)
else:
new_file_folder = file_folder
dirname = os.path.dirname(new_file_folder)
if not dirname:
file_name = new_file_folder
write_to_file(output, file_name, num)
else:
if os.path.isdir(new_file_folder):
file_name = '%s/%s-%s.json' % (new_file_folder, cmd.split()[0], num)
elif os.path.isdir(dirname):
write_to_file(output, file_name, num)
else:
print "Can't find folder: %s" % dirname
def show_more(self, all_params):
    """Handler for the built-in 'more' command.

    With an argument, display that numbered command result; without one,
    display the full history index.
    """
    selector = all_params[0] if all_params else None
    self.read_more(selector)
def show_help(self, all_params):
    """Page the CLI usage text plus the list of all short API names.

    `all_params` is accepted (and ignored) so this handler has the same
    signature as the other entries in cli_cmd_func.
    """
    help_string = text_doc.bold('Usage:')
    help_string += '''
-------------------------------------------------------------------------------
help show help
more [No.] show a single or multiple command history. If a command NUM is provided, only
history of that command will show.
>>> more
>>> more 1
save [No.] [TARGET_FILE_NAME|TARGET_FOLDER]
save a single or multiple command history to a file or a directory.
>>> save 1
save history command 1 result to ./COMMAND-NAME-1.json
>>> save 1,2,3,4
save command history 1,2,3,4 to ./COMMAND-1.json, ./COMMAND-2.json,
./COMMAND-3.json, and ./COMMAND-4.json
>>> save 1 /tmp
save command history 1 to /tmp/COMMAND-1.json
>>> save 1 /tmp/1.json
save command history 1 to /tmp/1.json
ZSTACK_API [API_PARAMS]
execute a API command like LogInByAccount, QueryHost.
>>> LogInByAccount accountName=admin password=password
>>> QueryHost
If API PARAMS is a list type, use ',' to split contents.
>>> AddVmNicToSecurityGroup \\
securityGroupUuid=561f792761124a9a8fa9198684eaf5f2 \\
vmNicUuids=f994b93fe9354fd89061ea549642c6a4,\\
aee96364351e470abe1cfd919ce630b8,\\
e0c8016595a548628523d97b70e984e8
the parameter 'rules' of AddSecurityGroupRule is a list containing items of
map, you need to use a JSON object in this case.
>>> AddSecurityGroupRule \\
securityGroupUuid=561f792761124a9a8fa9198684eaf5f2 \\
rules='[{"type":"Ingress","protocol":"TCP",\\
"startPort":100,"endPort":1000},\\
{"type":"Ingress","protocol":"UDP",\\
"startPort":100,"endPort":1000}]'
Query* [conditions] [Query_API_PARAMS]
query resources with query APIs; find details at http://zdoc.readthedocs.org/en/latest/userManual/query.html.
conditions are arranged in format of:
CONDITION_NAME(no space)OPERATOR(no space)VALUE
[CONDITION_NAME] is a field name of a resource, for example, uuid, name.
[OPERATOR] is one of: '='. '!=', '>', '<', '>=', '<=',
'?=', '!?=', '~=', '!~='
most operators are straightforward except follows:
'?=": check whether a value is within a set of values; values are split by ','; this
operator is equal to 'in' operator in SQL.
>>> QueryVmInstance name?=VM1,VM2
'!?=': check whether a value is NOT within a set of values; values are split by ',';
this operator is equal to 'not in' operator in SQL.
>>> QueryVmInstance vmNics.ip!?=192.168.0.1,192.168.0.2
'~=': simple pattern matching; use % to match any number of characters, even zero characters; use _
to match exactly one character; this operator is equal to 'like' operator in SQL.
>>> QueryHost name~=IntelCore%
>>> QueryHost name~=IntelCore_7
'!~=': negation of simple pattern matching; use % to match any number of characters, even zero
characters; use _ to matches exactly one character; this operator is equal to 'not like' in SQL.
>>> QueryHost name!~=IntelCore%
>>> QueryHost name!~=IntelCore_7
'=null': NULL value test
>>> QueryVolume vmInstanceUuid=null
'!=null': NOT NULL value test
>>> QueryVolume vmInstanceUuid!=null
[VALUE] is a string containing value as query a condition; ',' is used to split value into a string list.
strings are compared as case insensitive.
'''
    help_string += text_doc.bold('ZStack API')
    help_string += '''
-------------------------------------------------------------------------------
'''
    # Append every known short API name to the paged help output.
    for api in self.raw_words_db:
        help_string += ' %s\n\n' % api
    pydoc.pager(help_string)
def __init__(self, options):
    '''
    Constructor: wire up readline completion, the on-disk result history,
    the built-in command table, the API word list for completion, and the
    API connection (options.host / options.port).
    '''
    readline.parse_and_bind("tab: complete")
    readline.set_completer(self.complete)
    readline.set_completion_display_matches_hook(self.completer_print)
    try:
        readline.read_history_file(CLI_HISTORY)
    except IOError:
        # No history yet on first run.
        pass
    readline.set_history_length(CLI_MAX_CMD_HISTORY)
    # Recreate the result-history folder from scratch if it is missing.
    if not os.path.isdir(CLI_RESULT_HISTORY_FOLDER):
        os.system('rm -rf %s' % os.path.dirname(CLI_RESULT_HISTORY_FOLDER))
        os.system('mkdir -p %s' % os.path.dirname(CLI_RESULT_HISTORY_FOLDER))
    try:
        self.hd = filedb.FileDB(CLI_RESULT_HISTORY_KEY, is_abs_path=True)
    except:
        print "Read history file: %s error, please manually delete it." % CLI_RESULT_HISTORY_KEY
        return
    # Keys under which write_more()/read_more() track the history ring.
    self.start_key = 'start_key'
    self.last_key = 'last_key'
    # NOTE(review): 'history' is mapped to show_help, same as 'help' --
    # possibly intentional alias, possibly a leftover; confirm.
    self.cli_cmd_func = {'help': self.show_help, \
        'history': self.show_help, \
        'more': self.show_more, \
        'quit': sys.exit, \
        'exit': sys.exit, \
        'save': self.save_json_to_file}
    self.cli_cmd = self.cli_cmd_func.keys()
    # Word lists used by tab completion: short API names + built-ins.
    self.raw_words_db = self._parse_api_name(inventory.api_names)
    self.words_db = list(self.raw_words_db)
    self.words_db.extend(self.cli_cmd)
    self.words = list(self.words_db)
    self.is_cmd = False
    self.curr_pattern = None
    self.matching_words = None
    self.api_class_params = {}
    self.build_api_parameters()
    self.api = None
    self.session_uuid = None
    # Reuse a previously persisted login session if one exists.
    if os.path.exists(SESSION_FILE):
        self.session_uuid = open(SESSION_FILE, 'r').readline()
    self.hostname = options.host
    self.port = options.port
    self.api = api.Api(host=self.hostname, port=self.port)
def main():
    """Entry point: parse options, then dump/deploy a cloud config or start the CLI."""
    parser = optparse.OptionParser()
    parser.add_option(
        "-H",
        "--host",
        dest="host",
        default='localhost',
        action='store',
        help="[Optional] IP address or DNS name of a ZStack management node. Default value: localhost")
    parser.add_option(
        "-p",
        "--port",
        dest="port",
        default='8080',
        action='store',
        help="[Optional] Port that the ZStack management node is listening on. Default value: 8080")
    parser.add_option(
        "-d",
        "--deploy",
        dest="deploy_config_file",
        default=None,
        action='store',
        help="[Optional] deploy a cloud from a XML file.")
    parser.add_option(
        "-t",
        "--tempate",
        dest="deploy_config_template_file",
        default=None,
        action='store',
        help="[Optional] variable template file for XML file spcified in option '-d'")
    parser.add_option(
        "-D",
        "--dump",
        dest="zstack_config_dump_file",
        default=None,
        action='store',
        help="[Optional] dump a cloud to a XML file")
    (options, args) = parser.parse_args()
    # Any leftover positional args form a one-shot CLI command.
    cmd = ' '.join(args)
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = options.host
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_PORT'] = options.port
    if options.zstack_config_dump_file:
        read_config.dump_zstack(options.zstack_config_dump_file)
    elif options.deploy_config_file:
        #deploy ZStack pre-configed environment.
        xml_config = parse_config.DeployConfig(options.deploy_config_file, options.deploy_config_template_file)
        deploy_xml_obj = xml_config.get_deploy_config()
        # try/except/else: deploy the nested deployerConfig when present,
        # otherwise deploy the whole XML object.
        try:
            deploy_xml_obj.deployerConfig
        except:
            deploy_config.deploy_initial_database(deploy_xml_obj)
        else:
            deploy_config.deploy_initial_database(deploy_xml_obj.deployerConfig)
        print('Successfully deployed a cloud from: %s' % options.deploy_config_file)
    else:
        # Default: run the interactive (or one-shot) CLI.
        cli = Cli(options)
        cli.main(cmd)
# Script entry point.
if __name__ == '__main__':
    main()
| |
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests sonnet.python.modules.nets.mlp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import numpy as np
import sonnet as snt
import tensorflow as tf
class MLPTest(parameterized.TestCase, tf.test.TestCase):
  """Tests for snt.nets.MLP: construction, flags, shapes, transpose, getters."""

  def setUp(self):
    super(MLPTest, self).setUp()
    # Shared fixture: a 3-layer MLP spec plus batch/input sizes, and
    # initializer/regularizer/partitioner dicts keyed on the weight 'w'.
    self.output_sizes = [11, 13, 17]
    self.batch_size = 5
    self.input_size = 7
    self.module_name = "mlp"
    self.initializers = {
        "w": tf.truncated_normal_initializer(stddev=1.0),
    }
    self.regularizers = {
        "w": tf.contrib.layers.l1_regularizer(scale=0.1),
    }
    self.partitioners = {
        "w": tf.fixed_size_partitioner(num_shards=2),
    }

  def testName(self):
    # scope_name includes the enclosing variable scope; module_name does not.
    unique_name = "unique_name"
    with tf.variable_scope("scope"):
      mlp = snt.nets.MLP(name=unique_name, output_sizes=self.output_sizes)
    self.assertEqual(mlp.scope_name, "scope/" + unique_name)
    self.assertEqual(mlp.module_name, unique_name)

  @parameterized.named_parameters(
      ("MLPNoFinalActBias", False, True),
      ("MLPNoFinalActNoBias", False, False),
      ("MLPFinalActBias", True, True),
      ("MLPFinalActNoBias", True, False),
  )
  def testConstructor(self, activate_final, use_bias):
    # Invalid constructor arguments must raise with informative messages.
    with self.assertRaisesRegexp(ValueError, "output_sizes must not be empty"):
      mlp = snt.nets.MLP(name=self.module_name,
                         output_sizes=[],
                         activate_final=activate_final,
                         use_bias=use_bias)
    with self.assertRaisesRegexp(KeyError, "Invalid initializer keys.*"):
      mlp = snt.nets.MLP(
          name=self.module_name,
          output_sizes=self.output_sizes,
          initializers={"not_w": tf.truncated_normal_initializer(stddev=1.0)},
          activate_final=activate_final,
          use_bias=use_bias)
    with self.assertRaisesRegexp(TypeError,
                                 "Initializer for 'w' is not a callable "
                                 "function or dictionary"):
      mlp = snt.nets.MLP(name=self.module_name,
                         output_sizes=self.output_sizes,
                         initializers={"w": tf.zeros([1, 2, 3])},
                         activate_final=activate_final,
                         use_bias=use_bias)
    with self.assertRaisesRegexp(TypeError,
                                 "Input 'activation' must be callable"):
      mlp = snt.nets.MLP(name=self.module_name,
                         output_sizes=self.output_sizes,
                         activation="not_a_function",
                         activate_final=activate_final,
                         use_bias=use_bias)
    with self.assertRaisesRegexp(TypeError,
                                 "output_sizes must be iterable"):
      mlp = snt.nets.MLP(name=self.module_name,
                         output_sizes=None,
                         activate_final=activate_final,
                         use_bias=use_bias)
    # A fully-specified valid construction round-trips its configuration.
    mlp = snt.nets.MLP(name=self.module_name,
                       output_sizes=self.output_sizes,
                       initializers=self.initializers,
                       partitioners=self.partitioners,
                       regularizers=self.regularizers,
                       activate_final=activate_final,
                       use_bias=use_bias)
    self.assertEqual(self.initializers, mlp.initializers)
    self.assertEqual(self.regularizers, mlp.regularizers)
    self.assertEqual(self.partitioners, mlp.partitioners)
    self.assertEqual(len(mlp.layers), len(self.output_sizes))
    for i in range(0, len(mlp.layers)):
      self.assertEqual(mlp.layers[i].output_size, self.output_sizes[i])

  @parameterized.named_parameters(
      ("MLPNoFinalActBias", False, True),
      ("MLPNoFinalActNoBias", False, False),
      ("MLPFinalActBias", True, True),
      ("MLPFinalActNoBias", True, False),
  )
  def testActivateBiasFlags(self, activate_final, use_bias):
    mlp = snt.nets.MLP(name=self.module_name,
                       output_sizes=self.output_sizes,
                       activate_final=activate_final,
                       use_bias=use_bias)
    inputs = tf.placeholder(tf.float32,
                            shape=[self.batch_size, self.input_size])
    net = mlp(inputs)
    # The final graph op type reveals whether the last layer was
    # activated (Relu) and/or biased (Add) or neither (MatMul).
    if activate_final:
      self.assertEqual(net.op.type, "Relu")
    elif use_bias:
      self.assertEqual(net.op.type, "Add")
    else:
      self.assertEqual(net.op.type, "MatMul")
    # One 'w' per layer, plus one 'b' per layer when biased.
    variables = mlp.get_variables()
    if use_bias:
      self.assertEqual(len(variables), len(self.output_sizes) * 2)
    else:
      self.assertEqual(len(variables), len(self.output_sizes))

  def testShape(self):
    inputs = tf.placeholder(tf.float32,
                            shape=[self.batch_size, self.input_size])
    mlp = snt.nets.MLP(name=self.module_name, output_sizes=self.output_sizes)
    output = mlp(inputs)
    self.assertTrue(output.get_shape().is_compatible_with(
        [self.batch_size, self.output_sizes[-1]]))
    self.assertEqual((self.batch_size, self.input_size), mlp.input_shape)
    self.assertEqual(self.output_sizes, list(mlp.output_sizes))

  @parameterized.named_parameters(
      ("MLPNoFinalActBias", False, True),
      ("MLPNoFinalActNoBias", False, False),
      ("MLPFinalActBias", True, True),
      ("MLPFinalActNoBias", True, False),
  )
  def testRegularizersInRegularizationLosses(self, active_final, use_bias):
    # NOTE(review): first parameter is spelled 'active_final' and unused;
    # the parameterization still exercises both use_bias settings.
    if use_bias:
      regularizers = {"w": tf.contrib.layers.l1_regularizer(scale=0.5),
                      "b": tf.contrib.layers.l2_regularizer(scale=0.5)}
    else:
      regularizers = {"w": tf.contrib.layers.l1_regularizer(scale=0.5)}
    inputs = tf.placeholder(tf.float32,
                            shape=[self.batch_size, self.input_size])
    mlp = snt.nets.MLP(name=self.module_name, output_sizes=self.output_sizes,
                       regularizers=regularizers)
    mlp(inputs)
    # Connecting the module must register the losses in the graph collection.
    graph_regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    self.assertRegexpMatches(graph_regularizers[0].name, ".*l1_regularizer.*")
    if use_bias:
      self.assertRegexpMatches(graph_regularizers[1].name, ".*l2_regularizer.*")

  @parameterized.named_parameters(
      ("MLPNoFinalActBias", False, True),
      ("MLPNoFinalActNoBias", False, False),
      ("MLPFinalActBias", True, True),
      ("MLPFinalActNoBias", True, False),
  )
  def testTranspose(self, activate_final, use_bias):
    with tf.variable_scope("scope1"):
      mlp = snt.nets.MLP(name=self.module_name,
                         output_sizes=self.output_sizes,
                         activate_final=activate_final,
                         use_bias=use_bias)
    with tf.variable_scope("scope2"):
      mlp_transpose = mlp.transpose()
    self.assertEqual("scope1/" + self.module_name, mlp.scope_name)
    self.assertEqual(self.module_name, mlp.module_name)
    self.assertEqual("scope2/" + self.module_name + "_transpose",
                     mlp_transpose.scope_name)
    self.assertEqual(self.module_name + "_transpose",
                     mlp_transpose.module_name)
    input_to_mlp = tf.placeholder(tf.float32,
                                  shape=[self.batch_size, self.input_size])
    # The transpose cannot be connected before the forward module has
    # instantiated its variables.
    with self.assertRaisesRegexp(snt.Error,
                                 "Variables in {} not instantiated yet, "
                                 "__call__ the module first."
                                 .format(mlp.layers[-1].scope_name)):
      mlp_transpose(input_to_mlp)
    mlp_transpose = mlp.transpose(name="another_mlp_transpose")
    mlp_out = mlp(input_to_mlp)
    mlp_transposed_output = mlp_transpose(mlp_out)
    # Transposing maps the output back to the input shape and inherits
    # the use_bias/activate_final flags.
    self.assertEqual(mlp_transposed_output.get_shape(),
                     input_to_mlp.get_shape())
    self.assertEqual(mlp_transpose.use_bias, mlp.use_bias)
    self.assertEqual(mlp_transpose.activate_final, mlp.activate_final)
    if activate_final:
      self.assertEqual(mlp_transposed_output.op.type, "Relu")
    elif use_bias:
      self.assertEqual(mlp_transposed_output.op.type, "Add")
    else:
      self.assertEqual(mlp_transposed_output.op.type, "MatMul")
    # Layer i of the transpose mirrors layer -1-i of the forward MLP.
    for i in range(0, len(mlp.layers)):
      self.assertEqual(mlp_transpose.layers[i].output_size,
                       mlp.layers[-1 - i].input_shape[1])
    data = np.random.rand(self.batch_size, self.input_size)
    init = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init)
      sess.run(mlp_transposed_output, feed_dict={input_to_mlp: data})
    variables = mlp_transpose.get_variables()
    if use_bias:
      self.assertEqual(len(variables), len(self.output_sizes) * 2)
    else:
      self.assertEqual(len(variables), len(self.output_sizes))
    # Test transpose method's activate_final arg.
    mlp_activate_final = mlp.transpose(activate_final=True)
    mlp_no_activate_final = mlp.transpose(activate_final=False)
    mlp_inherit_activate_final = mlp.transpose()
    self.assertEqual(True, mlp_activate_final.activate_final)
    self.assertEqual(False, mlp_no_activate_final.activate_final)
    self.assertEqual(mlp.activate_final,
                     mlp_inherit_activate_final.activate_final)

  def testVariableMap(self):
    """Tests for regressions in variable names."""
    use_bias = True
    var_names_w = [
        u"mlp/linear_0/w:0",
        u"mlp/linear_1/w:0",
        u"mlp/linear_2/w:0",
    ]
    var_names_b = [
        u"mlp/linear_0/b:0",
        u"mlp/linear_1/b:0",
        u"mlp/linear_2/b:0",
    ]
    correct_variable_names = set(var_names_w + var_names_b)
    mlp = snt.nets.MLP(name=self.module_name,
                       output_sizes=self.output_sizes,
                       activate_final=False,
                       use_bias=use_bias)
    input_shape = [10, 100]
    input_to_net = tf.placeholder(tf.float32, shape=input_shape)
    _ = mlp(input_to_net)
    variable_names = [var.name for var in mlp.get_variables()]
    self.assertEqual(set(variable_names), set(correct_variable_names))

  def testCustomGettersUsed(self):
    pi = 3.1415
    def get_pi(getter, *args, **kwargs):
      """A custom getter which sets all variables to pi."""
      variable = getter(*args, **kwargs)
      return variable * 0.0 + pi
    mlpi = snt.nets.MLP(output_sizes=[10], custom_getter=get_pi)
    mlpi(tf.zeros(shape=(2, 1)))
    mlp_variables = [mlpi.layers[0].w, mlpi.layers[0].b]
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      # Reads go through the custom getter, so every element is pi
      # regardless of the underlying initialization.
      for var_value in sess.run(mlp_variables):
        self.assertAllClose(var_value, np.zeros_like(var_value)+pi)
# Run the test suite when executed directly.
if __name__ == "__main__":
  tf.test.main()
| |
#!/usr/bin/python2.7
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for template_helpers."""
__author__ = 'aiuto@google.com (Tony Aiuto)'
import hashlib
import os
import textwrap
from google.apputils import basetest
# pylint: disable=unused-import
from googleapis.codegen import django_helpers
from googleapis.codegen import template_helpers
from django import template as django_template # pylint: disable=g-bad-import-order
class TemplateHelpersTest(basetest.TestCase):
_TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'testdata')
def testExtractCommentPrefix(self):
    # The extracted prefix keeps any leading indentation and the comment
    # marker, but not the space that follows it.
    self.assertEquals(' *',
                      template_helpers._ExtractCommentPrefix(' * hello'))
    self.assertEquals(' *',
                      template_helpers._ExtractCommentPrefix(' *hello'))
    self.assertEquals('//',
                      template_helpers._ExtractCommentPrefix('// hello'))
    self.assertEquals('#',
                      template_helpers._ExtractCommentPrefix('# hello'))
    self.assertEquals(' #',
                      template_helpers._ExtractCommentPrefix(' # hello'))
def testDivideIntoBlocks(self):
    # A comment line that is only the prefix acts as a block separator;
    # leading/trailing separator lines produce no empty blocks.
    # NOTE(review): the original indentation inside this test string was
    # lost in extraction; the ' //' prefix argument implies the comment
    # lines were indented -- verify against the repository copy.
    test = """
// block 1
//
// block 2a
// block 2a
// block 3
// """
    blocks = []
    for block in template_helpers._DivideIntoBlocks(test.split('\n'),
                                                    ' //'):
        blocks.append(block)
    self.assertEquals(3, len(blocks))
    self.assertEquals(1, len(blocks[0]))
    self.assertEquals(2, len(blocks[1]))
    self.assertEquals(1, len(blocks[2]))
def testCommentFragment(self):
    # 150 characters of repeating '123456789 ' wrapped as a Java comment.
    value = '123456789 ' * 15
    indent = 6
    # What we expect is that 9 of the sequences above will fit on the first
    # line, then we wrap. It's only 89 because the trailing space is trimmed.
    expected = value[:89] + '\n' + (' ' * indent) + ' * ' + value[90:-1]
    self.assertEquals(expected,
                      template_helpers.java_comment_fragment(value, indent))
def testCommentBlockJavaDoc(self):
alphabet = 'abcdefghijklmnopqrstuvwxyz'
value = """
* %s %s
* %s %s %s
* """ % (alphabet, alphabet, alphabet, alphabet, alphabet)
expected = """
* %s %s %s
* %s %s""" % (alphabet, alphabet, alphabet, alphabet, alphabet)
self.assertEquals(expected, template_helpers.block_comment(value))
value = """
// %s %s
// %s %s %s
// """ % (alphabet, alphabet, alphabet, alphabet, alphabet)
expected = """
// %s %s %s
// %s %s""" % (alphabet, alphabet, alphabet, alphabet, alphabet)
self.assertEquals(expected, template_helpers.block_comment(value))
value = '// %s %s %s %s' % ((alphabet,) * 4)
expected = '// %s %s %s\n// %s' % ((alphabet,) * 4)
self.assertEquals(expected, template_helpers.block_comment(value))
def testCommentBlockPerLanguage(self):
text = ('Confectis bellis quinquiens triumphavit, post devictum '
'Scipionem quater eodem mense, sed interiectis diebus, et '
'rursus semel post superatos Pompei liberos.')
tmpl_tmpl = textwrap.dedent("""
{%% language %s %%}
{%% filter block_comment %%}
%s {{ text }}
{%% endfilter %%}
""")
def TestLanguage(language):
lang_defaults = template_helpers._language_defaults[language]
comment_start = lang_defaults[template_helpers._COMMENT_START]
line_width = lang_defaults[template_helpers._LINE_WIDTH]
source = tmpl_tmpl % (language, comment_start)
result = django_helpers._DjangoRenderTemplateSource(
source, {'text': text})
for line in result.splitlines():
len_line = len(line)
self.assertTrue(len_line <= line_width,
'%d should be less than %d for %s' % (
len_line, line_width, language))
for language in sorted(template_helpers._language_defaults):
TestLanguage(language)
def testNoblanklines(self):
self.assertEquals('a\nb', template_helpers.noblanklines('a\nb'))
self.assertEquals('a\nb', template_helpers.noblanklines('a\nb\n\n'))
self.assertEquals('a\nb', template_helpers.noblanklines('\na\n\nb\n'))
def _GetContext(self, data=None):
return django_template.Context(data or {})
def testCollapseNewLines(self):
context = self._GetContext()
class NodesList(object):
def __init__(self, ret):
self._ret = ret
def render(self, unused_context): # pylint: disable=g-bad-name
return self._ret
collapse_node = template_helpers.CollapsedNewLinesNode(NodesList('ab'))
self.assertEquals('ab', collapse_node.render(context))
collapse_node = template_helpers.CollapsedNewLinesNode(NodesList('a\nb'))
self.assertEquals('a\nb', collapse_node.render(context))
collapse_node = template_helpers.CollapsedNewLinesNode(NodesList('a\n\nb'))
self.assertEquals('a\n\nb', collapse_node.render(context))
collapse_node = template_helpers.CollapsedNewLinesNode(
NodesList('a\n\n\nb'))
self.assertEquals('a\n\nb', collapse_node.render(context))
collapse_node = template_helpers.CollapsedNewLinesNode(
NodesList('a\n\n\n\nb'))
self.assertEquals('a\n\nb', collapse_node.render(context))
def testDocCommentBlocks(self):
def Render(language, text, block):
context = self._GetContext()
lang_node = template_helpers.LanguageNode(language)
lang_node.render(context)
doc_comment_node = template_helpers.DocCommentNode(
text=text, comment_type='doc', wrap_blocks=block)
return doc_comment_node.render(context)
s1 = [('We can all agree that this comment is '
'almost certain to be too long for a '),
'single line due to its excessive verbosity.']
s2 = 'This is short and sweet.'
text = '\n'.join([''.join(s1), s2])
no_blocks = Render('cpp', text, False)
with_blocks = Render('cpp', text, True)
self.assertNotEqual(no_blocks, with_blocks)
self.assertTrue((' * %s' % s2) in no_blocks)
self.assertTrue(s1[1] + ' ' + s2 in with_blocks)
def testWrapInComment(self):
text = textwrap.dedent("""\
Line one.
Line three.
Line five.
""")
expected = textwrap.dedent("""\
/**
* Line one.
*
* Line three.
*
* Line five.
*/""")
for should_wrap in (True, False):
wrapped = template_helpers._WrapInComment(
text, wrap_blocks=should_wrap, start_prefix='/**',
continue_prefix=' * ',
comment_end=' */', begin_tag='', end_tag='',
available_width=80)
self.assertEquals(expected, wrapped)
def testDocCommmentsEol(self):
source_tmpl = textwrap.dedent("""\
{% language java %}
{% doc_comment XXX %}
Sets the '<code>{{ p.wireName }}</code>' attribute.
{% if p.deprecated %}
@deprecated
{% endif %}
@param[in] value {{ p.description }}
{% enddoc_comment %}
""")
for should_block in ('block', 'noblock'):
source = source_tmpl.replace('XXX', should_block)
template = django_template.Template(source)
context = self._GetContext({
'p': {
'deprecated': True,
'wireName': 'foobar',
'description': 'A description.',
}})
rendered = template.render(context)
expected = (
'\n'
'/**\n'
' * Sets the \'<code>foobar</code>\' attribute.\n'
' *\n'
' * @deprecated\n'
' *\n'
' * @param[in] value A description.\n'
' */\n')
self.assertEquals(expected, rendered, 'should block is %s' % should_block)
def testDocComments(self):
def TryDocComment(language, input_text, expected):
context = self._GetContext()
lang_node = template_helpers.LanguageNode(language)
lang_node.render(context)
context['_LINE_WIDTH'] = 50 # to make expected easier to read
doc_comment_node = template_helpers.DocCommentNode(
text=input_text, comment_type='doc')
self.assertEquals(expected, doc_comment_node.render(context))
alphabet = 'abcdefghijklmnopqrstuvwxyz'
# single line java and php
value = '%s' % alphabet
expected = '/** %s */' % alphabet
TryDocComment('java', value, expected)
TryDocComment('php', value, expected)
# single line csharp and cpp
value = 'Hello, World!'
TryDocComment('cpp', value, '/** %s */' % value)
TryDocComment('csharp', value, '/// <summary>%s</summary>' % value)
# single line but with '\n' in it
value = '123\n456'
expected_expansion = '123 456'
# NOTE(user): 20130111
# Java and PHP have their own special methods for handling comments.
# I think this case is wrong, but am not addressing it at this time
# since it is still syntactically correct.
TryDocComment('java', value, '/**\n * %s\n */' % expected_expansion)
TryDocComment('php', value, '/**\n * %s\n */' % expected_expansion)
TryDocComment('cpp', value, '/**\n * %s\n */' % expected_expansion)
TryDocComment('csharp', value,
'/// <summary>%s</summary>' % expected_expansion)
# multi line java and php
value = '%s %s %s' % (alphabet, alphabet, alphabet)
expected = '/**\n * %s\n * %s\n * %s\n */' % (alphabet, alphabet, alphabet)
TryDocComment('java', value, expected)
TryDocComment('php', value, expected)
# single line csharp and c++
value = alphabet
TryDocComment('csharp', value, '/// <summary>%s</summary>' % value)
TryDocComment('cpp', value, '/** %s */' % value)
# multi line csharp
value = '%s %s %s' % (alphabet, alphabet, alphabet)
expected_expansion = '%s\n/// %s\n/// %s' % (alphabet, alphabet, alphabet)
TryDocComment('csharp', value,
'/// <summary>%s</summary>' % expected_expansion)
expected_expansion = '%s\n * %s\n * %s' % (alphabet, alphabet, alphabet)
TryDocComment('cpp', value, '/**\n * %s\n */' % expected_expansion)
def testCallTemplate(self):
source = 'abc {% call_template _call_test foo bar qux api.xxx %} def'
template = django_template.Template(source)
rendered = template.render(self._GetContext({
'template_dir': self._TEST_DATA_DIR,
'api': {
'xxx': 'yyy'
},
'bar': 'baz'
}))
self.assertEquals('abc 1baz1 2yyy2 3yyy3 def', rendered)
def testCallTemplateOutOfDirectory(self):
source = 'abc {% call_template ../_out_of_dir %} def'
template = django_template.Template(source)
rendered = template.render(self._GetContext({
'template_dir': os.path.join(self._TEST_DATA_DIR, 'languages'),
}))
self.assertEquals('abc OUT OF DIR def', rendered)
def testCallTemplateWithEqualsSyntax(self):
source = 'abc {% call_template _call_test foo=bar qux=api.xxx %} def'
template = django_template.Template(source)
rendered = template.render(self._GetContext({
'template_dir': self._TEST_DATA_DIR,
'api': {
'xxx': 'yyy'
},
'bar': 'baz'
}))
self.assertEquals('abc 1baz1 2yyy2 3yyy3 def', rendered)
def testCallTemplateRestoreVar(self):
"""Make sure variable stacking happens correctly on call_template."""
source = 'abc {% call_template _call_test foo bar qux api.xxx %} {{foo}}'
template = django_template.Template(source)
rendered = template.render(self._GetContext({
'template_dir': self._TEST_DATA_DIR,
'api': {
'xxx': 'yyy'
},
'bar': 'baz',
'foo': 'OrigFoo'
}))
self.assertEquals('abc 1baz1 2yyy2 3yyy3 OrigFoo', rendered)
def testParamList(self):
source = """method({% parameter_list %}
{% parameter %}int a{% end_parameter%}
{% parameter %}
{% if false %}
The condition fails, so the entire parameter is empty.
{% endif %}
{% end_parameter %}
{% parameter %}string b{% end_parameter %}
{% end_parameter_list %})"""
template = django_template.Template(source)
rendered = template.render(self._GetContext())
self.assertEquals('method(int a, string b)', rendered)
def testParamEscaping(self):
source = """method({% parameter_list %}
{% parameter %}JsonCppArray<string> a{% end_parameter %}
{% end_parameter_list %})"""
template = django_template.Template(source)
rendered = template.render(self._GetContext({}))
self.assertEquals('method(JsonCppArray<string> a)', rendered)
source = """method({% parameter_list %}
{% parameter %}{{ foo }} a{% end_parameter %}
{% end_parameter_list %})"""
template = django_template.Template(source)
rendered = template.render(self._GetContext(
{'foo': 'JsonCppArray<string>'}))
# HTML escaping has not been turned off
self.assertEquals('method(JsonCppArray<string> a)', rendered)
source = '{% language cpp %}' + source
template = django_template.Template(source)
rendered = template.render(self._GetContext(
{'foo': 'JsonCppArray<string>'}))
self.assertEquals('method(JsonCppArray<string> a)', rendered)
source = """{% language cpp %}
{% call_template _escape_test foo foo %}
"""
template = django_template.Template(source)
rendered = template.render(self._GetContext(
{'template_dir': self._TEST_DATA_DIR,
'foo': 'JsonCppArray<string>'})).strip()
self.assertEquals('method(JsonCppArray<string> a)', rendered)
def testImportWithoutManager(self):
expected = """import hello_world
import abc"""
source = '{% imports x %}\n' + expected + '\n{% endimports %}'
template = django_template.Template(source)
rendered = template.render(self._GetContext({'x': {}}))
self.assertEquals(expected, rendered)
def testNoEol(self):
def TryIt(source, expected, ctxt=None):
template = django_template.Template(source)
rendered = template.render(self._GetContext(ctxt))
self.assertEquals(expected, rendered)
source = textwrap.dedent("""\
{% noeol %}
public{% sp %}
get
{{ name }}() {
{% eol %}
return
{% sp %}
{{ x }};
{% if thing %}{% eol %}{% endif %}
}
{% endnoeol %}""")
expected = 'public getFoo() {\n return foo;\n}'
TryIt(source, expected, {'name': 'Foo', 'x': 'foo', 'thing': '1'})
source = textwrap.dedent("""\
{% noeol %}
First {{ name }} Later
{% endnoeol %}""")
expected = 'First Bob Later'
TryIt(source, expected, {'name': 'Bob'})
def testNoBlank(self):
def TryIt(source, expected, ctxt=None):
template = django_template.Template(source)
rendered = template.render(self._GetContext(ctxt))
self.assertEquals(expected, rendered)
source = textwrap.dedent("""\
{% noblank %}
This is all going to be fine.
Don't be alarmed.
There are no empty lines here.
{% endnoblank %}""")
expected = ('This is all going to be fine.\n'
'Don\'t be alarmed.\n'
'There are no empty lines here.\n')
TryIt(source, expected, {})
source = textwrap.dedent("""\
{% noblank %}
This is all going to be fine.
Don't be alarmed.
There is one empty line here.
{% eol %}
{% endnoblank %}""")
expected = ('This is all going to be fine.\n'
'Don\'t be alarmed.\n'
'There is one empty line here.\n\n')
TryIt(source, expected, {})
def testNestedNoBlank(self):
source = textwrap.dedent("""\
{% noblank %}
Foo
{% noeol %}
Bar
{% eol %}
{% endnoeol %}
{% eol %}
{% endnoblank %}X
""")
expected = 'Foo\nBar\n\nX\n'
template = django_template.Template(source)
self.assertEquals(expected, template.render(self._GetContext({})))
def testNoBlankRecurse(self):
def TryIt(source, expected):
ctxt = self._GetContext({
'template_dir': self._TEST_DATA_DIR
})
template = django_template.Template(source)
gotten = template.render(ctxt)
self.assertEquals(expected, gotten)
recurse_source = textwrap.dedent("""\
{% noblank recurse %}
{% call_template _eoltest %}
{% endnoblank %}
""")
recurse_expected = '|\n|\nX\nX\n'
TryIt(recurse_source, recurse_expected)
norecurse_source = textwrap.dedent("""\
{% noblank %}
{% call_template _eoltest %}
{% endnoblank %}
""")
norecurse_expected = '|\n|\n\n\nX\n\n\nX\n'
TryIt(norecurse_source, norecurse_expected)
recurse_source = textwrap.dedent("""\
{% noblank recurse %}
{% call_template _eoltest2 %}
{% endnoblank %}
""")
recurse_expected = '|\n|\n\n\nX\nX\n'
TryIt(recurse_source, recurse_expected)
norecurse_source = textwrap.dedent("""\
{% noblank %}
{% call_template _eoltest2 %}
{% endnoblank %}
""")
norecurse_expected = '|\n|\n\n\nX\n\nX\n'
TryIt(norecurse_source, norecurse_expected)
def testLiteral(self):
def TryTestLiteral(language, input_text, expected):
context = self._GetContext({
'foo': 'foo\nb"a$r',
'bar': 'baz',
'pattern': '\\d{4}-\\d{2}-\\d{2}'})
lang_node = template_helpers.LanguageNode(language)
lang_node.render(context)
context['_LINE_WIDTH'] = 50 # to make expected easier to read
node = template_helpers.LiteralStringNode(input_text)
self.assertEquals(expected, node.render(context))
TryTestLiteral('dart', ['foo', 'bar'], '"foo\\nb\\"a\\$rbaz"')
TryTestLiteral('java', ['foo'], '"foo\\nb\\"a$r"')
TryTestLiteral('java', ['bar'], '"baz"')
TryTestLiteral('java', ['pattern'], '"\\\\d{4}-\\\\d{2}-\\\\d{2}"')
TryTestLiteral('objc', ['foo'], '@"foo\\nb\\"a$r"')
TryTestLiteral('php', ['foo', 'bar'], """'foo\nb"a$rbaz'""")
def testCopyright(self):
copyright_text = 'MY COPYRIGHT TEXT'
expected_license_preamble = 'Licensed under the Apache License'
template = django_template.Template(
'{% language java %}{% copyright_block %}')
context = self._GetContext({
'template_dir': self._TEST_DATA_DIR,
'api': {},
})
text_without_copyright = template.render(context)
license_pos = text_without_copyright.find(expected_license_preamble)
self.assertLess(3, license_pos)
self.assertEquals(-1, text_without_copyright.find(copyright_text))
context['api']['copyright'] = copyright_text
text_with_copyright = template.render(context)
license_pos_with_copyright = text_with_copyright.find(
expected_license_preamble)
self.assertLess(license_pos, license_pos_with_copyright)
copyright_pos = text_with_copyright.find(copyright_text)
self.assertEquals(license_pos, copyright_pos)
def testGetArgFromToken(self):
# This tests indirectly by going through a few tags known to call
# _GetArgFromToken. That expedient avoids having to create a token stream
# at a low level.
# try a good one
template = django_template.Template('{% camel_case foo %}')
context = self._GetContext({'foo': 'hello_world'})
self.assertEquals('HelloWorld', template.render(context))
# Missing the arg
for tag in ['language', 'comment_if', 'doc_comment_if']:
try:
template = django_template.Template('{%% %s %%}' % tag)
self.fail('TemplateSyntaxError not raised')
except django_template.TemplateSyntaxError as e:
self.assertEquals('tag requires a single argument: %s' % tag, str(e))
def testCache(self):
loader = template_helpers.CachingTemplateLoader()
template_dir = os.path.join(self._TEST_DATA_DIR, 'languages')
test_path = os.path.join(template_dir, 'php/1.0dev/test.tmpl')
stable_path = os.path.join(template_dir, 'php/1.0/test.tmpl')
loader.GetTemplate(test_path, template_dir)
loader.GetTemplate(stable_path, template_dir)
self.assertTrue(stable_path in loader._cache)
self.assertFalse(test_path in loader._cache)
def testHalt(self):
# See that it raises the error
template = django_template.Template('{% halt %}')
context = self._GetContext({})
self.assertRaises(
template_helpers.Halt, template.render, context)
# But make sure it raises on execution, not parsing. :-)
template = django_template.Template('{% if false %}{% halt %}{% endif %}OK')
context = self._GetContext({})
self.assertEquals('OK', template.render(context))
def testBool(self):
source = '{% bool x %}|{% bool y %}'
def Test(language, x):
ctxt = self._GetContext({'_LANGUAGE': language, 'x': x, 'y': not x})
template = django_template.Template(source)
key = template_helpers._BOOLEAN_LITERALS
vals = template_helpers._language_defaults[language].get(
key, template_helpers._defaults[key])
if x:
# If x, true precedes false in the output.
vals = vals[::-1]
expected = '|'.join(vals)
self.assertEquals(expected, template.render(ctxt))
for language in template_helpers._language_defaults:
for value in (True, False, 'truthy string', ''):
Test(language, value)
def testDivChecksum(self):
source = '<p>This is some test text.</p>'
context = self._GetContext()
template = django_template.Template(
'{% checksummed_div %}'
'someId'
'{% divbody %}' + source + '{% endchecksummed_div %}')
checksum = hashlib.sha1(source).hexdigest()
expected = ('<div id="someId" checksum="%s">' % checksum +
source +
'</div>')
self.assertEquals(expected, template.render(context))
def testWrite(self):
self.name_to_content = {}
def MyWriter(name, content):
"""Capture the write event."""
self.name_to_content[name] = content
template = django_template.Template(
'a{% write file1 %}foo{% endwrite %}'
'b{% write file2 %}bar{% endwrite %}')
context = self._GetContext({
template_helpers.FILE_WRITER: MyWriter,
'file1': 'x',
'file2': 'y',
})
self.assertEquals('ab', template.render(context))
self.assertEquals('foo', self.name_to_content['x'])
self.assertEquals('bar', self.name_to_content['y'])
class TemplateGlobalsTest(basetest.TestCase):
  """Tests for the module-global current-context accessors."""

  def testSetContext(self):
    """SetCurrentContext should install a context only for the with-block."""
    # Outside the manager there must be no active context.
    self.assertIsNone(template_helpers.GetCurrentContext())
    payload = {'key': 'value'}
    with template_helpers.SetCurrentContext(payload):
      active = template_helpers.GetCurrentContext()
      self.assertIsNotNone(active)
      self.assertEquals('value', active['key'])
    # Leaving the with-block restores the empty state.
    self.assertIsNone(template_helpers.GetCurrentContext())
# Run all tests via google-apputils' basetest when executed directly.
if __name__ == '__main__':
  basetest.main()
| |
import logging
import base64
import json
import re
from django.core.urlresolvers import resolve
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth import authenticate, REDIRECT_FIELD_NAME, logout
from django.utils.datastructures import MultiValueDictKeyError
from django.utils.six.moves import StringIO
from django.conf import settings
from django.template.response import TemplateResponse
from django.contrib.auth.models import User
from django.views.decorators.debug import sensitive_post_parameters
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.core.urlresolvers import Resolver404
from rest_framework import viewsets
from rest_framework.parsers import FormParser
from rest_framework.renderers import JSONRenderer
from rest_framework_expiring_authtoken.models import ExpiringToken
from .models import CRUDFilterModel, CRUDException
from .serializers import AbstractModelSerializer
from .forms import RoleForm
from .managers import CRUDManager
from django.contrib.sites.shortcuts import get_current_site
logger = logging.getLogger('crud_filters')
@sensitive_post_parameters()
@csrf_protect
@never_cache
def choose_role(request, template_name='choose_role.html',
                redirect_field_name=REDIRECT_FIELD_NAME,
                role_form=RoleForm,
                current_app=None, extra_context=None):
    """Display the role-selection form and store the chosen CRUD role.

    On GET, renders ``template_name`` with an unbound ``role_form``.  On a
    valid POST, stores the chosen role in the session under ``'crud-role'``
    (logging the user out first when the role is "anonymous") and redirects
    to the filter-selection page, forwarding the original ``?next=`` target.

    Args:
        request: the current HttpRequest.
        template_name: template used to render the form.
        redirect_field_name: query/POST parameter holding the final target.
        role_form: form class used to validate the submission.
        current_app: passed through to TemplateResponse.
        extra_context: optional dict merged into the template context.

    Returns:
        HttpResponseRedirect on successful POST, TemplateResponse otherwise.
    """
    redirect_to = request.POST.get(redirect_field_name,
                                   request.GET.get(redirect_field_name, ''))
    if request.method == "POST":
        form = role_form(request, data=request.POST)
        if form.is_valid():
            if form.data['role'].lower() == "anonymous":
                # An anonymous role must not keep an authenticated session.
                logout(request)
            request.session.update({'crud-role': form.data['role']})
            # Redirect to the "choose_filters" page, preserving the "?next="
            # target.  The target is percent-encoded so characters such as
            # '&', '#', or spaces in it cannot corrupt the query string
            # (previously it was concatenated raw).
            from django.utils.six.moves.urllib.parse import urlencode
            url = "/choose_filters/?" + urlencode({redirect_field_name: redirect_to})
            return HttpResponseRedirect(url)
    else:
        form = role_form(request)
    current_site = get_current_site(request)
    context = {
        'form': form,
        redirect_field_name: redirect_to,
        'site': current_site,
        'site_name': current_site.name,
    }
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context,
                            current_app=current_app)
@sensitive_post_parameters()
@csrf_protect
@never_cache
def choose_filters(request, template_name='choose_filters.html',
                   redirect_field_name=REDIRECT_FIELD_NAME,
                   role_form=RoleForm,
                   current_app=None, extra_context=None):
    """Display the filter-selection form and store the chosen CRUD filter.

    On GET, renders ``template_name`` with an unbound ``role_form``.  On a
    valid POST, stores the submitted filter (or ``"__default"`` when none was
    chosen) in the session under ``'crud-filters'`` and redirects to the
    original ``?next=`` target.

    Args:
        request: the current HttpRequest.
        template_name: template used to render the form.
        redirect_field_name: query/POST parameter holding the redirect target.
        role_form: form class used to validate the submission.
        current_app: passed through to TemplateResponse.
        extra_context: optional dict merged into the template context.

    Returns:
        HttpResponseRedirect on successful POST, TemplateResponse otherwise.
    """
    redirect_to = request.POST.get(redirect_field_name,
                                   request.GET.get(redirect_field_name, ''))
    if request.method == "POST":
        form = role_form(request, data=request.POST)
        if form.is_valid():
            if 'filter' in form.data.keys():
                filter_string = form.data['filter']
            else:
                filter_string = "__default"
            request.session.update({'crud-filters': filter_string})
            # SECURITY: redirect_to round-trips through the query string and
            # is attacker-controllable, so redirecting to it unchecked was an
            # open redirect.  Only same-host targets are allowed; anything
            # else falls back to the site root.
            from django.utils.http import is_safe_url
            if not is_safe_url(url=redirect_to, host=request.get_host()):
                redirect_to = '/'
            return HttpResponseRedirect(redirect_to)
    else:
        form = role_form(request)
    current_site = get_current_site(request)
    context = {
        'form': form,
        redirect_field_name: redirect_to,
        'site': current_site,
        'site_name': current_site.name,
    }
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context,
                            current_app=current_app)
class CRUDFilterModelViewSet(viewsets.ModelViewSet):
    """DRF ModelViewSet that also acts as middleware enforcing CRUD role/filter access."""

    # Defaults to none, and must be set by child class
    crud_model = None
    # ID of the object targeted by Update/Delete requests; populated during
    # dispatch by check_request_body_for_id / check_request_url_for_id.
    obj_id = None
    # By default, we implement an empty serializer. If the overriding crud_model is not an abstract model,
    # the user should override this variable or provide a get_serializer() function.
    serializer_class = AbstractModelSerializer
    # Allow the user to pass the "Role" header as a GET parameter (e.g. "?as_admin").
    # Insecure, and not recommended in the least.
    ALLOW_ROLE_GET_PARAMS = getattr(settings, "CRUD_ALLOW_ROLE_GET_PARAMS", False)
    def _parse_authentication(self):
        """
        Parse the authentication method and update the request object
        to include appropriately parsed authorization information.

        Supports "Basic <base64>" and "Token <key>" Authorization headers,
        falling back to the Django session when no header is present.
        Sets self.user on success; raises CRUDException (401) on bad or
        expired credentials.
        """
        # Raw Authorization header, e.g. "Basic <b64>" or "Token <key>".
        self.header_token = self.request.META.get('HTTP_AUTHORIZATION', None)
        if self.header_token is not None:
            auth = self.request.META['HTTP_AUTHORIZATION'].split()
            if len(auth) == 2:
                if auth[0].lower() == 'basic':
                    # Basic auth payload is base64("username:password").
                    uname, colon, passwd = base64.b64decode(auth[1]).decode("utf-8").partition(':')
                    self.user = authenticate(username=uname, password=passwd)
                    if self.user is None:
                        # Credentials were provided, but they were invalid (bad username/password).
                        logger.exception("User with username '{}' attempted to login with basic auth, but their credentials were invalid. ".format(uname))
                        raise CRUDException("Bad credentials", 401)
                elif auth[0].lower() == "token":
                    try:
                        token = auth[1]
                        token_obj = ExpiringToken.objects.get(key=token)
                        if token_obj.expired():
                            self.user = None
                            # Credentials were provided, but they were invalid (expired token).
                            logger.exception("Attempted login with expired token.")
                            raise CRUDException("Token has expired", 401)
                        else:
                            self.user = token_obj.user
                    except ExpiringToken.DoesNotExist:
                        self.user = None
                        # Credentials were provided, but they were invalid (bad or expired token).
                        logger.exception("User attempted to login with token auth, but their credentials were invalid. ")
                        raise CRUDException("Bad credentials", 401)
        elif '_auth_user_id' in self.request.session.keys():
            # No Authorization header: fall back to an existing Django session.
            self.user = User.objects.get(id=self.request.session['_auth_user_id'])
    def _enforce_role_access_control(self):
        """
        Resolve the CRUD filters for this request, then verify that the
        current user/role may perform the requested operation.

        Filter lookup order: "Filters" header, "CRUD-Filters" header,
        session, then (temporarily) GET parameters.  Raises CRUDException
        when the role/operation/filter combination is not permitted, or
        (403) when the targeted object is outside the permitted queryset.
        """
        try:
            self.request.crud_filters = self.request.META['HTTP_FILTERS'].strip(" ").split(",")
        except KeyError:
            try:
                self.request.crud_filters = self.request.META['HTTP_CRUD_FILTERS'].strip(" ").split(",")
            except KeyError:
                # Check session for filters
                self.request.crud_filters = []
                if 'crud-filters' in self.request.session.keys():
                    self.request.crud_filters.append(self.request.session['crud-filters'])
                else:
                    # We didn't find any filters in the headers, let's look at GET params
                    for key in self.request.GET.keys():
                        # Only grab filters that are legit CRUDFilters (soon, we'll remove this and rely solely on
                        # the crud_filters header):
                        filters_for_this_role = CRUDManager.get_filter_set_for_model(self.crud_model)['filter'][self.request.crud_role]
                        valid_filters_for_this_role = [filter_name for filter_name, filter_value in filters_for_this_role.items() if filter_value is not None and filter_name != "__default"]
                        if key in valid_filters_for_this_role:
                            self.request.crud_filters.append(key)
        # Lowercase the filters:
        if not hasattr(self.request, 'crud_filters') or len(self.request.crud_filters) == 0:
            self.request.crud_filters = ['__default']
        else:
            self.request.crud_filters = [f.lower() for f in self.request.crud_filters]
        # For Update and Delete requests, make sure we have an ID (properly formed request)
        # TODO: update should be able to touch multiple objects, or update entire querysets
        if self.request.crud_operation == 'U':
            self.check_request_body_for_id(self.request)
        elif self.request.crud_operation == 'D':
            self.check_request_url_for_id(self.request)
        # Cursory check to make sure we have permissions on this view:
        self.crud_model.check_for_permissions(self.request.user, self.request.crud_role, self.request.crud_operation, self.request.crud_filters)
        # Retrieve (GET) operations will perform get_queryset later
        # Create (POST) operations don't need to get a queryset
        if self.request.crud_operation in ['U', 'D']:
            # Check that the object in question is in the queryset
            if not self.check_object_for_permissions(self.request):
                logger.exception("Operation {} cannot be performed on requested object".format(self.request.crud_operation))
                raise CRUDException("Cannot perform this operation on this object.", status_code=403)
# For the time being, this only works with token and basic auth.
def process_view(self, request, view_func, view_args, view_kwargs):
"""
Perform simple authentication, then check that this user can use this role
to perform this action (on this item).
"""
if not hasattr(view_func, 'cls'):
return None
if not isinstance(view_func.cls(), CRUDFilterModelViewSet):
return None
# Create an instance of the ViewSet and get some variables from it.
self.crud_model = view_func.cls().crud_model
self.lookup_field = view_func.cls().lookup_field
# Perform some authentication (Token and Basic).
# TODO: figure out how to go AFTER DRF auth, so we have an authenticated user.
self.request = request
self.user = None
# CAUTION!!!
try:
self._parse_authentication()
except CRUDException as exception:
return HttpResponse(exception.message, status=exception.status_code)
if self.user is None:
self.request.crud_role = "anonymous"
else:
# Default to "authenticated"
self.request.crud_role = "authenticated"
self.request.user = self.user
try:
self.request.crud_role = self.request.META['HTTP_ROLE'].lower()
except KeyError:
try:
self.request.crud_role = self.request.META['HTTP_CRUD_ROLE'].lower()
except KeyError:
if 'crud-role' in request.session.keys():
self.request.crud_role = self.request.session['crud-role']
# We didn't find a role in the headers or session, let's look in the GET params.
elif self.ALLOW_ROLE_GET_PARAMS:
for key in self.request.GET.keys():
if key.startswith("as_"):
self.request.crud_role = key[3:].lower()
if self.crud_model is None:
logger.exception("CRUDFilterModel not specified on CRUDFilterModelViewSet {}".format(self))
raise CRUDException("You must specify a CRUDFilterModel for this CRUDFilterModelViewSet", 500)
return
elif not issubclass(self.crud_model, CRUDFilterModel):
logger.exception("CRUDFilterModel specified on CRUDFilterModelViewSet {} does not extend the CRUDFilter model".format(self))
raise CRUDException("crud_model for CRUDFilterModelViewSet must extend CRUDFilterModel", 500)
method = self.request.method
if method == 'POST':
self.request.crud_operation = 'C'
elif method in ['GET', 'HEAD']:
self.request.crud_operation = 'R'
elif method in ['PUT', 'PATCH']:
self.request.crud_operation = 'U'
elif method == 'DELETE':
self.request.crud_operation = 'D'
elif method == 'OPTIONS':
# TODO: tell the user what their options are here, given their desired role.
# e.g. return_options_menu_for_this_user()
return HttpResponse("Coming soon", status=405)
else:
return HttpResponse("Method not allowed", status=405)
try:
self._enforce_role_access_control()
except CRUDException as exception:
return HttpResponse(exception.message, status=exception.status_code)
# We're good, let's move on!
return None
def process_exception(self, request, exception):
"""
Middleware method to turn CRUDExceptions into proper HTTPResponses.
"""
# TODO: why is this not catching CRUD exceptions?? We shouldn't have to catch exceptions in process_view
if isinstance(exception, CRUDException):
return HttpResponse(exception.message, status=exception.status_code)
return None
    def process_response(self, request, response):
        """
        Middleware hook: when a browser (Accept: text/html) hits a
        CRUD-filtered endpoint, wrap the JSON payload in the browsable
        "api.html" template; otherwise pass the response through unchanged.
        """
        if 'HTTP_ACCEPT' in request.META.keys() and 'text/html' in request.META['HTTP_ACCEPT']:
            try:
                path = resolve(request.path)
            except Resolver404:
                # Unroutable path: nothing to decorate.
                pass
            else:
                # Only wrap views that belong to a CRUD-filtered ViewSet.
                if hasattr(path.func, 'cls') and hasattr(path.func.cls, 'crud_model'):
                    request._request = request
                    if not hasattr(response, 'data'):
                        # Non-DRF response (e.g. plain HttpResponse error body).
                        content = {"Error": response.content}
                    else:
                        renderer = JSONRenderer()
                        renderer_context = {'indent': 4}
                        content = renderer.render(response.data, "application/json", renderer_context)
                    renderer_context = {
                        'content': content,
                        'request': request,
                        'response': response,
                        'args': {},
                        'kwargs': {}
                    }
                    return TemplateResponse(request, "api.html", renderer_context).render()
        return response
def get_queryset(self):
"""
Overrides get_queryset for the ViewSet.
TODO: At startup, warn if any views are overriding this function.
"""
if self.request.crud_operation is None:
operation = 'R'
else:
operation = self.request.crud_operation
return self.try_to_get_queryset(self.request, operation)
def try_to_get_queryset(self, request, operation):
"""
Function to actually get queryset from the model. This is used by get_queryset, and
separately to check if Update and Delete operations can act on the object in question.
"""
if operation.upper() in ['U', 'D']:
if not self.obj_id:
self.check_request_url_for_id(request)
queryset = self.crud_model.get_queryset_or_false(request.user, request.crud_role, operation, filters=request.crud_filters, request=request, _id=self.obj_id, lookup_field=self.lookup_field)
else:
queryset = self.crud_model.get_queryset_or_false(request.user, request.crud_role, operation, filters=request.crud_filters, request=request, lookup_field=self.lookup_field)
if queryset is False:
raise CRUDException("Operation is not available for this user", status_code=403)
else:
return queryset
def check_object_for_permissions(self, request):
"""
If we've gotten this far, this user with this role is allowed to
use this method on this view. Now we just need to make sure the
object they're trying to act on is in the queryset we've defined
for this view.
"""
if not self.obj_id:
raise Exception("check_object_for_permissions called without a proper self.obj_id")
# Build query based on lookup_field
kwargs = {'{0}'.format(self.lookup_field): self.obj_id}
if self.get_queryset().filter(**kwargs).count() > 0:
return True
else:
return False
def check_request_body_for_id(self, request):
"""
An Update (PUT/PATCH) request must contain the ID of the object
to be updated. Later, we can allow Update of multiple objects, or the
entire queryset.
"""
id = None
try:
data = request.data
id = data[self.lookup_field]
except (AttributeError, MultiValueDictKeyError):
try:
if "application/json" in request.META['CONTENT_TYPE']:
str_data = request.body.decode('utf-8')
# Make this into a properly-formatted JSON string.
id = self.id_from_json(str_data)
elif "multipart/form-data" in request.META['CONTENT_TYPE']:
if self.lookup_field is 'id' or self.lookup_field is 'pk':
lookup_field_string = "(?:id|pk)"
else:
lookup_field_string = self.lookup_field
expression = re.compile('name="{lookup_field}"\r\n\r\n([^\r]+)\r\n'.format(lookup_field=lookup_field_string))
id_set = False
iterator = expression.finditer(request.body.decode('utf-8'))
for match in iterator:
id = match.groups()[0]
id_set = True
if not id_set:
id = self.id_from_json(request.body.decode('utf-8'))
elif "application/x-www-form-urlencoded" in request.META['CONTENT_TYPE']:
parser = FormParser()
stream = StringIO(request.body.decode('utf-8'))
data = parser.parse(stream)
if self.lookup_field is 'id' or self.lookup_field is 'pk':
if 'id' in data:
id = data['id']
elif 'pk' in data:
id = data['pk']
else:
id = data[self.lookup_field]
except AttributeError:
return False
except KeyError:
logger.exception("Missing lookup field {} on view {} ".format(self.lookup_field, self))
raise CRUDException("Missing {lookup_field}".format(lookup_field=self.lookup_field), 400)
except ValueError:
logger.exception("CRUDFilters received improper json.")
raise CRUDException("Improper json", 400)
try:
if id is None:
logger.exception("Missing lookup field {} on view {} ".format(self.lookup_field, self))
raise CRUDException("Missing {lookup_field}".format(lookup_field=self.lookup_field), 400)
self.obj_id = id
return self.obj_id
except KeyError:
logger.exception("Update Operations must include {} in the request body.".format(self.lookup_field))
raise CRUDException("Update operations must include " + self.lookup_field + " in the request body.", status_code=400)
def id_from_json(self, str_data):
id = None
str_data = '{str_data}'.format(str_data=str_data.replace("'", '"'))
try:
data = json.loads(str_data)
# Handle the json.dumps case, until we remove it from testing:
if isinstance(data, str):
data = json.loads(data)
if self.lookup_field is 'pk' or self.lookup_field is 'id':
if 'id' in data.keys():
id = data['id']
else:
id = data['pk']
else:
id = data[self.lookup_field]
except Exception:
return None
return id
def check_request_url_for_id(self, request):
"""
Make sure this request has id (or pk field) in request URL. Required for DRF deletion,
and for single_read GET requests.
"""
try:
self.obj_id = request.resolver_match.kwargs[self.lookup_field]
return self.obj_id
except KeyError:
logger.exception("Malformed request at URL {url}. CRUD role ({role}), filters ({filters}), operation ({operation}). Desired role {desired_role}. User {user}.".format(
url=request.path,
role=request.crud_role,
filters=str(request.crud_filters),
operation=request.crud_operation,
desired_role=request.META.get('HTTP_CRUD_ROLE', '(none)'),
user=str(request.user)
))
raise CRUDException(request.method + " operations on this endpoint must include /" + self.lookup_field + "/ in the request URL.", status_code=400)
def create(self, request, *args, **kwargs):
"""
Default empty implementation of create(). User must override this function to get
create functionality.
"""
return HttpResponse("Method create not implemented by default", status=405)
def update(self, request, *args, **kwargs):
"""
Default empty implementation of update(). User must override this function to get
update functionality.
"""
return HttpResponse("Method update not implemented by default", status=405)
def partial_update(self, request, *args, **kwargs):
"""
Default empty implementation of partial_update(). User must override this function to get
partial_update functionality.
"""
return HttpResponse("Method partial_update not implemented by default", status=405)
def patch(self, request, *args, **kwargs):
"""
Default empty implementation of patch(). User must override this function to get
patch functionality.
"""
return HttpResponse("Method patch not implemented by default", status=405)
def retrieve(self, request, *args, **kwargs):
"""
Default implementation of retrieve(). Relies on our implementation of get_queryset.
"""
# We implement GET functions by default, since we override get_queryset.
return super(CRUDFilterModelViewSet, self).retrieve(request, *args, **kwargs)
def list(self, request, *args, **kwargs):
"""
Default implementation of list(). Relies on our implementation of get_queryset.
"""
# We implement GET functions by default, since we override get_queryset.
return super(CRUDFilterModelViewSet, self).list(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
"""
Default implementation of delete().
"""
# We implement DELETE by default, but still check for permissions.
return super(CRUDFilterModelViewSet, self).destroy(request, *args, **kwargs)
| |
# Copyright (c) 2015 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_concurrency import processutils
from oslo_log import log
from oslo_utils import strutils
from oslo_utils import units
import paramiko
import os
import time
from manila import exception
from manila.i18n import _
from manila import utils as mutils
LOG = log.getLogger(__name__)
class HNASSSHBackend(object):
    """Backend that manages a Hitachi HNAS array over SSH.

    Every public method builds an ``ssc`` CLI command line and runs it on
    the HNAS (directly, or through the SMU when a cluster admin IP is
    configured) via :meth:`_execute`. Command failures are translated into
    ``manila.exception`` types by inspecting the CLI's stderr/stdout text.
    """
    def __init__(self, hnas_ip, hnas_username, hnas_password, ssh_private_key,
                 cluster_admin_ip0, evs_id, evs_ip, fs_name, job_timeout):
        # Connection parameters for the HNAS management interface.
        self.ip = hnas_ip
        self.port = 22
        self.user = hnas_username
        self.password = hnas_password
        self.priv_key = ssh_private_key
        self.admin_ip0 = cluster_admin_ip0
        self.evs_id = str(evs_id)
        self.fs_name = fs_name
        self.evs_ip = evs_ip
        # SSH connection pool is created lazily on first command (_execute).
        self.sshpool = None
        # Upper bound in seconds for tree-clone job polling (tree_clone).
        self.job_timeout = job_timeout
        LOG.debug("Hitachi HNAS Driver using SSH backend.")
    def get_stats(self):
        """Get the stats from file-system.

        :returns:
            fs_capacity.size = Total size from filesystem.
            available_space = Free space currently on filesystem.
            dedupe = True if dedupe is enabled on filesystem.
        """
        command = ['df', '-a', '-f', self.fs_name]
        try:
            output, err = self._execute(command)
        except processutils.ProcessExecutionError:
            msg = _("Could not get HNAS backend stats.")
            LOG.exception(msg)
            raise exception.HNASBackendException(msg=msg)
        line = output.split('\n')
        # The 4th line of 'df' output holds this filesystem's data row.
        fs = Filesystem(line[3])
        available_space = fs.size - fs.used
        return fs.size, available_space, fs.dedupe
    def nfs_export_add(self, share_id, snapshot_id=None):
        """Create an NFS export for a share (or a share snapshot).

        The export is created with '-S disable' and client 127.0.0.1 —
        presumably locked down until real access rules are applied via
        update_nfs_access_rule (TODO confirm against HNAS CLI docs).
        """
        if snapshot_id is not None:
            path = os.path.join('/snapshots', share_id, snapshot_id)
            name = os.path.join('/snapshots', snapshot_id)
        else:
            path = name = os.path.join('/shares', share_id)
        command = ['nfs-export', 'add', '-S', 'disable', '-c', '127.0.0.1',
                   name, self.fs_name, path]
        try:
            self._execute(command)
        except processutils.ProcessExecutionError:
            msg = _("Could not create NFS export %s.") % name
            LOG.exception(msg)
            raise exception.HNASBackendException(msg=msg)
    def nfs_export_del(self, share_id=None, snapshot_id=None):
        """Delete the NFS export of a share or snapshot.

        A missing export is only logged (idempotent delete); any other
        failure raises HNASBackendException.
        """
        if share_id is not None:
            name = os.path.join('/shares', share_id)
        elif snapshot_id is not None:
            name = os.path.join('/snapshots', snapshot_id)
        else:
            msg = _("NFS export not specified to delete.")
            raise exception.HNASBackendException(msg=msg)
        command = ['nfs-export', 'del', name]
        try:
            self._execute(command)
        except processutils.ProcessExecutionError as e:
            if 'does not exist' in e.stderr:
                LOG.warning("Export %s does not exist on "
                            "backend anymore.", name)
            else:
                msg = _("Could not delete NFS export %s.") % name
                LOG.exception(msg)
                raise exception.HNASBackendException(msg=msg)
    def cifs_share_add(self, share_id, snapshot_id=None):
        """Create a CIFS share for a share (or a share snapshot)."""
        if snapshot_id is not None:
            path = r'\\snapshots\\' + share_id + r'\\' + snapshot_id
            name = snapshot_id
        else:
            path = r'\\shares\\' + share_id
            name = share_id
        command = ['cifs-share', 'add', '-S', 'disable', '--enable-abe',
                   '--nodefaultsaa', name, self.fs_name, path]
        try:
            self._execute(command)
        except processutils.ProcessExecutionError:
            msg = _("Could not create CIFS share %s.") % name
            LOG.exception(msg)
            raise exception.HNASBackendException(msg=msg)
    def cifs_share_del(self, name):
        """Delete a CIFS share; a missing share is only logged."""
        command = ['cifs-share', 'del', '--target-label', self.fs_name,
                   name]
        try:
            self._execute(command)
        except processutils.ProcessExecutionError as e:
            # NOTE(review): exit code 1 is treated as "share already gone"
            # — confirm this matches the HNAS CLI contract.
            if e.exit_code == 1:
                LOG.warning("CIFS share %s does not exist on "
                            "backend anymore.", name)
            else:
                msg = _("Could not delete CIFS share %s.") % name
                LOG.exception(msg)
                raise exception.HNASBackendException(msg=msg)
    def get_nfs_host_list(self, share_id):
        """Return the export-configuration (host rules) of a share."""
        export = self._get_export(share_id)
        return export[0].export_configuration
    def update_nfs_access_rule(self, host_list, share_id=None,
                               snapshot_id=None):
        """Replace the NFS access rules of a share/snapshot export.

        An empty host list resets the export to 127.0.0.1 only;
        otherwise the hosts are joined comma-separated inside quotes.
        """
        if share_id is not None:
            name = os.path.join('/shares', share_id)
        elif snapshot_id is not None:
            name = os.path.join('/snapshots', snapshot_id)
        else:
            msg = _("No share/snapshot provided to update NFS rules.")
            raise exception.HNASBackendException(msg=msg)
        command = ['nfs-export', 'mod', '-c']
        if len(host_list) == 0:
            command.append('127.0.0.1')
        else:
            string_command = '"' + str(host_list[0])
            for i in range(1, len(host_list)):
                string_command += ',' + (str(host_list[i]))
            string_command += '"'
            command.append(string_command)
        command.append(name)
        try:
            self._execute(command)
        except processutils.ProcessExecutionError:
            msg = _("Could not update access rules for NFS export %s.") % name
            LOG.exception(msg)
            raise exception.HNASBackendException(msg=msg)
    def cifs_allow_access(self, name, user, permission, is_snapshot=False):
        """Grant a user access to a CIFS share or snapshot.

        If the user already has an entry: snapshots are left untouched
        (debug only), shares get their access level updated instead.
        """
        command = ['cifs-saa', 'add', '--target-label', self.fs_name,
                   name, user, permission]
        try:
            self._execute(command)
        except processutils.ProcessExecutionError as e:
            if 'already listed as a user' in e.stderr:
                if is_snapshot:
                    LOG.debug('User %(user)s already allowed to access '
                              'snapshot %(snapshot)s.', {
                                  'user': user,
                                  'snapshot': name,
                              })
                else:
                    self._update_cifs_rule(name, user, permission)
            else:
                entity_type = "share"
                if is_snapshot:
                    entity_type = "snapshot"
                msg = _("Could not add access of user %(user)s to "
                        "%(entity_type)s %(name)s.") % {
                    'user': user,
                    'name': name,
                    'entity_type': entity_type,
                }
                LOG.exception(msg)
                raise exception.HNASBackendException(msg=msg)
    def _update_cifs_rule(self, name, user, permission):
        """Change the access level of an existing CIFS share user."""
        LOG.debug('User %(user)s already allowed to access '
                  'share %(share)s. Updating access level...', {
                      'user': user,
                      'share': name,
                  })
        command = ['cifs-saa', 'change', '--target-label', self.fs_name,
                   name, user, permission]
        try:
            self._execute(command)
        except processutils.ProcessExecutionError:
            msg = _("Could not update access of user %(user)s to "
                    "share %(share)s.") % {
                'user': user,
                'share': name,
            }
            LOG.exception(msg)
            raise exception.HNASBackendException(msg=msg)
    def cifs_deny_access(self, name, user, is_snapshot=False):
        """Revoke a user's access to a CIFS share or snapshot.

        A user that is already absent is only logged (idempotent).
        """
        command = ['cifs-saa', 'delete', '--target-label', self.fs_name,
                   name, user]
        entity_type = "share"
        if is_snapshot:
            entity_type = "snapshot"
        try:
            self._execute(command)
        except processutils.ProcessExecutionError as e:
            if ('not listed as a user' in e.stderr or
                    'Could not delete user/group' in e.stderr):
                LOG.warning('User %(user)s already not allowed to access '
                            '%(entity_type)s %(name)s.', {
                                'entity_type': entity_type,
                                'user': user,
                                'name': name
                            })
            else:
                msg = _("Could not delete access of user %(user)s to "
                        "%(entity_type)s %(name)s.") % {
                    'user': user,
                    'name': name,
                    'entity_type': entity_type,
                }
                LOG.exception(msg)
                raise exception.HNASBackendException(msg=msg)
    def list_cifs_permissions(self, hnas_share_id):
        """Return (user, permission) pairs for a CIFS share.

        An empty list is returned when the share has no entries at all.
        """
        command = ['cifs-saa', 'list', '--target-label', self.fs_name,
                   hnas_share_id]
        try:
            output, err = self._execute(command)
        except processutils.ProcessExecutionError as e:
            if 'No entries for this share' in e.stderr:
                LOG.debug('Share %(share)s does not have any permission '
                          'added.', {'share': hnas_share_id})
                return []
            else:
                msg = _("Could not list access of share %s.") % hnas_share_id
                LOG.exception(msg)
                raise exception.HNASBackendException(msg=msg)
        permissions = CIFSPermissions(output)
        return permissions.permission_list
    def tree_clone(self, src_path, dest_path):
        """Clone a directory tree and wait for the clone job to finish.

        Submits a tree-clone job, then polls its status until completion,
        failure, or self.job_timeout seconds of no progress; a stalled
        job is aborted and HNASBackendException raised.
        """
        command = ['tree-clone-job-submit', '-e', '-f', self.fs_name,
                   src_path, dest_path]
        try:
            output, err = self._execute(command)
        except processutils.ProcessExecutionError as e:
            if ('Cannot find any clonable files in the source directory' in
                    e.stderr):
                msg = _("Source path %s is empty.") % src_path
                LOG.debug(msg)
                raise exception.HNASNothingToCloneException(msg=msg)
            else:
                msg = _("Could not submit tree clone job to clone from %(src)s"
                        " to %(dest)s.") % {'src': src_path, 'dest': dest_path}
                LOG.exception(msg)
                raise exception.HNASBackendException(msg=msg)
        job_submit = JobSubmit(output)
        if job_submit.request_status == 'Request submitted successfully':
            job_id = job_submit.job_id
            job_status = None
            progress = ''
            job_rechecks = 0
            starttime = time.time()
            deadline = starttime + self.job_timeout
            # Poll until the job completes or fails.
            while (not job_status or
                    job_status.job_state != "Job was completed"):
                command = ['tree-clone-job-status', job_id]
                output, err = self._execute(command)
                job_status = JobStatus(output)
                if job_status.job_state == 'Job failed':
                    break
                old_progress = progress
                progress = job_status.data_bytes_processed
                if old_progress == progress:
                    # No progress since last poll: back off quadratically,
                    # and abort the job once the deadline is exceeded.
                    job_rechecks += 1
                    now = time.time()
                    if now > deadline:
                        command = ['tree-clone-job-abort', job_id]
                        self._execute(command)
                        LOG.error("Timeout in snapshot creation from "
                                  "source path %s.", src_path)
                        msg = _("Share snapshot of source path %s "
                                "was not created.") % src_path
                        raise exception.HNASBackendException(msg=msg)
                    else:
                        time.sleep(job_rechecks ** 2)
                else:
                    job_rechecks = 0
        # A clone only counts as successful when it completed with no
        # missing directories/files.
        if (job_status.job_state, job_status.job_status,
            job_status.directories_missing,
            job_status.files_missing) == ("Job was completed",
                                          "Success", '0', '0'):
            LOG.debug("Snapshot of source path %(src)s to destination "
                      "path %(dest)s created successfully.",
                      {'src': src_path,
                       'dest': dest_path})
        else:
            LOG.error('Error creating snapshot of source path %s.',
                      src_path)
            msg = _('Snapshot of source path %s was not '
                    'created.') % src_path
            raise exception.HNASBackendException(msg=msg)
    def tree_delete(self, path):
        """Submit a tree-delete job for a path (idempotent on absence)."""
        command = ['tree-delete-job-submit', '--confirm', '-f', self.fs_name,
                   path]
        try:
            self._execute(command)
        except processutils.ProcessExecutionError as e:
            if 'Source path: Cannot access' in e.stderr:
                LOG.warning("Attempted to delete path %s "
                            "but it does not exist.", path)
            else:
                msg = _("Could not submit tree delete job to delete path "
                        "%s.") % path
                LOG.exception(msg)
                raise exception.HNASBackendException(msg=msg)
    @mutils.retry(retry_param=exception.HNASSSCContextChange, wait_random=True,
                  retries=5)
    def create_directory(self, dest_path):
        """Create a directory, retrying if the SSC fs context changed.

        After creation we verify the directory landed on the configured
        filesystem; if not, HNASSSCContextChange triggers a retry.
        """
        self._locked_selectfs('create', dest_path)
        if not self.check_directory(dest_path):
            msg = _("Command to create directory %(path)s was run in another "
                    "filesystem instead of %(fs)s.") % {
                'path': dest_path,
                'fs': self.fs_name,
            }
            LOG.warning(msg)
            raise exception.HNASSSCContextChange(msg=msg)
    @mutils.retry(retry_param=exception.HNASSSCContextChange, wait_random=True,
                  retries=5)
    def delete_directory(self, path):
        """Delete an empty directory, retrying on fs context change.

        A non-empty directory is silently ignored; a directory that still
        exists after deletion indicates a context change and is retried.
        """
        try:
            self._locked_selectfs('delete', path)
        except exception.HNASDirectoryNotEmpty:
            pass
        else:
            if self.check_directory(path):
                msg = _("Command to delete empty directory %(path)s was run in"
                        " another filesystem instead of %(fs)s.") % {
                    'path': path,
                    'fs': self.fs_name,
                }
                LOG.debug(msg)
                raise exception.HNASSSCContextChange(msg=msg)
    @mutils.retry(retry_param=exception.HNASSSCIsBusy, wait_random=True,
                  retries=5)
    def check_directory(self, path):
        """Return True if *path* exists on the filesystem.

        Retries while the SSC path-to-object-number command is busy.
        """
        command = ['path-to-object-number', '-f', self.fs_name, path]
        try:
            self._execute(command)
        except processutils.ProcessExecutionError as e:
            if 'path-to-object-number is currently running' in e.stdout:
                msg = (_("SSC command path-to-object-number for path %s "
                         "is currently busy.") % path)
                raise exception.HNASSSCIsBusy(msg=msg)
            if 'Unable to locate component:' in e.stdout:
                LOG.debug("Cannot find %(path)s: %(out)s",
                          {'path': path, 'out': e.stdout})
                return False
            else:
                msg = _("Could not check if path %s exists.") % path
                LOG.exception(msg)
                raise exception.HNASBackendException(msg=msg)
        return True
    def check_fs_mounted(self):
        """Return True if the configured filesystem is mounted."""
        command = ['df', '-a', '-f', self.fs_name]
        output, err = self._execute(command)
        if "not found" in output:
            msg = _("Filesystem %s does not exist or it is not available "
                    "in the current EVS context.") % self.fs_name
            LOG.error(msg)
            raise exception.HNASItemNotFoundException(msg=msg)
        else:
            line = output.split('\n')
            # 4th line of 'df' output holds the filesystem data row.
            fs = Filesystem(line[3])
            return fs.mounted
    def mount(self):
        """Mount the configured filesystem (no-op if already mounted)."""
        command = ['mount', self.fs_name]
        try:
            self._execute(command)
        except processutils.ProcessExecutionError as e:
            if 'file system is already mounted' not in e.stderr:
                msg = _("Failed to mount filesystem %s.") % self.fs_name
                LOG.exception(msg)
                raise exception.HNASBackendException(msg=msg)
    def vvol_create(self, vvol_name):
        """Create a virtual volume rooted at /shares/<vvol_name>."""
        # create a virtual-volume inside directory
        path = '/shares/' + vvol_name
        command = ['virtual-volume', 'add', '--ensure', self.fs_name,
                   vvol_name, path]
        try:
            self._execute(command)
        except processutils.ProcessExecutionError:
            msg = _("Failed to create vvol %s.") % vvol_name
            LOG.exception(msg)
            raise exception.HNASBackendException(msg=msg)
    def vvol_delete(self, vvol_name):
        """Delete a virtual volume tree (idempotent on absence)."""
        path = '/shares/' + vvol_name
        # Virtual-volume and quota are deleted together
        command = ['tree-delete-job-submit', '--confirm', '-f',
                   self.fs_name, path]
        try:
            self._execute(command)
        except processutils.ProcessExecutionError as e:
            if 'Source path: Cannot access' in e.stderr:
                LOG.warning("Share %s does not exist.", vvol_name)
            else:
                msg = _("Failed to delete vvol %s.") % vvol_name
                LOG.exception(msg)
                raise exception.HNASBackendException(msg=msg)
    def quota_add(self, vvol_name, vvol_quota):
        """Add a hard usage quota (in GB) to a virtual volume."""
        str_quota = str(vvol_quota) + 'G'
        command = ['quota', 'add', '--usage-limit',
                   str_quota, '--usage-hard-limit',
                   'yes', self.fs_name, vvol_name]
        try:
            self._execute(command)
        except processutils.ProcessExecutionError:
            msg = _("Failed to add %(quota)s quota to vvol "
                    "%(vvol)s.") % {'quota': str_quota, 'vvol': vvol_name}
            LOG.exception(msg)
            raise exception.HNASBackendException(msg=msg)
    def modify_quota(self, vvol_name, new_size):
        """Change the usage quota (in GB) of a virtual volume."""
        str_quota = str(new_size) + 'G'
        command = ['quota', 'mod', '--usage-limit', str_quota,
                   self.fs_name, vvol_name]
        try:
            self._execute(command)
        except processutils.ProcessExecutionError:
            msg = _("Failed to update quota of vvol %(vvol)s to "
                    "%(quota)s.") % {'quota': str_quota, 'vvol': vvol_name}
            LOG.exception(msg)
            raise exception.HNASBackendException(msg=msg)
    def check_vvol(self, vvol_name):
        """Raise HNASItemNotFoundException if the vvol does not exist."""
        command = ['virtual-volume', 'list', '--verbose', self.fs_name,
                   vvol_name]
        try:
            self._execute(command)
        except processutils.ProcessExecutionError:
            msg = _("Virtual volume %s does not exist.") % vvol_name
            LOG.exception(msg)
            raise exception.HNASItemNotFoundException(msg=msg)
    def check_quota(self, vvol_name):
        """Raise HNASItemNotFoundException if the vvol has no quota."""
        command = ['quota', 'list', '--verbose', self.fs_name, vvol_name]
        try:
            output, err = self._execute(command)
        except processutils.ProcessExecutionError:
            msg = _("Could not check quota of vvol %s.") % vvol_name
            LOG.exception(msg)
            raise exception.HNASBackendException(msg=msg)
        if 'No quotas matching specified filter criteria' in output:
            msg = _("Virtual volume %s does not have any"
                    " quota.") % vvol_name
            LOG.error(msg)
            raise exception.HNASItemNotFoundException(msg=msg)
    def check_export(self, vvol_name, is_snapshot=False):
        """Verify an NFS export exists on the configured filesystem."""
        export = self._get_export(vvol_name, is_snapshot=is_snapshot)
        if (vvol_name in export[0].export_name and
                self.fs_name in export[0].file_system_label):
            return
        else:
            msg = _("Export %s does not exist.") % export[0].export_name
            LOG.error(msg)
            raise exception.HNASItemNotFoundException(msg=msg)
    def check_cifs(self, vvol_name):
        """Verify a CIFS share lives on the configured filesystem."""
        output = self._cifs_list(vvol_name)
        cifs_share = CIFSShare(output)
        if self.fs_name != cifs_share.fs:
            msg = _("CIFS share %(share)s is not located in "
                    "configured filesystem "
                    "%(fs)s.") % {'share': vvol_name,
                                  'fs': self.fs_name}
            LOG.error(msg)
            raise exception.HNASItemNotFoundException(msg=msg)
    def is_cifs_in_use(self, vvol_name):
        """Return True if the CIFS share has connected users."""
        output = self._cifs_list(vvol_name)
        cifs_share = CIFSShare(output)
        return cifs_share.is_mounted
    def _cifs_list(self, vvol_name):
        """Return raw 'cifs-share list' output for a share name."""
        command = ['cifs-share', 'list', vvol_name]
        try:
            output, err = self._execute(command)
        except processutils.ProcessExecutionError as e:
            if 'does not exist' in e.stderr:
                msg = _("CIFS share %(share)s was not found in EVS "
                        "%(evs_id)s") % {'share': vvol_name,
                                         'evs_id': self.evs_id}
                LOG.exception(msg)
                raise exception.HNASItemNotFoundException(msg=msg)
            else:
                msg = _("Could not list CIFS shares by vvol name "
                        "%s.") % vvol_name
                LOG.exception(msg)
                raise exception.HNASBackendException(msg=msg)
        return output
    def get_share_quota(self, share_id):
        """Return the share's quota limit in GB (None if unset).

        Raises HNASBackendException for sub-GB units, which are not
        supported.
        """
        command = ['quota', 'list', self.fs_name, share_id]
        output, err = self._execute(command)
        quota = Quota(output)
        if quota.limit is None:
            return None
        if quota.limit_unit == 'TB':
            return quota.limit * units.Ki
        elif quota.limit_unit == 'GB':
            return quota.limit
        else:
            msg = _("Share %s does not support quota values "
                    "below 1G.") % share_id
            LOG.error(msg)
            raise exception.HNASBackendException(msg=msg)
    def get_share_usage(self, share_id):
        """Return the share's current usage in GiB."""
        command = ['quota', 'list', self.fs_name, share_id]
        output, err = self._execute(command)
        quota = Quota(output)
        if quota.usage is None:
            msg = _("Virtual volume %s does not have any quota.") % share_id
            LOG.error(msg)
            raise exception.HNASItemNotFoundException(msg=msg)
        else:
            bytes_usage = strutils.string_to_bytes(str(quota.usage) +
                                                   quota.usage_unit)
            return bytes_usage / units.Gi
    def _get_export(self, name, is_snapshot=False):
        """Return a list of Export objects for a share/snapshot name."""
        if is_snapshot:
            name = '/snapshots/' + name
        else:
            name = '/shares/' + name
        # NOTE(review): 'list ' carries a trailing space; harmless once the
        # command is whitespace-joined, but verify before "fixing" it.
        command = ['nfs-export', 'list ', name]
        export_list = []
        try:
            output, err = self._execute(command)
        except processutils.ProcessExecutionError as e:
            if 'does not exist' in e.stderr:
                msg = _("Export %(name)s was not found in EVS "
                        "%(evs_id)s.") % {
                    'name': name,
                    'evs_id': self.evs_id,
                }
                LOG.exception(msg)
                raise exception.HNASItemNotFoundException(msg=msg)
            else:
                msg = _("Could not list NFS exports by name %s.") % name
                LOG.exception(msg)
                raise exception.HNASBackendException(msg=msg)
        items = output.split('Export name')
        if items[0][0] == '\n':
            items.pop(0)
        for i in range(0, len(items)):
            export_list.append(Export(items[i]))
        return export_list
    @mutils.retry(retry_param=exception.HNASConnException, wait_random=True)
    def _execute(self, commands):
        """Run an ssc command on the HNAS over SSH and return (out, err).

        The command is prefixed with the ssc invocation — through the SMU
        ('--smuauth') plus an EVS console-context when a cluster admin IP
        is configured, otherwise directly on 127.0.0.1. The SSH pool is
        created lazily; HNASConnException triggers a retry (decorator).
        """
        command = ['ssc', '127.0.0.1']
        if self.admin_ip0 is not None:
            command = ['ssc', '--smuauth', self.admin_ip0]
        command += ['console-context', '--evs', self.evs_id]
        commands = command + commands
        # Reject shell metacharacters before joining into one command line.
        mutils.check_ssh_injection(commands)
        commands = ' '.join(commands)
        if not self.sshpool:
            self.sshpool = mutils.SSHPool(ip=self.ip,
                                          port=self.port,
                                          conn_timeout=None,
                                          login=self.user,
                                          password=self.password,
                                          privatekey=self.priv_key)
        with self.sshpool.item() as ssh:
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            try:
                out, err = processutils.ssh_execute(ssh, commands,
                                                    check_exit_code=True)
                LOG.debug("Command %(cmd)s result: out = %(out)s - err = "
                          "%(err)s.", {
                              'cmd': commands,
                              'out': out,
                              'err': err,
                          })
                return out, err
            except processutils.ProcessExecutionError as e:
                if 'Failed to establish SSC connection' in e.stderr:
                    msg = _("Failed to establish SSC connection.")
                    LOG.debug(msg)
                    raise exception.HNASConnException(msg=msg)
                else:
                    LOG.debug("Error running SSH command. "
                              "Command %(cmd)s result: out = %(out)s - err = "
                              "%(err)s - exit = %(exit)s.", {
                                  'cmd': e.cmd,
                                  'out': e.stdout,
                                  'err': e.stderr,
                                  'exit': e.exit_code,
                              })
                    raise
    @mutils.synchronized("hitachi_hnas_select_fs", external=True)
    def _locked_selectfs(self, op, path):
        """Run mkdir/rmdir inside a selectfs context, serialized.

        'selectfs' sets the per-session filesystem context, so this is
        guarded by an external (cross-process) lock; a VolumeNotFound
        error means the context changed and callers should retry.
        """
        if op == 'create':
            command = ['selectfs', self.fs_name, '\n',
                       'ssc', '127.0.0.1', 'console-context', '--evs',
                       self.evs_id, 'mkdir', '-p', path]
            try:
                self._execute(command)
            except processutils.ProcessExecutionError as e:
                if "Current file system invalid: VolumeNotFound" in e.stderr:
                    msg = _("Command to create directory %s failed due to "
                            "context change.") % path
                    LOG.debug(msg)
                    raise exception.HNASSSCContextChange(msg=msg)
                else:
                    msg = _("Failed to create directory %s.") % path
                    LOG.exception(msg)
                    raise exception.HNASBackendException(msg=msg)
        if op == 'delete':
            command = ['selectfs', self.fs_name, '\n',
                       'ssc', '127.0.0.1', 'console-context', '--evs',
                       self.evs_id, 'rmdir', path]
            try:
                self._execute(command)
            except processutils.ProcessExecutionError as e:
                if 'DirectoryNotEmpty' in e.stderr:
                    msg = _("Share %s has more snapshots.") % path
                    LOG.debug(msg)
                    raise exception.HNASDirectoryNotEmpty(msg=msg)
                elif 'cannot remove' in e.stderr and 'NotFound' in e.stderr:
                    LOG.warning("Attempted to delete path %s but it does "
                                "not exist.", path)
                elif 'Current file system invalid: VolumeNotFound' in e.stderr:
                    msg = _("Command to delete empty directory %s failed due "
                            "to context change.") % path
                    LOG.debug(msg)
                    raise exception.HNASSSCContextChange(msg=msg)
                else:
                    msg = _("Failed to delete directory %s.") % path
                    LOG.exception(msg)
                    raise exception.HNASBackendException(msg=msg)
class Export(object):
    """Parse one 'nfs-export list' entry into attributes.

    The entry has two layouts: when the filesystem info is unavailable
    ('*** not available ***') the per-filesystem lines are absent and
    the remaining fields start 7 lines earlier — handled via ``index``.
    """
    def __init__(self, data):
        if data:
            # Everything after 'Export configuration:' is the host rules.
            split_data = data.split('Export configuration:\n')
            items = split_data[0].split('\n')
            self.export_name = items[0].split(':')[1].strip()
            self.export_path = items[1].split(':')[1].strip()
            if '*** not available ***' in items[2]:
                self.file_system_info = items[2].split(':')[1].strip()
                index = 0
            else:
                self.file_system_label = items[2].split(':')[1].strip()
                self.file_system_size = items[3].split(':')[1].strip()
                self.file_system_free_space = items[4].split(':')[1].strip()
                self.file_system_state = items[5].split(':')[1]
                self.formatted = items[6].split('=')[1].strip()
                self.mounted = items[7].split('=')[1].strip()
                self.failed = items[8].split('=')[1].strip()
                self.thin_provisioned = items[9].split('=')[1].strip()
                index = 7
            # Remaining fields shift by 7 lines when fs info was present.
            self.access_snapshots = items[3 + index].split(':')[1].strip()
            self.display_snapshots = items[4 + index].split(':')[1].strip()
            self.read_caching = items[5 + index].split(':')[1].strip()
            self.disaster_recovery_setting = items[6 + index].split(':')[1]
            self.recovered = items[7 + index].split('=')[1].strip()
            self.transfer_setting = items[8 + index].split('=')[1].strip()
            self.export_configuration = []
            export_config = split_data[1].split('\n')
            # Keep only host-rule lines that contain real content.
            for i in range(0, len(export_config)):
                if any(j.isdigit() or j.isalpha() for j in export_config[i]):
                    self.export_configuration.append(export_config[i])
class JobStatus(object):
    """Parse the output of 'tree-clone-job-status' into attributes.

    NOTE(review): the parser assumes a fixed line/word layout of the
    command output — verify against the target HNAS firmware version.
    """
    def __init__(self, data):
        if data:
            lines = data.split("\n")
            self.job_id = lines[0].split()[3]
            self.physical_node = lines[2].split()[3]
            self.evs = lines[3].split()[2]
            self.volume_number = lines[4].split()[3]
            self.fs_id = lines[5].split()[4]
            self.fs_name = lines[6].split()[4]
            self.source_path = lines[7].split()[3]
            self.creation_time = " ".join(lines[8].split()[3:5])
            self.destination_path = lines[9].split()[3]
            self.ensure_path_exists = lines[10].split()[5]
            self.job_state = " ".join(lines[12].split()[3:])
            self.job_started = " ".join(lines[14].split()[2:4])
            self.job_ended = " ".join(lines[15].split()[2:4])
            self.job_status = lines[16].split()[2]
            # Error details are optional: only present past the label words.
            error_details_line = lines[17].split()
            if len(error_details_line) > 3:
                self.error_details = " ".join(error_details_line[3:])
            else:
                self.error_details = None
            self.directories_processed = lines[18].split()[3]
            self.files_processed = lines[19].split()[3]
            self.data_bytes_processed = lines[20].split()[4]
            self.directories_missing = lines[21].split()[4]
            self.files_missing = lines[22].split()[4]
            self.files_skipped = lines[23].split()[4]
            # Skipping details are optional as well.
            skipping_details_line = lines[24].split()
            if len(skipping_details_line) > 3:
                self.skipping_details = " ".join(skipping_details_line[3:])
            else:
                self.skipping_details = None
class JobSubmit(object):
    """Parse the output of 'tree-clone-job-submit'.

    Exposes ``request_status`` (e.g. "Request submitted successfully")
    and the ``job_id`` assigned to the submitted clone job.
    """
    def __init__(self, data):
        if data:
            # Strip periods so status words and the job id come out clean.
            tokens = data.replace(".", "").split()
            # Words 1-3 form the human-readable status; word 8 is the id.
            self.request_status = " ".join(tokens[1:4])
            self.job_id = tokens[8]
class Filesystem(object):
    """Parse one data row of HNAS 'df' output describing a filesystem."""
    def __init__(self, data):
        if data:
            fields = data.split()
            self.id = fields[0]
            self.label = fields[1]
            self.evs = fields[2]
            # Sizes are kept in GB; TB values are scaled by 1024.
            self.size = float(fields[3])
            self.size_measure = fields[4]
            if self.size_measure == 'TB':
                self.size *= units.Ki
            # An unmounted filesystem reports "Not mounted" where the
            # usage columns would be, so usage is only parsed otherwise.
            self.mounted = fields[5:7] != ["Not", "mounted"]
            if self.mounted:
                self.used = float(fields[5])
                self.used_measure = fields[6]
                if self.used_measure == 'TB':
                    self.used *= units.Ki
            self.dedupe = 'dedupe enabled' in data
class Quota(object):
    """Parse 'quota list' output for a virtual volume."""
    def __init__(self, data):
        if data:
            if 'No quotas matching' in data:
                # No quota configured: expose an all-None record.
                self.type = None
                self.target = None
                self.usage = None
                self.usage_unit = None
                self.limit = None
                self.limit_unit = None
            else:
                # Fixed word positions within the whitespace-split output.
                fields = data.split()
                self.type = fields[2]
                self.target = fields[6]
                self.usage = fields[9]
                self.usage_unit = fields[10]
                if fields[13] == 'Unset':
                    self.limit = None
                else:
                    self.limit = float(fields[13])
                    self.limit_unit = fields[14]
class CIFSPermissions(object):
    """Parse 'cifs-saa list' output into (user, permission-code) pairs."""
    def __init__(self, data):
        self.permission_list = []
        known = [('Allow Read', 'ar'),
                 ('Allow Change & Read', 'acr'),
                 ('Allow Full Control', 'af'),
                 ('Deny Read', 'dr'),
                 ('Deny Change & Read', 'dcr'),
                 ('Deny Full Control', 'df')]
        for row in data.split('\n'):
            hits = [entry for entry in known if entry[0] in row]
            # Only rows matching exactly one known permission label count;
            # headers and ambiguous lines are skipped.
            if len(hits) == 1:
                label, code = hits[0]
                user = row.split(label)[1].strip()
                self.permission_list.append((user, code))
class CIFSShare(object):
    """Parse 'cifs-share list' output for a single share."""
    def __init__(self, data):
        for row in data.split('\n'):
            if 'File system label' in row:
                # Filesystem hosting the share.
                self.fs = row.split(': ')[1]
            elif 'Share users' in row:
                # A non-zero user count means the share is in use.
                count = row.split(': ')
                self.is_mounted = count[1] != '0'
| |
#!/usr/bin/env python
"""Unit tests for ctags.py"""
import os
import sys
import tempfile
import codecs
from subprocess import CalledProcessError
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
try:
import sublime
if int(sublime.version()) > 3000:
from . import ctags
else:
import ctags
except:
import ctags
class CTagsTest(unittest.TestCase):
"""
Helper functions
"""
def build_python_file(self):
"""Build a simple Python "program" that ctags can use.
:Returns:
Path to a constructed, valid Python source file
"""
path = ''
# the file created here is locked while open, hence we can't delete
# similarly, ctags appears to require an extension hence the suffix
with tempfile.NamedTemporaryFile(delete=False, suffix='.py') as temp:
try:
path = temp.name # store name for later use
temp.writelines([
b'def my_definition():\n',
b'\toutput = "Hello, world!"\n',
b'\tprint(output)\n'])
finally:
temp.close()
return path
def build_python_file__extended(self):
"""Build a Python "program" demonstrating all common CTag types
Build a Python program that demonstrates the following CTag types:
- ``f`` - function definitions
- ``v`` - variable definitions
- ``c`` - classes
- ``m`` - class, struct, and union members
- ``i`` - import
This is mainly intended to regression test for issue #209.
:Returns:
Path to a constructed, valid Python source file
"""
path = ''
# the file created here is locked while open, hence we can't delete
# similarly, ctags appears to require an extension hence the suffix
with tempfile.NamedTemporaryFile(delete=False, suffix='.py') as temp:
try:
path = temp.name # store name for later use
temp.writelines([
b'import os\n',
b'\n',
b'COLOR_RED = "\\c800080FF;"\t#red\n',
b'\n',
b'def my_function(first_name):\n',
b'\tprint("Hello {0}".format(first_name))\n',
b'\n',
b'class MyClass(object):\n',
b'\tlast_name = None\n',
b'\taddress = None\t# comment preceded by a tab\n',
b'\n',
b'\tdef my_method(self, last_name):\n',
b'\t\tself.last_name = last_name\n',
b'\t\tprint("Hello again, {0}".format(self.last_name))\n'])
finally:
temp.close()
return path
def build_java_file(self):
"""Build a slightly detailed Java "program" that ctags can use.
Build a slightly more detailed program that 'build_python_file' does,
in order to test more advanced functionality of ctags.py, or ctags.exe
:Returns:
Path to a constructed, valid Java source file
"""
path = ''
# the file created here is locked while open, hence we can't delete
# similarly, ctags appears to require an extension hence the suffix
with tempfile.NamedTemporaryFile(delete=False, suffix='.java') as temp:
try:
path = temp.name # store name for later use
temp.writelines([
b'public class DemoClass {\n',
b'\tpublic static void main(String args[]) {\n',
b'\t\tSystem.out.println("Hello, World");\n',
b'\n',
b'\t\tDemoClass demo = new DemoClass();\n',
b'\t\tSystem.out.printf("Sum %d\n", demo.getSum(5,6));\n',
b'\t}\n',
b'\n',
b'\tprivate int getSum(int a, int b) {\n',
b'\t\treturn (a + b);\n',
b'\t}\n',
b'}\n'])
finally:
temp.close()
return path
def build_c_file(self):
"""Build a simple C "program" that ctags can use.
This is mainly intended to regression test for issue #213.
:Returns:
Path to a constructed, valid Java source file
"""
path = ''
# the file created here is locked while open, hence we can't delete
# similarly, ctags appears to require an extension hence the suffix
with tempfile.NamedTemporaryFile(delete=False, suffix='.c') as temp:
try:
path = temp.name # store name for later use
temp.writelines([
b'#define foo(x,y) x+y\n'
b'#define foobar 1\n'
b'\n'
b'void bar()\n'
b'{\n'
b'\tfoo(10,2);'
b'\n'
b'#if foobar\n'
b'\tfoo(2,3); \n'
b'}\n'])
finally:
temp.close()
return path
"""
Test functions
"""
def setUp(self):
"""Set up test environment.
Ensures the ``ctags_not_on_path`` test is run first, and all other
tests are skipped if this fails. If ctags is not installed, no test
will pass
"""
self.test_build_ctags__ctags_on_path()
"""build ctags"""
def test_build_ctags__ctags_on_path(self):
"""Checks that ``ctags`` is in ``PATH``"""
# build_ctags requires a real path, so we create a temporary file as a
# cross-platform way to get the temp directory
with tempfile.NamedTemporaryFile() as temp:
try:
ctags.build_ctags(path=temp.name)
except EnvironmentError:
self.fail('build_ctags() raised EnvironmentError. ctags not'
' on path')
def test_build_ctags__custom_command(self):
    """Checks for support of simple custom command to execute ctags"""
    # build_ctags requires a real path; a temporary file is a portable
    # way to obtain one inside the system temp directory
    with tempfile.NamedTemporaryFile() as temp:
        try:
            ctags.build_ctags(path=temp.name, cmd='ctags')
        except EnvironmentError:
            self.fail(
                'build_ctags() raised EnvironmentError. ctags not on path')
def test_build_ctags__invalid_custom_command(self):
    """Checks for failure for invalid custom command to execute ctags"""
    # build_ctags requires a real path, so we create a temporary file as a
    # cross-platform way to get the temp directory
    with tempfile.NamedTemporaryFile() as temp:
        # a misspelled executable name must surface as CalledProcessError
        with self.assertRaises(CalledProcessError):
            ctags.build_ctags(path=temp.name, cmd='ccttaaggss')
def test_build_ctags__single_file(self):
    """Test execution of ctags using a single temporary file.

    Builds a Python fixture, runs ctags over it, and checks the last
    line of the tag file describes the fixture's function definition.
    """
    path = self.build_python_file()
    tag_file = ctags.build_ctags(path=path)
    try:
        # let the context manager close the tag file before we delete
        # it below — Windows cannot remove a file that is still open
        # (the old code also called output.close() redundantly inside
        # the with block for the same reason)
        with codecs.open(tag_file, encoding='utf-8') as output:
            content = output.readlines()
        filename = os.path.basename(path)
        self.assertEqual(
            content[-1],
            'my_definition\t{0}\t/^def my_definition()'
            ':$/;"\tf{1}'.format(filename, os.linesep))
    finally:
        os.remove(path)  # clean up
        os.remove(tag_file)
def test_build_ctags__custom_tag_file(self):
    """Test execution of ctags using a custom tag file"""
    path = self.build_python_file()
    tag_file = ctags.build_ctags(path=path, tag_file='my_tag_file')
    with codecs.open(tag_file, encoding='utf-8') as output:
        try:
            content = output.readlines()
            filename = os.path.basename(path)
            # the last tag line should describe the fixture's function,
            # formatted as an ex_command regex tag entry
            self.assertEqual(
                content[-1],
                'my_definition\t{0}\t/^def my_definition()'
                ':$/;"\tf{1}'.format(filename, os.linesep))
        finally:
            # close before removing: an open file cannot be deleted on
            # Windows
            output.close()
            os.remove(path)  # clean up
            os.remove(tag_file)
def test_build_ctags__additional_options(self):
    """Test execution of ctags using additional ctags options"""
    path = self.build_python_file()
    tag_file = ctags.build_ctags(path=path, tag_file='my_tag_file',
                                 opts="--language-force=java")
    with codecs.open(tag_file, encoding='utf-8') as output:
        try:
            content = output.readlines()
            # there should be nothing in the file but headers (due to the
            # Java 'language-force' option on a Python file)
            self.assertEqual(
                content[-1][:2],  # all comments start with '!_' - confirm
                '!_')
        finally:
            # close before removing: an open file cannot be deleted on
            # Windows
            output.close()
            os.remove(path)  # clean up
            os.remove(tag_file)
"""post_process_tag"""
def test_post_process_tag__line_numbers(self):
"""Test ``post_process_tag`` with a line number ``excmd`` variable.
Test function with an sample tag from a Python file. This in turn tests
the supporting functions.
"""
tag = {
'symbol': 'acme_function',
'filename': '.\\a_folder\\a_script.py',
'ex_command': '99',
'type': 'f',
'fields': None}
expected_output = {
'symbol': 'acme_function',
'filename': '.\\a_folder\\a_script.py',
'tag_path': ('.\\a_folder\\a_script.py', 'acme_function'),
'ex_command': '99',
'type': 'f',
'fields': None}
result = ctags.post_process_tag(tag)
self.assertEqual(result, expected_output)
def test_post_process_tag__regex_no_fields(self):
    """Test ``post_process_tag`` with a regex ``excmd`` variable.

    Uses a sample tag from a Python file, which in turn exercises the
    supporting functions.
    """
    tag = {
        'symbol': 'acme_function',
        'filename': '.\\a_folder\\a_script.py',
        'ex_command': '/^def acme_function(tag):$/',
        'type': 'f',
        'fields': None}
    # the '/^...$/' regex delimiters should be stripped from ex_command,
    # and tag_path derived from filename + symbol
    expected_output = {
        'symbol': 'acme_function',
        'filename': '.\\a_folder\\a_script.py',
        'tag_path': ('.\\a_folder\\a_script.py', 'acme_function'),
        'ex_command': 'def acme_function(tag):',
        'type': 'f',
        'fields': None}
    result = ctags.post_process_tag(tag)
    self.assertEqual(result, expected_output)
def test_post_process_tag__fields(self):
    """Test ``post_process_tag`` with a number of ``field`` variables.

    Uses a sample tag from a Java file, which in turn exercises the
    supporting functions.
    """
    tag = {
        'symbol': 'getSum',
        'filename': '.\\a_folder\\DemoClass.java',
        'ex_command': '/^\tprivate int getSum(int a, int b) {$/',
        'type': 'm',
        'fields': 'class:DemoClass\tfile:'}
    # each 'key:value' field should be promoted to a dict entry, the key
    # list collected in 'field_keys', and the class inserted into
    # tag_path between the filename and the symbol
    expected_output = {
        'symbol': 'getSum',
        'filename': '.\\a_folder\\DemoClass.java',
        'tag_path': ('.\\a_folder\\DemoClass.java', 'DemoClass', 'getSum'),
        'ex_command': '\tprivate int getSum(int a, int b) {',
        'type': 'm',
        'fields': 'class:DemoClass\tfile:',
        'field_keys': ['class', 'file'],
        'class': 'DemoClass',
        'file': ''}
    result = ctags.post_process_tag(tag)
    self.assertEqual(result, expected_output)
"""Tag class"""
def test_parse_tag_lines__python(self):
"""Test ``parse_tag_lines`` with a sample Python file"""
path = self.build_python_file__extended()
tag_file = ctags.build_ctags(path=path, opts=['--python-kinds=-i'])
with codecs.open(tag_file, encoding='utf-8') as output:
try:
content = output.readlines()
filename = os.path.basename(path)
except:
self.fail("Setup of files for test failed")
finally:
output.close()
os.remove(path) # clean up
os.remove(tag_file)
expected_outputs = {
'MyClass': [{
'symbol': 'MyClass',
'filename': filename,
'ex_command': 'class MyClass(object):',
'tag_path': (filename, 'MyClass'),
'type': 'c',
'fields': None}],
'address': [{
'symbol': 'address',
'filename': filename,
'ex_command': '\taddress = None\t# comment preceded by a tab',
'tag_path': (filename, 'MyClass', 'address'),
'type': 'v',
'fields': 'class:MyClass',
'field_keys': ['class'],
'class': 'MyClass'}],
'last_name': [{
'symbol': 'last_name',
'filename': filename,
'ex_command': '\tlast_name = None',
'tag_path': (filename, 'MyClass', 'last_name'),
'type': 'v',
'fields': 'class:MyClass',
'field_keys': ['class'],
'class': 'MyClass'}],
'my_function': [{
'symbol': 'my_function',
'filename': filename,
'ex_command': 'def my_function(first_name):',
'tag_path': (filename, 'my_function'),
'type': 'f',
'fields': None}],
'my_method': [{
'symbol': 'my_method',
'filename': filename,
'ex_command': '\tdef my_method(self, last_name):',
'tag_path': (filename, 'MyClass', 'my_method'),
'type': 'm',
'fields': 'class:MyClass',
'field_keys': ['class'],
'class': 'MyClass'}],
'COLOR_RED': [{
'symbol': 'COLOR_RED',
'filename': filename,
'ex_command': 'COLOR_RED = "\\c800080FF;"\t#red',
'tag_path': (filename, 'COLOR_RED'),
'type': 'v',
'fields': None}],
}
result = ctags.parse_tag_lines(content)
for key in expected_outputs:
self.assertEqual(result[key], expected_outputs[key])
for key in result: # don't forget - we might have missed something!
self.assertEqual(expected_outputs[key], result[key])
def test_parse_tag_lines__c(self):
    """Test ``parse_tag_lines`` with a sample C file"""
    path = self.build_c_file()
    tag_file = ctags.build_ctags(path=path)
    with codecs.open(tag_file, encoding='utf-8') as output:
        try:
            content = output.readlines()
            filename = os.path.basename(path)
        except IOError:
            self.fail("Setup of files for test failed")
        finally:
            output.close()
            os.remove(path)  # clean up
            os.remove(tag_file)
    # the two '#define's are reported as type 'd' with a 'file:' scope
    # field; the function 'bar' is type 'f' with no extra fields
    expected_outputs = {
        'bar': [{
            'symbol': 'bar',
            'filename': filename,
            'ex_command': 'void bar()',
            'tag_path': (filename, 'bar'),
            'type': 'f',
            'fields': None}],
        'foo': [{
            'symbol': 'foo',
            'filename': filename,
            'ex_command': '1',
            'tag_path': (filename, 'foo'),
            'type': 'd',
            'fields': 'file:',
            'field_keys': ['file'],
            'file': ''}],
        'foobar': [{
            'symbol': 'foobar',
            'filename': filename,
            'ex_command': '2',
            'tag_path': (filename, 'foobar'),
            'type': 'd',
            'fields': 'file:',
            'field_keys': ['file'],
            'file': ''}]
    }
    result = ctags.parse_tag_lines(content)
    for key in expected_outputs:
        self.assertEqual(result[key], expected_outputs[key])
    for key in result:  # don't forget - we might have missed something!
        self.assertEqual(expected_outputs[key], result[key])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| |
import maya.cmds as cmds
import tempfile
import base64
import os
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
import maya.OpenMayaUI as OpenMayaUI
import PySide2.QtCore as QtCore
import PySide2.QtGui as QtGui
import PySide2.QtWidgets as QtWidgets
import shiboken2
import mtoa.ui.arnoldmenu as arnoldmenu
def _getQWidgetByWindowTitle(string):
    """Return the direct QWidget child of the Maya main window whose
    windowTitle equals ``string``, or None when no such child exists."""
    ptr = OpenMayaUI.MQtUtil.mainWindow()
    mainWindowWidget = shiboken2.wrapInstance(long(ptr), QtWidgets.QWidget)
    found = None
    for child in mainWindowWidget.children():
        # exact type match on purpose: subclasses are not candidates
        if type(child) is QtWidgets.QWidget and child.windowTitle() == string:
            found = child
            break
    return found
def _getArnoldIpr():
    """Return the Arnold Render View widget, launching it if needed.

    The render view is a top-level widget (not a child of the Maya main
    window), so every live QWidget is scanned for its window title; when
    it is not found, the view is opened through the MtoA menu and the
    scan is repeated. Returns None only if the view could not be created.
    """
    WINDOW_TITLE = 'Arnold Render View'

    def _find():
        for obj in QtWidgets.QApplication.allWidgets():
            if obj.windowTitle() == WINDOW_TITLE:
                return obj
        return None

    # the old version also wrapped the Maya main window here, but never
    # used it — that dead code has been removed
    renderViewQt = _find()
    if renderViewQt is None:
        arnoldmenu.arnoldMtoARenderView()  # creates the window
        renderViewQt = _find()
    return renderViewQt
def assetWindow(*args):
windowID = 'assetsWindow'
window = None
windowMargin = (0, 0)
windowFrameWidth = 8
windowTitleBarHeight = 30
class AssetsLayoutWindow(MayaQWidgetDockableMixin, QtWidgets.QWidget):
toolName = 'assetsLayoutWidget'
def __init__(self, parent=None):
self.deleteInstances()
super(AssetsLayoutWindow, self).__init__(parent=parent)
ptr = OpenMayaUI.MQtUtil.mainWindow()
self.mayaMainWindow = shiboken2.wrapInstance(long(ptr), QtWidgets.QMainWindow)
self.setWindowFlags(QtCore.Qt.Window)
self.setWindowTitle('Assets Layout')
self.setSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred )
self.setObjectName(self.__class__.toolName)
self.setContentsMargins(0,0,0,0)
# self.setStyleSheet(
# 'QWidget {\
# font-family: "Segoe UI";\
# font-size: 11;\
# font-style: normal;\
# font-variant: normal;\
# color: rgb(200,200,200);\
# font-weight: normal;\
# margin:2;\
# padding:0;\
# border-style: none;\
# border-width: 0\
# }\
# QWidget:active {\
# border-style: none;\
# border-width: 0\
# }\
# QWidget:focus {\
# border-style: none;\
# border-width: 0\
# }'
# )
# Set window Layout
QHBoxLayout = QtWidgets.QHBoxLayout()
QHBoxLayout.setObjectName('%s%s'%(self.toolName,'QHBoxLayout'))
QHBoxLayout.setContentsMargins(windowMargin[0],windowMargin[1],windowMargin[0],windowMargin[1])
QTabWidget = QtWidgets.QTabWidget()
QTabWidget.setObjectName('%s%s'%(self.toolName,'QTabWidget'))
QHBoxLayout.addWidget(QTabWidget)
self.setLayout(QHBoxLayout)
self.QHBoxLayout = QHBoxLayout
self.QTabWidget = QTabWidget
def hideEvent(self, event):
"""
On a hideEvent unparent the render view.
This is needed to avoid a maya crash.
"""
print 'hideEvent!'
WINDOW_TITLE = 'Arnold Render View'
ptr = OpenMayaUI.MQtUtil.mainWindow()
mayaMainWindow = shiboken2.wrapInstance(long(ptr), QtWidgets.QWidget)
renderViewQt = None
for obj in QtWidgets.QApplication.allWidgets():
if obj.windowTitle() == WINDOW_TITLE:
obj.setParent(mayaMainWindow, QtCore.Qt.Window)
obj.show()
qr = obj.frameGeometry()
cp = QtWidgets.QDesktopWidget().screenGeometry(0).center()
qr.moveCenter(cp)
obj.move(qr.topLeft())
obj.hide()
break
def paintEvent(self, event):
rect = QtCore.QRect(0, 0, self.width(), self.height())
painter = QtGui.QPainter()
painter.begin(self)
painter.setPen(QtGui.QPen(QtGui.QColor(210,210,210)))
painter.setBrush(QtGui.QBrush(QtGui.QColor(48,48,48)))
painter.setRenderHint(QtGui.QPainter.Antialiasing)
painter.save()
painter.drawRect(rect)
painter.restore()
painter.end()
def deleteInstances(self):
r = OpenMayaUI.MQtUtil.mainWindow()
mayaMainWindow = shiboken2.wrapInstance(long(ptr), QtWidgets.QMainWindow)
# Delete the workspaceControl
control = self.__class__.toolName + 'WorkspaceControl'
if cmds.workspaceControl(control, q=True, exists=True):
cmds.workspaceControl(control, e=True, close=True)
print 'Deleting control {0}'.format(control)
cmds.deleteUI(control, control=True)
# Delete the instance
for obj in QtWidgets.QApplication.allWidgets():
if obj.objectName() == self.__class__.toolName:
cmds.workspaceControl(self.__class__.toolName + 'WorkspaceControl', query=True, exists=True)
# Unparent arnold instance
print 'Deleting instance {0}'.format(obj)
# Delete it for good
obj.setParent(None)
obj.deleteLater()
class CloseEventFilter(QtCore.QObject):
"""
Event filter which emits a parent_closed signal whenever
the monitored widget closes.
via:
https://github.com/shotgunsoftware/tk-maya/blob/master/python/tk_maya/panel_util.py
"""
def set_associated_widget(self, widget_id):
"""
Set the widget to effect
"""
self._widget_id = widget_id
def eventFilter(self, obj, event):
print event.type()
"""
QT Event filter callback
:param obj: The object where the event originated from
:param event: The actual event object
:returns: True if event was consumed, False if not
"""
if event.type() == QtCore.QEvent.Type.Close:
print 'CloseEvent'
return False
ptr = OpenMayaUI.MQtUtil.mainWindow()
mayaMainWindow = shiboken2.wrapInstance(long(ptr), QtWidgets.QMainWindow)
window = AssetsLayoutWindow()
window.show(dockable=True)
renderViewQt = None
WINDOW_TITLE = 'Arnold Render View'
ptr = OpenMayaUI.MQtUtil.mainWindow()
mayaMainWindow = shiboken2.wrapInstance(long(ptr), QtWidgets.QWidget)
renderViewQt = None
for obj in QtWidgets.QApplication.allWidgets():
if obj.windowTitle() == WINDOW_TITLE:
renderViewQt = obj
break
if renderViewQt is None:
arnoldmenu.arnoldMtoARenderView()
for obj in QtWidgets.QApplication.allWidgets():
if obj.windowTitle() == WINDOW_TITLE:
renderViewQt = obj
break
renderViewQt.hide()
renderViewQt.setParent(mayaMainWindow, QtCore.Qt.Window)
# filter = CloseEventFilter(window)
# filter.set_associated_widget(window)
# window.installEventFilter(filter)
if renderViewQt is not None:
window.QTabWidget.addTab(renderViewQt, '&Arnold IPR')
renderViewQt.show()
# UV Editor
uvEditorQt = _getQWidgetByWindowTitle('UV Editor')
if uvEditorQt is None:
cmds.TextureViewWindow()
uvEditorQt = _getQWidgetByWindowTitle('UV Editor')
if uvEditorQt is not None:
window.QTabWidget.addTab(uvEditorQt, '&UV Editor')
# Render Setup Utility
rsUtilQt = _getQWidgetByWindowTitle('Render Setup Utility')
if rsUtilQt is None:
import RenderSetupUtility.setup as setup; setup.init()
rsUtilQt = _getQWidgetByWindowTitle('Render Setup Utility')
rsUtilQt.setFixedWidth(375)
rsUtilQt.setObjectName('rsUtilityWidget')
window.layout().insertWidget(1, rsUtilQt)
children = window.children()
for c in children:
if c.objectName() == 'rsUtilityWidget':
children = c.children()
print children
for c in children:
if type(c) is QtWidgets.QWidget:
print c.setContentsMargins(0,0,0,0)
if type(c) is QtWidgets.QSplitter:
print c.setContentsMargins(0,0,0,0)
win = _getQWidgetByWindowTitle('Assets Layout')
qr = win.frameGeometry()
if QtWidgets.QDesktopWidget().screenCount() == 1:
cp = QtWidgets.QDesktopWidget().screenGeometry(0).center()
if QtWidgets.QDesktopWidget().screenCount() == 2:
cp = QtWidgets.QDesktopWidget().screenGeometry(1).center()
qr.moveCenter(cp)
win.move(qr.topLeft())
# win.showFullScreen()
# Build and show the window as soon as this script is executed in Maya.
assetWindow()
| |
# coding: utf-8
# pylint: disable = invalid-name, W0105, C0301
from __future__ import absolute_import
import collections
from operator import gt, lt
from .compat import range_
class EarlyStopException(Exception):
    """Raised by callbacks to signal that training should stop early.

    Parameters
    ----------
    best_iteration : int
        The best iteration stopped.
    """

    def __init__(self, best_iteration):
        super(EarlyStopException, self).__init__()
        # iteration index the trainer should report/roll back to
        self.best_iteration = best_iteration
# Callback environment used by callbacks
CallbackEnv = collections.namedtuple(
"LightGBMCallbackEnv",
["model",
"params",
"iteration",
"begin_iteration",
"end_iteration",
"evaluation_result_list"])
def _format_eval_result(value, show_stdv=True):
"""format metric string"""
if len(value) == 4:
return '%s\'s %s: %g' % (value[0], value[1], value[2])
elif len(value) == 5:
if show_stdv:
return '%s\'s %s: %g + %g' % (value[0], value[1], value[2], value[4])
else:
return '%s\'s %s: %g' % (value[0], value[1], value[2])
else:
raise ValueError("Wrong metric value")
def print_evaluation(period=1, show_stdv=True):
    """Create a callback that prints the evaluation results.

    Parameters
    ----------
    period : int
        The period to log the evaluation results
    show_stdv : bool, optional
        Whether show stdv if provided

    Returns
    -------
    callback : function
        A callback that print evaluation every period iterations.
    """
    def callback(env):
        """internal function"""
        # guard clauses: disabled, nothing to report, or off-period
        if period <= 0:
            return
        if not env.evaluation_result_list:
            return
        if (env.iteration + 1) % period != 0:
            return
        result = '\t'.join(
            _format_eval_result(x, show_stdv)
            for x in env.evaluation_result_list)
        print('[%d]\t%s' % (env.iteration + 1, result))
    callback.order = 10
    return callback
def record_evaluation(eval_result):
    """Create a callback that records the evaluation history into ``eval_result``.

    Parameters
    ----------
    eval_result : dict
        A dictionary to store the evaluation results.

    Returns
    -------
    callback : function
        The requested callback function.
    """
    if not isinstance(eval_result, dict):
        raise TypeError('Eval_result should be a dictionary')
    eval_result.clear()

    def _prepare(env):
        """Seed one defaultdict(list) per evaluated dataset."""
        for data_name, _, _, _ in env.evaluation_result_list:
            eval_result.setdefault(data_name, collections.defaultdict(list))

    def callback(env):
        """internal function"""
        # lazily initialise on the first call, then append every metric
        if not eval_result:
            _prepare(env)
        for data_name, eval_name, result, _ in env.evaluation_result_list:
            eval_result[data_name][eval_name].append(result)
    callback.order = 20
    return callback
def reset_parameter(**kwargs):
    """Reset parameters after the first iteration.

    NOTE: the initial parameter will still take effect on the first
    iteration.

    Parameters
    ----------
    **kwargs : value should be list or function
        List of parameters for each boosting round
        or a customized function that calculates learning_rate in terms of
        current number of round (e.g. yields learning rate decay)
        - list l: parameter = l[current_round]
        - function f: parameter = f(current_round)

    Returns
    -------
    callback : function
        The requested callback function.
    """
    def callback(env):
        """internal function"""
        step = env.iteration - env.begin_iteration
        changed = {}
        for key, value in kwargs.items():
            # these settings alter the model structure and cannot be
            # swapped mid-training
            if key in ['num_class', 'boosting_type', 'metric']:
                raise RuntimeError("cannot reset {} during training".format(repr(key)))
            if isinstance(value, list):
                if len(value) != env.end_iteration - env.begin_iteration:
                    raise ValueError("Length of list {} has to equal to 'num_boost_round'.".format(repr(key)))
                candidate = value[step]
            else:
                candidate = value(step)
            if candidate != env.params.get(key, None):
                changed[key] = candidate
        if changed:
            env.model.reset_parameter(changed)
            env.params.update(changed)
    callback.before_iteration = True
    callback.order = 10
    return callback
def early_stopping(stopping_rounds, verbose=True):
    """Create a callback that activates early stopping.

    Requires at least one validation dataset and one metric; if there is
    more than one, all of them are checked and the first metric that
    stalls for ``stopping_rounds`` rounds stops training.

    Parameters
    ----------
    stopping_rounds : int
        The stopping rounds before the trend occur.
    verbose : optional, bool
        Whether to print message about early stopping information.

    Returns
    -------
    callback : function
        The requested callback function.
    """
    # per-metric state shared between init() and callback() via closure;
    # one slot per entry of env.evaluation_result_list
    best_score = []
    best_iter = []
    best_msg = []
    cmp_op = []
    def init(env):
        """internal function"""
        if not env.evaluation_result_list:
            raise ValueError('For early stopping, at least one dataset and eval metric is required for evaluation')
        if verbose:
            msg = "Train until valid scores didn't improve in {} rounds."
            print(msg.format(stopping_rounds))
        for eval_ret in env.evaluation_result_list:
            best_iter.append(0)
            if verbose:
                best_msg.append(None)
            # eval_ret[3] is the metric's is-higher-better flag: pick the
            # comparison operator and the matching worst starting score
            if eval_ret[3]:
                best_score.append(float('-inf'))
                cmp_op.append(gt)
            else:
                best_score.append(float('inf'))
                cmp_op.append(lt)
    def callback(env):
        """internal function"""
        # lazy one-time initialisation on the first boosting round
        if not cmp_op:
            init(env)
        best_msg_buffer = None
        for i in range_(len(env.evaluation_result_list)):
            score = env.evaluation_result_list[i][2]
            if cmp_op[i](score, best_score[i]):
                best_score[i] = score
                best_iter[i] = env.iteration
                if verbose:
                    # the formatted line is built once per round and
                    # shared by every metric that improved this round
                    if not best_msg_buffer:
                        best_msg_buffer = '[%d]\t%s' % (
                            env.iteration + 1, '\t'.join([_format_eval_result(x) for x in env.evaluation_result_list]))
                    best_msg[i] = best_msg_buffer
            elif env.iteration - best_iter[i] >= stopping_rounds:
                if verbose:
                    print('Early stopping, best iteration is:\n' + best_msg[i])
                raise EarlyStopException(best_iter[i])
    callback.order = 30
    return callback
| |
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Training the distilled model.
Supported architectures include: BERT -> DistilBERT, RoBERTa -> DistilRoBERTa, GPT2 -> DistilGPT2.
"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPT2Config,
GPT2LMHeadModel,
GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    """
    A bunch of args sanity checks to perform even before starting...
    """
    # MLM flag and its loss weight must agree: mlm <=> alpha_mlm > 0
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    # exactly one language-modeling loss is active: MLM or CLM, never both
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        # MLM distillation only supports masked-LM architectures
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        # CLM distillation currently supports GPT-2 only
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
    # student/teacher families must match (BERT -> DistilBERT is the one
    # allowed cross-family pairing)
    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)
    if args.freeze_token_type_embds:
        # only RoBERTa students have token-type embeddings to freeze
        assert args.student_type in ["roberta"]
    # every loss weight is non-negative and at least one is active
    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    """Stop gradient updates on the student's positional embeddings.

    Supports RoBERTa and GPT-2 students; any other student type is a
    silent no-op.
    """
    if args.student_type == "roberta":
        weight = student.roberta.embeddings.position_embeddings.weight
        weight.requires_grad = False
    elif args.student_type == "gpt2":
        weight = student.transformer.wpe.weight
        weight.requires_grad = False
def freeze_token_type_embeddings(student, args):
    """Stop gradient updates on the student's token-type embeddings.

    Only RoBERTa students carry token-type embeddings; any other student
    type is a silent no-op.
    """
    if args.student_type != "roberta":
        return
    student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    """Parse command-line arguments, then set up and run the distillation.

    Builds the teacher tokenizer/model and the student model described by
    the CLI flags, loads the pre-binarized dataset (plus token counts for
    MLM), and hands everything to ``Distiller`` for training.
    """
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in coonjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distilation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    # NOTE(review): action="store_false" means passing --group_by_size
    # *disables* grouping (the attribute defaults to True) — the help
    # text reads the other way around; confirm against upstream usage.
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        # only the master process manages the dump directory
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite it"
                    "Use `--force` if you want to overwrite it"
                )
            else:
                shutil.rmtree(args.dump_path)
        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")
        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    # the student reuses the teacher's vocabulary, so only the teacher
    # tokenizer is instantiated; special-token ids travel through `args`
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)
    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)
        # rarer tokens get a higher masking weight (XLM-style smoothing)
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)
    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    # teacher and student must share vocabulary and geometry so the
    # distillation losses line up element-wise
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
if __name__ == "__main__":
main()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class ResourceGroupsOperations(object):
    """ResourceGroupsOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Shared service-client configuration (subscription id, api version,
        # accept-language, long-running-operation timeout, ...).
        self.config = config

    def list_resources(
            self, resource_group_name, filter=None, expand=None, top=None, custom_headers=None, raw=False, **operation_config):
        """Get all the resources for a resource group.

        :param resource_group_name: The resource group with the resources to
         get.
        :type resource_group_name: str
        :param filter: The filter to apply on the operation.
        :type filter: str
        :param expand: The $expand query parameter
        :type expand: str
        :param top: The number of results to return. If null is passed,
         returns all resources.
        :type top: int
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`GenericResourcePaged
         <azure.mgmt.resource.resources.models.GenericResourcePaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Paging callback handed to GenericResourcePaged; invoked once per
        # page.  With no next_link it builds the initial request, otherwise
        # it just follows the server-supplied continuation URL.
        def internal_paging(next_link=None, raw=False):

            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/resources'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                if expand is not None:
                    query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
                if top is not None:
                    query_parameters['$top'] = self._serialize.query("top", top, 'int')
                query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

            else:
                # Continuation URL already carries all query parameters.
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response: the paged object drives internal_paging
        # lazily as the caller iterates.
        deserialized = models.GenericResourcePaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.GenericResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized

    def check_existence(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Checks whether a resource group exists.

        :param resource_group_name: The name of the resource group to check.
         The name is case insensitive.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: bool
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request (HEAD: no response body expected)
        request = self._client.head(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [204, 404]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        # 204 means the group exists; 404 means it does not.
        deserialized = (response.status_code == 204)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def create_or_update(
            self, resource_group_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Creates a resource group.

        :param resource_group_name: The name of the resource group to create
         or update.
        :type resource_group_name: str
        :param parameters: Parameters supplied to the create or update a
         resource group.
        :type parameters: :class:`ResourceGroup
         <azure.mgmt.resource.resources.models.ResourceGroup>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`ResourceGroup
         <azure.mgmt.resource.resources.models.ResourceGroup>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'ResourceGroup')

        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)

        # 201 = created, 200 = updated; both carry a ResourceGroup body.
        if response.status_code not in [201, 200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 201:
            deserialized = self._deserialize('ResourceGroup', response)
        if response.status_code == 200:
            deserialized = self._deserialize('ResourceGroup', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def delete(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Deletes a resource group.

        When you delete a resource group, all of its resources are also
        deleted. Deleting a resource group deletes all of its template
        deployments and currently stored operations.

        :param resource_group_name: The name of the resource group to delete.
         The name is case insensitive.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        # Deletion is a long-running operation: the three closures below are
        # handed to AzureOperationPoller, which polls until completion.
        def long_running_send():
            # Initial DELETE request.
            request = self._client.delete(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the status URL the service returned.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            if response.status_code not in [202, 200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response

        if raw:
            # raw=True bypasses the poller: send once, wrap the response.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)

    def get(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Gets a resource group.

        :param resource_group_name: The name of the resource group to get. The
         name is case insensitive.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`ResourceGroup
         <azure.mgmt.resource.resources.models.ResourceGroup>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('ResourceGroup', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def patch(
            self, resource_group_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Updates a resource group.

        Resource groups can be updated through a simple PATCH operation to a
        group address. The format of the request is the same as that for
        creating a resource group. If a field is unspecified, the current value
        is retained.

        :param resource_group_name: The name of the resource group to update.
         The name is case insensitive.
        :type resource_group_name: str
        :param parameters: Parameters supplied to update a resource group.
        :type parameters: :class:`ResourceGroup
         <azure.mgmt.resource.resources.models.ResourceGroup>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`ResourceGroup
         <azure.mgmt.resource.resources.models.ResourceGroup>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'ResourceGroup')

        # Construct and send request
        request = self._client.patch(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('ResourceGroup', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def export_template(
            self, resource_group_name, resources=None, options=None, custom_headers=None, raw=False, **operation_config):
        """Captures the specified resource group as a template.

        :param resource_group_name: The name of the resource group to export
         as a template.
        :type resource_group_name: str
        :param resources: The IDs of the resources. The only supported string
         currently is '*' (all resources). Future updates will support
         exporting specific resources.
        :type resources: list of str
        :param options: The export template options. Supported values include
         'IncludeParameterDefaultValue', 'IncludeComments' or
         'IncludeParameterDefaultValue, IncludeComments
        :type options: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`ResourceGroupExportResult
         <azure.mgmt.resource.resources.models.ResourceGroupExportResult>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Bundle the two optional arguments into the request model.
        parameters = models.ExportTemplateRequest(resources=resources, options=options)

        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/exportTemplate'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'ExportTemplateRequest')

        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('ResourceGroupExportResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def list(
            self, filter=None, top=None, custom_headers=None, raw=False, **operation_config):
        """Gets all the resource groups for a subscription.

        :param filter: The filter to apply on the operation.
        :type filter: str
        :param top: The number of results to return. If null is passed,
         returns all resource groups.
        :type top: int
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`ResourceGroupPaged
         <azure.mgmt.resource.resources.models.ResourceGroupPaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Paging callback; same pattern as list_resources above.
        def internal_paging(next_link=None, raw=False):

            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourcegroups'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                if top is not None:
                    query_parameters['$top'] = self._serialize.query("top", top, 'int')
                query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.ResourceGroupPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.ResourceGroupPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
| |
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from synapse.api.constants import EventTypes
from synapse.api.errors import AuthError
from synapse.util.logutils import log_function
from synapse.util.async import run_on_reactor, ObservableDeferred
from synapse.types import StreamToken
import synapse.metrics
import logging
logger = logging.getLogger(__name__)

metrics = synapse.metrics.get_metrics_for(__name__)

# Counter for events pushed out to listeners; registered here, incremented
# where events are delivered (not visible in this chunk).
notified_events_counter = metrics.register_counter("notified_events")
# TODO(paul): Should be shared somewhere
def count(func, l):
    """Return the number of items in l for which func returns true."""
    return sum(1 for item in l if func(item))
class _NotificationListener(object):
""" This represents a single client connection to the events stream.
The events stream handler will have yielded to the deferred, so to
notify the handler it is sufficient to resolve the deferred.
"""
__slots__ = ["deferred"]
def __init__(self, deferred):
self.deferred = deferred
class _NotifierUserStream(object):
    """This represents a user connected to the event stream.

    It tracks the most recent stream token for that user.
    At a given point a user may have a number of streams listening for
    events.

    This listener will also keep track of which rooms it is listening in
    so that it can remove itself from the indexes in the Notifier class.
    """

    def __init__(self, user, rooms, current_token, time_now_ms,
                 appservice=None):
        # Stored as a plain string regardless of what object was passed in.
        self.user = str(user)
        self.appservice = appservice
        self.rooms = set(rooms)
        self.current_token = current_token
        # Last time (ms) this stream was notified; used by
        # Notifier.remove_expired_streams to cull idle streams.
        self.last_notified_ms = time_now_ms

        # Observable so many listeners can wait on a single deferred.
        self.notify_deferred = ObservableDeferred(defer.Deferred())

    def notify(self, stream_key, stream_id, time_now_ms):
        """Notify any listeners for this user of a new event from an
        event source.

        Args:
            stream_key(str): The stream the event came from.
            stream_id(str): The new id for the stream the event came from.
            time_now_ms(int): The current time in milliseconds.
        """
        self.current_token = self.current_token.copy_and_advance(
            stream_key, stream_id
        )
        self.last_notified_ms = time_now_ms
        # Swap in a fresh deferred *before* firing the old one, so callbacks
        # that immediately re-listen attach to the new deferred rather than
        # the one being resolved.
        noify_deferred = self.notify_deferred
        self.notify_deferred = ObservableDeferred(defer.Deferred())
        noify_deferred.callback(self.current_token)

    def remove(self, notifier):
        """ Remove this listener from all the indexes in the Notifier
        it knows about.
        """
        for room in self.rooms:
            lst = notifier.room_to_user_streams.get(room, set())
            lst.discard(self)

        notifier.user_to_user_stream.pop(self.user)

        if self.appservice:
            notifier.appservice_to_user_streams.get(
                self.appservice, set()
            ).discard(self)

    def count_listeners(self):
        # Number of observers currently waiting on this stream's deferred.
        return len(self.notify_deferred.observers())

    def new_listener(self, token):
        """Returns a deferred that is resolved when there is a new token
        greater than the given token.
        """
        if self.current_token.is_after(token):
            # The stream has already advanced past `token`: fire at once.
            return _NotificationListener(defer.succeed(self.current_token))
        else:
            return _NotificationListener(self.notify_deferred.observe())
class Notifier(object):
    """ This class is responsible for notifying any listeners when there are
    new events available for it.

    Primarily used from the /events stream.
    """

    # Streams with no listeners and no notifications for this long are
    # culled by remove_expired_streams (10 minutes, in milliseconds).
    UNUSED_STREAM_EXPIRY_MS = 10 * 60 * 1000
    def __init__(self, hs):
        self.hs = hs

        # user_id (str) -> _NotifierUserStream
        self.user_to_user_stream = {}
        # room_id -> set of _NotifierUserStream
        self.room_to_user_streams = {}
        # appservice -> set of _NotifierUserStream
        self.appservice_to_user_streams = {}

        self.event_sources = hs.get_event_sources()
        self.store = hs.get_datastore()
        # Room events waiting for earlier stream ids to be persisted; see
        # _notify_pending_new_room_events.
        self.pending_new_room_events = []

        self.clock = hs.get_clock()

        hs.get_distributor().observe(
            "user_joined_room", self._user_joined_room
        )

        # Periodically cull streams nobody is listening to any more.
        self.clock.looping_call(
            self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY_MS
        )

        # This is not a very cheap test to perform, but it's only executed
        # when rendering the metrics page, which is likely once per minute at
        # most when scraping it.
        def count_listeners():
            all_user_streams = set()

            for x in self.room_to_user_streams.values():
                all_user_streams |= x
            for x in self.user_to_user_stream.values():
                all_user_streams.add(x)
            for x in self.appservice_to_user_streams.values():
                all_user_streams |= x

            return sum(stream.count_listeners() for stream in all_user_streams)
        metrics.register_callback("listeners", count_listeners)

        metrics.register_callback(
            "rooms",
            lambda: count(bool, self.room_to_user_streams.values()),
        )
        metrics.register_callback(
            "users",
            lambda: len(self.user_to_user_stream),
        )
        metrics.register_callback(
            "appservices",
            lambda: count(bool, self.appservice_to_user_streams.values()),
        )
    @log_function
    @defer.inlineCallbacks
    def on_new_room_event(self, event, room_stream_id, max_room_stream_id,
                          extra_users=[]):
        """ Used by handlers to inform the notifier something has happened
        in the room, room event wise.

        This triggers the notifier to wake up any listeners that are
        listening to the room, and any listeners for the users in the
        `extra_users` param.

        The events can be persisted out of order. The notifier will wait
        until all previous events have been persisted before notifying
        the client streams.
        """
        # NOTE(review): mutable default argument (extra_users=[]); harmless
        # here since it is never mutated, but fragile — consider None.
        yield run_on_reactor()

        self.pending_new_room_events.append((
            room_stream_id, event, extra_users
        ))
        self._notify_pending_new_room_events(max_room_stream_id)
def _notify_pending_new_room_events(self, max_room_stream_id):
"""Notify for the room events that were queued waiting for a previous
event to be persisted.
Args:
max_room_stream_id(int): The highest stream_id below which all
events have been persisted.
"""
pending = self.pending_new_room_events
self.pending_new_room_events = []
for room_stream_id, event, extra_users in pending:
if room_stream_id > max_room_stream_id:
self.pending_new_room_events.append((
room_stream_id, event, extra_users
))
else:
self._on_new_room_event(event, room_stream_id, extra_users)
    def _on_new_room_event(self, event, room_stream_id, extra_users=[]):
        """Notify any user streams that are interested in this room event"""
        # NOTE(review): mutable default argument (extra_users=[]); never
        # mutated here, but consider a None default.
        # poke any interested application service.
        self.hs.get_handlers().appservice_handler.notify_interested_services(
            event
        )

        app_streams = set()

        for appservice in self.appservice_to_user_streams:
            # TODO (kegan): Redundant appservice listener checks?
            #   App services will already be in the room_to_user_streams set, but
            #   that isn't enough. They need to be checked here in order to
            #   receive *invites* for users they are interested in. Does this
            #   make the room_to_user_streams check somewhat obsolete?
            if appservice.is_interested(event):
                app_user_streams = self.appservice_to_user_streams.get(
                    appservice, set()
                )
                app_streams |= app_user_streams

        self.on_new_event(
            "room_key", room_stream_id,
            users=extra_users,
            rooms=[event.room_id],
            extra_streams=app_streams,
        )
@defer.inlineCallbacks
@log_function
def on_new_event(self, stream_key, new_token, users=[], rooms=[],
extra_streams=set()):
""" Used to inform listeners that something has happend event wise.
Will wake up all listeners for the given users and rooms.
"""
yield run_on_reactor()
user_streams = set()
for user in users:
user_stream = self.user_to_user_stream.get(str(user))
if user_stream is not None:
user_streams.add(user_stream)
for room in rooms:
user_streams |= self.room_to_user_streams.get(room, set())
time_now_ms = self.clock.time_msec()
for user_stream in user_streams:
try:
user_stream.notify(stream_key, new_token, time_now_ms)
except:
logger.exception("Failed to notify listener")
    @defer.inlineCallbacks
    def wait_for_events(self, user, timeout, callback, room_ids=None,
                        from_token=StreamToken("s0", "0", "0", "0", "0")):
        """Wait until the callback returns a non empty response or the
        timeout fires.
        """
        # NOTE(review): the default from_token is built once at import time;
        # safe only while StreamToken instances are never mutated — confirm.
        user = str(user)
        user_stream = self.user_to_user_stream.get(user)
        if user_stream is None:
            # First waiter for this user: build a stream tracking the rooms
            # they are in and index it for future notifications.
            appservice = yield self.store.get_app_service_by_user_id(user)
            current_token = yield self.event_sources.get_current_token()
            if room_ids is None:
                rooms = yield self.store.get_rooms_for_user(user)
                room_ids = [room.room_id for room in rooms]
            user_stream = _NotifierUserStream(
                user=user,
                rooms=room_ids,
                appservice=appservice,
                current_token=current_token,
                time_now_ms=self.clock.time_msec(),
            )
            self._register_with_keys(user_stream)

        result = None
        if timeout:
            # Will be set to a _NotificationListener that we'll be waiting on.
            # Allows us to cancel it.
            listener = None

            def timed_out():
                # Fires after `timeout` ms; cancelling the listener's
                # deferred breaks the wait loop below via CancelledError.
                if listener:
                    listener.deferred.cancel()
            timer = self.clock.call_later(timeout/1000., timed_out)

            prev_token = from_token
            while not result:
                try:
                    current_token = user_stream.current_token

                    result = yield callback(prev_token, current_token)
                    if result:
                        break

                    # Now we wait for the _NotifierUserStream to be told there
                    # is a new token.
                    # We need to supply the token we supplied to callback so
                    # that we don't miss any current_token updates.
                    prev_token = current_token
                    listener = user_stream.new_listener(prev_token)
                    yield listener.deferred
                except defer.CancelledError:
                    # Timeout fired and cancelled the listener deferred.
                    break

            self.clock.cancel_call_later(timer, ignore_errs=True)
        else:
            # No timeout: check exactly once against the current token.
            current_token = user_stream.current_token
            result = yield callback(from_token, current_token)

        defer.returnValue(result)
@defer.inlineCallbacks
def get_events_for(self, user, pagination_config, timeout,
                   only_room_events=False,
                   is_guest=False, guest_room_id=None):
    """ For the given user and rooms, return any new events for them. If
    there are no new events wait for up to `timeout` milliseconds for any
    new events to happen before returning.

    If `only_room_events` is `True` only room events will be returned.
    """
    from_token = pagination_config.from_token
    if not from_token:
        from_token = yield self.event_sources.get_current_token()

    limit = pagination_config.limit

    room_ids = []
    if is_guest:
        if guest_room_id:
            # BUG FIX: _is_world_readable is an inlineCallbacks method and
            # therefore returns a Deferred.  The previous code tested the
            # Deferred's truthiness directly, which always succeeded, so
            # guests were never rejected.  Yield to get the actual boolean.
            world_readable = yield self._is_world_readable(guest_room_id)
            if not world_readable:
                raise AuthError(403, "Guest access not allowed")
            room_ids = [guest_room_id]
    else:
        rooms = yield self.store.get_rooms_for_user(user.to_string())
        room_ids = [room.room_id for room in rooms]

    @defer.inlineCallbacks
    def check_for_updates(before_token, after_token):
        # Invoked by wait_for_events each time the stream token advances.
        # Returns (events, (start_token, end_token)), or None when there is
        # nothing new to report yet.
        if not after_token.is_after(before_token):
            defer.returnValue(None)

        events = []
        end_token = from_token
        for name, source in self.event_sources.sources.items():
            keyname = "%s_key" % name
            before_id = getattr(before_token, keyname)
            after_id = getattr(after_token, keyname)
            if before_id == after_id:
                # This source has not advanced; nothing to fetch from it.
                continue
            if only_room_events and name != "room":
                continue
            new_events, new_key = yield source.get_new_events(
                user=user,
                from_key=getattr(from_token, keyname),
                limit=limit,
                is_guest=is_guest,
                room_ids=room_ids,
            )

            if name == "room":
                # Strip out events this client is not allowed to see.
                room_member_handler = self.hs.get_handlers().room_member_handler
                new_events = yield room_member_handler._filter_events_for_client(
                    user.to_string(),
                    new_events,
                    is_guest=is_guest,
                    require_all_visible_for_guests=False
                )

            events.extend(new_events)
            end_token = end_token.copy_and_replace(keyname, new_key)

        if events:
            defer.returnValue((events, (from_token, end_token)))
        else:
            defer.returnValue(None)

    result = yield self.wait_for_events(
        user, timeout, check_for_updates, room_ids=room_ids, from_token=from_token
    )

    if result is None:
        # Timed out with no new events: report an empty window.
        result = ([], (from_token, from_token))

    defer.returnValue(result)
@defer.inlineCallbacks
def _is_world_readable(self, room_id):
    """Return True if the room's history visibility is "world_readable"."""
    visibility_state = yield self.hs.get_state_handler().get_current_state(
        room_id, EventTypes.RoomHistoryVisibility
    )
    is_readable = bool(
        visibility_state
        and visibility_state.content.get("history_visibility") == "world_readable"
    )
    defer.returnValue(is_readable)
@log_function
def remove_expired_streams(self):
    """Drop user streams that have no listeners and have not been
    notified within UNUSED_STREAM_EXPIRY_MS."""
    cutoff_ts = self.clock.time_msec() - self.UNUSED_STREAM_EXPIRY_MS
    expired = [
        stream
        for stream in self.user_to_user_stream.values()
        # Streams with active listeners are kept regardless of age.
        if not stream.count_listeners() and stream.last_notified_ms < cutoff_ts
    ]
    for stream in expired:
        stream.remove(self)
@log_function
def _register_with_keys(self, user_stream):
    """Index the stream by user, by each of its rooms, and (if present)
    by its appservice, so notifications can find it."""
    self.user_to_user_stream[user_stream.user] = user_stream

    for room_id in user_stream.rooms:
        self.room_to_user_streams.setdefault(room_id, set()).add(user_stream)

    appservice = user_stream.appservice
    if appservice:
        self.appservice_to_user_stream.setdefault(appservice, set()).add(user_stream)
def _user_joined_room(self, user, room_id):
    """Add the user's existing stream (if any) to the room's stream set."""
    user_id = str(user)
    stream = self.user_to_user_stream.get(user_id)
    if stream is None:
        # No active stream for this user; nothing to index.
        return
    self.room_to_user_streams.setdefault(room_id, set()).add(stream)
    stream.rooms.add(room_id)
| |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script validating field trial configs.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""
import copy
import io
import json
import sys
from collections import OrderedDict
# Tell depot_tools to run this presubmit under Python 3.
USE_PYTHON3 = True

# Keys permitted in an experiment dict; '//0'..'//9' are comment slots
# (see the comment-key loop in PrettyPrint).
VALID_EXPERIMENT_KEYS = [
    'name', 'forcing_flag', 'params', 'enable_features', 'disable_features',
    'min_os_version', '//0', '//1', '//2', '//3', '//4', '//5', '//6', '//7',
    '//8', '//9'
]

# The only JSON file this presubmit expects to find in the folder.
FIELDTRIAL_CONFIG_FILE_NAME = 'fieldtrial_testing_config.json'
def PrettyPrint(contents):
    """Pretty prints a fieldtrial configuration.

    Args:
      contents: File contents as a string.

    Returns:
      Pretty printed file contents.
    """
    # We have a preferred ordering of the fields (e.g. platforms on top). This
    # code loads everything into OrderedDicts and then tells json to dump it out.
    # The JSON dumper will respect the dict ordering.
    #
    # The ordering is as follows:
    # {
    #     'StudyName Alphabetical': [
    #         {
    #             'platforms': [sorted platforms]
    #             'groups': [
    #                 {
    #                     name: ...
    #                     forcing_flag: "forcing flag string"
    #                     params: {sorted dict}
    #                     enable_features: [sorted features]
    #                     disable_features: [sorted features]
    #                     (Unexpected extra keys will be caught by the validator)
    #                 }
    #             ],
    #             ....
    #         },
    #         ...
    #     ]
    #     ...
    # }
    config = json.loads(contents)
    ordered_config = OrderedDict()
    for key in sorted(config.keys()):
        study = copy.deepcopy(config[key])
        ordered_study = []
        for experiment_config in study:
            ordered_experiment_config = OrderedDict([('platforms',
                                                      experiment_config['platforms']),
                                                     ('experiments', [])])
            for experiment in experiment_config['experiments']:
                ordered_experiment = OrderedDict()
                # Comment keys ('//0'..'//9') always come first, in index order.
                for index in range(0, 10):
                    comment_key = '//' + str(index)
                    if comment_key in experiment:
                        ordered_experiment[comment_key] = experiment[comment_key]
                ordered_experiment['name'] = experiment['name']
                if 'forcing_flag' in experiment:
                    ordered_experiment['forcing_flag'] = experiment['forcing_flag']
                if 'params' in experiment:
                    ordered_experiment['params'] = OrderedDict(
                        sorted(experiment['params'].items(), key=lambda t: t[0]))
                if 'enable_features' in experiment:
                    ordered_experiment['enable_features'] = \
                        sorted(experiment['enable_features'])
                if 'disable_features' in experiment:
                    ordered_experiment['disable_features'] = \
                        sorted(experiment['disable_features'])
                # FIX: copy min_os_version *before* appending the dict to the
                # experiments list.  The old code appended first and mutated the
                # already-published dict afterwards, which worked only through
                # aliasing and was fragile under refactoring.
                if 'min_os_version' in experiment:
                    ordered_experiment['min_os_version'] = experiment['min_os_version']
                ordered_experiment_config['experiments'].append(ordered_experiment)
            ordered_study.append(ordered_experiment_config)
        ordered_config[key] = ordered_study
    return json.dumps(
        ordered_config, sort_keys=False, indent=4, separators=(',', ': ')) + '\n'
def ValidateData(json_data, file_path, message_type):
    """Validates the format of a fieldtrial configuration.

    Args:
      json_data: Parsed JSON object representing the fieldtrial config.
      file_path: String representing the path to the JSON file.
      message_type: Type of message from |output_api| to return in the case of
          errors/warnings.

    Returns:
      A list of |message_type| messages. In the case of all tests passing with
      no warnings/errors, this will return [].
    """
    def _CreateMessage(message_format, *args):
        # Bind the file path and message type so lower-level validators only
        # need a format string.
        return _CreateMalformedConfigMessage(message_type, file_path,
                                             message_format, *args)

    if not isinstance(json_data, dict):
        return _CreateMessage('Expecting dict')
    for study, experiment_configs in json_data.items():
        messages = _ValidateEntry(study, experiment_configs, _CreateMessage)
        if messages:
            # Stop at the first study with problems.
            return messages
    return []
def _ValidateEntry(study, experiment_configs, create_message_fn):
"""Validates one entry of the field trial configuration."""
if not isinstance(study, str):
return create_message_fn('Expecting keys to be string, got %s', type(study))
if not isinstance(experiment_configs, list):
return create_message_fn('Expecting list for study %s', study)
# Add context to other messages.
def _CreateStudyMessage(message_format, *args):
suffix = ' in Study[%s]' % study
return create_message_fn(message_format + suffix, *args)
for experiment_config in experiment_configs:
warnings = _ValidateExperimentConfig(experiment_config, _CreateStudyMessage)
if warnings:
return warnings
return []
def _ValidateExperimentConfig(experiment_config, create_message_fn):
"""Validates one config in a configuration entry."""
if not isinstance(experiment_config, dict):
return create_message_fn('Expecting dict for experiment config')
if not 'experiments' in experiment_config:
return create_message_fn('Missing valid experiments for experiment config')
if not isinstance(experiment_config['experiments'], list):
return create_message_fn('Expecting list for experiments')
for experiment_group in experiment_config['experiments']:
warnings = _ValidateExperimentGroup(experiment_group, create_message_fn)
if warnings:
return warnings
if not 'platforms' in experiment_config:
return create_message_fn('Missing valid platforms for experiment config')
if not isinstance(experiment_config['platforms'], list):
return create_message_fn('Expecting list for platforms')
supported_platforms = [
'android', 'android_weblayer', 'android_webview', 'chromeos',
'chromeos_lacros', 'ios', 'linux', 'mac', 'windows'
]
experiment_platforms = experiment_config['platforms']
unsupported_platforms = list(
set(experiment_platforms).difference(supported_platforms))
if unsupported_platforms:
return create_message_fn('Unsupported platforms %s', unsupported_platforms)
return []
def _ValidateExperimentGroup(experiment_group, create_message_fn):
"""Validates one group of one config in a configuration entry."""
name = experiment_group.get('name', '')
if not name or not isinstance(name, str):
return create_message_fn('Missing valid name for experiment')
# Add context to other messages.
def _CreateGroupMessage(message_format, *args):
suffix = ' in Group[%s]' % name
return create_message_fn(message_format + suffix, *args)
if 'params' in experiment_group:
params = experiment_group['params']
if not isinstance(params, dict):
return _CreateGroupMessage('Expected dict for params')
for (key, value) in iter(params.items()):
if not isinstance(key, str) or not isinstance(value, str):
return _CreateGroupMessage('Invalid param (%s: %s)', key, value)
for key in experiment_group.keys():
if key not in VALID_EXPERIMENT_KEYS:
return _CreateGroupMessage('Key[%s] is not a valid key', key)
return []
def _CreateMalformedConfigMessage(message_type, file_path, message_format,
*args):
"""Returns a list containing one |message_type| with the error message.
Args:
message_type: Type of message from |output_api| to return in the case of
errors/warnings.
message_format: The error message format string.
file_path: The path to the config file.
*args: The args for message_format.
Returns:
A list containing a message_type with a formatted error message and
'Malformed config file [file]: ' prepended to it.
"""
error_message_format = 'Malformed config file %s: ' + message_format
format_args = (file_path,) + args
return [message_type(error_message_format % format_args)]
def CheckPretty(contents, file_path, message_type):
    """Validates the pretty printing of fieldtrial configuration.

    Args:
      contents: File contents as a string.
      file_path: String representing the path to the JSON file.
      message_type: Type of message from |output_api| to return in the case of
          errors/warnings.

    Returns:
      A list of |message_type| messages. In the case of all tests passing with
      no warnings/errors, this will return [].
    """
    if contents == PrettyPrint(contents):
        return []
    return [
        message_type('Pretty printing error: Run '
                     'python3 testing/variations/PRESUBMIT.py %s' % file_path)
    ]
def CommonChecks(input_api, output_api):
    """Validates and pretty-checks every affected fieldtrial config file.

    Args:
      input_api: Presubmit InputApi instance.
      output_api: Presubmit OutputApi instance.

    Returns:
      A list of presubmit errors; [] if all checks pass.
    """
    # Only consider modified (non-deleted) .json files in this folder.
    affected_files = input_api.AffectedFiles(
        include_deletes=False,
        file_filter=lambda x: x.LocalPath().endswith('.json'))
    for f in affected_files:
        if not f.LocalPath().endswith(FIELDTRIAL_CONFIG_FILE_NAME):
            # Any other .json file here is unexpected and fails the presubmit.
            return [
                output_api.PresubmitError(
                    '%s is the only json file expected in this folder. If new jsons '
                    'are added, please update the presubmit process with proper '
                    'validation. ' % FIELDTRIAL_CONFIG_FILE_NAME
                )
            ]
        contents = input_api.ReadFile(f)
        try:
            json_data = input_api.json.loads(contents)
            # Structural validation first, then formatting.
            result = ValidateData(
                json_data,
                f.AbsoluteLocalPath(),
                output_api.PresubmitError)
            if len(result):
                return result
            result = CheckPretty(contents, f.LocalPath(), output_api.PresubmitError)
            if len(result):
                return result
        except ValueError:
            # json.loads raises ValueError (JSONDecodeError) on malformed input.
            return [
                output_api.PresubmitError('Malformed JSON file: %s' % f.LocalPath())
            ]
    return []
def CheckChangeOnUpload(input_api, output_api):
    """Presubmit hook run at upload time; delegates to CommonChecks."""
    return CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
    """Presubmit hook run at commit time; delegates to CommonChecks."""
    return CommonChecks(input_api, output_api)
def main(argv):
    """Pretty-prints the JSON config file given as argv[1], in place.

    Args:
      argv: Command-line arguments; argv[1] is the path of the file to format.
    """
    with io.open(argv[1], encoding='utf-8') as f:
        content = f.read()
    pretty = PrettyPrint(content)
    # BUG FIX: the output handle was previously created with a bare
    # io.open(...).write(...) and never closed; use a context manager so the
    # file is flushed and closed deterministically.
    with io.open(argv[1], 'wb') as f:
        f.write(pretty.encode('utf-8'))


if __name__ == '__main__':
    sys.exit(main(sys.argv))
| |
#!/usr/bin/python -tt
'''
File: hg_decoder.py
Date: January 8, 2015
Author: Avneesh Saluja (avneesh@cs.cmu.edu)
Based on compute_hg.py code in spectral-scfg package
Description: modified version of bottom-up parser
where we use a variant of CKY+, leveraging a trie
data structure of the grammar to come up with a hypergraph
representation of the composition of the input sentence
FST and the grammar. The hypergraph is then converted
to a grammar where NTs are indexed/decorated with their span.
The grammar is written out in .gz files separated by sentence.
arg0: dictionary of parameters (output of train.py step)
arg1: output directory for per-sentence grammars
arg2: number of processes to use for decoding
'''
import sys, commands, string, time, gzip, re, getopt, math, cPickle
import multiprocessing as mp
import numpy as np
from scipy.special import expit
from trie import trie, ActiveItem, HyperGraph
from sparsecontainer import ContextExtractor
from model import *
def compute_signdep_means(training):
    """Per-column means of the strictly positive and strictly negative
    entries of a 2-D array; 0.0 is used when a column has no entries of
    the given sign.  Returns (pos_means, neg_means)."""
    pos_means = []
    neg_means = []
    # Iterating the transpose walks the columns of `training`.
    for column in training.T:
        positives = column[column > 0]
        negatives = column[column < 0]
        pos_means.append(np.sum(positives) / len(positives) if len(positives) > 0 else 0.0)
        neg_means.append(np.sum(negatives) / len(negatives) if len(negatives) > 0 else 0.0)
    return pos_means, neg_means
def compute_feature_thresholds(model, tokens_loc):
    """Compute sign-dependent mean thresholds used for feature discretization.

    Loads the pickled token/context matrices written at training time from
    tokens_loc and stores per-dimension positive/negative means on the model
    via set_context_means and (for non-VW models) set_phraserep_means.
    """
    tokens_fh = open(tokens_loc, 'rb')
    tokens = cPickle.load(tokens_fh)
    left_con = cPickle.load(tokens_fh)
    right_con = cPickle.load(tokens_fh)
    tokens_fh.close()
    # Project the raw left/right context matrices into the model's low-rank space.
    lr_mat_l, lr_mat_r = model.context.compute_lowrank_training_contexts(left_con.get_token_matrix(), right_con.get_token_matrix())
    training_data = np.concatenate((lr_mat_l, lr_mat_r), axis=1)
    context_pos_means, context_neg_means = None, None
    try:
        # Models with an extra projection (context_parameters) compute their
        # means in that lower-dimensional space.
        lowdim_context = training_data.dot(model.context_parameters)
        context_pos_means, context_neg_means = compute_signdep_means(lowdim_context)
    except AttributeError:
        # No context_parameters attribute: use the concatenated space directly.
        context_pos_means, context_neg_means = compute_signdep_means(training_data)
    model.set_context_means(context_pos_means, context_neg_means)
    if not model.isvw(): #for CCA and GLM models, where parameters are transparent
        lowdim_pps = tokens.get_token_matrix().dot(model.parameters.transpose())
        pp_pos_means, pp_neg_means = compute_signdep_means(lowdim_pps)
        model.set_phraserep_means(pp_pos_means, pp_neg_means)
'''
do these as global variables because we want to share them amongst processes
if we pass them to the threads, it makes things much slower.
'''
# Parse command-line flags; letters followed by ':' take an argument.
(opts, args) = getopt.getopt(sys.argv[1:], 'bd:cClLn:oprR')
# Defaults for all flag-controlled globals (read by compute_scores et al.).
normalize = "none"
represent = False
discretize = ""
covariance = False
plain = False
no_cca = False
log_score = False
only_pos = False
print_best = False
print_rank = False
logistic = False
for opt in opts:
    if opt[0] == '-n':
        normalize = opt[1]
    elif opt[0] == '-r':
        represent = True
    elif opt[0] == '-d':
        discretize = opt[1] #location of sparse tokens and context
    elif opt[0] == '-p': #plain - no markup
        plain = True
    elif opt[0] == '-c': #no cca_on/off feature
        no_cca = True
    elif opt[0] == '-C': #covariance/second order feature for vectors
        covariance = True
    elif opt[0] == '-l': #put scores in log space
        log_score = True
    elif opt[0] == '-o': #only positive scores (0-1)
        only_pos = True
    elif opt[0] == '-b': #print best
        print_best = True
    elif opt[0] == '-R': #print rank
        print_rank = True
    elif opt[0] == '-L': #add logistic function score too
        logistic = True
# Sanity-check flag combinations; invalid combinations are downgraded with a
# warning rather than aborting.
if normalize != "none" and normalize != "exp" and normalize != "range":
    sys.stderr.write("Error! normalization option not recognized (valid options are 'none', 'exp', and 'range'). Setting to 'none'\n")
    normalize = "none"
if discretize and not represent:
    sys.stderr.write("Error! Cannot have discretize on ('-d') without representations being printed out ('-r'); Turning it off\n")
    discretize = False
if covariance and not represent:
    sys.stderr.write("Error! Cannot have covariance features on ('-C') without representations being printed out ('-r'); Turning it off\n")
    covariance = False
if not only_pos and log_score: #would result in domain errors
    sys.stderr.write("Error! Cannot have log score without restricting scores to be positive; disabling log score\n")
    log_score = False
if log_score and no_cca: #if we are not writing scores, then log scores will be ignored
    sys.stderr.write("Warning! Ignoring log_score ('-l') option, since no_cca flag ('-c') is on\n")
# Positional arguments: trained model parameters, per-sentence grammar output
# directory, and the number of worker processes.
param_filename = args[0]
output_dir = args[1]
num_process = int(args[2])
# Load the model and context extractor pickled by the train.py step.
param_fh = open(param_filename, 'rb')
model = cPickle.load(param_fh)
extractor = cPickle.load(param_fh)
param_fh.close()
# Grammar: one rule per training phrase pair plus the two ITG glue rules
# (monotone and inverted).
phrase_pairs = ["[X] ||| " + pair for pair in model.get_tokens()]
phrase_pairs.append("[X] ||| [X,1] [X,2] ||| [1] [2]")
phrase_pairs.append("[X] ||| [X,1] [X,2] ||| [2] [1]")
#dev_grammars=args[3]
grammar_trie = trie(phrase_pairs)
print "Data structures from training stage loaded"
if discretize != "": #compute relevant statistics for discretization
    compute_feature_thresholds(model, discretize)
'''
declaration of list that maintains which sentences have failed across all processes
'''
def init(shared_list):
    """Pool initializer: publish the manager-backed failure list as a module
    global so worker processes can append failed sentence IDs to it."""
    global failed_sentences
    failed_sentences = shared_list
def main():
    """Read one sentence per line from stdin and parse each in a worker
    pool, writing a per-sentence gzipped grammar file."""
    # Manager-backed list so child processes can record failed sentence IDs.
    failed_sentences = mp.Manager().list()
    pool = mp.Pool(processes=num_process, initializer=init, initargs=(failed_sentences,))
    for sent_num, line in enumerate(sys.stdin):
        out_filename = output_dir + "/grammar.%d.gz"%sent_num
        #parse(line.strip().split(), out_filename, sent_num)
        pool.apply_async(parse, (line.strip().split(), out_filename, sent_num))
    pool.close()
    pool.join()
    print "number of failed sentences: %d"%(len(failed_sentences))
'''
main function for bottom-up parser with Earley-style rules.
The active chart is first seeded with pointers to the root
node of a source rules trie. Then, in a bottom-up manner,
we advance the dots for each cell item, and then convert completed
rules in a cell to the passive chart, or deal with NTs in active
items just proved. At the end, we look at the passive items in
the cell corresponding to the sentence to see if [S] is there.
'''
def parse(words, out_filename, lineNum):
    """Parse one sentence bottom-up (CKY+ over the grammar trie) and, on
    success, score and write its per-sentence grammar.

    Args:
      words: tokenized source sentence (list of strings)
      out_filename: path of the gzipped grammar file for this sentence
      lineNum: sentence ID (index of the line in the input stream)
    """
    start = time.clock()
    N = len(words)
    goal_idx = False    # becomes True once a passive item spans (0, N)
    hg = HyperGraph()
    active = {}     # (i, j) -> list of ActiveItem (dotted rules in progress)
    passive = {}    # (i, j) -> list of hypergraph node IDs proved over the span
    nodemap = {}    # (i, j) -> {LHS category: hypergraph node ID}
    seedActiveChart(N, active)
    dev_rules = {}
    #dev psg read in (for oracle)
    #dev_filename = dev_grammars + "/grammar.%d.gz"%lineNum
    #dev_fh = gzip.open(dev_filename, 'rb')
    #for line in dev_fh:
    #    src,tgt,align = line.strip().split(' ||| ')
    #    key = (src, align)
    #    dev_rules[key] = tgt
    #dev_fh.close()
    # Bottom-up: spans by increasing length, left to right within a length.
    for l in range(1, N+1): #length
        for i in range(0, N+1-l): #left index of span
            j = i + l #right index of span
            advanceDotsForAllItemsInCell(i, j, words, active, passive)
            cell = active[(i,j)][:] if (i,j) in active else [] #list of active items
            for activeItem in cell:
                rules = activeItem.srcTrie.getRules()
                for rule in rules:
                    applyRule(i, j, rule, activeItem.tailNodeVec, hg, nodemap, passive)
            if j < N: #the below function includes NTs that were just proved into new binaries, which is unnecessary for the end token
                extendActiveItems(i, i, j, active, passive) #dealing with NTs that were just proved
    if (0,N) in passive: #we have spanned the entire input sentence
        passiveItems = passive[(0,N)] #list of indices
        if len(passiveItems) > 0: #we have at least one node that covers the entire sentence
            goal_idx = True
    parseTime = time.clock() - start
    if goal_idx: #i.e., if we have created at least 1 node in the HG corresponding to goal
        print "Parsed sentence; length: %d words, time taken: %.2f sec, sentence ID: %d"%(len(words), parseTime, lineNum)
        start = time.clock()
        compute_scores(hg, words, out_filename, dev_rules)
        cca_time = time.clock() - start
        print "SUCESS! Time taken to compute scores: %.2f sec, sentence ID: %d"%(cca_time, lineNum)
    else:
        print "FAIL; length: %d words, time taken: %.2f sec, sentence ID: %d; sentence: %s"%(len(words), parseTime, lineNum, ' '.join(words))
        # Shared list installed by init(); records IDs of unparseable sentences.
        failed_sentences.append(lineNum)
    sys.stdout.flush()
def compute_scores(hg, words, out_filename, dev_rules):
    """Score all phrase pairs licensed by the hypergraph and write the
    per-sentence grammar (gzipped) to out_filename.

    NOTE(review): indentation in this function was reconstructed; the score
    decoration statements are assumed to be nested under the 'not no_cca'
    guard -- confirm against the original layout.
    """
    rules_out = []
    phrases_to_score = []
    phrases_for_oracle = [] #this has been added
    # Pass 1: walk the hypergraph; emit glue/pass-through rules immediately and
    # queue scorable phrase pairs (with their context vectors) for pass 2.
    for edge in hg.edges_: #first, go through hypergraph process rules that can be written out; store scorable rules for scoring later
        head = hg.nodes_[edge.headNode]
        left_idx = head.i
        right_idx = head.j
        LHS = head.cat[:-1] + "_%d_%d]"%(left_idx, right_idx) if not plain else head.cat
        if len(edge.tailNodes) > 0: #ITG rules
            src_decorated = decorate_src_rule(hg, edge.id)
            monotone = "[1] [2] ||| Glue=1"
            inverse = "[2] [1] ||| Glue=1 Inverse=1"
            rules_out.append("%s ||| %s ||| %s"%(LHS, src_decorated, monotone))
            rules_out.append("%s ||| %s ||| %s"%(LHS, src_decorated, inverse))
        else:
            if edge.rule == "<unk>": #phrase is not in translation inventory
                rules_out.append("%s ||| <unk> ||| %s ||| PassThrough=1"%(LHS, words[left_idx]))
            else:
                left_con_words, right_con_words = extractor.extract_context(words, left_idx, right_idx-1) #same as before, either real-valued arrays or lists of words
                left_con_lr, right_con_lr = model.get_context_rep_vec(left_con_words, right_con_words) if extractor.is_repvec() else model.get_context_rep(left_con_words, right_con_words)
                if len(edge.rule.split()) == 1: #unigram, so write pass-through to be compatible with cdec
                    rules_out.append("%s ||| %s ||| %s ||| PassThrough=1"%(LHS, edge.rule, edge.rule))
                if left_con_lr is not None and right_con_lr is not None: #valid context
                    concat_con_lr = np.concatenate((left_con_lr, right_con_lr))
                    phrases_to_score.append((LHS, edge.rule, concat_con_lr))
                    phrases_for_oracle.append((edge.rule, left_idx, right_idx-1)) #this has been added
                else: #this occurs if all context words are stop words - may want to edit what happens in this branch condition in light of recent changes
                    left_null = left_con_lr is None
                    null_context_side = "left" if left_null else "right"
                    null_context = ' '.join(left_con_words) if left_null else ' '.join(right_con_words)
                    print "WARNING: Phrase: '%s'; Context on %s ('%s') was filtered; all context words are stop words.\n"%(' '.join(words[left_idx:right_idx]), null_context_side, null_context)
                    # NOTE(review): 'applicable_rules' is not defined anywhere
                    # in this file -- this branch raises NameError if reached.
                    # TODO: confirm the intended source of target phrases here.
                    for target_phrase in applicable_rules: #is this a good thing to do by default?
                        phrase_pair = ' ||| '.join([edge.rule, target_phrase])
                        rules_out.append("%s ||| %s ||| cca_on=1 cca_score=0"%(LHS, phrase_pair))
    # Pass 2: score the queued phrase pairs, in one batch for VW models or
    # one at a time for CCA/GLM models.
    scored_pps_all = []
    if model.isvw(): #score all phrases in sentence together
        if len(phrases_to_score) > 0:
            LHS, src_phrases, context_reps = zip(*phrases_to_score)
            context_reps = np.vstack(context_reps)
            sent_num = int(out_filename.split("/")[-1].split('.')[1])
            scored_pps_all = model.score_all(context_reps, src_phrases, sent_num, represent)
    else:
        for LHS, src_phrase, context_rep in phrases_to_score:
            scored_pps = model.score(context_rep, src_phrase, represent) #scored_pps gives idx
            scored_pps_all.append(scored_pps)
    # Pass 3: normalize scores, rank candidates, and render final rule strings.
    for idx,pps_to_score in enumerate(scored_pps_all): #final processing of scored phrases
        LHS = phrases_to_score[idx][0]
        #src_phrase, left_idx, right_idx_incl = phrases_for_oracle[idx]
        #key = (src_phrase, "%d-%d"%(left_idx, right_idx_incl))
        #source_oracle = True if key in dev_rules else False
        #target_oracle = dev_rules[key] if key in dev_rules else ""
        scored_pps = []
        if normalize == "exp":
            scored_pps = normalize_exp(pps_to_score)
        elif normalize == "range":
            scored_pps = normalize_range(pps_to_score, only_pos, log_score)
        else:
            if only_pos:
                # Map cosine similarities into (0, 1) via the angle.
                for pp,score,reps in pps_to_score:
                    if score is None:
                        scored_pps.append((pp, score, reps))
                    else:
                        scored_pps.append((pp, 1-np.arccos(score)/math.pi, reps))
            else:
                scored_pps = pps_to_score
        sorted_pps = sorted(scored_pps, key=lambda x: x[1], reverse=True)
        best_pp = sorted_pps[0][0]
        rank = 0
        for pp, score, rep in sorted_pps:
            rank += 1
            rule_str = "%s ||| %s ||| "%(LHS, pp)
            if not no_cca: #meaning we can decorate
                rule_str += "cca_off=1" if score is None else "cca_on=1"
                if not plain and score is not None:
                    if log_score:
                        score = -math.log10(score)
                    elif print_rank:
                        score = 1./rank
                    rule_str += " cca_score=%.3f"%score if log_score else " cca_score=%.5g"%score
                    if print_best and pp == best_pp:
                        rule_str += " cca_best=1"
                    if logistic:
                        rule_str += " cca_log=%.3f"%expit(score)
            if represent: #if outputting context and/or phrase pair representations
                assert rep != ""
                if covariance:
                    rep = compute_second_order(rep);
                rule_str += " %s"%rep
            #if source_oracle:
            #    rule_str += " oracle_source=1"
            #target = pp.split(' ||| ')[1]
            #if target == target_oracle:
            #    rule_str += " oracle_target=1"
            rules_out.append(rule_str)
    rules_out = list(set(rules_out)) #makes rules unique
    out_fh = gzip.open(out_filename, 'wb')
    for rule in rules_out:
        out_fh.write("%s\n"%rule)
    # Top-level rule rewriting [S] to the [X] node spanning the whole sentence.
    top_rule = "[S] ||| [X_0_%d] ||| [1] ||| 0\n"%len(words) if not plain else "[S] ||| [X] ||| [1] ||| 0\n"
    out_fh.write(top_rule)
    out_fh.close()
def compute_second_order(rep):
    """Flattened outer product of the context ('c_*') and phrase-pair ('pp_*')
    feature values in `rep`, rendered as space-separated 'opN=value' features."""
    fields = rep.split()

    def _values(prefix):
        # Collect the numeric values of features whose name starts with prefix.
        return np.array([float(f.split('=')[1]) for f in fields
                         if f.split('=')[0].split('_')[0] == prefix])

    outer = np.outer(_values('c'), _values('pp')).flatten()
    #str_rep.append("op_dim%d=%.3g"%(idx,val))
    return ' '.join("op%d=%.3g" % (i, v) for i, v in enumerate(outer))
'''
no need to pass only_pos to normalize_exp because result of exp normalization is always positive
'''
def normalize_exp(scored_pps):
    """Softmax-normalize the scores in a list of (pp, score, reps) triples.

    Triples whose score is None pass through unchanged.  Results are always
    positive, so callers never need the only_pos handling here.
    """
    total = sum(math.exp(s) for _, s, _ in scored_pps if s is not None)
    # total is 0 only when every score is None, in which case it is never used.
    log_total = math.log(total) if total != 0 else total
    normalized = []
    for pp, score, reps in scored_pps:
        if score is None:
            normalized.append((pp, score, reps))
        else:
            # exp(score - log total) == exp(score) / sum_i exp(score_i)
            normalized.append((pp, math.exp(score - log_total), reps))
    return normalized
def normalize_range(scored_pps, only_pos, log_score):
    """Rescale scores by their range around the mean.

    Triples with score None pass through unchanged.  When only_pos is set the
    values are shifted to be non-negative; when log_score is set 1 is added so
    a later log stays in-domain.  If all real scores are identical (zero
    range), each gets the uniform value 1/count.

    FIX: min/max were previously tracked with +/-1000000.0 sentinel initial
    values, which silently produced wrong results for scores outside that
    range; use the true min/max of the observed scores instead.
    """
    real_scores = [s for _, s, _ in scored_pps if s is not None]
    if not real_scores:
        # Nothing to normalize; return the input unchanged.
        return scored_pps
    count = len(real_scores)
    average = sum(real_scores) / count
    minimum = min(real_scores)
    spread = max(real_scores) - minimum
    shift = (minimum - average) / spread if only_pos and spread != 0 else 0
    plus_one = 1 if log_score else 0
    normalized = []
    for pp, score, reps in scored_pps:
        if score is None:
            normalized.append((pp, score, reps))
        elif spread != 0:
            normalized.append((pp, (score - average) / spread - shift + plus_one, reps))
        else:
            normalized.append((pp, 1. / count, reps))
    return normalized
def decorate_src_rule(hg, inEdgeID):
    """Decorate each non-terminal in the edge's source rule with the span of
    the corresponding child node (e.g. '[X]' -> '[X_i_j]'), unless the global
    `plain` flag disables markup."""
    nt_pattern = re.compile(r'\[([^]]*)\]')
    edge = hg.edges_[inEdgeID]
    remaining_tail = edge.tailNodes[:]
    decorated = []
    for token in edge.rule.split():
        if nt_pattern.match(token):
            # Non-terminal: consume the next tail node and attach its span.
            child = hg.nodes_[remaining_tail.pop(0)]
            if plain:
                decorated.append(child.cat)
            else:
                decorated.append(child.cat[:-1] + "_%d_%d]" % (child.i, child.j))
        else:
            decorated.append(token)
    return ' '.join(decorated)
'''
Function called before the sentence is parsed;
places a pointer to the source rules trie root
along the diagonal of the active chart.
'''
def seedActiveChart(N, active):
    """Place a pointer to the grammar trie root on every diagonal cell of the
    active chart.  Note: hasRuleForSpan is not consulted here."""
    for start in range(N):
        root_item = ActiveItem(grammar_trie.getRoot())
        active.setdefault((start, start), []).append(root_item)
'''
Function that "advances the dot" (in a dotted rule)
on position to the right for all active items in the cell
defined by (start,end). We first perform online binarization
by looping through all split points in the span and then see if
advancing the dot happened to cover a non-terminal (this is handled
in extendActiveItems). We then check and see if advancing the dot
happened to cover a new rule with the additional terminal.
'''
def advanceDotsForAllItemsInCell(start, end, words, active, passive):
    """Advance the dot one position to the right for every active item over
    (start, end): first binarize over every split point (extendActiveItems),
    then extend over the final terminal, with OOV fallbacks for length-1 spans."""
    for split in range(start + 1, end):
        extendActiveItems(start, split, end, active, passive)

    word = words[end - 1]
    for item in (active[(start, end - 1)] if (start, end - 1) in active else []):
        advanced = item.extendTerminal(word)
        if advanced is not None:
            active.setdefault((start, end), []).append(advanced)
        if end - start == 1:
            # OOV handling for single-word spans.
            if advanced is None:
                active.setdefault((start, end), []).append(item.extendOOV())
            elif len(advanced.srcTrie.getRules()) == 0:
                # A rule starts with this OOV word but none actually covers it.
                active.setdefault((start, end), []).append(item.extendOOV())
'''
function that extends active items over non-terminals.
'''
def extendActiveItems(start, split, end, active, passive):
    """Extend every active item over (start, split) with each passive
    non-terminal proved over (split, end), appending successes to the
    (start, end) cell."""
    left_items = active[(start, split)] if (start, split) in active else []
    proved_ids = passive[(split, end)] if (split, end) in passive else []
    for item in left_items:
        for node_id in proved_ids:
            extended = item.extendNonTerminal(node_id)
            if extended is not None:
                active.setdefault((start, end), []).append(extended)
'''
Given a rule, does the necessary book-keeping to
convert that rule to the passive chart, and adds the
appropriate nodes and edges to the hypergraph.
'''
def applyRule(start, end, rule, tailNodes, hg, nodemap, passive):
    """Book-keeping for a completed rule: add its edge to the hypergraph,
    reuse (or create) the head node for its LHS over (start, end), record the
    node in the passive chart, and connect the edge to the head node."""
    edge = hg.addEdge(rule[1], tailNodes)  # rule[1] is the source RHS
    cat2NodeMap = nodemap[(start, end)] if (start, end) in nodemap else {}
    LHS = rule[0]
    if LHS in cat2NodeMap:
        # LHS is either [X] or [S] --> test if this ever fires?
        node = hg.nodes_[cat2NodeMap[LHS]]
    else:
        node = hg.addNode(LHS, start, end)
        cat2NodeMap[LHS] = node.id
        nodemap[(start, end)] = cat2NodeMap
        passive.setdefault((start, end), []).append(node.id)
    hg.connectEdgeToHeadNode(edge, node)
# Script entry point: reads sentences from stdin (see main()).
if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.