'modelPath': 'phase_9/models/cogHQ/PaintMixer',
'modelScale': Point3(0.8, 0.8, 0.8),
'motion': 'easeInOut',
'offset': Point3(7, 0, 0),
'period': 8.0,
'phaseShift': 0.0,
'shaftScale': 2.5,
'waitPercent': 0.1},
10020: {'type': 'paintMixer', 'name': 'second',
'comment': '',
'parentEntId': 10015,
'pos': Point3(16.01, -6.47, 23),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'floorName': 'PaintMixerFloorCollision',
'modelPath': 'phase_9/models/cogHQ/PaintMixer',
'modelScale': Point3(0.8, 0.8, 0.8),
'motion': 'easeInOut',
'offset': Point3(-4, -8, -15),
'period': 8.0,
'phaseShift': 0.0,
'shaftScale': 2.5,
'waitPercent': 0.1},
10054: {'type': 'paintMixer', 'name': 'first',
'comment': '',
'parentEntId': 10015,
'pos': Point3(-10, -26.1, 8),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'floorName': 'PaintMixerFloorCollision',
'modelPath': 'phase_9/models/cogHQ/PaintMixer',
'modelScale': Point3(0.8, 0.8, 0.8),
'motion': 'easeInOut',
'offset': Point3(15, 0, 0),
'period': 8.0,
'phaseShift': 0.0,
'shaftScale': 1,
'waitPercent': 0.1},
20008: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 13,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
20009: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 17,
'pathScale': 1.0},
20010: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 21,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 36,
'pathScale': 1.0},
20012: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 21,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 34,
'pathScale': 1.0},
20015: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 21,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 37,
'pathScale': 1.0},
20038: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 15,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 38,
'pathScale': 1.0},
20039: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 7,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 12,
'pathScale': 1.0},
20040: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(41.5, 33.5, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
20042: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(15, 34, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
20044: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(1.5, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'pathIndex': 6,
'pathScale': 1.0},
20045: {'type': 'path', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 1),
'pathIndex': 7,
'pathScale': 1.0},
20049: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 7,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 13,
'pathScale': 1.0},
20051: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(1, -24, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 42,
'pathScale': 1.0},
20053: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 8,
'pathScale': 1.0},
20055: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 9,
'pathScale': 1.0},
20059: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 10,
'pathScale': 1.0},
20061: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 11,
'pathScale': 1.0},
20067: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 15,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 39,
'pathScale': 1.0},
20068: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 15,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 40,
'pathScale': 1.0},
20069: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 9,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 5,
'pathScale': 1.0},
20070: {'type': 'path', 'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 9,
'pos': Point3(1, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 5,
'pathScale': 1.0},
20073: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 21,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 35,
'pathScale': 1.0},
20075: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 7,
'pos': Point3(4, 4, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 14,
'pathScale': 1.0},
20076: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 8,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 15,
'pathScale': 1.0},
20077: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 8,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 16,
'pathScale': 1.0},
20078: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 18,
'pathScale': 1.0},
20079: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
20084: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 9,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 41,
'pathScale': 1.0},
20097: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 34,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 19,
'pathScale': 1.0},
20098: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 34,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 20,
'pathScale': 1.0},
20099: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 34,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 21,
'pathScale': 1.0},
20100: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 33,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 22,
'pathScale': 1.0},
20101: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 33,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 23,
'pathScale': 1.0},
20102: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 33,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 24,
'pathScale': 1.0},
20103: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 32,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 25,
'pathScale': 1.0},
20104: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 32,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 26,
'pathScale': 1.0},
20105: {'type': 'path', 'name': '<unnamed>',
'comment': '',
'parentEntId': 32,
'pos': Point3(0, 0, 0),
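Each 'paintMixer' record above is purely declarative: 'parentEntId' picks the node to attach to, 'pos'/'hpr'/'scale' place the model, and 'offset', 'period', 'phaseShift', and 'motion' describe an oscillation between 'pos' and 'pos' + 'offset'. Below is a minimal sketch of how such a record could drive an ease-in-out oscillation; the function and the exact easing curve are illustrative assumptions, not taken from the engine, and the 'waitPercent' dwell time is ignored for brevity:

import math

def paint_mixer_pos(spec, t):
    # Fraction of the current cycle, shifted by 'phaseShift'.
    cycle = (t / spec['period'] + spec['phaseShift']) % 1.0
    # Cosine ease-in-out: glides 0 -> 1 -> 0 over one cycle ('easeInOut').
    blend = 0.5 - 0.5 * math.cos(2.0 * math.pi * cycle)
    return tuple(p + blend * o for p, o in zip(spec['pos'], spec['offset']))

spec = {'pos': (16.01, -6.47, 23.0), 'offset': (-4.0, -8.0, -15.0),
        'period': 8.0, 'phaseShift': 0.0}
print(paint_mixer_pos(spec, t=2.0))  # a quarter cycle in: halfway to the offset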
'''
t[0] = ' ' + str(t[1]) + ' '
def p_ORDER2(t):
''' orden : ASC '''
t[0] = ' ' + str(t[1]) + ' '
#? ####################################################################
# TODO DATA EXPRESSION - MISSING
#? ####################################################################
def p_sin_some_any(t):
'''sin_some_any : SOME '''
t[0] = ' ' + str(t[1]) + ' '
def p_sin_some_any2(t):
'''sin_some_any : ANY '''
t[0] = ' ' + str(t[1]) + ' '
#? ####################################################################
# TODO SELECT EXPRESSION
#? ####################################################################
def p_opcion_select1(t):
' opcion_select : PARA select_insrt PARC '
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '
def p_opcion_select2(t):
' opcion_select : expresion '
t[0] = ' ' + str(t[1]) + ' '
def p_opcion_select3(t):
'opcion_select : funciones_select '
t[0] = ' ' + str(t[1]) + ' '
def p_opcion_select4(t):
'opcion_select : POR '
t[0] = ' ' + str(t[1]) + ' '
def p_opcion_select5(t):
' opcion_select : ID PUNTO POR '
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '
def p_greatest_insrt(t):
''' greatest_insrt : GREATEST PARA greatest_val PARC
| LEAST PARA greatest_val PARC'''
cadena = ""
for i in t[3]:
cadena+=str(i)
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(cadena) + ' '+ str(t[4]) + ' '
def p_greatest_insrt1(t):
' greatest_val : greatest_val COMA expresion_dato '
t[1].append(t[2])
t[1].append(t[3])
t[0] = t[1]
def p_greatest_insrt2(t):
' greatest_val : expresion_dato'
t[0] = [t[1]]
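# The greatest_val pair above is the standard PLY idiom for building a
# comma-separated list: the base rule seeds a Python list and the
# left-recursive rule appends to it. A self-contained sketch of the same
# idiom follows (it must live in its own module, since ply.yacc picks up
# every p_ function it finds; the tokens and grammar are illustrative,
# not from this parser):
#
#   import ply.lex as lex
#   import ply.yacc as yacc
#
#   tokens = ('NUMBER', 'COMMA')
#   t_COMMA = r','
#   t_ignore = ' '
#   def t_NUMBER(t):
#       r'\d+'
#       t.value = int(t.value)
#       return t
#   def t_error(t):
#       t.lexer.skip(1)
#
#   def p_list_many(t):
#       'list : list COMMA NUMBER'
#       t[1].append(t[3])
#       t[0] = t[1]
#   def p_list_one(t):
#       'list : NUMBER'
#       t[0] = [t[1]]
#   def p_error(t):
#       pass
#
#   lexer = lex.lex()
#   parser = yacc.yacc()
#   print(parser.parse('1, 2, 3'))  # -> [1, 2, 3]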
##################################EXPRESSIONS#####################################
def p_funciones_select(t):
''' funciones_select : ABS PARA expresion PARC
| CBRT PARA expresion PARC
| CEIL PARA expresion PARC
| CEILING PARA expresion PARC
| DEGREES PARA expresion PARC
| EXP PARA expresion PARC
| FACTORIAL PARA expresion PARC
| FLOOR PARA expresion PARC
| LN PARA expresion PARC
| LOG PARA expresion PARC
| RADIANS PARA expresion PARC
| ROUND PARA expresion PARC
| SIGN PARA expresion PARC
| SQRT PARA expresion PARC
| TRUNC PARA expresion PARC
| ACOS PARA expresion PARC
| ASIND PARA expresion PARC
| ATAN PARA expresion PARC
| ATAND PARA expresion PARC
| COS PARA expresion PARC
| COT PARA expresion PARC
| COTD PARA expresion PARC
| SIN PARA expresion PARC
| SIND PARA expresion PARC
| TAN PARA expresion PARC
| TAND PARA expresion PARC
| SINH PARA expresion PARC
| COSH PARA expresion PARC
| TANH PARA expresion PARC
| ASINH PARA expresion PARC
| ATANH PARA expresion PARC
| COSD PARA expresion PARC
| ACOSH PARA expresion PARC
| ASIN PARA expresion PARC
| ACOSD PARA expresion PARC
| LENGTH PARA string_type PARC
| TRIM PARA string_type PARC
| SHA256 PARA string_type PARC'''
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '
def p_funciones_select__15(t):
''' funciones_select : DIV PARA expresion COMA expresion PARC '''
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '
def p_funciones_select__16(t):
''' funciones_select : GCD PARA expresion COMA expresion PARC
| MOD PARA expresion COMA expresion PARC
| POWER PARA expresion COMA expresion PARC
| TRUNC PARA expresion COMA ENTERO PARC
| ATAN2 PARA expresion COMA expresion PARC
| ATAN2D PARA expresion COMA expresion PARC
| CONVERT PARA string_type AS TIPO_DATO PARC'''
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '+ str(t[6]) + ' '
def p_funciones_select__18(t):
''' funciones_select : SUBSTRING PARA string_type COMA expresion COMA expresion PARC
| SUBSTR PARA string_type COMA expresion COMA expresion PARC
| GET_BYTE PARA string_type D_DOSPTS BYTEA COMA ENTERO PARC
| ENCODE PARA string_type D_DOSPTS BYTEA COMA formato_texto PARC
| DECODE PARA string_type D_DOSPTS BYTEA COMA formato_texto PARC'''
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '+ str(t[6]) + ' '+ str(t[7]) + ' '+ str(t[8]) + ' '
def p_funciones_select__10(t):
''' funciones_select : WIDTH_BUCKET PARA expresion COMA expresion COMA expresion COMA expresion PARC
| SET_BYTE PARA string_type D_DOSPTS BYTEA COMA ENTERO COMA ENTERO PARC'''
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '+ str(t[6]) + ' '+ str(t[7]) + ' '+ str(t[8]) + ' '+ str(t[9]) + ' '+ str(t[10]) + ' '
def p_funciones_select__11(t):
''' funciones_select : PI PARA PARC
| RANDOM PARA PARC'''
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '
def p_formato_texto(t):
''' formato_texto : ESCAPE '''
t[0] = ' ' + str(t[1]) + ' '
def p_formato_texto_hex(t):
'formato_texto : HEX'
t[0] = ' ' + str(t[1]) + ' '
def p_formato_texto_base64(t):
' formato_texto : BASE64'
t[0] = ' ' + str(t[1]) + ' '
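# Every action in this section rebuilds the matched SQL fragment as text
# with single spaces between tokens. A worked sketch for the rule
# 'funciones_select : ABS PARA expresion PARC' (the token values are
# hypothetical stand-ins for what ply.yacc passes in via the slice):
#
#   t = [None, 'ABS', '(', 'precio', ')']   # t[0] is the result slot
#   t[0] = ' ' + str(t[1]) + ' ' + str(t[2]) + ' ' + str(t[3]) + ' ' + str(t[4]) + ' '
#   print(repr(t[0]))                       # ' ABS ( precio ) '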
#? ###################################################################
# TODO WHERE EXPRESSION
#? ###################################################################
def p_expresion_where2(t):
'expresion_where : expresion_logica_w'
t[0] = ' ' + str(t[1]) + ' '
def p_expresion_where(t):
''' expresion_where : expresion_dato NOT IN PARA select_insrt PARC '''
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '+ str(t[6]) + ' '
def p_expresion_where_11(t):
''' expresion_where : expresion_dato IN PARA select_insrt PARC
| NOT EXISTS PARA select_insrt PARC '''
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '
def p_expresion_where_3(t):
''' expresion_where : expresion_dato NOT BETWEEN SYMMETRIC expresion_dato AND expresion_dato'''
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '+ str(t[6]) + ' '+ str(t[7]) + ' '
def p_expresion_wherea(t):
'''expresion_wherea : ABS PARA expresion PARC
| LENGTH PARA string_type PARC
| CBRT PARA expresion PARC
| CEIL PARA expresion PARC
| CEILING PARA expresion PARC
| sin_some_any PARA select_insrt PARC'''
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '
def p_expresion_wherea_1(t):
'''expresion_wherea : SUBSTRING PARA string_type COMA expresion COMA expresion PARC
| SUBSTR PARA string_type COMA expresion COMA expresion PARC'''
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '+ str(t[6]) + ' '+ str(t[7]) + ' '+ str(t[8]) + ' '
def p_expresion_wherea_2(t):
'''expresion_wherea : TRIM PARA string_type D_DOSPTS BYTEA FROM string_type D_DOSPTS BYTEA PARC '''
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '+ str(t[6]) + ' '+ str(t[7]) + ' '+ str(t[8]) + ' '+ str(t[9]) + ' '+ str(t[10]) + ' '
def p_expresion_wherea_3(t):
'''expresion_wherea : EXTRACT PARA extract_time FROM string_type PARC '''
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '+ str(t[6]) + ' '
def p_expresion_wherea2(t):
''' expresion_wherea : expresion '''
t[0] = ' ' + str(t[1]) + ' '
#? #########################################################
#ANCHOR EXPRESSIONS ADDED TO THE WHERE CLAUSE
#? ##################################################
def p_expresion_wherea3(t):
    ''' expresion_wherea : LOWER PARA string_type PARC '''
    t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '
def p_expresion_wherea4(t):
    ''' expresion_wherea : ID PARA ID PARC'''
    t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '
def p_expresion_isnull_(t):
''' expresion_whereb : expresion_dato IS NULL '''
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '
def p_expresion_isnull_2(t):
' expresion_whereb : expresion_dato ISNULL'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '
def p_expresion_notnull(t):
' expresion_whereb : expresion_dato NOTNULL'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '
def p_expresion_true(t):
' expresion_whereb : expresion_dato IS TRUE'
    t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '
# coding: utf-8
"""
Licensed to Cloudera, Inc. under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. Cloudera, Inc. licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pprint
import re # noqa: F401
import six
class ConfigurationProperty(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'config_key': 'str',
'name': 'str',
'description': 'str',
'sensitive': 'bool',
'required': 'bool',
'basic': 'bool',
'type': 'str',
'widget': 'str',
'default_value': 'str',
'list_separator': 'str',
'placeholder': 'str',
'valid_values': 'list[ConfigurationPropertyValue]'
}
attribute_map = {
'config_key': 'configKey',
'name': 'name',
'description': 'description',
'sensitive': 'sensitive',
'required': 'required',
'basic': 'basic',
'type': 'type',
'widget': 'widget',
'default_value': 'defaultValue',
'list_separator': 'listSeparator',
'placeholder': 'placeholder',
'valid_values': 'validValues'
}
def __init__(self, config_key=None, name=None, description=None, sensitive=None, required=None, basic=None, type=None, widget=None, default_value=None, list_separator=None, placeholder=None, valid_values=None): # noqa: E501
"""ConfigurationProperty - a model defined in Swagger""" # noqa: E501
self._config_key = None
self._name = None
self._description = None
self._sensitive = None
self._required = None
self._basic = None
self._type = None
self._widget = None
self._default_value = None
self._list_separator = None
self._placeholder = None
self._valid_values = None
self.discriminator = None
self.config_key = config_key
if name is not None:
self.name = name
if description is not None:
self.description = description
if sensitive is not None:
self.sensitive = sensitive
if required is not None:
self.required = required
if basic is not None:
self.basic = basic
if type is not None:
self.type = type
if widget is not None:
self.widget = widget
if default_value is not None:
self.default_value = default_value
if list_separator is not None:
self.list_separator = list_separator
if placeholder is not None:
self.placeholder = placeholder
if valid_values is not None:
self.valid_values = valid_values
@property
def config_key(self):
"""Gets the config_key of this ConfigurationProperty. # noqa: E501
Configuration property key # noqa: E501
:return: The config_key of this ConfigurationProperty. # noqa: E501
:rtype: str
"""
return self._config_key
@config_key.setter
def config_key(self, config_key):
"""Sets the config_key of this ConfigurationProperty.
Configuration property key # noqa: E501
:param config_key: The config_key of this ConfigurationProperty. # noqa: E501
:type: str
"""
if config_key is None:
raise ValueError("Invalid value for `config_key`, must not be `None`") # noqa: E501
self._config_key = config_key
@property
def name(self):
"""Gets the name of this ConfigurationProperty. # noqa: E501
Configuration property name # noqa: E501
:return: The name of this ConfigurationProperty. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ConfigurationProperty.
Configuration property name # noqa: E501
:param name: The name of this ConfigurationProperty. # noqa: E501
:type: str
"""
self._name = name
@property
def description(self):
"""Gets the description of this ConfigurationProperty. # noqa: E501
Configuration property description # noqa: E501
:return: The description of this ConfigurationProperty. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this ConfigurationProperty.
Configuration property description # noqa: E501
:param description: The description of this ConfigurationProperty. # noqa: E501
:type: str
"""
self._description = description
@property
def sensitive(self):
"""Gets the sensitive of this ConfigurationProperty. # noqa: E501
Whether this property is sensitive # noqa: E501
:return: The sensitive of this ConfigurationProperty. # noqa: E501
:rtype: bool
"""
return self._sensitive
@sensitive.setter
def sensitive(self, sensitive):
"""Sets the sensitive of this ConfigurationProperty.
Whether this property is sensitive # noqa: E501
:param sensitive: The sensitive of this ConfigurationProperty. # noqa: E501
:type: bool
"""
self._sensitive = sensitive
@property
def required(self):
"""Gets the required of this ConfigurationProperty. # noqa: E501
Whether this property is required # noqa: E501
:return: The required of this ConfigurationProperty. # noqa: E501
:rtype: bool
"""
return self._required
@required.setter
def required(self, required):
"""Sets the required of this ConfigurationProperty.
Whether this property is required # noqa: E501
:param required: The required of this ConfigurationProperty. # noqa: E501
:type: bool
"""
self._required = required
@property
def basic(self):
"""Gets the basic of this ConfigurationProperty. # noqa: E501
Whether this property is basic # noqa: E501
:return: The basic of this ConfigurationProperty. # noqa: E501
:rtype: bool
"""
return self._basic
@basic.setter
def basic(self, basic):
"""Sets the basic of this ConfigurationProperty.
Whether this property is basic # noqa: E501
:param basic: The basic of this ConfigurationProperty. # noqa: E501
:type: bool
"""
self._basic = basic
@property
def type(self):
"""Gets the type of this ConfigurationProperty. # noqa: E501
Configuration property type # noqa: E501
:return: The type of this ConfigurationProperty. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ConfigurationProperty.
Configuration property type # noqa: E501
:param type: The type of this ConfigurationProperty. # noqa: E501
:type: str
"""
allowed_values = ["BOOLEAN", "INTEGER", "DOUBLE", "STRING"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def widget(self):
"""Gets the widget of this ConfigurationProperty. # noqa: E501
Widget used to display this property # noqa: E501
:return: The widget of this ConfigurationProperty. # noqa: E501
:rtype: str
"""
return self._widget
@widget.setter
def widget(self, widget):
"""Sets the widget of this ConfigurationProperty.
Widget used to display this property # noqa: E501
:param widget: The widget of this ConfigurationProperty. # noqa: E501
:type: str
"""
allowed_values = ["RADIO", "CHECKBOX", "TEXT", "PASSWORD", "NUMBER", "TEXTAREA", "FILE", "LIST", "OPENLIST", "MULTI", "OPENMULTI"] # noqa: E501
if widget not in allowed_values:
raise ValueError(
"Invalid value for `widget` ({0}), must be one of {1}" # noqa: E501
.format(widget, allowed_values)
)
self._widget = widget
@property
def default_value(self):
"""Gets the default_value of this ConfigurationProperty. # noqa: E501
Default value for this property # noqa: E501
:return: The default_value of this ConfigurationProperty. # noqa: E501
:rtype: str
"""
return self._default_value
@default_value.setter
def default_value(self, default_value):
"""Sets the default_value of this ConfigurationProperty.
Default value for this property # noqa: E501
:param default_value: The default_value of this ConfigurationProperty. # noqa: E501
:type: str
"""
self._default_value = default_value
@property
def list_separator(self):
"""Gets the list_separator of this ConfigurationProperty. # noqa: E501
Character to use to separate lists # noqa: E501
:return: The list_separator of this ConfigurationProperty. # noqa: E501
:rtype: str
"""
return self._list_separator
@list_separator.setter
def list_separator(self, list_separator):
"""Sets the list_separator of this ConfigurationProperty.
Character to use to separate lists # noqa: E501
:param list_separator: The list_separator of this ConfigurationProperty. # noqa: E501
:type: str
"""
self._list_separator = list_separator
@property
def placeholder(self):
"""Gets the placeholder of this ConfigurationProperty. # noqa: E501
Placeholder value to use if the value is unset # noqa: E501
:return: The placeholder of this ConfigurationProperty. # noqa: E501
:rtype: str
"""
return self._placeholder
@placeholder.setter
def placeholder(self, placeholder):
"""Sets the placeholder of this ConfigurationProperty.
Placeholder value to use if the value is unset # noqa: E501
:param placeholder: The placeholder of this ConfigurationProperty. # noqa: E501
:type: str
"""
self._placeholder = placeholder
@property
def valid_values(self):
"""Gets the valid_values of this ConfigurationProperty. # noqa: E501
List of all valid values for this property # noqa: E501
:return: The valid_values of this ConfigurationProperty. # noqa: E501
:rtype: list[ConfigurationPropertyValue]
"""
return self._valid_values
@valid_values.setter
def valid_values(self, valid_values):
"""Sets the valid_values of this ConfigurationProperty.
List of all valid values for this property # noqa: E501
:param valid_values: The valid_values of this ConfigurationProperty. # noqa: E501
:type: list[ConfigurationPropertyValue]
"""
self._valid_values = valid_values
def to_dict(self):
"""Returns the model | |
= self.session.run(
self.ldif_gradients,
feed_dict={
self.target_class_ph: batch_gt_class,
self.sif_input: cur_vector,
self.optimizer_samples: batch_samples
})[0]
vis_this_time = vis >= 2 or (vis >= 1 and (i == 0 or i == step_count - 1))
print_this_time = verbosity >= 2 or (verbosity >= 1 and not i % 1000)
if vis_this_time or print_this_time:
loss = self.session.run(
self.optimizer_elt_loss,
feed_dict={
self.target_class_ph: batch_gt_class,
self.sif_input: cur_vector,
self.optimizer_samples: batch_samples
})
if ret_best:
lsum = np.sum(loss)
if lsum < min_loss:
min_loss = lsum
best_vec = cur_vector.copy()
# Assuming the loss is zero if a constraint is satisfied:
is_sat = self.optimizer_pc - np.count_nonzero(loss)
if print_this_time:
log.info('Step %i: Total loss: %s. Constraints %i/%i' %
(i, repr(np.sum(loss)), is_sat, self.optimizer_pc))
if vis_this_time:
self.vis_loss(
cur_vector,
gt_at_loss=gt_class,
loss=loss,
loss_positions=samples)
if target == 'all-eq':
mults = 42 * [1]
elif target == 'all':
mults = [0.001] + 3 * [0.001] + 6 * [0.0000001] + 32 * [50]
elif target == 'centers':
mults = [0.000] + 3 * [0.001] + 6 * [0.0000000] + 32 * [0]
elif target == 'radii':
mults = [0.000] + 3 * [0.000] + 6 * [0.0000001] + 32 * [0]
elif target == 'features':
mults = [0.000] + 3 * [0.000] + 6 * [0.0000000] + 32 * [50]
elif target == 'constants':
mults = [0.001] + 3 * [0.000] + 6 * [0.0000000] + 32 * [0]
else:
assert False
mults = np.array(mults).reshape([1, 1, 42])
velocity = momentum * velocity + mults * lr * grad
cur_vector = cur_vector - velocity
if verbosity >= 1:
log.info('Finished optimization.')
print_sat_count(cur_vector)
if ret_best:
cur_vector = best_vec
return np.reshape(cur_vector, self.unbatched_vector_shape)
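  # The update above ('velocity = momentum * velocity + mults * lr * grad')
  # is classic momentum (heavy-ball) gradient descent with a per-dimension
  # learning-rate mask. A standalone numpy restatement (lr, momentum, and
  # the objective are illustrative):
  #
  #   v = np.zeros(3)
  #   x = np.array([1.0, -2.0, 0.5])
  #   lr, momentum = 0.01, 0.9
  #   for _ in range(200):
  #       grad = 2.0 * x              # gradient of f(x) = ||x||^2
  #       v = momentum * v + lr * grad
  #       x = x - v
  #   # x is now close to the minimum at the origin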
def vis_loss(self, sif_vector, gt_at_loss, loss, loss_positions):
"""Visualizes the loss mid-optimization."""
loss = np.reshape(loss, [-1, 1])
gt_at_loss = np.reshape(gt_at_loss, [-1, 1])
assert gt_at_loss.shape[0] == loss.shape[0]
loss[gt_at_loss <= 0.5] = -loss[gt_at_loss <= 0.5]
loss_positions = np.reshape(loss_positions, [-1, 3])
arr = np.concatenate([loss_positions, loss], axis=1)
with py_util.py2_temporary_directory() as d:
sdf_path = f'{d}/a.sdf'
with file_util.open_file(sdf_path, 'wb') as f:
arr = arr.astype(np.float32)
arr.tofile(f)
m = self.extract_mesh(sif_vector, resolution=128)
m_path = f'{d}/m.ply'
file_util.write_mesh(m_path, m)
init_camera = ('1.0451 1.17901 0.630437 '
'-0.614259 -0.695319 -0.373119 '
'-0.547037 0.715996 -0.433705')
cmd = '%s/ptsview %s %s -camera %s' % (path_util.gaps_path(), sdf_path,
m_path, init_camera)
sp.check_output(cmd, shell=True)
def _grid_eval_cuda(self, sif_vector, resolution, extent):
"""Evaluates a SIF/LDIF densely on a voxel grid."""
log.verbose('Using custom CUDA kernel for evaluation.')
# First step: Get the path where the serialized occnet should be.
# The serialized occnet should be at whatever the checkpoint path is,
# but replace model.ckpt-[idx] with serialized-occnet-[idx].occnet
checkpoint_path = self.ckpt.abspath
log.info(f'Using checkpoint {checkpoint_path} to write OccNet file.')
assert 'model.ckpt-' in checkpoint_path
occnet_path = checkpoint_path.replace('model.ckpt-', 'serialized-occnet-')
occnet_path = occnet_path + '.occnet'
# Second step: If it isn't there, write it to disk.
if not os.path.isfile(occnet_path):
assert os.path.isdir(os.path.dirname(occnet_path))
if self.job.model_config.hparams.ipe == 't':
self.write_occnet_file(occnet_path)
else:
occnet_path = path_util.get_path_to_ldif_root(
) + '/ldif2mesh/extracted.occnet'
# Third step: open a temporary directory, and write the embedding.
# Make sure that the temp directories are deleted afterwards.
with py_util.py2_temporary_directory() as d:
rep_path = f'{d}/ldif.txt'
self.savetxt(sif_vector, rep_path)
# Pick the path to the output grd file:
grd_path = f'{d}/grid.grd'
# Fourth step: Get the path to the kernel
kernel_path = os.path.join(path_util.get_path_to_ldif_root(),
'ldif2mesh/ldif2mesh')
if not os.path.isfile(kernel_path):
raise ValueError(
f'There is no compiled CUDA executable at {kernel_path}.')
cmd = (f'CUDA_VISIBLE_DEVICES=0 {kernel_path} {rep_path} {occnet_path} '
f'{grd_path} -resolution {resolution}')
log.verbose(f'Executing command {cmd}')
# TODO(kgenova) Support extent as a flag
if extent != 0.75:
raise ValueError(
'Currently only 0.75 extent is supported on the '
'custom kernel. Please set use_inference_kernel to false for an'
f' extent of {extent}.')
# Fifth step: Invoke the kernel.
try:
cmd_result = sp.check_output(cmd, shell=True)
log.info(cmd_result.decode('utf-8').replace('\n', ''))
except sp.CalledProcessError as e:
if 'out of memory' in e.output.decode('utf-8'):
raise ValueError(
'The GPU does not have enough free memory left for the'
' inference kernel. Please reduce the fraction'
' reserved by tensorflow.')
elif 'no kernel image is available' in e.output.decode('utf-8'):
raise ValueError(
'It appears that the CUDA kernel was not built to your '
'gpu\'s architecture. Hopefully this is an easy fix. '
'Please go to developer.nvidia.com/cuda-gpus, and find '
'your gpu from the list. Then, modify ./build_kernel.sh '
'by adding compute_XX and sm_XX for whatever your GPU '
'compute capability is according to the website. For '
'example, a 2080 Ti would use compute_75 and sm_75. '
'Note that if your card supports below 35, it likely '
'will fail to compile using this method. If you are '
'seeing this error, please feel free to open up an issue '
'and report it. We would like to support as many gpus as '
'possible.')
else:
raise ValueError(f'Unrecognized error code {e.returncode} occurred'
f' during inference kernel evaluation: {e.output}')
      # Sixth step: Read the grid file.
      _, grd = file_util.read_grd(grd_path)
      # Seventh step: Verify the grid shape and return the grid.
log.verbose(f'The output CUDA grid has shape {grd.shape}.')
# gaps_util.grdview(grd)
return grd
def _grid_eval(self,
sif_vector,
resolution,
extent,
extract_parts,
world2local=None):
"""Evalutes the LDIF/SIF on a grid."""
log.verbose('Evaluating SDF grid for mesh.')
if self.use_inference_kernel and not extract_parts:
return self._grid_eval_cuda(sif_vector, resolution, extent)
    if extract_parts or world2local is not None:
log.warning('Part extraction and world2local are not supported with the'
' custom kernel.')
log.warning('Using pure tensorflow for grid evaluation, this will be slow.')
t = time.time()
sif_vector = np.reshape(sif_vector, self.batched_vector_shape)
assert not resolution % self.block_res
block_count = resolution // self.block_res
block_size = (2.0 * extent) / block_count
l_block = []
i = 0
dim_offset = 1 if extract_parts else 0
grid = self.local_decisions if extract_parts else self.predicted_alg_grid
for li in range(block_count):
l_min = -extent + (li) * block_size - 0.5 / resolution
h_block = []
for hi in range(block_count):
h_min = -extent + (hi) * block_size - 0.5 / resolution
w_block = []
for wi in range(block_count):
w_min = -extent + (wi) * block_size - 0.5 / resolution
offset = np.reshape(
np.array([w_min, l_min, h_min], dtype=np.float32), [1, 1, 1, 3])
sample_locations = block_size * self.base_grid + offset
if world2local is not None:
sample_locations = geom_util_np.apply_4x4(
sample_locations, world2local, are_points=True)
grid_out_np = self.session.run(
grid,
feed_dict={
self.sif_input: sif_vector,
self.sample_locations_ph: sample_locations
})
i += 1
w_block.append(grid_out_np)
h_block.append(np.concatenate(w_block, axis=2 + dim_offset))
l_block.append(np.concatenate(h_block, axis=0 + dim_offset))
grid_out = np.concatenate(l_block, axis=1 + dim_offset)
# log.verbose(f'Grid extent: {np.min(grid_out)}, {np.max(grid_out)}')
# grid_out -= 0.5
grid_out_time = time.time()
log.verbose(f'Grid Eval Time: {grid_out_time - t}')
return grid_out
def extract_mesh(self,
sif_vectors,
resolution=128,
extent=0.75,
return_success=False,
world2local=None):
"""Extracts a mesh that is the sum of one or more SIF meshes."""
extract_start_time = time.time()
if isinstance(sif_vectors, list):
volumes = []
if world2local is not None:
assert isinstance(world2local, list)
for i, v in enumerate(sif_vectors):
volumes.append(
self._grid_eval(
v,
resolution,
extent,
extract_parts=False,
world2local=world2local[i]
if world2local is not None else None))
volume = np.sum(volumes, axis=0)
else:
volume = self._grid_eval(
sif_vectors,
resolution,
extent,
extract_parts=False,
world2local=world2local)
grid_out_time = time.time()
log.verbose(f'Grid eval time: {grid_out_time - extract_start_time}')
had_crossing, mesh = extract_mesh.marching_cubes(volume, extent)
if not had_crossing:
log.warning('Warning: Marching Cubes found no surface.')
mesh.marching_cubes_successful = had_crossing
done_time = time.time()
log.verbose(f'MCubes Time: {done_time - grid_out_time}')
if return_success:
return mesh, had_crossing
return mesh
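  # A standalone sketch of what the final marching-cubes step does, using
  # scikit-image on a dense SDF grid (the project's extract_mesh wrapper is
  # assumed to behave similarly; the sphere SDF here is illustrative):
  #
  #   import numpy as np
  #   from skimage import measure
  #   res, extent = 64, 0.75
  #   xs = np.linspace(-extent, extent, res)
  #   g = np.stack(np.meshgrid(xs, xs, xs, indexing='ij'), axis=-1)
  #   volume = np.linalg.norm(g, axis=-1) - 0.5   # sphere of radius 0.5
  #   verts, faces, normals, values = measure.marching_cubes(volume, level=0.0)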
def extract_part_meshes(self, sif_vector, resolution, extent=0.75):
elt_volume = self._grid_eval(
sif_vector, resolution, extent, extract_parts=True, world2local=None)
local_meshes = []
for i in range(self.job.model_config.hparams.sc):
had_crossing, mesh_i = extract_mesh.marching_cubes(
elt_volume[i, ...], extent)
mesh_i.marching_cubes_successful = had_crossing
local_meshes.append(mesh_i)
return local_meshes
def _chunk_sample_eval(self, samples, query_fun, chunk_size):
"""Evaluates a set of query locations chunk by chunk to avoid OOM issues."""
# Note- this code will have strange behavior if there is randomness during
# decoding, because it chunks the decoding up into multiple calls.
assert len(samples.shape) == 2
point_count = samples.shape[0]
if point_count == chunk_size:
chunks = [samples]
else:
pad_len = chunk_size - (point_count % chunk_size)
if pad_len:
samples = np.pad(samples, ((0, pad_len), (0, 0)), 'constant')
assert not (point_count + pad_len) % chunk_size
chunk_count = (point_count + pad_len) // chunk_size
chunks = np.split(samples, chunk_count, axis=0)
out = []
for chunk in chunks:
out_i = query_fun(chunk)
assert len(out_i.shape) == 2
assert out_i.shape[0] == chunk_size
out.append(out_i)
return np.concatenate(out, axis=0)[:point_count, :]
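  # Hypothetical usage of the helper above: evaluate a decoder over many
  # query points without exhausting memory (query_fun must return a
  # [chunk_size, k] array for each [chunk_size, 3] chunk):
  #
  #   sdf_vals = self._chunk_sample_eval(
  #       samples=query_points,              # [N, 3]
  #       query_fun=lambda c: decoder(c),    # illustrative decoder
  #       chunk_size=4096)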
def iou(self, sif_vector, example):
samps = example.uniform_samples[:, :3]
gt_is_inside = example.uniform_samples[:, 3:4] < 0.0
    pred_is_inside
<reponame>SimplyVC/panic
import copy
import datetime
import json
import logging
import unittest
from queue import Queue
from unittest import mock
import pika
import pika.exceptions
from freezegun import freeze_time
from parameterized import parameterized
from src.alerter.alerters.system import SystemAlerter
from src.alerter.alerts.system_alerts import (
OpenFileDescriptorsIncreasedAboveThresholdAlert)
from src.alerter.grouped_alerts_metric_code import GroupedSystemAlertsMetricCode
from src.configs.alerts.system import SystemAlertsConfig
from src.message_broker.rabbitmq import RabbitMQApi
from src.utils.constants.rabbitmq import (
ALERT_EXCHANGE, HEALTH_CHECK_EXCHANGE,
SYS_ALERTER_INPUT_QUEUE_NAME_TEMPLATE, SYSTEM_ALERT_ROUTING_KEY,
SYSTEM_TRANSFORMED_DATA_ROUTING_KEY_TEMPLATE,
HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY, TOPIC)
from src.utils.env import ALERTER_PUBLISHING_QUEUE_SIZE, RABBIT_IP
class TestSystemAlerter(unittest.TestCase):
def setUp(self) -> None:
self.dummy_logger = logging.getLogger('Dummy')
self.dummy_logger.disabled = True
self.rabbit_ip = RABBIT_IP
self.alert_input_exchange = ALERT_EXCHANGE
self.connection_check_time_interval = datetime.timedelta(seconds=0)
self.rabbitmq = RabbitMQApi(
self.dummy_logger, self.rabbit_ip,
connection_check_time_interval=self.connection_check_time_interval)
self.test_rabbit_manager = RabbitMQApi(
self.dummy_logger, RABBIT_IP,
connection_check_time_interval=self.connection_check_time_interval)
self.alerter_name = 'test_alerter'
self.system_id = 'test_system_id'
self.parent_id = 'test_parent_id'
self.system_name = 'test_system'
self.last_monitored = 1611619200
self.publishing_queue = Queue(ALERTER_PUBLISHING_QUEUE_SIZE)
self.test_output_routing_key = 'test_alert.system'
self.queue_used = SYS_ALERTER_INPUT_QUEUE_NAME_TEMPLATE.format(
self.parent_id)
self.target_queue_used = "alert_router_queue"
self.input_routing_key = \
SYSTEM_TRANSFORMED_DATA_ROUTING_KEY_TEMPLATE.format(self.parent_id)
self.bad_output_routing_key = "alert.system.not_real"
self.output_routing_key = SYSTEM_ALERT_ROUTING_KEY.format(
self.parent_id)
self.heartbeat_queue = 'heartbeat queue'
self.heartbeat_test = {
'component_name': self.alerter_name,
'is_alive': True,
'timestamp': datetime.datetime(2012, 1, 1).timestamp()
}
"""
############# Alerts config base configuration ######################
"""
self.enabled_alert = "True"
self.critical_threshold_percentage = 95
self.critical_threshold_seconds = 300
self.critical_repeat_seconds = 300
self.critical_enabled = "True"
self.critical_repeat_enabled = "True"
self.warning_threshold_percentage = 85
self.warning_threshold_seconds = 200
self.warning_enabled = "True"
self.base_config = {
"name": "base_percent_config",
"enabled": self.enabled_alert,
"parent_id": self.parent_id,
"critical_threshold": self.critical_threshold_percentage,
"critical_repeat": self.critical_repeat_seconds,
"critical_repeat_enabled": self.critical_repeat_enabled,
"critical_enabled": self.critical_enabled,
"warning_threshold": self.warning_threshold_percentage,
"warning_enabled": self.warning_enabled
}
self.open_file_descriptors = copy.deepcopy(self.base_config)
self.open_file_descriptors['name'] = "open_file_descriptors"
self.system_cpu_usage = copy.deepcopy(self.base_config)
self.system_cpu_usage['name'] = "system_cpu_usage"
self.system_storage_usage = copy.deepcopy(self.base_config)
self.system_storage_usage['name'] = "system_storage_usage"
self.system_ram_usage = copy.deepcopy(self.base_config)
self.system_ram_usage['name'] = "system_ram_usage"
self.system_is_down = copy.deepcopy(self.base_config)
self.system_is_down['name'] = "system_is_down"
self.system_is_down['critical_threshold'] = \
self.critical_threshold_seconds
self.system_is_down['warning_threshold'] = \
self.warning_threshold_seconds
self.system_alerts_config = SystemAlertsConfig(
self.parent_id,
self.open_file_descriptors,
self.system_cpu_usage,
self.system_storage_usage,
self.system_ram_usage,
self.system_is_down
)
self.test_system_alerter = SystemAlerter(
self.alerter_name,
self.system_alerts_config,
self.dummy_logger,
self.rabbitmq,
ALERTER_PUBLISHING_QUEUE_SIZE
)
"""
############# Alerts config warning alerts disabled ####################
"""
self.base_config['warning_enabled'] = str(
not bool(self.warning_enabled))
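        # Note: bool() of any non-empty string is True, so this evaluates to
        # "False" only because warning_enabled holds the literal "True"; it
        # would not round-trip the string "False".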
self.open_file_descriptors = copy.deepcopy(self.base_config)
self.open_file_descriptors['name'] = "open_file_descriptors"
self.system_cpu_usage = copy.deepcopy(self.base_config)
self.system_cpu_usage['name'] = "system_cpu_usage"
self.system_storage_usage = copy.deepcopy(self.base_config)
self.system_storage_usage['name'] = "system_storage_usage"
self.system_ram_usage = copy.deepcopy(self.base_config)
self.system_ram_usage['name'] = "system_ram_usage"
self.system_is_down = copy.deepcopy(self.base_config)
self.system_is_down['name'] = "system_is_down"
self.system_is_down['critical_threshold'] = \
self.critical_threshold_seconds
self.system_is_down['warning_threshold'] = \
self.warning_threshold_seconds
self.system_alerts_config_warnings_disabled = SystemAlertsConfig(
self.parent_id,
self.open_file_descriptors,
self.system_cpu_usage,
self.system_storage_usage,
self.system_ram_usage,
self.system_is_down
)
self.test_system_alerter_warnings_disabled = SystemAlerter(
self.alerter_name,
self.system_alerts_config_warnings_disabled,
self.dummy_logger,
self.rabbitmq,
ALERTER_PUBLISHING_QUEUE_SIZE
)
"""
############# Alerts config critical alerts disabled ###################
"""
self.base_config['warning_enabled'] = self.warning_enabled
self.base_config['critical_enabled'] = str(
not bool(self.critical_enabled))
self.open_file_descriptors = copy.deepcopy(self.base_config)
self.open_file_descriptors['name'] = "open_file_descriptors"
self.system_cpu_usage = copy.deepcopy(self.base_config)
self.system_cpu_usage['name'] = "system_cpu_usage"
self.system_storage_usage = copy.deepcopy(self.base_config)
self.system_storage_usage['name'] = "system_storage_usage"
self.system_ram_usage = copy.deepcopy(self.base_config)
self.system_ram_usage['name'] = "system_ram_usage"
self.system_is_down = copy.deepcopy(self.base_config)
self.system_is_down['name'] = "system_is_down"
self.system_is_down['critical_threshold'] = \
self.critical_threshold_seconds
self.system_is_down['warning_threshold'] = \
self.warning_threshold_seconds
self.system_alerts_config_critical_disabled = SystemAlertsConfig(
self.parent_id,
self.open_file_descriptors,
self.system_cpu_usage,
self.system_storage_usage,
self.system_ram_usage,
self.system_is_down
)
self.test_system_alerter_critical_disabled = SystemAlerter(
self.alerter_name,
self.system_alerts_config_critical_disabled,
self.dummy_logger,
self.rabbitmq,
ALERTER_PUBLISHING_QUEUE_SIZE
)
"""
########## Alerts config critical repeat alerts disabled ###############
"""
self.base_config['warning_enabled'] = self.warning_enabled
self.base_config['critical_enabled'] = self.critical_enabled
self.base_config['critical_repeat_enabled'] = str(
not bool(self.critical_repeat_enabled))
self.open_file_descriptors = copy.deepcopy(self.base_config)
self.open_file_descriptors['name'] = "open_file_descriptors"
self.system_cpu_usage = copy.deepcopy(self.base_config)
self.system_cpu_usage['name'] = "system_cpu_usage"
self.system_storage_usage = copy.deepcopy(self.base_config)
self.system_storage_usage['name'] = "system_storage_usage"
self.system_ram_usage = copy.deepcopy(self.base_config)
self.system_ram_usage['name'] = "system_ram_usage"
self.system_is_down = copy.deepcopy(self.base_config)
self.system_is_down['name'] = "system_is_down"
self.system_is_down['critical_threshold'] = \
self.critical_threshold_seconds
self.system_is_down['warning_threshold'] = \
self.warning_threshold_seconds
self.system_alerts_config_critical_repeat_disabled = SystemAlertsConfig(
self.parent_id,
self.open_file_descriptors,
self.system_cpu_usage,
self.system_storage_usage,
self.system_ram_usage,
self.system_is_down
)
self.test_system_alerter_critical_repeat_disabled = SystemAlerter(
self.alerter_name,
self.system_alerts_config_critical_repeat_disabled,
self.dummy_logger,
self.rabbitmq,
ALERTER_PUBLISHING_QUEUE_SIZE
)
"""
############# Alerts config all alerts disabled ######################
"""
self.base_config['warning_enabled'] = self.warning_enabled
self.base_config['critical_enabled'] = self.critical_enabled
self.base_config['enabled'] = str(not bool(self.enabled_alert))
self.open_file_descriptors = copy.deepcopy(self.base_config)
self.open_file_descriptors['name'] = "open_file_descriptors"
self.system_cpu_usage = copy.deepcopy(self.base_config)
self.system_cpu_usage['name'] = "system_cpu_usage"
self.system_storage_usage = copy.deepcopy(self.base_config)
self.system_storage_usage['name'] = "system_storage_usage"
self.system_ram_usage = copy.deepcopy(self.base_config)
self.system_ram_usage['name'] = "system_ram_usage"
self.system_is_down = copy.deepcopy(self.base_config)
self.system_is_down['name'] = "system_is_down"
self.system_is_down['critical_threshold'] = \
self.critical_threshold_seconds
self.system_is_down['warning_threshold'] = \
self.warning_threshold_seconds
self.system_alerts_config_all_disabled = SystemAlertsConfig(
self.parent_id,
self.open_file_descriptors,
self.system_cpu_usage,
self.system_storage_usage,
self.system_ram_usage,
self.system_is_down
)
self.test_system_alerter_all_disabled = SystemAlerter(
self.alerter_name,
self.system_alerts_config_all_disabled,
self.dummy_logger,
self.rabbitmq,
ALERTER_PUBLISHING_QUEUE_SIZE
)
"""
################# Metrics Received from Data Transformer ############
"""
self.warning = "WARNING"
self.info = "INFO"
self.critical = "CRITICAL"
self.error = "ERROR"
self.none = None
# Process CPU Seconds Total
self.current_cpu_sec = 42420.88
self.previous_cpu_sec = 42400.42
# Process Memory Usage
self.current_mem_use = 20.00
self.previous_mem_use = 10.23
# Virtual Memory Usage
self.current_v_mem_use = 735047680.0
self.previous_v_mem_use = 723312578.0
self.percent_usage = 40
self.data_received_error_data = {
"error": {
"meta_data": {
"system_name": self.system_name,
"system_id": self.system_id,
"system_parent_id": self.parent_id,
"time": self.last_monitored
},
"data": {
"went_down_at": {
"current": self.last_monitored,
"previous": self.none
}
},
"message": "Error message",
"code": 5004,
}
}
self.data_received_initially_no_alert = {
"result": {
"meta_data": {
"system_name": self.system_name,
"system_id": self.system_id,
"system_parent_id": self.parent_id,
"last_monitored": self.last_monitored
},
"data": {
"process_cpu_seconds_total": {
"current": self.current_cpu_sec,
"previous": self.none
},
"process_memory_usage": {
"current": self.current_mem_use,
"previous": self.none
},
"virtual_memory_usage": {
"current": self.current_v_mem_use,
"previous": self.none
},
"open_file_descriptors": {
"current": self.percent_usage,
"previous": self.none
},
"system_cpu_usage": {
"current": self.percent_usage,
"previous": self.none
},
"system_ram_usage": {
"current": self.percent_usage,
"previous": self.none
},
"system_storage_usage": {
"current": self.percent_usage,
"previous": self.none
},
"network_receive_bytes_total": {
"current": self.none,
"previous": self.none,
},
"network_transmit_bytes_total": {
"current": self.none,
"previous": self.none
},
"disk_io_time_seconds_total": {
"current": self.none,
"previous": self.none,
},
"network_transmit_bytes_per_second": {
"current": self.none,
"previous": self.none,
},
"network_receive_bytes_per_second": {
"current": self.none,
"previous": self.none
},
"disk_io_time_seconds_in_interval": {
"current": self.none,
"previous": self.none,
},
"went_down_at": {
"current": self.none,
"previous": self.none
}
}
}
}
self.data_received_initially_warning_alert = \
copy.deepcopy(self.data_received_initially_no_alert)
self.data_received_initially_warning_alert[
'result']['data']['open_file_descriptors']['current'] = \
self.percent_usage + 46
self.data_received_initially_warning_alert[
'result']['data']['system_cpu_usage']['current'] = \
self.percent_usage + 46
self.data_received_initially_warning_alert[
'result']['data']['system_ram_usage']['current'] = \
self.percent_usage + 46
self.data_received_initially_warning_alert[
'result']['data']['system_storage_usage']['current'] = \
self.percent_usage + 46
self.data_received_below_warning_threshold = \
copy.deepcopy(self.data_received_initially_no_alert)
self.data_received_below_warning_threshold[
'result']['data']['open_file_descriptors']['previous'] = \
self.percent_usage + 46
self.data_received_below_warning_threshold[
'result']['data']['system_cpu_usage']['previous'] = \
self.percent_usage + 46
self.data_received_below_warning_threshold[
'result']['data']['system_ram_usage']['previous'] = \
self.percent_usage + 46
self.data_received_below_warning_threshold[
'result']['data']['system_storage_usage']['previous'] = \
self.percent_usage + 46
self.data_received_initially_critical_alert = \
copy.deepcopy(self.data_received_initially_no_alert)
self.data_received_initially_critical_alert[
'result']['data']['open_file_descriptors']['current'] = \
self.percent_usage + 56
self.data_received_initially_critical_alert[
'result']['data']['system_cpu_usage']['current'] = \
self.percent_usage + 56
self.data_received_initially_critical_alert[
'result']['data']['system_ram_usage']['current'] = \
self.percent_usage + 56
self.data_received_initially_critical_alert[
'result']['data']['system_storage_usage']['current'] = \
self.percent_usage + 56
self.data_received_below_critical_above_warning = \
copy.deepcopy(self.data_received_initially_warning_alert)
self.data_received_below_critical_above_warning[
'result']['data']['open_file_descriptors']['previous'] = \
self.percent_usage + 56
self.data_received_below_critical_above_warning[
'result']['data']['system_cpu_usage']['previous'] = \
self.percent_usage + 56
self.data_received_below_critical_above_warning[
'result']['data']['system_ram_usage']['previous'] = \
self.percent_usage + 56
self.data_received_below_critical_above_warning[
'result']['data']['system_storage_usage']['previous'] = \
self.percent_usage + 56
# Alert used for rabbitMQ testing
self.alert = OpenFileDescriptorsIncreasedAboveThresholdAlert(
self.system_name, self.percent_usage + 46, self.warning,
self.last_monitored, self.warning, self.parent_id,
self.system_id
)
try:
self.test_system_alerter.rabbitmq.connect()
self.test_system_alerter.rabbitmq.exchange_declare(
HEALTH_CHECK_EXCHANGE, TOPIC, False, True, False, False)
self.test_system_alerter.rabbitmq.exchange_declare(
ALERT_EXCHANGE, TOPIC, False, True, False, False)
except Exception as e:
print("Setup failed: {}".format(e))
def tearDown(self) -> None:
# Delete any queues and exchanges which are common across many tests
try:
self.test_system_alerter.rabbitmq.connect()
self.test_rabbit_manager.connect()
            # Declare the queues in case they aren't there, so the deletes
            # below don't raise an error
self.test_system_alerter.rabbitmq.queue_declare(
queue=self.target_queue_used, durable=True, exclusive=False,
auto_delete=False, passive=False
)
self.test_system_alerter.rabbitmq.queue_declare(
queue=self.queue_used, durable=True, exclusive=False,
auto_delete=False, passive=False
)
self.test_system_alerter.rabbitmq.queue_purge(self.queue_used)
self.test_system_alerter.rabbitmq.queue_purge(
self.target_queue_used)
self.test_system_alerter.rabbitmq.queue_delete(self.queue_used)
self.test_system_alerter.rabbitmq.queue_delete(
self.target_queue_used)
self.test_system_alerter.rabbitmq.exchange_delete(
HEALTH_CHECK_EXCHANGE)
self.test_system_alerter.rabbitmq.exchange_delete(ALERT_EXCHANGE)
self.test_system_alerter.rabbitmq.disconnect()
self.test_rabbit_manager.disconnect()
except Exception as e:
print("Test failed: {}".format(e))
self.dummy_logger = None
self.rabbitmq = None
self.publishing_queue = None
self.test_system_alerter = None
self.test_system_alerter_warnings_disabled = None
self.test_system_alerter_critical_disabled = None
self.test_system_alerter_all_disabled = None
self.test_system_alerter_critical_repeat_disabled = None
self.system_alerts_config = None
self.system_alerts_config_warnings_disabled = None
self.system_alerts_config_critical_disabled = None
self.system_alerts_config_all_disabled = None
self.system_alerts_config_critical_repeat_disabled = None
self.test_system_alerter = None
def test_returns_alerter_name_as_str(self) -> None:
self.assertEqual(self.alerter_name, self.test_system_alerter.__str__())
def test_returns_alerter_name(self) -> None:
self.assertEqual(self.alerter_name,
self.test_system_alerter.alerter_name)
def test_returns_logger(self) -> None:
self.assertEqual(self.dummy_logger, self.test_system_alerter.logger)
def test_returns_publishing_queue_size(self) -> None:
self.assertEqual(self.publishing_queue.qsize(),
self.test_system_alerter.publishing_queue.qsize())
def test_returns_alerts_configs_from_alerter(self) -> None:
self.assertEqual(self.system_alerts_config,
self.test_system_alerter.alerts_configs)
"""
###################### Tests without using RabbitMQ #######################
"""
@mock.patch.object(SystemAlerter, "_classify_alert")
def test_alerts_initial_run_no_alerts_count_classify_alert(
self, mock_classify_alert) -> None:
data_for_alerting = []
data = self.data_received_initially_no_alert['result']['data']
meta_data = self.data_received_initially_no_alert['result']['meta_data']
self.test_system_alerter._create_state_for_system(self.system_id)
self.test_system_alerter._process_results(
data, meta_data, data_for_alerting)
try:
self.assertEqual(4, mock_classify_alert.call_count)
except AssertionError as e:
self.fail("Test failed: {}".format(e))
@mock.patch(
"src.alerter.alerters.system"
".OpenFileDescriptorsIncreasedAboveThresholdAlert",
autospec=True)
@mock.patch(
"src.alerter.alerters.system"
".OpenFileDescriptorsDecreasedBelowThresholdAlert",
autospec=True)
@mock.patch(
"src.alerter.alerters.system"
".SystemCPUUsageIncreasedAboveThresholdAlert",
autospec=True)
@mock.patch(
"src.alerter.alerters.system"
".SystemCPUUsageDecreasedBelowThresholdAlert",
autospec=True)
@mock.patch(
"src.alerter.alerters.system"
".SystemRAMUsageIncreasedAboveThresholdAlert",
autospec=True)
@mock.patch(
"src.alerter.alerters.system"
".SystemRAMUsageDecreasedBelowThresholdAlert",
autospec=True)
@mock.patch(
"src.alerter.alerters.system"
".SystemStorageUsageIncreasedAboveThresholdAlert",
autospec=True)
@mock.patch(
"src.alerter.alerters.system"
".SystemStorageUsageDecreasedBelowThresholdAlert",
autospec=True)
def test_initial_run_no_increase_alerts_or_decrease_alerts(
self, mock_storage_usage_decrease, mock_storage_usage_increase,
mock_ram_usage_decrease, mock_ram_usage_increase,
mock_cpu_usage_decrease, mock_cpu_usage_increase,
mock_ofd_decrease, mock_ofd_increase) -> None:
data_for_alerting = []
data = self.data_received_initially_no_alert['result']['data']
meta_data = self.data_received_initially_no_alert['result']['meta_data']
self.test_system_alerter._create_state_for_system(self.system_id)
self.test_system_alerter._process_results(
data, meta_data, data_for_alerting)
try:
mock_storage_usage_decrease.assert_not_called()
mock_storage_usage_increase.assert_not_called()
mock_ram_usage_decrease.assert_not_called()
mock_ram_usage_increase.assert_not_called()
mock_cpu_usage_decrease.assert_not_called()
mock_cpu_usage_increase.assert_not_called()
mock_ofd_decrease.assert_not_called()
mock_ofd_increase.assert_not_called()
self.assertEqual(0, len(data_for_alerting))
except AssertionError as e:
self.fail("Test failed: {}".format(e))
@mock.patch(
"src.alerter.alerters.system"
".OpenFileDescriptorsIncreasedAboveThresholdAlert",
autospec=True)
@mock.patch(
"src.alerter.alerters.system"
".OpenFileDescriptorsDecreasedBelowThresholdAlert",
autospec=True)
@mock.patch(
"src.alerter.alerters.system"
".SystemCPUUsageIncreasedAboveThresholdAlert",
autospec=True)
@mock.patch(
"src.alerter.alerters.system"
".SystemCPUUsageDecreasedBelowThresholdAlert",
autospec=True)
@mock.patch(
"src.alerter.alerters.system"
".SystemRAMUsageIncreasedAboveThresholdAlert",
autospec=True)
@mock.patch(
"src.alerter.alerters.system"
".SystemRAMUsageDecreasedBelowThresholdAlert",
autospec=True)
@mock.patch(
"src.alerter.alerters.system"
".SystemStorageUsageIncreasedAboveThresholdAlert",
autospec=True)
@mock.patch(
"src.alerter.alerters.system"
".SystemStorageUsageDecreasedBelowThresholdAlert",
autospec=True)
def test_initial_run_no_alerts_second_run_no_alerts(
self, mock_storage_usage_decrease, mock_storage_usage_increase,
mock_ram_usage_decrease, mock_ram_usage_increase,
mock_cpu_usage_decrease, mock_cpu_usage_increase,
mock_ofd_decrease, mock_ofd_increase) -> None:
data_for_alerting = []
data = self.data_received_initially_no_alert['result']['data']
meta_data = self.data_received_initially_no_alert['result']['meta_data']
self.test_system_alerter._create_state_for_system(self.system_id)
self.test_system_alerter._process_results(
data, meta_data, data_for_alerting)
try:
mock_storage_usage_decrease.assert_not_called()
mock_storage_usage_increase.assert_not_called()
mock_ram_usage_decrease.assert_not_called()
mock_ram_usage_increase.assert_not_called()
mock_cpu_usage_decrease.assert_not_called()
mock_cpu_usage_increase.assert_not_called()
mock_ofd_decrease.assert_not_called()
mock_ofd_increase.assert_not_called()
self.assertEqual(0, len(data_for_alerting))
except AssertionError as e:
self.fail("Test failed: {}".format(e))
self.test_system_alerter._create_state_for_system(self.system_id)
self.test_system_alerter._process_results(
data, meta_data, data_for_alerting)
try:
mock_storage_usage_decrease.assert_not_called()
mock_storage_usage_increase.assert_not_called()
mock_ram_usage_decrease.assert_not_called()
mock_ram_usage_increase.assert_not_called()
mock_cpu_usage_decrease.assert_not_called()
mock_cpu_usage_increase.assert_not_called()
mock_ofd_decrease.assert_not_called()
mock_ofd_increase.assert_not_called()
self.assertEqual(0, len(data_for_alerting))
except AssertionError as e:
self.fail("Test failed: | |
(0.89020, 0.00392, 0.00784),
(0.90196, 0.00392, 0.00784),
(0.90980, 0.00392, 0.00784),
(0.91765, 0.00392, 0.00784),
(0.92941, 0.00392, 0.00784),
(0.93725, 0.00392, 0.00784),
(0.94510, 0.00392, 0.00784),
(0.95686, 0.00784, 0.00784),
(0.95686, 0.00784, 0.00784),
(0.96078, 0.00784, 0.00784),
(0.96471, 0.00784, 0.00784),
(0.96863, 0.00784, 0.00784),
(0.96863, 0.00784, 0.00784),
(0.97255, 0.00784, 0.00784),
(0.97647, 0.00784, 0.00784),
(0.98039, 0.00784, 0.00784),
(0.98039, 0.00784, 0.00784),
(0.98431, 0.00784, 0.00784),
(0.98824, 0.00784, 0.00784),
(0.99216, 0.00784, 0.00784),
(0.99608, 0.00392, 0.00784),
(0.99608, 0.00392, 0.00784),
(0.99608, 0.01176, 0.00784),
(0.99608, 0.01961, 0.00784),
(0.99608, 0.03137, 0.00784),
(0.99608, 0.03922, 0.00784),
(0.99608, 0.04706, 0.00784),
(0.99608, 0.05882, 0.00784),
(0.99608, 0.06667, 0.00784),
(0.99608, 0.07451, 0.00784),
(0.99608, 0.08627, 0.00784),
(0.99608, 0.09412, 0.00784),
(0.99608, 0.10196, 0.00784),
(0.99608, 0.11373, 0.00784),
(0.99608, 0.12157, 0.00784),
(0.99608, 0.12941, 0.00784),
(0.99608, 0.14118, 0.00784),
(0.99608, 0.14118, 0.00784),
(0.99608, 0.14902, 0.01176),
(0.99608, 0.15686, 0.01569),
(0.99608, 0.16471, 0.01961),
(1.00000, 0.17647, 0.02745),
(1.00000, 0.18824, 0.03529),
(1.00000, 0.20000, 0.04706),
(1.00000, 0.21176, 0.05490),
(1.00000, 0.22745, 0.06667),
(1.00000, 0.23922, 0.07843),
(1.00000, 0.25098, 0.09020),
(1.00000, 0.26275, 0.10588),
(1.00000, 0.27451, 0.11765),
(1.00000, 0.28627, 0.13333),
(1.00000, 0.30196, 0.15294),
(1.00000, 0.32157, 0.17255),
(1.00000, 0.34118, 0.19216),
(1.00000, 0.36078, 0.21569),
(1.00000, 0.37647, 0.23529),
(1.00000, 0.39216, 0.25490),
(1.00000, 0.40784, 0.27843),
(1.00000, 0.42353, 0.29804),
(1.00000, 0.44314, 0.32157),
(1.00000, 0.46667, 0.34902),
(1.00000, 0.49020, 0.38039),
(1.00000, 0.51373, 0.40784),
(1.00000, 0.54118, 0.43922),
(1.00000, 0.56471, 0.47059),
(1.00000, 0.59216, 0.50196),
(1.00000, 0.61569, 0.53333),
(1.00000, 0.64314, 0.56863),
(1.00000, 0.67059, 0.60000),
(1.00000, 0.69804, 0.63529),
(1.00000, 0.72549, 0.67059),
(1.00000, 0.75686, 0.70588),
(1.00000, 0.78431, 0.74118),
(1.00000, 0.81569, 0.77647),
(1.00000, 0.84314, 0.81176),
(1.00000, 0.87451, 0.85098),
(1.00000, 0.89804, 0.87843),
(1.00000, 0.92157, 0.90980),
(1.00000, 0.94902, 0.93725),
(1.00000, 0.97255, 0.96863),
(1.00000, 1.00000, 1.00000),
)
cmap_rainbow2 = (
(0.00000, 0.00000, 0.00000),
(0.03137, 0.00000, 0.03137),
(0.06275, 0.00000, 0.06275),
(0.09412, 0.00000, 0.09412),
(0.12549, 0.00000, 0.12549),
(0.15686, 0.00000, 0.15686),
(0.18824, 0.00000, 0.18824),
(0.21961, 0.00000, 0.21961),
(0.25098, 0.00000, 0.25098),
(0.28235, 0.00000, 0.28235),
(0.31373, 0.00000, 0.31373),
(0.34510, 0.00000, 0.34510),
(0.37647, 0.00000, 0.37647),
(0.40784, 0.00000, 0.40784),
(0.43922, 0.00000, 0.43922),
(0.47059, 0.00000, 0.47059),
(0.50196, 0.00000, 0.50196),
(0.53333, 0.00000, 0.53333),
(0.56471, 0.00000, 0.56471),
(0.59608, 0.00000, 0.59608),
(0.62745, 0.00000, 0.62745),
(0.65882, 0.00000, 0.65882),
(0.69020, 0.00000, 0.69020),
(0.72157, 0.00000, 0.72157),
(0.75294, 0.00000, 0.75294),
(0.78431, 0.00000, 0.78431),
(0.81569, 0.00000, 0.81569),
(0.84706, 0.00000, 0.84706),
(0.87843, 0.00000, 0.87843),
(0.90980, 0.00000, 0.90980),
(0.94118, 0.00000, 0.94118),
(0.97255, 0.00000, 0.97255),
(1.00000, 0.00000, 1.00000),
(0.96863, 0.00000, 1.00000),
(0.93725, 0.00000, 1.00000),
(0.90588, 0.00000, 1.00000),
(0.87451, 0.00000, 1.00000),
(0.84314, 0.00000, 1.00000),
(0.81176, 0.00000, 1.00000),
(0.78039, 0.00000, 1.00000),
(0.74902, 0.00000, 1.00000),
(0.71765, 0.00000, 1.00000),
(0.68627, 0.00000, 1.00000),
(0.65490, 0.00000, 1.00000),
(0.62353, 0.00000, 1.00000),
(0.59216, 0.00000, 1.00000),
(0.56078, 0.00000, 1.00000),
(0.52941, 0.00000, 1.00000),
(0.49804, 0.00000, 1.00000),
(0.46667, 0.00000, 1.00000),
(0.43529, 0.00000, 1.00000),
(0.40392, 0.00000, 1.00000),
(0.37255, 0.00000, 1.00000),
(0.34118, 0.00000, 1.00000),
(0.30980, 0.00000, 1.00000),
(0.27843, 0.00000, 1.00000),
(0.24706, 0.00000, 1.00000),
(0.21569, 0.00000, 1.00000),
(0.18431, 0.00000, 1.00000),
(0.15294, 0.00000, 1.00000),
(0.12157, 0.00000, 1.00000),
(0.09020, 0.00000, 1.00000),
(0.05882, 0.00000, 1.00000),
(0.02745, 0.00000, 1.00000),
(0.00000, 0.00000, 1.00000),
(0.00000, 0.03137, 1.00000),
(0.00000, 0.06275, 1.00000),
(0.00000, 0.09412, 1.00000),
(0.00000, 0.12549, 1.00000),
(0.00000, 0.15686, 1.00000),
(0.00000, 0.18824, 1.00000),
(0.00000, 0.21961, 1.00000),
(0.00000, 0.25098, 1.00000),
(0.00000, 0.28235, 1.00000),
(0.00000, 0.31373, 1.00000),
(0.00000, 0.34510, 1.00000),
(0.00000, 0.37647, 1.00000),
(0.00000, 0.40784, 1.00000),
(0.00000, 0.43922, 1.00000),
(0.00000, 0.47059, 1.00000),
(0.00000, 0.50196, 1.00000),
(0.00000, 0.53333, 1.00000),
(0.00000, 0.56471, 1.00000),
(0.00000, 0.59608, 1.00000),
(0.00000, 0.62745, 1.00000),
(0.00000, 0.65882, 1.00000),
(0.00000, 0.69020, 1.00000),
(0.00000, 0.72157, 1.00000),
(0.00000, 0.75294, 1.00000),
(0.00000, 0.78431, 1.00000),
(0.00000, 0.81569, 1.00000),
(0.00000, 0.84706, 1.00000),
(0.00000, 0.87843, 1.00000),
(0.00000, 0.90980, 1.00000),
(0.00000, 0.94118, 1.00000),
(0.00000, 0.97255, 1.00000),
(0.00000, 1.00000, 1.00000),
(0.00000, 1.00000, 0.96863),
(0.00000, 1.00000, 0.93725),
(0.00000, 1.00000, 0.90588),
(0.00000, 1.00000, 0.87451),
(0.00000, 1.00000, 0.84314),
(0.00000, 1.00000, 0.81176),
(0.00000, 1.00000, 0.78039),
(0.00000, 1.00000, 0.74902),
(0.00000, 1.00000, 0.71765),
(0.00000, 1.00000, 0.68627),
(0.00000, 1.00000, 0.65490),
(0.00000, 1.00000, 0.62353),
(0.00000, 1.00000, 0.59216),
(0.00000, 1.00000, 0.56078),
(0.00000, 1.00000, 0.52941),
(0.00000, 1.00000, 0.49804),
(0.00000, 1.00000, 0.46667),
(0.00000, 1.00000, 0.43529),
(0.00000, 1.00000, 0.40392),
(0.00000, 1.00000, 0.37255),
(0.00000, 1.00000, 0.34118),
(0.00000, 1.00000, 0.30980),
(0.00000, 1.00000, 0.27843),
(0.00000, 1.00000, 0.24706),
(0.00000, 1.00000, 0.21569),
(0.00000, 1.00000, 0.18431),
(0.00000, 1.00000, 0.15294),
(0.00000, 1.00000, 0.12157),
(0.00000, 1.00000, 0.09020),
(0.00000, 1.00000, 0.05882),
(0.00000, 1.00000, 0.02745),
(0.00000, 1.00000, 0.00000),
(0.03137, 1.00000, 0.00000),
(0.06275, 1.00000, 0.00000),
(0.09412, 1.00000, 0.00000),
(0.12549, 1.00000, 0.00000),
(0.15686, 1.00000, 0.00000),
(0.18824, 1.00000, 0.00000),
(0.21961, 1.00000, 0.00000),
(0.25098, 1.00000, 0.00000),
(0.28235, 1.00000, 0.00000),
(0.31373, 1.00000, 0.00000),
(0.34510, 1.00000, 0.00000),
(0.37647, 1.00000, 0.00000),
(0.40784, 1.00000, 0.00000),
(0.43922, 1.00000, 0.00000),
(0.47059, 1.00000, 0.00000),
(0.50196, 1.00000, 0.00000),
(0.53333, 1.00000, 0.00000),
(0.56471, 1.00000, 0.00000),
(0.59608, 1.00000, 0.00000),
(0.62745, 1.00000, 0.00000),
(0.65882, 1.00000, 0.00000),
(0.69020, 1.00000, 0.00000),
(0.72157, 1.00000, 0.00000),
(0.75294, 1.00000, 0.00000),
(0.78431, 1.00000, 0.00000),
(0.81569, 1.00000, 0.00000),
(0.84706, 1.00000, 0.00000),
(0.87843, 1.00000, 0.00000),
(0.90980, 1.00000, 0.00000),
(0.94118, 1.00000, 0.00000),
(0.97255, 1.00000, 0.00000),
(1.00000, 1.00000, 0.00000),
(1.00000, 0.98431, 0.00000),
(1.00000, 0.96863, 0.00000),
(1.00000, 0.95294, 0.00000),
(1.00000, 0.93725, 0.00000),
(1.00000, 0.92157, 0.00000),
(1.00000, 0.90588, 0.00000),
(1.00000, 0.89020, 0.00000),
(1.00000, 0.87451, 0.00000),
(1.00000, 0.85882, 0.00000),
(1.00000, 0.84314, 0.00000),
(1.00000, 0.82745, 0.00000),
(1.00000, 0.81176, 0.00000),
(1.00000, 0.79608, 0.00000),
(1.00000, 0.78039, 0.00000),
(1.00000, 0.76471, 0.00000),
(1.00000, 0.74902, 0.00000),
(1.00000, 0.73333, 0.00000),
(1.00000, 0.71765, 0.00000),
(1.00000, 0.70196, 0.00000),
(1.00000, 0.68627, 0.00000),
(1.00000, 0.67059, 0.00000),
(1.00000, 0.65490, 0.00000),
(1.00000, 0.63922, 0.00000),
(1.00000, 0.62353, 0.00000),
(1.00000, 0.60784, 0.00000),
(1.00000, 0.59216, 0.00000),
(1.00000, 0.57647, 0.00000),
(1.00000, 0.56078, 0.00000),
(1.00000, 0.54510, 0.00000),
(1.00000, 0.52941, 0.00000),
(1.00000, 0.51373, 0.00000),
(1.00000, 0.49804, 0.00000),
(1.00000, 0.48235, 0.00000),
(1.00000, 0.46667, 0.00000),
(1.00000, 0.45098, 0.00000),
(1.00000, 0.43529, 0.00000),
(1.00000, 0.41961, 0.00000),
(1.00000, 0.40392, 0.00000),
(1.00000, 0.38824, 0.00000),
(1.00000, 0.37255, 0.00000),
(1.00000, 0.35686, 0.00000),
(1.00000, 0.34118, 0.00000),
(1.00000, 0.32549, 0.00000),
(1.00000, 0.30980, 0.00000),
(1.00000, 0.29412, 0.00000),
(1.00000, 0.27843, 0.00000),
(1.00000, 0.26275, 0.00000),
(1.00000, 0.24706, 0.00000),
(1.00000, 0.23137, 0.00000),
(1.00000, 0.21569, 0.00000),
(1.00000, 0.20000, 0.00000),
(1.00000, 0.18431, 0.00000),
(1.00000, 0.16863, 0.00000),
(1.00000, 0.15294, 0.00000),
(1.00000, 0.13725, 0.00000),
(1.00000, 0.12157, 0.00000),
(1.00000, 0.10588, 0.00000),
(1.00000, 0.09020, 0.00000),
(1.00000, 0.07451, 0.00000),
(1.00000, 0.05882, 0.00000),
(1.00000, 0.04314, 0.00000),
(1.00000, 0.02745, 0.00000),
(1.00000, 0.01176, 0.00000),
(1.00000, 0.00000, 0.00000),
(1.00000, 0.03137, 0.03137),
(1.00000, 0.06275, 0.06275),
(1.00000, 0.09412, 0.09412),
(1.00000, 0.12549, 0.12549),
(1.00000, 0.15686, 0.15686),
(1.00000, 0.18824, 0.18824),
(1.00000, 0.21961, 0.21961),
(1.00000, 0.25098, 0.25098),
(1.00000, 0.28235, 0.28235),
(1.00000, 0.31373, 0.31373),
(1.00000, 0.34510, 0.34510),
(1.00000, 0.37647, 0.37647),
(1.00000, 0.40784, 0.40784),
(1.00000, 0.43922, 0.43922),
(1.00000, 0.47059, 0.47059),
(1.00000, 0.50196, 0.50196),
(1.00000, 0.53333, 0.53333),
(1.00000, 0.56471, 0.56471),
(1.00000, 0.59608, 0.59608),
(1.00000, 0.62745, 0.62745),
(1.00000, 0.65882, 0.65882),
(1.00000, 0.69020, 0.69020),
(1.00000, 0.72157, 0.72157),
(1.00000, 0.75294, 0.75294),
(1.00000, 0.78431, 0.78431),
(1.00000, 0.81569, 0.81569),
(1.00000, 0.84706, 0.84706),
(1.00000, 0.87843, 0.87843),
(1.00000, 0.90980, 0.90980),
(1.00000, 0.94118, 0.94118),
(1.00000, 0.97255, 0.97255),
)
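# Usage sketch (an editor's assumption, not part of the original module): the
# cmap_* tables are rows of (R, G, B) floats in [0, 1], so they can be wrapped
# in a matplotlib ListedColormap if matplotlib is available, e.g.:
#   from matplotlib.colors import ListedColormap
#   rainbow2 = ListedColormap(cmap_rainbow2, name='rainbow2')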
cmap_real = (
(0.00784, 0.00392, 0.00000), # noqa
(0.01569, 0.00784, 0.00000),
(0.02353, 0.01176, 0.00000),
(0.03137, 0.01569, 0.00000),
(0.03922, 0.01961, 0.00000),
(0.04706, 0.02353, 0.00000),
(0.05490, 0.02745, 0.00000),
(0.06275, 0.03137, 0.00000),
(0.07059, 0.03529, 0.00000),
(0.07843, 0.03922, 0.00000),
(0.08627, 0.04314, 0.00000),
(0.09412, 0.04706, 0.00000),
(0.10196, 0.05098, 0.00000),
(0.10980, 0.05490, 0.00000),
(0.11765, 0.05882, 0.00000),
(0.12549, 0.06275, 0.00000),
(0.13333, 0.06667, 0.00000),
(0.14118, 0.07059, 0.00000),
(0.14902, 0.07451, 0.00000),
(0.15686, 0.07843, 0.00000),
(0.16471, 0.08235, 0.00000),
(0.17255, 0.08627, 0.00000),
(0.18039, 0.09020, 0.00000),
(0.18824, 0.09412, 0.00000),
(0.19608, 0.09804, 0.00000),
(0.20392, 0.10196, 0.00000),
(0.21176, 0.10588, 0.00000),
(0.21961, 0.10980, 0.00000),
(0.22745, 0.11373, 0.00000),
(0.23529, 0.11765, 0.00000),
(0.24314, 0.12157, 0.00000),
(0.25098, 0.12549, 0.00000),
(0.25882, 0.12941, 0.00000),
(0.26667, 0.13333, 0.00000),
(0.27451, 0.13725, 0.00000),
(0.28235, 0.14118, 0.00000),
(0.29020, 0.14510, 0.00000),
(0.29804, 0.14902, 0.00000),
(0.30588, 0.15294, 0.00000),
(0.31373, 0.15686, 0.00000),
(0.32157, 0.16078, 0.00000),
(0.32941, 0.16471, 0.00000),
(0.33725, 0.16863, 0.00000),
(0.34510, 0.17255, 0.00000),
(0.35294, 0.17647, 0.00000),
(0.36078, 0.18039, 0.00000),
(0.36863, 0.18431, 0.00000),
(0.37647, 0.18824, 0.00000),
(0.38431, 0.19216, 0.00000),
(0.39216, 0.19608, 0.00000),
(0.40000, 0.20000, 0.00000),
(0.40784, 0.20392, 0.00000),
(0.41569, 0.20784, 0.00000),
(0.42353, 0.21176, 0.00000),
(0.43137, 0.21569, 0.00000),
(0.43922, 0.21961, 0.00000),
(0.44706, 0.22353, 0.00000),
(0.45490, 0.22745, 0.00000),
(0.46275, 0.23137, 0.00000),
(0.47059, 0.23529, 0.00000),
(0.47843, 0.23922, 0.00000),
(0.48627, 0.24314, 0.00000),
(0.49412, 0.24706, 0.00000),
(0.50196, 0.25098, 0.00000),
(0.50980, 0.25490, 0.00000),
(0.51765, 0.25882, 0.00000),
(0.52549, 0.26275, 0.00000),
(0.53333, 0.26667, 0.00000),
(0.54118, 0.27059, 0.00000),
(0.54902, 0.27451, 0.00000),
(0.55686, 0.27843, 0.00000),
(0.56471, 0.28235, 0.00000),
(0.57255, 0.28627, 0.00000),
(0.58039, 0.29020, 0.00000),
(0.58824, 0.29412, 0.00000),
(0.59608, 0.29804, 0.00000),
(0.60392, 0.30196, 0.00000),
(0.61176, 0.30588, 0.00000),
(0.61961, 0.30980, 0.00000),
(0.62745, 0.31373, 0.00000),
(0.63529, 0.31765, 0.00000),
(0.64314, 0.32157, 0.00000),
(0.65098, 0.32549, 0.00000),
(0.65882, 0.32941, 0.00000),
(0.66667, 0.33333, 0.00000),
(0.67451, 0.33725, 0.00000),
(0.68235, 0.34118, 0.00000),
    (0.69020, 0.34510, 0.00000),
        param_datetime = self._get_param_datetime()
self.status = field_datetime > param_datetime
def get_message(self):
return self.message.format(VALUE=self.field_value, FIELD=self.field_name, DATETIME=self.args[0])
def _get_field_datetime(self):
return datetime.datetime.strptime(self.field_value, self.field_format_str)
def _get_param_datetime(self):
        datetime_str = self.args[0] if len(self.args) >= 1 else None
return datetime.datetime.strptime(datetime_str, self.param_format_str)
class Required(BaseRule):
name = 'required'
message = _('{FIELD} field is required')
description = _('the given field is required')
def check_null(self):
self.status = False
def check_value(self):
pass
class Accepted(BaseRule):
name = 'accepted'
    message = _('{VALUE} of {FIELD} field must be one of: {FLAGS}')
flag = ['yes', 'no', 'true', 'false', '0', '1']
    description = _('the given field must be one of the values in the flag list')
def get_flag_str(self):
return ', '.join(self.flag)
def check_null(self):
pass
def check_value(self):
self.status = self.check_flag()
def check_flag(self):
flag = self.field_value.lower()
return flag in self.flag or flag in list(self.args)
def get_message(self):
return self.message.format(FIELD=self.field_name,
VALUE=self.field_value,
FLAGS=self.get_flag_str(),
RULE_NAME=self.name)
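# Example for Accepted (a sketch; assumes the framework passes extra rule
# parameters through self.args): the built-in flags 'yes/no/true/false/0/1'
# are matched case-insensitively, and a rule declared with extra args such as
# ('on', 'off') would also accept those values via check_flag().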
class Unique(BaseRule):
name = 'unique'
message = _('{VALUE} of {MODEL} with {MODEL_FIELD} is not unique')
    description = _('the given value must be unique in the table')
def check_null(self):
pass
def check_value(self):
self.status = self.check_model()
def check_model(self):
model_name, model_field = self.args
model = self.get_model(model_name)
qs = model.objects.filter(**{model_field: self.field_value})
return not qs.exists()
@staticmethod
def get_model(name):
from django.conf import settings
from django.apps import apps
if 'AUTH_USER_MODEL' == name:
app, name = settings.AUTH_USER_MODEL.split('.')
else:
app, name = name.split('.')
return apps.get_model(app, name)
def get_message(self):
return self.message.format(VALUE=self.field_value, FIELD=self.field_name,
MODEL=self.args[0],
MODEL_FIELD=self.args[1])
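# Minimal sketch of what Unique.check_model does, with illustrative names
# ('auth.User' and 'email' are hypothetical, not part of this module):
#   model = apps.get_model('auth', 'User')
#   is_unique = not model.objects.filter(email=field_value).exists()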
class AlphaDash(BaseRule):
name = 'alpha_dash'
    message = _('{VALUE} is not a valid alpha-dash string.')
    regex = r'^[a-zA-Z0-9\-_]+$'
    description = _('The field under validation may have alpha-numeric characters, as well as dashes and underscores.')
    def check_value(self):
        self.status = bool(re.match(self.regex, self.field_value))
def check_null(self):
pass
class AlphaNumber(BaseRule):
name = 'alpha_number'
    message = _('{VALUE} is not an alphanumeric string.')
    regex = r'^[a-zA-Z0-9]+$'
    description = _('the given value must combine only alphabets and numbers')
    def check_value(self):
        self.status = bool(re.match(self.regex, self.field_value))
def check_null(self):
pass
class Array(BaseRule):
name = 'array'
    message = _('{VALUE} is not a comma-separated string')
    description = _('the given value must be a comma-separated string.')
def check_value(self):
        self.status = len(self.field_value.split(',')) > 1  # at least two comma-separated items
def check_null(self):
pass
class DateBeforeEqual(BaseRule):
name = 'date_before_equal'
message = _('{VALUE} of {FIELD} is not before or equal date {DATE}')
field_format_str = '%Y-%m-%d'
param_format_str = '%Y-%m-%d'
    description = _('check whether the given value is on or before the date parsed with format_str')
def check_null(self):
pass
def check_value(self):
param_date = self._get_param_date()
field_date = self._get_field_date()
self.status = field_date <= param_date
def _get_param_date(self):
date_str = self.get_arg(0)
if not date_str:
            raise RuleMissedParameterError(_('date_before_equal rule missed a parameter'))
param_format_str = self.get_arg(1) if self.get_arg(1) else self.param_format_str
date = datetime.datetime.strptime(date_str, param_format_str)
return date
def _get_field_date(self):
field_format_str = self.get_arg(2) if self.get_arg(2) else self.field_format_str
date = datetime.datetime.strptime(self.field_value, field_format_str)
return date
def get_message(self):
return self.message.format(VALUE=self.field_value,
FIELD=self.field_name,
DATE=self.args[0])
class DateAfterEqual(BaseRule):
name = 'date_after_equal'
message = _('{VALUE} of {FIELD} is not after or equal date {DATE}')
field_format_str = '%Y-%m-%d'
param_format_str = '%Y-%m-%d'
    description = _('check whether the given value is on or after the date parsed with format_str')
def check_null(self):
pass
def check_value(self):
param_date = self._get_param_date()
field_date = self._get_field_date()
self.status = field_date >= param_date
def _get_param_date(self):
        date_str = self.get_arg(0)
        if not date_str:
            raise RuleMissedParameterError(_('date_after_equal rule missed a parameter'))
param_format_str = self.get_arg(1) if self.get_arg(1) else self.param_format_str
date = datetime.datetime.strptime(date_str, param_format_str)
return date
def _get_field_date(self):
field_format_str = self.get_arg(2) if self.get_arg(2) else self.field_format_str
date = datetime.datetime.strptime(self.field_value, field_format_str)
return date
def get_message(self):
return self.message.format(VALUE=self.field_value,
FIELD=self.field_name,
DATE=self.args[0])
class DateTimeBeforeEqual(BaseRule):
name = 'datetime_before_equal'
message = _('{VALUE} of {FIELD} is not before or equal {DATETIME}')
field_format_str = '%Y-%m-%d %H:%M:%S'
param_format_str = '%Y-%m-%d %H:%M:%S'
    description = _('check whether the given value is before or equal to the datetime parsed with format_str')
def check_null(self):
pass
def check_value(self):
field_datetime = self._get_field_datetime()
param_datetime = self._get_param_datetime()
self.status = field_datetime <= param_datetime
def get_message(self):
return self.message.format(VALUE=self.field_value, FIELD=self.field_name, DATETIME=self.args[0])
def _get_field_datetime(self):
return datetime.datetime.strptime(self.field_value, self.field_format_str)
def _get_param_datetime(self):
        datetime_str = self.args[0] if len(self.args) >= 1 else None
return datetime.datetime.strptime(datetime_str, self.param_format_str)
class DatetimeAfterEqual(BaseRule):
name = 'datetime_after_equal'
message = _('{VALUE} of {FIELD} is not after or equal {DATETIME}')
field_format_str = '%Y-%m-%d %H:%M:%S'
param_format_str = '%Y-%m-%d %H:%M:%S'
    description = _('check whether the given value is after or equal to the datetime parsed with format_str')
def check_null(self):
pass
def check_value(self):
field_datetime = self._get_field_datetime()
param_datetime = self._get_param_datetime()
self.status = field_datetime >= param_datetime
def get_message(self):
return self.message.format(VALUE=self.field_value, FIELD=self.field_name, DATETIME=self.args[0])
def _get_field_datetime(self):
return datetime.datetime.strptime(self.field_value, self.field_format_str)
def _get_param_datetime(self):
        datetime_str = self.args[0] if len(self.args) >= 1 else None
return datetime.datetime.strptime(datetime_str, self.param_format_str)
class Between(BaseRule):
name = 'between'
    message = _('{VALUE} is not between {START} and {STOP}')
    description = _('check whether the given value lies between the params; it only applies to integer and string values')
def check_value(self):
start, stop = self._get_params()
length = self.get_value_length()
self.status = start <= length <= stop
def get_value_length(self):
try:
length = int(self.field_value)
except ValueError:
length = len(self.field_value)
return length
def check_null(self):
pass
def get_message(self):
start, stop = self._get_params()
        if start > stop:
            start, stop = stop, start
return self.message.format(VALUE=self.field_value, START=start, STOP=stop)
def _get_params(self):
if len(self.args) < 2:
raise InvalidRuleParamterError(_('between rule needs 2 params.'))
else:
start = int(self.args[0])
stop = int(self.args[1])
return start, stop
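# Worked example for Between with args ('3', '10'): for field_value '7' the
# int() cast succeeds, so length == 7 and 3 <= 7 <= 10 passes; for field_value
# 'abcd' the cast raises ValueError and the string length 4 is used instead.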
class Boolean(BaseRule):
name = 'boolean'
    message = _('{VALUE} cannot be converted to a boolean')
description = _('check the given value if boolean type')
type_ = ['0', '1', 'true', 'false']
def check_value(self):
        self.status = self.field_value.strip().lower() in self.type_
def check_null(self):
pass
class FileRuleMixin:
def check_value(self):
self._check_file()
def check_null(self):
pass
    def _check_file(self):
        self.status = self._check_ext()
def _check_ext(self):
ext = self._get_ext()
exts = self._get_exts()
return ext in exts
def _get_ext(self):
name = self.field_value.name
ext = name.split('.')[-1]
return ext
def _get_exts(self):
return self.exts
def get_message(self):
file_name = self.field_value.name
return self.message.format(FILE_NAME=file_name)
class File(FileRuleMixin, BaseRule):
name = 'file'
message = _('{FILE_NAME} is not allowed to upload')
    description = _('check whether the uploaded file extension is allowed for upload')
def _get_exts(self):
return self.args
class Image(File):
name = 'image'
exts = ['png', 'jpeg', 'gif', 'jpg', 'svg']
class Video(File):
name = 'video'
exts = ['mp4', 'avi', 'mkv', 'flv', 'rmvb']
class Audio(File):
name = 'audio'
exts = ['mp3', 'wma', 'flac', 'ape', 'ogg']
class Attachement(File):
name = 'attachement'
exts = ['doc', 'zip', 'ppt', 'docx', 'excel', 'rar']
class SizeMixin:
types = ['string', 'number', 'array', 'file']
def check_value(self):
_type = self.get_arg(0)
_size = self.get_arg(1)
if _type and _size and _type in self.types:
size = self._get_field_size(_type)
self._check_size(float(_size), float(size))
else:
            raise InvalidRuleParamterError(_('invalid rule parameters'))
def check_null(self):
pass
def _check_size(self, *args, **kwargs):
raise NotImplementedError()
def _get_field_size(self, _type):
if 'string' == _type:
return self._get_str_size()
if 'number' == _type:
return self._get_number_size()
if 'array' == _type:
return self._get_array_size()
if 'file' == _type:
return self._get_file_size()
raise InvalidRuleParamterError(_('invalid rule parameters'))
def _get_str_size(self):
_value = str(self.field_value)
return len(_value)
def _get_number_size(self):
_value = float(self.field_value)
return _value
def _get_file_size(self):
size = self.field_value.size
return size / 1000
def _get_array_size(self):
_value = len(self.field_value.split(','))
return _value
    def get_message(self):
        size = self.get_arg(1)
        return self.message.format(FIELD=self.field_name, SIZE=size)
class Min(SizeMixin, BaseRule):
name = 'min'
    message = _('size of {FIELD} is smaller than {SIZE}')
description = _('')
def _check_size(self, _size, size):
self.status = _size <= size
class Max(SizeMixin, BaseRule):
name = 'max'
    message = _('size of {FIELD} is larger than {SIZE}')
description = _('')
def _check_size(self, _size, size):
self.status = _size >= size
class Size(SizeMixin, BaseRule):
name = 'size'
    message = _('size of {FIELD} is not equal to {SIZE}')
description = _('The field under validation must have a size matching the given value. '
'For string data, value corresponds to the number of characters. '
'For numeric data, value corresponds to a given integer value. '
'For an array, size corresponds to the count of the array. '
'For files, size corresponds to the file size in kilobytes.')
def _check_size(self, _size, size):
self.status = _size == size
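# Worked example for Size with args ('string', '5'): _get_field_size('string')
# returns len(str(field_value)), so only a 5-character value passes; with
# ('file', '120') the uploaded file must be exactly 120 kB (size / 1000).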
class Username(BaseRule):
name = 'username'
message = _('the input {VALUE} is not a proper username.')
    description = _('this rule checks a normal username: the first character must be a lowercase letter, '
                    'optionally followed by lowercase letters, digits, dots, underscores and dashes.')
regex = r'^[a-z]{1}[a-z0-9\.\-_]*$'
def check_value(self):
        self.status = bool(re.fullmatch(self.regex, self.field_value))
def check_null(self):
pass
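# Examples against the username regex: 'alice' and 'a.b-c_9' match, while
# '9lives' (digit initial) and 'Alice' (uppercase initial) do not, since the
# pattern requires a lowercase first letter.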
class Password(BaseRule):
name = 'password'
    message = _('the input is not a valid password.')
# utils/replace.py
import re
import copy
from utils.infos import Infos
class Replace:
def variableDeduplication(self, fileContent, funcDict, contractDict, contractFunctionDict):
#print(contractFunctionDict)
fileContentCopy = copy.deepcopy(fileContent)
pattern4Variables = re.compile(
r'(\W|\A)(address|bool|string|int|int8|int16|int24|int32|int40|int48|int56|int64|int72|int80|int88|int96|int104|int112|int120|int128|int136|int144|int152|int160|int168|int176|int184|int192|int200|int208|int216|int224|int232|int240|int248|int256|uint|uint8|uint16|uint24|uint32|uint40|uint48|uint56|uint64|uint72|uint80|uint88|uint96|uint104|uint112|uint120|uint128|uint136|uint144|uint152|uint160|uint168|uint176|uint184|uint192|uint200|uint208|uint216|uint224|uint232|uint240|uint248|uint256|byte|bytes|bytes1|bytes2|bytes3|bytes4|bytes5|bytes6|bytes7|bytes8|bytes9|bytes10|bytes11|bytes12|bytes13|bytes14|bytes15|bytes16|bytes17|bytes18|bytes19|bytes20|bytes21|bytes22|bytes23|bytes24|bytes25|bytes26|bytes27|bytes28|bytes29|bytes30|bytes31|bytes32|fixed|(fixed[0-9]+x[0-9]+)|ufixed|(ufixed[0-9]+x[0-9]+))\s+(public\s+|private\s+|internal\s+)?(\w+)')
currVar = {}
for pair in funcDict.items():
#print(pair)
currContract = Infos().getCurrBlock(fileContent, contractDict[pair[0]]['idx'])
for function in pair[1]:
varList = []
pattern4Function = re.compile(r'function\s' + function + '\(([^()]*)\)')
for line in currContract:
result4Function = pattern4Function.findall(line['line'])
if result4Function:
funcBlock = Infos().getCurrBlock(fileContent, line['idx'])
for b in funcBlock:
result4Variables = pattern4Variables.findall(b['line'])
if result4Variables:
for result in result4Variables:
if result[-1] not in varList:
varList.append(result[-1])
if varList:
function = pair[0] + '|' + function
currVar[function] = varList
contractVarList = []
for funcVarPair in currVar.items():
for i, v in enumerate(funcVarPair[1]):
v = v.replace('_', 'line')
count = contractVarList.count(v)
contractVarList.append(v)
currVar[funcVarPair[0]][i] = {currVar[funcVarPair[0]][i]: v + "X" + str(count)}
for funcVar in currVar.items():
functionBlock = Infos().getCurrBlock(fileContent, contractFunctionDict[funcVar[0]]['idx'])
for code in functionBlock:
for varPair in funcVar[1]:
for p in varPair.items():
l = fileContentCopy[code['idx']]
#print(p)
pattern4Argv = re.compile(r'[^\.\w]' + (p[0]) + '\W+')
result4Argv = pattern4Argv.findall(l)
if result4Argv:
#print(result4Argv)
for r in result4Argv:
keyStart = r.find(p[0])
keyEnd = r.find(p[0]) + len(p[0])
subKey = r[0:keyStart] + p[1] + r[keyEnd:]
l = l[0:l.find(r)] + subKey + l[l.find(r) + len(r):]
#print(l)
fileContentCopy[code['idx']] = l
deduplicatedFile = fileContentCopy
return deduplicatedFile
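    # Illustration of the renaming scheme above (hypothetical input): if two
    # functions of one contract both declare 'uint amount', the variable is
    # renamed 'amountX0' in the first function and 'amountX1' in the second;
    # underscores are first rewritten to 'line' before the suffix is added.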
def argvReplace(self, matchLine, callFunc, contractFuntionStr, currArgvs, countCall, contractFunctionDict, fileContent, varType):
pattern4Function = re.compile(r'\A(function|modifier)\W+' + callFunc + '\s?\(([^()]*)\)')
pattern4IfCallFunction = re.compile(r'(if|assert|require)\s*\(.*\W*(' + callFunc + '\s*\([^)]*\))')
pattern4MultiFunctionCall = re.compile(r'\W(' + callFunc + '\s*\([^)]*\))(\.\w+\([^)]*\))')
pattern4Assign = re.compile(r'[^\=]+\=[^\=]+' + callFunc + '\s*\(')
pattern4Return = re.compile(r'return\W')
argvs = []
argvDict = {}
functionBlock = Infos().getCurrBlock(fileContent, contractFunctionDict[contractFuntionStr]['idx'])
isIfCall = 0
isMultiCall = 0
isAssign = 0
isReturn = 0
for idx, item in enumerate(functionBlock):
functionBlock[idx] = item['line']
result4Function = pattern4Function.findall(item['line'].strip())
if result4Function:
argvs = result4Function[0][1].split(',')
for idx, a in enumerate(argvs):
a = a.strip().split(' ')[-1].strip()
#a = re.sub(re.compile('^\s+|\s+$'), '', a).split(' ')[-1]
if a:
argvs[idx] = a
#print(argvs)
#function call in if statement, assertion, require
result4IfCallFunction = pattern4IfCallFunction.findall(matchLine.strip())
pattern4CallExternalFunction = re.compile(r'([\w\.\[\]]+)\.(\w+\([^()]*\))')
#print(matchLine)
if result4IfCallFunction:
isIfCall = 1
typeStr = "bool "
if varType:
typeStr = varType + " "
callStart = matchLine.find(result4IfCallFunction[0][1].strip())
callEnd = callStart + len(result4IfCallFunction[0][1].strip())
if matchLine[callStart-1] == ".":
result4CallExternalFunction = pattern4CallExternalFunction.findall(matchLine)
if result4CallExternalFunction:
callStart = matchLine.find(result4CallExternalFunction[0][0]+"."+result4CallExternalFunction[0][1])
subString1 = "substituteVariable" + "X" + callFunc + str(countCall)
subString2 = typeStr + subString1 + ";\n"
modifiedIfLine = matchLine[0:callStart] + subString1 + matchLine[callEnd:] + "\n"
#print(subString2)
newMatchLine = subString1 + " = " + result4IfCallFunction[0][1].strip()
matchLine = newMatchLine
appendLineBeforeList = []
appendLineAfterList = []
appendLineBeforeList.append(subString2)
appendLineAfterList.append(modifiedIfLine)
#print(matchLine)
#function call in format like "A(b, c).call()"
result4MultiFunctionCall = pattern4MultiFunctionCall.findall(matchLine.strip())
if result4MultiFunctionCall:
isMultiCall = 1
suffixStr = result4MultiFunctionCall[0][1]
#print(matchLine)
result4Assign = pattern4Assign.findall(matchLine.strip())
if result4Assign:
#print(matchLine)
isAssign = 1
assignPrefix = result4Assign[0].split("=")[0]
result4Return = pattern4Return.findall(matchLine.strip())
if result4Return:
#print(matchLine)
isReturn = 1
#substitution
#print("matchLine", matchLine)
if currArgvs:
if len(argvs) > len(currArgvs):
argvs = argvs[0:len(currArgvs)]
for idx, item in enumerate(argvs):
if item:
argvDict[item] = currArgvs[idx]
#print(argvDict)
pattern4ReturnBool = re.compile(r'return\s+\W*(true|false)\W')
pattern4ReturnLine = re.compile(r'(return)(\W+\s*.+\;)')
pattern4ReturnOneVal = re.compile(r'return\s+(.*)\;')
for index, line in enumerate(functionBlock[1:-1]):
#print(line)
result4ReturnBool = pattern4ReturnBool.findall(line.strip())
for pair in argvDict.items():
#print(pair)
pattern4Argv = re.compile(r'[^\.\w]' + pair[0] + '\W')
result4Argv = pattern4Argv.findall(line.strip())
if result4Argv and (pair[0] != pair[1]):
for singleResult in result4Argv:
l = functionBlock[index+1]
subGoal = singleResult[0] + pair[1] + singleResult[-1]
startIdx = l.find(singleResult)
endIdx = startIdx + len(singleResult)
functionBlock[index+1] = l[0:startIdx] + subGoal + l[endIdx:]
#print(functionBlock[index+1])
result4ReturnLine = pattern4ReturnLine.findall(functionBlock[index+1].strip())
if result4ReturnLine:
if isAssign and not isMultiCall:
#print(result4ReturnLine)
functionBlock[index+1] = pattern4ReturnLine.sub( assignPrefix + '=' + result4ReturnLine[0][1], functionBlock[index+1])
#print(functionBlock[index+1])
elif isMultiCall:
result4ReturnOneVal = pattern4ReturnOneVal.findall(line.strip())
if result4ReturnOneVal:
strStart = line.find(line.strip())
functionBlock[index+1] = line[0:strStart] + result4ReturnOneVal[0].strip() + suffixStr + ";\n"
elif isReturn:
pass
else:
functionBlock[index+1] = ""
else:
pass
        # when functionB has been called by functionA multiple times
if countCall:
pattern4Variables = re.compile(r'(address|bool|string|int|int8|int16|int24|int32|int40|int48|int56|int64|int72|int80|int88|int96|int104|int112|int120|int128|int136|int144|int152|int160|int168|int176|int184|int192|int200|int208|int216|int224|int232|int240|int248|int256|uint|uint8|uint16|uint24|uint32|uint40|uint48|uint56|uint64|uint72|uint80|uint88|uint96|uint104|uint112|uint120|uint128|uint136|uint144|uint152|uint160|uint168|uint176|uint184|uint192|uint200|uint208|uint216|uint224|uint232|uint240|uint248|uint256|byte|bytes|bytes1|bytes2|bytes3|bytes4|bytes5|bytes6|bytes7|bytes8|bytes9|bytes10|bytes11|bytes12|bytes13|bytes14|bytes15|bytes16|bytes17|bytes18|bytes19|bytes20|bytes21|bytes22|bytes23|bytes24|bytes25|bytes26|bytes27|bytes28|bytes29|bytes30|bytes31|bytes32|fixed|(fixed[0-9]+x[0-9]+)|ufixed|(ufixed[0-9]+x[0-9]+))\s+(\w*)')
duplicatedVar = {}
for f in functionBlock[1:-1]:
result4Variables = pattern4Variables.findall(f.strip())
if result4Variables:
charIdx = 96 + countCall
duplicatedVar[result4Variables[0][-1]] = result4Variables[0][-1] + str(charIdx) + chr(charIdx)
for p in duplicatedVar.items():
pattern4Var = re.compile(r'[^\.\w]' + p[0] + '\W')
for index, line in enumerate(functionBlock[1:-1]):
result4Var = pattern4Var.findall(line.strip())
if result4Var and (p[0] != p[1]):
for singleResult in result4Var:
l = functionBlock[index+1]
subGoal = singleResult[0] + p[1] + singleResult[-1]
startIdx = l.find(singleResult)
endIdx = startIdx + len(singleResult)
functionBlock[index+1] = l[0:startIdx] + subGoal + l[endIdx:]
#print(functionBlock[1])
replacedBlock = functionBlock[1:-1]
if isIfCall:
replacedBlock = appendLineBeforeList + functionBlock[1:-1] + appendLineAfterList
#print(replacedBlock)
return replacedBlock
def isIteratedFunc(self, contractFuntionStr, contractFunctionDict, deduplicatedFile):
pattern4Function = re.compile(r'\Afunction\W+([^\(]*)')
if contractFunctionDict.__contains__(contractFuntionStr):
searchBlock = Infos().getCurrBlock(deduplicatedFile, contractFunctionDict[contractFuntionStr]['idx'])
result4Function = pattern4Function.findall(searchBlock[0]['line'].strip())
currFunc = result4Function[0]
pattern4Iterate = re.compile(r'\W?' + currFunc + '\(')
for line in searchBlock[1:]:
result4Iterate = pattern4Iterate.findall(line['line'])
if result4Iterate:
return True
return False
def functionReplace(self, contractDict, fileContent, modSearchOrder, modDict, contractFunctionDict, searchPath, contractElemLibDict, libFunctionDict):
pattern4Contract = re.compile(r'\Acontract\W+(\w*)')
pattern4Function = re.compile(r'\Afunction\W+([^\(]*)')
pattern4Library = re.compile(r'\Alibrary\s+(\w+)\s*\{')
pattern4Modifier = re.compile(r'\Amodifier\W+([^\(]*)')
pattern4Require = re.compile(r'\A(require|assert)\s*\(')
pattern4ElementaryTypeName = re.compile(r'(address|bool|string|int|int8|int16|int24|int32|int40|int48|int56|int64|int72|int80|int88|int96|int104|int112|int120|int128|int136|int144|int152|int160|int168|int176|int184|int192|int200|int208|int216|int224|int232|int240|int248|int256|uint|uint8|uint16|uint24|uint32|uint40|uint48|uint56|uint64|uint72|uint80|uint88|uint96|uint104|uint112|uint120|uint128|uint136|uint144|uint152|uint160|uint168|uint176|uint184|uint192|uint200|uint208|uint216|uint224|uint232|uint240|uint248|uint256|byte|bytes|bytes1|bytes2|bytes3|bytes4|bytes5|bytes6|bytes7|bytes8|bytes9|bytes10|bytes11|bytes12|bytes13|bytes14|bytes15|bytes16|bytes17|bytes18|bytes19|bytes20|bytes21|bytes22|bytes23|bytes24|bytes25|bytes26|bytes27|bytes28|bytes29|bytes30|bytes31|bytes32|fixed|(fixed[0-9]+x[0-9]+)|ufixed|(ufixed[0-9]+x[0-9]+))(\s+|\s*\()')
pattern4Instantiation1 = re.compile(r'\W\s*new\s+(\w*)\s*\(')
funcDict = {}
contract = ''
saveContractList = []
for line in fileContent:
result4Contract = pattern4Contract.findall(line.strip())
result4Function = pattern4Function.findall(line.strip())
if result4Contract:
contract = result4Contract[0].strip()
funcDict.setdefault(contract, [])
if result4Function and contract:
func = result4Function[0].strip()
funcDict[contract].append(func)
#print(contractElemLibDict)
deduplicatedFile = self.variableDeduplication(fileContent, funcDict, contractDict, contractFunctionDict)
dFileCopy = copy.deepcopy(deduplicatedFile)
contractVarDict = Infos().varTypeRecord(contractDict, deduplicatedFile)
#print(contractVarDict)
pattern4CallFunc = re.compile(r'(\w+\([^()]*\))')
pattern4Constructor = re.compile(r'constructor')
#pattern4CallExternalFunction = re.compile(r'([^\w\.\[\]]\w.+)\.(\w+\([^()]*\))')
pattern4CallExternalFunction = re.compile(r'([\w\.\[\]]+)\.(\w+\([^()]*\))')
callFunctionDict = {}
callModifierDict = {}
currContract = ''
currFunc = ''
calledFunctionList = []
delFunctionList = []
flag = 0
end = 0
while not end:
for idx, line in enumerate(dFileCopy[flag:]):
#print("o:", idx+flag)
#print(line)
varType = ""
result4Instantiation1 = pattern4Instantiation1.findall(line.strip())
result4CallFunc = pattern4CallFunc.findall(line.strip())
result4Constructor = pattern4Constructor.findall(line.strip())
result4Contract = pattern4Contract.findall(line.strip())
result4Library = pattern4Library.findall(line.strip())
result4Require = pattern4Require.findall(line.strip())
result4CurrFunction = pattern4Function.findall(line.strip())
result4CurrModifier = pattern4Modifier.findall(line.strip())
result4CallExternalFunction = pattern4CallExternalFunction.findall(line.strip())
result4ElementaryTypeName = pattern4ElementaryTypeName.findall(line.strip())
if result4Instantiation1 and result4Instantiation1[0] not in saveContractList:
saveContractList.append(result4Instantiation1[0])
if result4Contract:
currContract = result4Contract[0]
if result4Library:
currContract = ''
if result4CurrFunction and currContract:
currFunc = result4CurrFunction[0]
#print(currFunc)
callFunctionDict.setdefault(currContract + '|' + currFunc, [])
# generate modifier dict
if modSearchOrder.__contains__(currContract):
for m in modSearchOrder[currContract]:
mod = m.split('|')[1]
patter4ModifierCall = re.compile(r'function\s(.*)\s' + mod + '\W')
result4ModifierCall = patter4ModifierCall.findall(line.strip())
if result4ModifierCall:
#print(line)
callModifierDict.setdefault(currContract + '|' + currFunc, [])
if m not in callModifierDict[currContract + '|' + currFunc]:
list = []
for i in callModifierDict[currContract + '|' + currFunc]:
list.append(i.split('|')[1])
if mod not in list:
callModifierDict[currContract + '|' + currFunc].append(m)
if result4Constructor:
currFunc = result4Constructor[0]
if result4CallExternalFunction and currContract and currFunc and not result4CurrFunction and not result4Contract:
caller = result4CallExternalFunction[0][0].strip()
if caller.count("[") > caller.count("]"):
caller = caller.split("[")[-1]
callFunc = result4CallExternalFunction[0][1].split('(')[0]
NcontractList, contractDict, NlibDict, NlibFunctionDict, NmainContract, NmodDict, NnewContractFunctionDict, NcontractConstructorDict = Infos().findAllContracts(dFileCopy)
#contractVarDict = Infos().varTypeRecord(contractDict, dFileCopy)
if caller == 'super':
for target in searchPath[currContract][1:]:
searchStr = target + '|' + callFunc
if contractFunctionDict.__contains__(searchStr):
caller = target
break
else:
pass
else:
searchBlock = Infos().getCurrBlock(dFileCopy, contractDict[currContract]['idx'])
for k in contractDict.keys():
pattern4ContractRef = re.compile(r''+ k +'\s+(public\s+)?' + caller + '\s*\;')
for s in searchBlock:
result4ContractRef = pattern4ContractRef.findall(s['line'])
if result4ContractRef:
caller = k
if k not in saveContractList:
saveContractList.append(k)
if contractDict.__contains__(caller):
argvStr = result4CallExternalFunction[0][1].split('(')[1].replace(')', '')
if argvStr:
currArgvs = argvStr.split(',')
for cArgvIdx, cArgv in enumerate(currArgvs):
currArgvs[cArgvIdx] = cArgv.strip()
else:
currArgvs = []
callFunctionDict.setdefault(currContract + '|' + currFunc, [])
countCall = callFunctionDict[currContract + '|' + currFunc].count(callFunc)
callFunctionDict[currContract + '|' + currFunc].append(callFunc)
contractFuntionStr = caller + '|' + callFunc
if contractFunctionDict.__contains__(contractFuntionStr):
isIterated = False
isIterated = self.isIteratedFunc(contractFuntionStr, contractFunctionDict, deduplicatedFile)
if isIterated:
continue
#print(line)
contractVarList = [cvk for cvk in contractVarDict.keys()]
for cVar in contractVarList:
if cVar.split("|")[0] == caller:
vName = cVar.split("|")[1]
if not contractVarDict.__contains__(currContract + "|" + vName):
vType = contractVarDict[cVar]["type"]
contractVarDict[currContract + "|" + vName] = {"var": vName, "type": vType}
contractElemLibList = [celk for celk in contractElemLibDict.keys()]
for cELib in contractElemLibList:
if cELib.split("|")[0] == caller:
ketStr = currContract + "|" + cELib.split("|")[1]
if not contractElemLibDict.__contains__(ketStr):
libFunc = contractElemLibDict[cELib]
contractElemLibDict[ketStr] = libFunc
block = self.argvReplace(line, callFunc, contractFuntionStr, currArgvs, countCall, contractFunctionDict, deduplicatedFile, varType)
if contractFuntionStr not in calledFunctionList:
calledFunctionList.append(contractFuntionStr)
# indent
stringStart = len(line) - len(line.lstrip())
lineStart = 0
for bIdx, bLine in enumerate(block):
if bLine and not lineStart:
lineStart = len(bLine) - len(bLine.lstrip())
block[bIdx] = line[0:stringStart] + bLine[lineStart-1:]
dFileCopy = dFileCopy[0:flag + idx] + block + dFileCopy[flag + idx+1:]
block = []
                            flag = flag + idx
# kedarisetti/mp4viewer
from __future__ import absolute_import
import sys
from . import box
class MovieHeader(box.FullBox):
def parse(self, buf):
super(MovieHeader, self).parse(buf)
if self.version == 1:
self.creation_time = buf.readint64()
self.modification_time = buf.readint64()
self.timescale = buf.readint32()
self.duration = buf.readint64()
else:
self.creation_time = buf.readint32()
self.modification_time = buf.readint32()
self.timescale = buf.readint32()
self.duration = buf.readint32()
self.rate = buf.readint32()
self.volume = buf.readint16()
buf.skipbytes(2 + 8)
self.matrix = [[buf.readint32() for j in range(3)] for i in range(3)]
buf.skipbytes(24)
self.next_track_id = buf.readint32()
def generate_fields(self):
for x in super(MovieHeader, self).generate_fields():
yield x
from .utils import get_utc_from_seconds_since_1904
yield ("creation time", self.creation_time, get_utc_from_seconds_since_1904(self.creation_time).ctime())
yield ("modification time", self.creation_time, get_utc_from_seconds_since_1904(self.modification_time).ctime())
yield ("timescale", self.timescale)
yield ("duration", self.duration)
yield ("rate", "0x%08X" %(self.rate))
yield ("volume", "0x%04X" %(self.volume))
yield ("matrix", self.matrix)
yield ("next track id", self.next_track_id)
class TrackHeader(box.FullBox):
def parse(self, buf):
super(TrackHeader, self).parse(buf)
if self.version == 1:
self.creation_time = buf.readint64()
self.modification_time = buf.readint64()
self.track_id = buf.readint32()
buf.skipbytes(4)
self.duration = buf.readint64()
else:
self.creation_time = buf.readint32()
self.modification_time = buf.readint32()
self.track_id = buf.readint32()
buf.skipbytes(4)
self.duration = buf.readint32()
buf.skipbytes(8)
self.layer = buf.readint16()
self.altgroup = buf.readint16()
self.volume = buf.readint16()
buf.skipbytes(2)
self.matrix = [[buf.readint32() for j in range(3)] for i in range(3)]
self.width = buf.readint32()
self.height = buf.readint32()
def generate_fields(self):
for x in super(TrackHeader, self).generate_fields():
yield x
from .utils import get_utc_from_seconds_since_1904
yield ("creation time", self.creation_time, get_utc_from_seconds_since_1904(self.creation_time).ctime())
yield ("modification time", self.modification_time, get_utc_from_seconds_since_1904(self.modification_time).ctime())
yield ("track id", self.track_id)
yield ("duration", self.duration)
yield ("layer", "0x%04X" %(self.layer))
yield ("alternate group", "0x%04X" %(self.altgroup))
yield ("volume", "0x%04X" %(self.volume))
yield ("matrix", self.matrix)
yield ("width", self.width)
yield ("height", self.height)
class MediaHeader(box.FullBox):
def parse(self, buf):
super(MediaHeader, self).parse(buf)
if self.version == 1:
self.creation_time = buf.readint64()
self.modification_time = buf.readint64()
self.timescale = buf.readint32()
self.duration = buf.readint64()
else:
self.creation_time = buf.readint32()
self.modification_time = buf.readint32()
self.timescale = buf.readint32()
self.duration = buf.readint32()
self.language = buf.readint16() & 0x7FFF
buf.skipbytes(2)
def generate_fields(self):
from .utils import parse_iso639_2_15bit
from .utils import get_utc_from_seconds_since_1904
for x in super(MediaHeader, self).generate_fields():
yield x
yield ("creation time", self.creation_time, get_utc_from_seconds_since_1904(self.creation_time).ctime())
yield ("modification time", self.modification_time, get_utc_from_seconds_since_1904(self.modification_time).ctime())
yield ("timescale", self.timescale)
yield ("duration", self.duration)
yield ("language", self.language, parse_iso639_2_15bit(self.language))
class VideoMediaHeader(box.FullBox):
def parse(self, buf):
super(VideoMediaHeader, self).parse(buf)
self.graphicsmode = buf.readint16()
self.opcolor = []
for i in range(0,3):
self.opcolor.append(buf.readint16())
def generate_fields(self):
for x in super(VideoMediaHeader, self).generate_fields():
yield x
yield ("graphics mode", self.graphicsmode)
yield ("opcolor", self.opcolor)
class SoundMediaHeader(box.FullBox):
def parse(self, buf):
super(SoundMediaHeader, self).parse(buf)
self.balance = buf.readint16()
buf.skipbytes(2)
def generate_fields(self):
for x in super(SoundMediaHeader, self).generate_fields():
yield x
yield ("balance", self.balance)
class HintMediaHeader(box.FullBox):
def parse(self, buf):
super(HintMediaHeader, self).parse(buf)
self.max_pdu_size = buf.readint16()
self.avg_pdu_size = buf.readint16()
self.max_bitrate = buf.readint16()
self.avg_bitrate = buf.readint16()
def generate_fields(self):
for x in super(HintMediaHeader, self).generate_fields():
yield x
yield ("Max PDU size", self.max_pdu_size)
yield ("Average PDU size", self.avg_pdu_size)
yield ("Max bitrate", self.max_bitrate)
yield ("Average bitrate", self.avg_bitrate)
class HandlerBox(box.FullBox):
def parse(self, buf):
super(HandlerBox, self).parse(buf)
buf.skipbytes(4)
self.handler = buf.readstr(4)
buf.skipbytes(12)
self.consumed_bytes += 20
self.name = buf.read_cstring(self.size - self.consumed_bytes)[0]
def generate_fields(self):
for x in super(HandlerBox, self).generate_fields():
yield x
yield ("handler", self.handler)
yield ("name", self.name if len(self.name) else '<empty>')
class SampleEntry(box.Box):
def parse(self, buf):
super(SampleEntry, self).parse(buf)
buf.skipbytes(6)
self.data_ref_index = buf.readint16()
self.consumed_bytes += 8
def generate_fields(self):
for x in super(SampleEntry, self).generate_fields():
yield x
yield ("data reference index", self.data_ref_index)
class HintSampleEntry(SampleEntry):
def parse(self, buf):
buf.skipbytes(self.size - self.consumed_bytes)
class VisualSampleEntry(SampleEntry):
def parse(self, buf):
super(VisualSampleEntry, self).parse(buf)
buf.skipbytes(2 + 2 + 3 * 4)
self.width = buf.readint16()
self.height = buf.readint16()
self.hori_resolution = buf.readint32()
self.vert_resolution = buf.readint32()
buf.skipbytes(4)
self.frame_count = buf.readint16()
compressor_name_length = buf.readbyte()
self.compressor_name = buf.readstr(compressor_name_length) if compressor_name_length else ''
buf.skipbytes(32 - compressor_name_length - 1)
self.depth = buf.readint16()
buf.skipbytes(2)
self.has_children = True
def generate_fields(self):
for x in super(VisualSampleEntry, self).generate_fields():
yield x
yield ("width", self.width)
yield ("height", self.height)
yield ("horizontal resolution", "0x%08X" %(self.hori_resolution))
yield ("vertical resolution", "0x%08X" %(self.vert_resolution))
yield ("frame count", self.frame_count)
yield ("compressor name", self.compressor_name)
yield ("depth", self.depth)
class AudioSampleEntry(SampleEntry):
def parse(self, buf):
super(AudioSampleEntry, self).parse(buf)
# 14496-12 says first eight bits are reserved.
# Apple QuickTime format (MOV) uses those bytes for version, revision and vendor
# The size of this box in QT varies according to the version, so we need the version
self.quicktime_version = buf.readint16()
buf.skipbytes(6)
self.channel_count = buf.readint16()
self.sample_size = buf.readint16()
buf.skipbytes(4)
self.sample_rate = buf.readint32()
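        # sample_rate is a 16.16 fixed-point value: the integer rate (e.g.
        # 44100) sits in the high 16 bits, as generate_fields() shows below.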
if self.quicktime_version == 1:
self.samples_per_pkt = buf.readint32()
self.bytes_per_pkt = buf.readint32()
self.bytes_per_frame = buf.readint32()
self.bytes_per_sample = buf.readint32()
elif self.quicktime_version == 2:
buf.skipbytes(36)
self.has_children = True
def generate_fields(self):
for x in super(AudioSampleEntry, self).generate_fields():
yield x
yield ("channel count", self.channel_count)
yield ("sample size", self.sample_size)
yield ("sample rate", self.sample_rate, "%d, %d" %(self.sample_rate >> 16, self.sample_rate & 0xFFFF))
class SampleDescription(box.FullBox):
def parse(self, buf):
super(SampleDescription, self).parse(buf)
media = self.find_parent('mdia')
hdlr = media.find_child('hdlr') if media else None
handler = hdlr.handler if hdlr else None
self.entry_count = buf.readint32()
for i in range(self.entry_count):
if handler == 'soun':
self.children.append(AudioSampleEntry(buf))
elif handler == 'vide':
self.children.append(VisualSampleEntry(buf))
elif handler == 'hint':
self.children.append(HintSampleEntry(buf))
else:
entry = box.Box(buf)
self.children.append(entry)
buf.skipbytes(entry.size - entry.consumed_bytes)
if len(self.children) != 0:
self.has_children = True
def generate_fields(self):
for x in super(SampleDescription, self).generate_fields():
yield x
yield ("entry count", self.entry_count)
class DataEntryUrnBox(box.FullBox):
def parse(self, buf):
super(DataEntryUrnBox, self).parse(buf)
self.name = buf.read_cstring()[0]
self.location = buf.read_cstring()[0]
def generate_fields(self):
for x in super(DataEntryUrnBox, self).generate_fields():
yield x
yield ("name", self.name)
yield ("location", self.location)
class DataEntryUrlBox(box.FullBox):
def parse(self, buf):
super(DataEntryUrlBox, self).parse(buf)
self.location = buf.read_cstring(self.size - self.consumed_bytes)[0]
def generate_fields(self):
for x in super(DataEntryUrlBox, self).generate_fields():
yield x
yield ("location", self.location)
class DataReferenceBox(box.FullBox):
def parse(self, buf):
super(DataReferenceBox, self).parse(buf)
self.entry_count = buf.readint32()
self.has_children = True
for i in range(self.entry_count):
self.children.append(box.Box.getnextbox(buf, self))
def generate_fields(self):
for x in super(DataReferenceBox, self).generate_fields():
yield x
yield ("entry count", self.entry_count)
class TimeToSampleBox(box.FullBox):
def parse(self, buf):
super(TimeToSampleBox, self).parse(buf)
self.entry_count = buf.readint32()
self.entries = []
for i in range(self.entry_count):
count = buf.readint32()
delta = buf.readint32()
self.entries.append((count, delta))
def generate_fields(self):
for x in super(TimeToSampleBox, self).generate_fields():
yield x
yield ("entry count", self.entry_count)
for entry in self.entries:
yield ("sample count", entry[0])
yield ("sample delta", entry[1])
class SampleToChunkBox(box.FullBox):
def parse(self, buf):
super(SampleToChunkBox, self).parse(buf)
self.entry_count = buf.readint32()
self.entries = []
for i in range(self.entry_count):
first = buf.readint32()
samples_per_chunk = buf.readint32()
sdix = buf.readint32()
self.entries.append((first, samples_per_chunk, sdix))
def generate_fields(self):
for x in super(SampleToChunkBox, self).generate_fields():
yield x
yield ("entry count", self.entry_count)
for entry in self.entries:
yield ("first chunk", entry[0])
yield ("samples per chunk", entry[1])
yield ("sample description index", entry[2])
class ChunkOffsetBox(box.FullBox):
def parse(self, buf):
super(ChunkOffsetBox, self).parse(buf)
self.entry_count = buf.readint32()
self.entries = [buf.readint32() for i in range(self.entry_count)]
def generate_fields(self):
for x in super(ChunkOffsetBox, self).generate_fields():
yield x
yield ("entry count", self.entry_count)
yield ("chunk offsets", self.entries)
class SyncSampleBox(box.FullBox):
def parse(self, buf):
super(SyncSampleBox, self).parse(buf)
self.entry_count = buf.readint32()
self.entries = [buf.readint32() for i in range(self.entry_count)]
def generate_fields(self):
for x in super(SyncSampleBox, self).generate_fields():
yield x
yield ("entry count", self.entry_count)
yield ("sample numbers", self.entries)
class SampleSizeBox(box.FullBox):
def parse(self, buf):
super(SampleSizeBox, self).parse(buf)
self.sample_size = buf.readint32()
self.sample_count = buf.readint32()
if self.sample_size == 0:
self.entries = [buf.readint32() for i in range(self.sample_count)]
else:
self.entries = []
def generate_fields(self):
for x in super(SampleSizeBox, self).generate_fields():
yield x
yield ("sample size", self.sample_size)
yield ("sample count", self.sample_count)
if self.sample_size == 0:
yield ("sample sizes", self.entries)
class CompactSampleSizeBox(box.FullBox):
def parse(self, buf):
super(CompactSampleSizeBox, self).parse(buf)
buf.skipbytes(3)
self.field_size = buf.readbyte()
self.sample_count = buf.readint32()
self.entries = [buf.readbits(self.field_size) for i in range(self.sample_count)]
# skip padding bits
if self.field_size == 4 and self.sample_count % 2 != 0:
buf.readbits(4)
def generate_fields(self):
for x in super(CompactSampleSizeBox, self).generate_fields():
yield x
yield ("field size", self.sample_size)
yield ("sample count", self.sample_count)
yield ("entries", self.entries)
class MovieExtendsHeader(box.FullBox):
def parse(self, buf):
super(MovieExtendsHeader, self).parse(buf)
if self.version == 1:
self.fragment_duration = buf.readint64()
else:
self.fragment_duration = buf.readint32()
def generate_fields(self):
for x in super(MovieExtendsHeader, self).generate_fields():
yield x
yield ("Fragment duration", self.fragment_duration)
class TrackExtendsBox(box.FullBox):
def parse(self, buf):
super(TrackExtendsBox, self).parse(buf)
self.track_id = buf.readint32()
self.default_sample_description_index = buf.readint32()
self.default_sample_duration = buf.readint32()
self.default_sample_size = buf.readint32()
self.default_sample_flags = buf.readint32()
def generate_fields(self):
for x in super(TrackExtendsBox, self).generate_fields():
yield x
yield ("Track ID", self.track_id)
yield ("Default sample description index", self.default_sample_description_index)
yield ("Default sample duration", self.default_sample_duration)
yield ("Default sample size", self.default_sample_size)
yield ("Default sample flags", self.default_sample_flags)
class AvcCBox(box.Box):
def parse(self, buf):
super(AvcCBox, self).parse(buf)
self.configuration_level = buf.readbyte()
self.profile = buf.readbyte()
self.profile_compatibility = buf.readbyte()
self.level = buf.readbyte()
buf.readbits(6)
self.len_minus_1 = buf.readbits(2)
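        # lengthSizeMinusOne from the AVCDecoderConfigurationRecord: NAL unit
        # length fields in the samples are (len_minus_1 + 1) bytes wide.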
buf.readbits(3)
self.sps = []
num_of_sps = buf.readbits(5)
        for x in range(num_of_sps):
sps_len = buf.readint16()
self.sps.append(buf.readstr(sps_len))
self.pps = []
num_of_pps = buf.readbyte()
        for x in range(num_of_pps):
pps_len = buf.readint16()
self.pps.append(buf.readstr(pps_len))
self.has_children = False
def generate_fields(self):
for x in super(AvcCBox, self).generate_fields():
yield x
yield ("Confiuration level", self.configuration_level)
yield ("Profile", self.profile)
yield ("Profile compatibility", self.profile_compatibility)
yield ("Level", | |
ext=1, sky_iter=10, iter_atol=1.e-4):
"""Subtract sky background from grism exposures
Implementation of grism sky subtraction from ISR 2015-17
TBD
"""
import numpy.ma
import scipy.ndimage as nd
#from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
### Figure out which grism
im = pyfits.open(grism['files'][0])
grism_element = utils.get_hst_filter(im[0].header)
flat = 1.
if grism_element == 'G141':
bg_fixed = ['zodi_G141_clean.fits']
bg_vary = ['zodi_G141_clean.fits', 'excess_lo_G141_clean.fits',
'G141_scattered_light.fits'][1:]
isACS = False
elif grism_element == 'G102':
bg_fixed = ['zodi_G102_clean.fits']
bg_vary = ['excess_G102_clean.fits']
isACS = False
elif grism_element == 'G280':
bg_fixed = ['UVIS.G280.flat.fits']
bg_vary = ['UVIS.G280.ext{0:d}.sky.fits'.format(ext)]
isACS = True
flat = 1.
elif grism_element == 'G800L':
bg_fixed = ['ACS.WFC.CHIP{0:d}.msky.1.smooth.fits'.format({1:2,2:1}[ext])]
bg_vary = ['ACS.WFC.flat.fits']
#bg_fixed = ['ACS.WFC.CHIP%d.msky.1.fits' %({1:2,2:1}[ext])]
#bg_fixed = []
isACS = True
flat_files = {'G800L':'n6u12592j_pfl.fits'} # F814W
flat_file = flat_files[grism_element]
flat_im = pyfits.open(os.path.join(os.getenv('jref'), flat_file))
flat = flat_im['SCI',ext].data.flatten()
if verbose:
print('{0}: EXTVER={1:d} / {2} / {3}'.format(grism['product'], ext, bg_fixed, bg_vary))
if not isACS:
ext = 1
### Read sky files
data_fixed = []
for file in bg_fixed:
im = pyfits.open('{0}/CONF/{1}'.format(GRIZLI_PATH, file))
sh = im[0].data.shape
data = im[0].data.flatten()/flat
data_fixed.append(data)
data_vary = []
for file in bg_vary:
im = pyfits.open('{0}/CONF/{1}'.format(GRIZLI_PATH, file))
data_vary.append(im[0].data.flatten()*1)
sh = im[0].data.shape
yp, xp = np.indices(sh)
### Hard-coded (1014,1014) WFC3/IR images
Npix = sh[0]*sh[1]
Nexp = len(grism['files'])
Nfix = len(data_fixed)
Nvary = len(data_vary)
Nimg = Nexp*Nvary + Nfix
A = np.zeros((Npix*Nexp, Nimg), dtype=np.float32)
data = np.zeros(Npix*Nexp, dtype=np.float32)
wht = data*0.
mask = data > -1
medians = np.zeros(Nexp)
exptime = np.ones(Nexp)
### Build combined arrays
if isACS:
bits = 64+32
else:
bits = 576
for i in range(Nexp):
flt = pyfits.open(grism['files'][i])
dq = utils.unset_dq_bits(flt['DQ',ext].data, okbits=bits)
dq_mask = dq == 0
## Data
data[i*Npix:(i+1)*Npix] = (flt['SCI',ext].data*dq_mask).flatten()
mask[i*Npix:(i+1)*Npix] &= dq_mask.flatten() #== 0
wht[i*Npix:(i+1)*Npix] = 1./(flt['ERR',ext].data**2*dq_mask).flatten()
wht[~np.isfinite(wht)] = 0.
if isACS:
exptime[i] = flt[0].header['EXPTIME']
data[i*Npix:(i+1)*Npix] /= exptime[i]
wht[i*Npix:(i+1)*Npix] *= exptime[i]**2
medians[i] = np.median(flt['SCI',ext].data[dq_mask]/exptime[i])
else:
medians[i] = np.median(flt['SCI',ext].data[dq_mask])
## Fixed arrays
for j in range(Nfix):
for k in range(Nexp):
A[k*Npix:(k+1)*Npix,j] = data_fixed[j]
mask_j = (data_fixed[j] > 0) & np.isfinite(data_fixed[j])
mask[i*Npix:(i+1)*Npix] &= mask_j
## Variable arrays
for j in range(Nvary):
k = Nfix+j+Nvary*i
A[i*Npix:(i+1)*Npix,k] = data_vary[j]
mask[i*Npix:(i+1)*Npix] &= np.isfinite(data_vary[j])
### Initial coeffs based on image medians
coeffs = np.array([np.min(medians)])
if Nvary > 0:
coeffs = np.hstack((coeffs, np.zeros(Nexp*Nvary)))
coeffs[1::Nvary] = medians-medians.min()
model = np.dot(A, coeffs)
coeffs_0 = coeffs
for iter in range(sky_iter):
model = np.dot(A, coeffs)
resid = (data-model)*np.sqrt(wht)
obj_mask = (resid < 2.5) & (resid > -3)
for j in range(Nexp):
obj_j = nd.minimum_filter(obj_mask[j*Npix:(j+1)*Npix], size=30)
obj_mask[j*Npix:(j+1)*Npix] = (obj_j > 0).flatten()
if False:
j = 1
mask_i = (obj_mask & mask)[j*Npix:(j+1)*Npix].reshape(sh)
r_i = (data-model)[j*Npix:(j+1)*Npix].reshape(sh)
ds9.view(r_i * mask_i)
if verbose:
            print('    {0} > Iter: {1:d}, masked: {2:2.0f}%, {3}'.format(grism['product'], iter+1, obj_mask.sum()/Npix/Nexp*100, coeffs))
out = np.linalg.lstsq(A[mask & obj_mask,:], data[mask & obj_mask])
coeffs = out[0]
# Test for convergence
if np.allclose(coeffs, coeffs_0, rtol=1.e-5, atol=iter_atol):
break
else:
coeffs_0 = coeffs
### Best-fit sky
sky = np.dot(A, coeffs).reshape(Nexp, Npix)
## log file
fp = open('{0}_{1}_sky_background.info'.format(grism['product'],ext), 'w')
fp.write('# file c1 {0}\n'.format(' '.join(['c{0:d}'.format(v+2)
for v in range(Nvary)])))
fp.write('# {0}\n'.format(grism['product']))
fp.write('# bg1: {0}\n'.format(bg_fixed[0]))
for v in range(Nvary):
fp.write('# bg{0:d}: {1}\n'.format(v+2, bg_vary[v]))
for j in range(Nexp):
file = grism['files'][j]
line = '{0} {1:9.4f}'.format(file, coeffs[0])
for v in range(Nvary):
k = Nfix + j*Nvary + v
line = '{0} {1:9.4f}'.format(line, coeffs[k])
fp.write(line+'\n')
fp.close()
if apply:
for j in range(Nexp):
file = grism['files'][j]
flt = pyfits.open(file, mode='update')
flt['SCI',ext].data -= sky[j,:].reshape(sh)*exptime[j]
header = flt[0].header
header['GSKYCOL{0:d}'.format(ext)] = (False, 'Subtract column average')
header['GSKYN{0:d}'.format(ext)] = (Nfix+Nvary, 'Number of sky images')
header['GSKY{0:d}01'.format(ext)] = (coeffs[0],
'Sky image {0} (fixed)'.format(bg_fixed[0]))
header['GSKY{0:d}01F'.format(ext)] = (bg_fixed[0], 'Sky image (fixed)')
for v in range(Nvary):
k = Nfix + j*Nvary + v
#print coeffs[k]
header['GSKY{0}{1:02d}'.format(ext, v+Nfix+1)] = (coeffs[k],
'Sky image {0} (variable)'.format(bg_vary[v]))
header['GSKY{0}{1:02d}F'.format(ext, v+Nfix+1)] = (bg_vary[v],
'Sky image (variable)')
flt.flush()
### Don't do `column_average` for ACS
    if (not column_average) or isACS:
return isACS
######
### Now fit residual column average & make diagnostic plot
interactive_status=plt.rcParams['interactive']
plt.ioff()
fig = plt.figure(figsize=[6.,6.])
ax = fig.add_subplot(111)
im_shape = (1014,1014)
for j in range(Nexp):
file = grism['files'][j]
resid = (data[j*Npix:(j+1)*Npix] - sky[j,:]).reshape(im_shape)
m = (mask & obj_mask)[j*Npix:(j+1)*Npix].reshape(im_shape)
## Statistics of masked arrays
ma = np.ma.masked_array(resid, mask=(~m))
med = np.ma.median(ma, axis=0)
bg_sky = 0
yrms = np.ma.std(ma, axis=0)/np.sqrt(np.sum(m, axis=0))
xmsk = np.arange(im_shape[0])
yres = med
yok = (~yrms.mask) & np.isfinite(yrms) & np.isfinite(xmsk) & np.isfinite(yres)
if yok.sum() == 0:
print('ERROR: No valid pixels found!')
continue
### Fit column average with smoothed Gaussian Process model
if False:
#### xxx old GaussianProcess implementation
gp = GaussianProcess(regr='constant', corr='squared_exponential',
theta0=8, thetaL=5, thetaU=12,
nugget=(yrms/bg_sky)[yok][::1]**2,
random_start=10, verbose=True, normalize=True)
try:
gp.fit(np.atleast_2d(xmsk[yok][::1]).T, yres[yok][::1]+bg_sky)
except:
            print('GaussianProcess failed! Check that this exposure wasn\'t fried by variable backgrounds.')
continue
y_pred, MSE = gp.predict(np.atleast_2d(xmsk).T, eval_MSE=True)
gp_sigma = np.sqrt(MSE)
## Updated sklearn GaussianProcessRegressor
nmad_y = utils.nmad(yres)
gpscl = 100 # rough normalization
k1 = 0.3**2 * RBF(length_scale=80) # Background variations
k2 = 1**2 * WhiteKernel(noise_level=(nmad_y*gpscl)**2) # noise
        gp_kernel = k1 + k2
yok &= np.abs(yres-np.median(yres)) < 50*nmad_y
gp = GaussianProcessRegressor(kernel=gp_kernel, alpha=nmad_y*gpscl/5,
optimizer='fmin_l_bfgs_b',
n_restarts_optimizer=0,
normalize_y=False,
copy_X_train=True, random_state=None)
gp.fit(np.atleast_2d(xmsk[yok][::1]).T, (yres[yok][::1]+bg_sky)*gpscl)
y_pred, gp_sigma = gp.predict(np.atleast_2d(xmsk).T, return_std=True)
gp_sigma /= gpscl
y_pred /= gpscl
## Plot Results
        pi = ax.plot(med, alpha=0.2)
ax.plot(y_pred-bg_sky, color=pi[0].get_color())
ax.fill_between(xmsk, y_pred-bg_sky-gp_sigma, y_pred-bg_sky+gp_sigma,
color=pi[0].get_color(), alpha=0.3,
label=grism['files'][j].split('_fl')[0])
## result
fp = open(file.replace('_flt.fits', '_column.dat'), 'wb')
fp.write(b'# column obs_resid ok resid uncertainty\n')
np.savetxt(fp, np.array([xmsk, yres, yok*1, y_pred-bg_sky, gp_sigma]).T, fmt='%.5f')
fp.close()
if apply:
### Subtract the column average in 2D & log header keywords
gp_res = np.dot(y_pred[:,None]-bg_sky, np.ones((1014,1)).T).T
flt = pyfits.open(file, mode='update')
flt['SCI',1].data -= gp_res
flt[0].header['GSKYCOL'] = (True, 'Subtract column average')
flt.flush()
### Finish plot
ax.legend(loc='lower left', fontsize=10)
ax.plot([-10,1024],[0,0], color='k')
ax.set_xlim(-10,1024)
ax.set_xlabel(r'pixel column ($x$)')
ax.set_ylabel(r'column average (e-/s)')
ax.set_title(grism['product'])
ax.grid()
fig.tight_layout(pad=0.1)
fig.savefig('{0}_column.png'.format(grism['product']))
#fig.savefig('%s_column.pdf' %(grism['product']))
plt.close()
## Clean up large arrays
del(data); del(A); del(wht); del(mask); del(model)
if interactive_status:
plt.ion()
return False
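# Illustrative sketch (not part of the pipeline above): the core of the sky
# fit is an iteratively re-masked linear least squares. The names `A`,
# `data`, `wht` and `mask` mirror the arrays built inside the function, but
# this standalone helper is a simplification for clarity only.
def _example_masked_sky_lstsq(A, data, wht, mask, sky_iter=10, atol=1.e-4):
    """Toy version of the iterative masked least-squares sky fit."""
    import numpy as np
    coeffs = np.zeros(A.shape[1], dtype=A.dtype)
    for _ in range(sky_iter):
        # Residuals in units of sigma; mask positive outliers (objects)
        # more aggressively than negative ones, as in the loop above
        resid = (data - A.dot(coeffs))*np.sqrt(wht)
        obj_mask = (resid < 2.5) & (resid > -3)
        fit = mask & obj_mask
        new_coeffs = np.linalg.lstsq(A[fit, :], data[fit], rcond=None)[0]
        if np.allclose(new_coeffs, coeffs, rtol=1.e-5, atol=atol):
            coeffs = new_coeffs
            break
        coeffs = new_coeffs
    return coeffs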
def fix_star_centers(root='macs1149.6+2223-rot-ca5-22-032.0-f105w',
mag_lim=22, verbose=True, drizzle=False,
cutout_size=16):
"""Unset CR bit (4096) in the centers of bright objects
TBD
Parameters
----------
root : str
Root name of drizzle product (direct imaging).
mag_lim : float
Magnitude limit of objects to consider
verbose : bool
Print messages to the terminal
drizzle : bool
Redrizzle the output image
cutout_size : int
Size of the cutout to extract around the bright stars
Returns
-------
Nothing, updates FLT files in place.
"""
from drizzlepac.astrodrizzle import AstroDrizzle
EPSF = utils.EffectivePSF()
sci = pyfits.open('{0}_drz_sci.fits'.format(root))
#cat = Table.read('{0}.cat'.format(root), format='ascii.commented_header')
cat = utils.GTable.gread('{0}.cat.fits'.format(root))
# Load FITS files
N = sci[0].header['NDRIZIM']
images = []
wcs = []
for i in range(N):
flt = pyfits.open(sci[0].header['D{0:03d}DATA'.format(i+1)].split('[')[0], mode='update')
        wcs.append(pywcs.WCS(flt[1].header, relax=True))
images.append(flt)
yp, xp = np.indices((1014,1014))
use = cat['MAG_AUTO'] < mag_lim
so = np.argsort(cat['MAG_AUTO'][use])
if verbose:
print('# {0:6s} {1:12s} {2:12s} {3:7s} {4} {5}'.format('id', 'ra',
'dec', 'mag',
'nDQ', 'nSat'))
for line in cat[use][so]:
rd = line['X_WORLD'], line['Y_WORLD']
nset = []
nsat = []
for i in range(N):
xi, yi = wcs[i].all_world2pix([rd[0],], [rd[1],], 0)
r = np.sqrt((xp-xi[0])**2 + (yp-yi[0])**2)
unset = (r <= 3) & ((images[i]['DQ'].data & 4096) > 0)
nset.append(unset.sum())
if nset[i] > 0:
images[i]['DQ'].data[unset] -= 4096
# Fill saturated with EPSF fit
satpix = (r <= 5) & (((images[i]['DQ'].data & 256) > 0) | ((images[i]['DQ'].data & 2048) > 0))
nsat.append(satpix.sum())
if nsat[i] > 0:
xpi = int(np.round(xi[0]))
ypi = int(np.round(yi[0]))
slx = slice(xpi-cutout_size, xpi+cutout_size)
sly = slice(ypi-cutout_size, ypi+cutout_size)
sci = images[i]['SCI'].data[sly, slx]
dq = images[i]['DQ'].data[sly, slx]
err = images[i]['ERR'].data[sly, slx]
ivar = 1/err**2
ivar[(~np.isfinite(ivar)) | (dq > 0)] = 0
# Fit the EPSF model
try:
psf_filter = images[0][0].header['FILTER']
psf_params = EPSF.fit_ePSF(sci, ivar=ivar, center=None,
                                               tol=1.e-3,
        show_points: It's a boolean option. If false, the points of each
            record will be hidden.
        height: It's an integer option. It determines the height of the
            chart in pixels.
        width: It's an integer option. It determines the width of the
            chart in pixels.
    Returns:
        A string containing the chart's JS code, plus the import code for
        the C3 static files if they have not been imported yet.
        You can see the structure of the chart in the chart_structur variable.
    """
# line (X,Y) chart structure in JS
chart_structur = (
'\n<script type="text/javascript">'
'\n var chart = c3.generate({'
'\n bindto: "%s",'
'\n data: {'
'\n xs: { %s },'
'\n columns: [ %s ],'
'\n type : "%s",'
'\n colors: { %s },'
'\n groups: ['
'\n %s'
'\n ],'
'\n labels : %s'
'\n },'
'\n title: { text: "%s"},'
'\n grid: {'
'\n x: { show: %s ,lines: [%s] },'
'\n y: { show: %s ,lines: [%s] },'
'\n },'
'\n legend: { show: %s },'
'\n zoom: { enabled: %s },'
'\n point: { show: %s },'
'\n tooltip: { grouped: %s },'
'\n size: { height: %s, width: %s }'
'\n });'
'\n</script>'
)
# convert parameters to strings to be acceptable in JS and C3 syntax.
if angle and not area:
_type = 'line'
elif angle and area:
_type = 'area'
elif not angle and not area:
_type = 'spline'
elif not angle and area:
_type = 'area-spline'
else:
_type = 'line'
if labels:
labels = 'true'
else:
labels = 'false'
if vertical_grid_line:
vertical_grid_line = 'true'
else:
vertical_grid_line = 'false'
if horizontal_grid_line:
horizontal_grid_line = 'true'
else:
horizontal_grid_line = 'false'
if show_legend:
show_legend = 'true'
else:
show_legend = 'false'
if zoom:
zoom = 'true'
else:
zoom = 'false'
if show_points:
show_points = 'true'
else:
show_points = 'false'
if group_tooltip:
group_tooltip = 'true'
else:
group_tooltip = 'false'
if height is not None:
height = int(height)
else:
height = 'null'
if width is not None:
width = int(width)
else:
width = 'null'
# read horizontal line points from data
horizontal_lines = str()
if 'horizontal_lines' in data.keys():
for line in data['horizontal_lines']:
horizontal_lines = ''.join(
[horizontal_lines, '{ value: %s}' % line, ','])
# read vertical line points from data
vertical_lines = str()
if 'vertical_lines' in data.keys():
for line in data['vertical_lines']:
vertical_lines = ''.join(
[vertical_lines, '{ value: %s}' % line, ','])
# read records points to draw on chart
xy_mapping = str()
data_title_list = list()
chart_data = str()
for item in data['data']:
y_values = ','.join([str(v[1]) for v in item['values']])
item_data = '["%s", %s], ' % (item['title'], y_values)
chart_data = ' '.join([chart_data, item_data])
data_title_list.append(item['title'])
x_values = ','.join([str(v[0]) for v in item['values']])
item_data = '["%s", %s], ' % (item['title']+'_x', x_values)
chart_data = ' '.join([chart_data, item_data])
xy_mapping = ''.join(
[xy_mapping, '"%s": "%s"' %
(item['title'], item['title']+'_x'), ','])
# read colors of data
chart_color = str()
for item in data['data']:
if 'color' in item.keys():
item_color = '"%s": "%s", ' % (item['title'], item['color'])
chart_color = ' '.join([chart_color, item_color])
# read grouping details of data
total_group_string = str()
if 'groups' in data.keys():
for group in data['groups']:
group_string = str()
for item in group:
                # raise an exception if the mentioned key does not exist in data
                if item not in data_title_list:
                    raise ValueError("%s does not exist in your data!" % item)
group_string = ''.join([group_string, ',', repr(item)])
total_group_string = ''.join(
[total_group_string, '[', group_string, ']', ','])
# pass arguments to chart structure
chart = chart_structur % (
bind_to, xy_mapping, chart_data, _type,
chart_color, total_group_string, labels, title,
vertical_grid_line, vertical_lines, horizontal_grid_line,
horizontal_lines, show_legend, zoom, show_points, group_tooltip,
height, width
)
    # add the C3 import elements if they have not been imported yet, then return.
if not ('import_js_c3' in context and context['import_js_c3']):
return mark_safe('%s\n%s' % (import_c3(), chart))
else:
return mark_safe(chart)
###############################################################################
@register.simple_tag(takes_context=True)
def line(
context, bind_to, data, title='', angle=True, area=False,
x_is_category=False, labels=False, vertical_grid_line=False,
horizontal_grid_line=False, show_legend=True, zoom=False,
show_points=True, group_tooltip=True, height=None, width=None
):
"""Generates javascript code to show a 'bar' chart.
Args:
        context: Context of the template.
        bind_to: A string that specifies an HTML element (eg: id or class)
            that the chart will be shown in. (like: '#chart')
        data: It is a dictionary that contains the data of the chart, some
            information about extra lines, grouping of data and
            chart axis labels. eg:
{
'x': ['2017-5-19', '2017-5-20', '2017-5-21', '2017-5-22'],
'horizontal_lines': [40],
# 'vertical_lines': [40],
'data': [
{'title': 'A', 'values': [26, 5, 52, 74]},
{'title': 'B', 'values': [54, 21, 40, 26]},
{'title': 'C', 'values': [63, 14, 25, 11]},
],
# 'groups': [('B', 'C')]
}
            vertical_lines only works if x_is_category is set to False.
        title: A string that will be shown on top of the chart.
        area: It's a boolean option. If true, the area under the curve
            will be colored.
        angle: It's a boolean option. If false, the chart type will be spline.
        x_is_category: It's a boolean option. If false, the labels of the X
            axis will be considered as real numbers and sortable. (they will
            be sorted automatically)
        labels: It's a boolean option. If true, the value of each record
            will be shown on its column.
        vertical_grid_line: It's a boolean option. If true, some vertical
            grid lines will be drawn in the chart.
        horizontal_grid_line: It's a boolean option. If true, some horizontal
            grid lines will be drawn in the chart.
        show_legend: It's a boolean option. If false, the legends of the
            chart will be hidden.
        zoom: It's a boolean option. If true, the end user can scroll on the
            chart to zoom in and out.
        group_tooltip: It's a boolean option. If true, the data of all
            records at that point will be shown together.
        show_points: It's a boolean option. If false, the points of each
            record will be hidden.
        height: It's an integer option. It determines the height of the
            chart in pixels.
        width: It's an integer option. It determines the width of the
            chart in pixels.
    Returns:
        A string containing the chart's JS code, plus the import code for
        the C3 static files if they have not been imported yet.
        You can see the structure of the chart in the chart_structur variable.
"""
# line/spline chart structure in JS
chart_structur = (
'\n<script type="text/javascript">'
'\n var chart = c3.generate({'
'\n bindto: "%s",'
'\n data: {'
'\n x: %s,'
'\n columns: [ %s ],'
'\n type : "%s",'
'\n colors: { %s },'
'\n groups: ['
'\n %s'
'\n ],'
'\n labels : %s'
'\n },'
'\n title: { text: "%s"},'
'\n axis: { x: { type: "%s" } },'
'\n grid: {'
'\n x: { show: %s ,lines: [%s] },'
'\n y: { show: %s ,lines: [%s] },'
'\n },'
'\n legend: { show: %s },'
'\n zoom: { enabled: %s },'
'\n point: { show: %s },'
'\n tooltip: { grouped: %s },'
'\n size: { height: %s, width: %s }'
'\n });'
'\n</script>'
)
# convert parameters to strings to be acceptable in JS and C3 syntax.
if angle and not area:
_type = 'line'
elif angle and area:
_type = 'area'
elif not angle and not area:
_type = 'spline'
elif not angle and area:
_type = 'area-spline'
else:
_type = 'line'
if x_is_category:
x_type = 'category'
else:
x_type = ''
if labels:
labels = 'true'
else:
labels = 'false'
if vertical_grid_line:
vertical_grid_line = 'true'
else:
vertical_grid_line = 'false'
if horizontal_grid_line:
horizontal_grid_line = 'true'
else:
horizontal_grid_line = 'false'
if show_legend:
show_legend = 'true'
else:
show_legend = 'false'
if zoom:
zoom = 'true'
else:
zoom = 'false'
if show_points:
show_points = 'true'
else:
show_points = 'false'
if group_tooltip:
group_tooltip = 'true'
else:
group_tooltip = 'false'
if height is not None:
height = int(height)
else:
height = 'null'
if width is not None:
width = int(width)
else:
width = 'null'
# read horizontal line points from data
horizontal_lines = str()
if 'horizontal_lines' in data.keys():
for line in data['horizontal_lines']:
horizontal_lines = ''.join(
[horizontal_lines, '{ value: %s}' % line, ','])
# read vertical line points from data
    # raise an exception if x_is_category is set to True and vertical_lines exists
vertical_lines = str()
if 'vertical_lines' in data.keys():
if x_is_category:
            raise Exception(
                "It's meaningless to use vertical_lines when "
                "x_is_category is set to True.")
import os
import requests
from urllib.error import URLError
from urllib.parse import urlparse
from urllib.request import urlopen
from luigi import Target, LocalTarget
from hashlib import sha1
from tasks.util import (query_cartodb, underscore_slugify, OBSERVATORY_PREFIX, OBSERVATORY_SCHEMA)
from tasks.meta import (OBSColumn, OBSTable, metadata, Geometry, Point,
Linestring, OBSColumnTable, OBSTag, current_session)
from sqlalchemy import Table, types, Column
from lib.logger import get_logger
LOGGER = get_logger(__name__)
class PostgresTarget(Target):
'''
PostgresTarget which by default uses command-line specified login.
'''
def __init__(self, schema, tablename, non_empty=True, where="1 = 1"):
self._schema = schema
self._tablename = tablename
self._non_empty = non_empty
self._where = where
@property
def table(self):
return '"{schema}".{tablename}'.format(schema=self._schema,
tablename=self._tablename)
@property
def tablename(self):
return self._tablename
@property
def schema(self):
return self._schema
@property
def qualified_tablename(self):
return '"{}".{}'.format(self.schema, self.tablename)
def _existenceness(self):
'''
Returns 0 if the table does not exist, 1 if it exists but has no
rows (is empty), and 2 if it exists and has one or more rows.
'''
session = current_session()
sql = '''
SELECT COUNT(*) FROM information_schema.tables
WHERE table_schema ILIKE '{schema}'
AND table_name ILIKE '{tablename}'
'''.format(
schema=self._schema,
tablename=self._tablename)
resp = session.execute(sql)
if int(resp.fetchone()[0]) == 0:
return 0
resp = session.execute(
'SELECT row_number() over () FROM "{schema}".{tablename} WHERE {where} LIMIT 1'.format(
schema=self._schema, tablename=self._tablename,
where=self._where))
if resp.fetchone() is None:
return 1
else:
return 2
def empty(self):
'''
Returns True if the table exists but has no rows in it.
'''
return self._existenceness() == 1
def exists(self):
'''
Returns True if the table exists and has at least one row in it.
'''
if self._non_empty:
return self._existenceness() == 2
else:
return self._existenceness() >= 1
def exists_or_empty(self):
'''
Returns True if the table exists, even if it is empty.
'''
return self._existenceness() >= 1
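# Illustrative usage sketch (hypothetical task name): a PostgresTarget is
# typically returned from a luigi Task's output() so the scheduler can check
# for the table's existence (and non-emptiness) before re-running the task.
#
#   class LoadObservations(luigi.Task):
#       def output(self):
#           return PostgresTarget('my_schema', 'my_table', non_empty=True)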
class CartoDBTarget(Target):
'''
Target which is a CartoDB table
'''
def __init__(self, tablename, carto_url=None, api_key=None):
self.tablename = tablename
self.carto_url = carto_url
self.api_key = api_key
def __str__(self):
return self.tablename
def exists(self):
resp = query_cartodb(
'SELECT row_number() over () FROM "{tablename}" LIMIT 1'.format(
tablename=self.tablename),
api_key=self.api_key,
carto_url=self.carto_url)
if resp.status_code != 200:
return False
return resp.json()['total_rows'] > 0
def remove(self, carto_url=None, api_key=None):
        api_key = api_key or self.api_key or os.environ['CARTODB_API_KEY']
        carto_url = carto_url or self.carto_url
try:
while True:
resp = requests.get('{url}/api/v1/tables/{tablename}?api_key={api_key}'.format(
url=carto_url,
tablename=self.tablename,
api_key=api_key
))
viz_id = resp.json()['id']
# delete dataset by id DELETE
# https://observatory.cartodb.com/api/v1/viz/ed483a0b-7842-4610-9f6c-8591273b8e5c
try:
requests.delete('{url}/api/v1/viz/{viz_id}?api_key={api_key}'.format(
url=carto_url,
viz_id=viz_id,
api_key=api_key
), timeout=1)
except requests.Timeout:
pass
except ValueError:
pass
query_cartodb('DROP TABLE IF EXISTS {tablename}'.format(tablename=self.tablename))
assert not self.exists()
class ColumnTarget(Target):
    '''
    Target that wraps an OBSColumn metadata record.
    '''
def __init__(self, column, task):
self._id = column.id
self._task = task
self._column = column
def get(self, session):
'''
Return a copy of the underlying OBSColumn in the specified session.
'''
with session.no_autoflush:
return session.query(OBSColumn).get(self._id)
def update_or_create(self):
self._column = current_session().merge(self._column)
def exists(self):
existing = self.get(current_session())
new_version = float(self._column.version or 0.0)
if existing:
existing_version = float(existing.version or 0.0)
current_session().expunge(existing)
else:
existing_version = 0.0
if existing and existing_version == new_version:
return True
elif existing and existing_version > new_version:
raise Exception('Metadata version mismatch: cannot run task {task} '
'(id "{id}") '
'with ETL version ({etl}) older than what is in '
'DB ({db})'.format(task=self._task.task_id,
id=self._id,
etl=new_version,
db=existing_version))
return False
class TagTarget(Target):
    '''
    Target that wraps an OBSTag metadata record.
    '''
def __init__(self, tag, task):
self._id = tag.id
self._tag = tag
self._task = task
_tag_cache = {}
def get(self, session):
'''
Return a copy of the underlying OBSTag in the specified session.
'''
if not self._tag_cache.get(self._id, None):
with session.no_autoflush:
self._tag_cache[self._id] = session.query(OBSTag).get(self._id)
return self._tag_cache[self._id]
def update_or_create(self):
with current_session().no_autoflush:
self._tag = current_session().merge(self._tag)
def exists(self):
session = current_session()
existing = self.get(session)
new_version = self._tag.version or 0.0
if existing:
if existing in session:
session.expunge(existing)
existing_version = existing.version or 0.0
if float(existing_version) == float(new_version):
return True
            if float(existing_version) > float(new_version):
raise Exception('Metadata version mismatch: cannot run task {task} '
'(id "{id}") '
'with ETL version ({etl}) older than what is in '
'DB ({db})'.format(task=self._task.task_id,
id=self._id,
etl=new_version,
db=existing_version))
return False
class TableTarget(Target):
def __init__(self, schema, name, obs_table, columns, task):
'''
columns: should be an ordereddict if you want to specify columns' order
in the table
'''
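        # Illustrative (hypothetical column targets): pass an OrderedDict to
        # pin the column order, e.g.
        #   columns=OrderedDict([('geom', geom_target), ('pop', pop_target)])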
self._id = '.'.join([schema, name])
obs_table.id = self._id
obs_table.tablename = '{prefix}{name}'.format(prefix=OBSERVATORY_PREFIX, name=sha1(
underscore_slugify(self._id).encode('utf-8')).hexdigest())
self.table = '{schema}.{table}'.format(schema=OBSERVATORY_SCHEMA, table=obs_table.tablename)
self.qualified_tablename = '"{schema}".{table}'.format(schema=OBSERVATORY_SCHEMA, table=obs_table.tablename)
self.obs_table = obs_table
self._tablename = obs_table.tablename
self._schema = schema
self._name = name
self._obs_dict = obs_table.__dict__.copy()
self._columns = columns
self._task = task
if obs_table.tablename in metadata.tables:
self._table = metadata.tables[obs_table.tablename]
else:
self._table = None
@property
def tablename(self):
return self._tablename
@property
def schema(self):
return 'observatory'
def sync(self):
'''
Whether this data should be synced to carto. Defaults to True.
'''
return True
def exists(self):
'''
We always want to run this at least once, because we can always
regenerate tabular data from scratch.
'''
session = current_session()
existing = self.get(session)
new_version = float(self.obs_table.version or 0.0)
if existing:
existing_version = float(existing.version or 0.0)
if existing in session:
session.expunge(existing)
else:
existing_version = 0.0
if existing and existing_version == new_version:
resp = session.execute(
'SELECT COUNT(*) FROM information_schema.tables '
"WHERE table_schema = '{schema}' "
" AND table_name = '{tablename}' ".format(
schema='observatory',
tablename=existing.tablename))
if int(resp.fetchone()[0]) == 0:
return False
resp = session.execute(
'SELECT row_number() over () '
'FROM "{schema}".{tablename} LIMIT 1 '.format(
schema='observatory',
tablename=existing.tablename))
return resp.fetchone() is not None
elif existing and existing_version > new_version:
raise Exception('Metadata version mismatch: cannot run task {task} '
'(id "{id}") '
'with ETL version ({etl}) older than what is in '
'DB ({db})'.format(task=self._task.task_id,
id=self._id,
etl=new_version,
db=existing_version))
return False
def get(self, session):
'''
Return a copy of the underlying OBSTable in the specified session.
'''
with session.no_autoflush:
return session.query(OBSTable).get(self._id)
def update_or_create_table(self):
session = current_session()
# create new local data table
columns = []
for colname, coltarget in list(self._columns.items()):
colname = colname.lower()
col = coltarget.get(session)
# Column info for sqlalchemy's internal metadata
if col.type.lower() == 'geometry':
coltype = Geometry
elif col.type.lower().startswith('geometry(point'):
coltype = Point
elif col.type.lower().startswith('geometry(linestring'):
coltype = Linestring
# For enum type, pull keys from extra["categories"]
elif col.type.lower().startswith('enum'):
cats = list(col.extra['categories'].keys())
coltype = types.Enum(*cats, name=col.id + '_enum')
else:
coltype = getattr(types, col.type.capitalize())
columns.append(Column(colname, coltype))
obs_table = self.get(session) or self.obs_table
# replace local data table
if obs_table.id in metadata.tables:
metadata.tables[obs_table.id].drop()
self._table = Table(obs_table.tablename, metadata, *columns,
extend_existing=True, schema='observatory')
session.commit()
self._table.drop(checkfirst=True)
self._table.create()
def update_or_create_metadata(self, _testmode=False):
session = current_session()
# replace metadata table
self.obs_table = session.merge(self.obs_table)
obs_table = self.obs_table
for i, colname_coltarget in enumerate(self._columns.items()):
colname, coltarget = colname_coltarget
colname = colname.lower()
col = coltarget.get(session)
if _testmode:
coltable = OBSColumnTable(colname=colname, table=obs_table,
column=col)
else:
# Column info for obs metadata
coltable = session.query(OBSColumnTable).filter_by(
column_id=col.id, table_id=obs_table.id).first()
if coltable:
coltable.colname = colname
else:
# catch the case where a column id has changed
coltable = session.query(OBSColumnTable).filter_by(
table_id=obs_table.id, colname=colname).first()
if coltable:
coltable.column = col
else:
coltable = OBSColumnTable(colname=colname, table=obs_table,
column=col)
session.add(coltable)
class RepoTarget(LocalTarget):
def __init__(self, schema, tablename, repo_dir, resource_id, version, filename):
self.format = None
self.is_tmp = False
self.schema = schema
self.tablename = tablename
self.repo_dir = repo_dir
self.resource_id = resource_id
self.version = version
self.filename = filename
@property
def path(self):
path = self._get_path()
if path and os.path.isfile(path):
return path
else:
return self._build_path()
def _build_path(self):
return os.path.join(self.repo_dir, self.resource_id, str(self.version), self.filename)
def _get_path(self):
path = None
query = '''
SELECT path FROM "{schema}".{table}
WHERE id = '{resource_id}'
AND version = {version}
'''.format(schema=self.schema,
table=self.tablename,
resource_id=self.resource_id,
version=self.version)
try:
result = current_session().execute(query).fetchone()
if result:
path = result[0]
        except Exception:
            path = None
return path
def exists(self):
path = self._get_path()
return path and os.path.isfile(path)
class ConstraintExistsTarget(Target):
def __init__(self, schema, table, constraint):
self.schema = schema
self.tablename = table
self.constraint = constraint
self.session = current_session()
@property
def table(self):
return '"{schema}".{tablename}'.format(schema=self.schema,
tablename=self.tablename)
def exists(self):
sql = "SELECT 1 FROM information_schema.constraint_column_usage " \
"WHERE table_schema = '{schema}' " \
" AND table_name ilike '{table}' " \
" AND constraint_name = '{constraint}'"
check = sql.format(schema=self.schema,
table=self.tablename,
constraint=self.constraint)
return len(self.session.execute(check).fetchall()) > 0
class PostgresFunctionTarget(Target):
def __init__(self, schema, function_name):
self._schema = schema
self._function_name = function_name
self._session = current_session()
@property
def function(self):
return '"{schema}".{function_name}'.format(schema=self._schema,
function_name=self._function_name)
@property
def function_name(self):
return self._function_name
@property
def schema(self):
return self._schema
def exists(self):
query = '''
SELECT 1 FROM information_schema.routines
WHERE routine_schema = '{schema}'
AND routine_name = '{function_name}'
'''.format(
schema=self._schema,
function_name=self._function_name)
return len(self._session.execute(query).fetchall()) > 0
class URLTarget(Target):
'''
Accepts both local paths and urls
'''
def __init__(self, url):
self.path = url
scheme = urlparse(url).scheme
if scheme == '':
self.url = 'file://{}'.format(url)
else:
self.url = url
def exists(self):
try:
urlopen(self.url)
return True
        except URLError:
            return False
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_min_length_4_nistxml_sv_iv_list_nmtokens_min_length_5_4(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet minLength with value 10.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-minLength-5.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-minLength-5-4.xml",
class_name="NistschemaSvIvListNmtokensMinLength5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_min_length_4_nistxml_sv_iv_list_nmtokens_min_length_5_5(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet minLength with value 10.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-minLength-5.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-minLength-5-5.xml",
class_name="NistschemaSvIvListNmtokensMinLength5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_min_length_3_nistxml_sv_iv_list_nmtokens_min_length_4_1(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet minLength with value 8.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-minLength-4.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-minLength-4-1.xml",
class_name="NistschemaSvIvListNmtokensMinLength4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_min_length_3_nistxml_sv_iv_list_nmtokens_min_length_4_2(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet minLength with value 8.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-minLength-4.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-minLength-4-2.xml",
class_name="NistschemaSvIvListNmtokensMinLength4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_min_length_3_nistxml_sv_iv_list_nmtokens_min_length_4_3(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet minLength with value 8.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-minLength-4.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-minLength-4-3.xml",
class_name="NistschemaSvIvListNmtokensMinLength4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_min_length_3_nistxml_sv_iv_list_nmtokens_min_length_4_4(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet minLength with value 8.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-minLength-4.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-minLength-4-4.xml",
class_name="NistschemaSvIvListNmtokensMinLength4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_min_length_3_nistxml_sv_iv_list_nmtokens_min_length_4_5(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet minLength with value 8.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-minLength-4.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-minLength-4-5.xml",
class_name="NistschemaSvIvListNmtokensMinLength4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_min_length_2_nistxml_sv_iv_list_nmtokens_min_length_3_1(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet minLength with value 7.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-minLength-3.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-minLength-3-1.xml",
class_name="NistschemaSvIvListNmtokensMinLength3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_min_length_2_nistxml_sv_iv_list_nmtokens_min_length_3_2(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet minLength with value 7.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-minLength-3.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-minLength-3-2.xml",
class_name="NistschemaSvIvListNmtokensMinLength3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_min_length_2_nistxml_sv_iv_list_nmtokens_min_length_3_3(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet minLength with value 7.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-minLength-3.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-minLength-3-3.xml",
class_name="NistschemaSvIvListNmtokensMinLength3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_min_length_2_nistxml_sv_iv_list_nmtokens_min_length_3_4(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet minLength with value 7.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-minLength-3.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-minLength-3-4.xml",
class_name="NistschemaSvIvListNmtokensMinLength3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_min_length_2_nistxml_sv_iv_list_nmtokens_min_length_3_5(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet minLength with value 7.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-minLength-3.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-minLength-3-5.xml",
class_name="NistschemaSvIvListNmtokensMinLength3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_min_length_1_nistxml_sv_iv_list_nmtokens_min_length_2_1(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet minLength with value 6.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-minLength-2.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-minLength-2-1.xml",
class_name="NistschemaSvIvListNmtokensMinLength2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_min_length_1_nistxml_sv_iv_list_nmtokens_min_length_2_2(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet minLength with value 6.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-minLength-2.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-minLength-2-2.xml",
class_name="NistschemaSvIvListNmtokensMinLength2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_min_length_1_nistxml_sv_iv_list_nmtokens_min_length_2_3(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet minLength with value 6.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-minLength-2.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-minLength-2-3.xml",
class_name="NistschemaSvIvListNmtokensMinLength2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_min_length_1_nistxml_sv_iv_list_nmtokens_min_length_2_4(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet minLength with value 6.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-minLength-2.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-minLength-2-4.xml",
class_name="NistschemaSvIvListNmtokensMinLength2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_min_length_1_nistxml_sv_iv_list_nmtokens_min_length_2_5(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet minLength with value 6.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-minLength-2.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-minLength-2-5.xml",
class_name="NistschemaSvIvListNmtokensMinLength2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_min_length_nistxml_sv_iv_list_nmtokens_min_length_1_1(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet minLength with value 5.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-minLength-1.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-minLength-1-1.xml",
class_name="NistschemaSvIvListNmtokensMinLength1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_min_length_nistxml_sv_iv_list_nmtokens_min_length_1_2(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet minLength with value 5.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-minLength-1.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-minLength-1-2.xml",
class_name="NistschemaSvIvListNmtokensMinLength1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_min_length_nistxml_sv_iv_list_nmtokens_min_length_1_3(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet minLength with value 5.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-minLength-1.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-minLength-1-3.xml",
class_name="NistschemaSvIvListNmtokensMinLength1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_min_length_nistxml_sv_iv_list_nmtokens_min_length_1_4(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet minLength with value 5.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-minLength-1.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-minLength-1-4.xml",
class_name="NistschemaSvIvListNmtokensMinLength1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_min_length_nistxml_sv_iv_list_nmtokens_min_length_1_5(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet minLength with value 5.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-minLength-1.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-minLength-1-5.xml",
class_name="NistschemaSvIvListNmtokensMinLength1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_4_nistxml_sv_iv_list_nmtokens_max_length_5_1(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 10.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-5.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-5-1.xml",
class_name="NistschemaSvIvListNmtokensMaxLength5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_4_nistxml_sv_iv_list_nmtokens_max_length_5_2(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 10.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-5.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-5-2.xml",
class_name="NistschemaSvIvListNmtokensMaxLength5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_4_nistxml_sv_iv_list_nmtokens_max_length_5_3(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 10.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-5.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-5-3.xml",
class_name="NistschemaSvIvListNmtokensMaxLength5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_4_nistxml_sv_iv_list_nmtokens_max_length_5_4(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 10.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-5.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-5-4.xml",
class_name="NistschemaSvIvListNmtokensMaxLength5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_4_nistxml_sv_iv_list_nmtokens_max_length_5_5(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 10.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-5.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-5-5.xml",
class_name="NistschemaSvIvListNmtokensMaxLength5",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_3_nistxml_sv_iv_list_nmtokens_max_length_4_1(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 8.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-4.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-4-1.xml",
class_name="NistschemaSvIvListNmtokensMaxLength4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_3_nistxml_sv_iv_list_nmtokens_max_length_4_2(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 8.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-4.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-4-2.xml",
class_name="NistschemaSvIvListNmtokensMaxLength4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_3_nistxml_sv_iv_list_nmtokens_max_length_4_3(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 8.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-4.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-4-3.xml",
class_name="NistschemaSvIvListNmtokensMaxLength4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_3_nistxml_sv_iv_list_nmtokens_max_length_4_4(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 8.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-4.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-4-4.xml",
class_name="NistschemaSvIvListNmtokensMaxLength4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_3_nistxml_sv_iv_list_nmtokens_max_length_4_5(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 8.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-4.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-4-5.xml",
class_name="NistschemaSvIvListNmtokensMaxLength4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_2_nistxml_sv_iv_list_nmtokens_max_length_3_1(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 7.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-3.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-3-1.xml",
class_name="NistschemaSvIvListNmtokensMaxLength3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_2_nistxml_sv_iv_list_nmtokens_max_length_3_2(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 7.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-3.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-3-2.xml",
class_name="NistschemaSvIvListNmtokensMaxLength3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_2_nistxml_sv_iv_list_nmtokens_max_length_3_3(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 7.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-3.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-3-3.xml",
class_name="NistschemaSvIvListNmtokensMaxLength3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_2_nistxml_sv_iv_list_nmtokens_max_length_3_4(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 7.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-3.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-3-4.xml",
class_name="NistschemaSvIvListNmtokensMaxLength3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_2_nistxml_sv_iv_list_nmtokens_max_length_3_5(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 7.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-3.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-3-5.xml",
class_name="NistschemaSvIvListNmtokensMaxLength3",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_1_nistxml_sv_iv_list_nmtokens_max_length_2_1(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 6.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-2.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-2-1.xml",
class_name="NistschemaSvIvListNmtokensMaxLength2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_1_nistxml_sv_iv_list_nmtokens_max_length_2_2(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 6.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-2.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-2-2.xml",
class_name="NistschemaSvIvListNmtokensMaxLength2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_1_nistxml_sv_iv_list_nmtokens_max_length_2_3(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 6.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-2.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-2-3.xml",
class_name="NistschemaSvIvListNmtokensMaxLength2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_1_nistxml_sv_iv_list_nmtokens_max_length_2_4(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 6.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-2.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-2-4.xml",
class_name="NistschemaSvIvListNmtokensMaxLength2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_1_nistxml_sv_iv_list_nmtokens_max_length_2_5(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 6.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-2.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-2-5.xml",
class_name="NistschemaSvIvListNmtokensMaxLength2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_nistxml_sv_iv_list_nmtokens_max_length_1_1(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 5.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-1.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-1-1.xml",
class_name="NistschemaSvIvListNmtokensMaxLength1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_nistxml_sv_iv_list_nmtokens_max_length_1_2(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 5.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-1.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-1-2.xml",
class_name="NistschemaSvIvListNmtokensMaxLength1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_nistxml_sv_iv_list_nmtokens_max_length_1_3(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 5.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-1.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-1-3.xml",
class_name="NistschemaSvIvListNmtokensMaxLength1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_nistxml_sv_iv_list_nmtokens_max_length_1_4(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 5.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-1.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-1-4.xml",
class_name="NistschemaSvIvListNmtokensMaxLength1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtokens_max_length_nistxml_sv_iv_list_nmtokens_max_length_1_5(mode, save_output, output_format):
"""
Type list/NMTOKENS is restricted by facet maxLength with value 5.
"""
assert_bindings(
schema="nistData/list/NMTOKENS/Schema+Instance/NISTSchema-SV-IV-list-NMTOKENS-maxLength-1.xsd",
instance="nistData/list/NMTOKENS/Schema+Instance/NISTXML-SV-IV-list-NMTOKENS-maxLength-1-5.xml",
class_name="NistschemaSvIvListNmtokensMaxLength1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtoken_white_space_nistxml_sv_iv_list_nmtoken_white_space_1_1(mode, save_output, output_format):
"""
Type list/NMTOKEN is restricted by facet whiteSpace with value
collapse.
"""
assert_bindings(
schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-whiteSpace-1.xsd",
instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-whiteSpace-1-1.xml",
class_name="NistschemaSvIvListNmtokenWhiteSpace1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtoken_white_space_nistxml_sv_iv_list_nmtoken_white_space_1_2(mode, save_output, output_format):
"""
Type list/NMTOKEN is restricted by facet whiteSpace with value
collapse.
"""
assert_bindings(
schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-whiteSpace-1.xsd",
instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-whiteSpace-1-2.xml",
class_name="NistschemaSvIvListNmtokenWhiteSpace1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtoken_white_space_nistxml_sv_iv_list_nmtoken_white_space_1_3(mode, save_output, output_format):
"""
Type list/NMTOKEN is restricted by facet whiteSpace with value
collapse.
"""
assert_bindings(
schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-whiteSpace-1.xsd",
instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-whiteSpace-1-3.xml",
class_name="NistschemaSvIvListNmtokenWhiteSpace1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtoken_white_space_nistxml_sv_iv_list_nmtoken_white_space_1_4(mode, save_output, output_format):
"""
Type list/NMTOKEN is restricted by facet whiteSpace with value
collapse.
"""
assert_bindings(
schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-whiteSpace-1.xsd",
instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-whiteSpace-1-4.xml",
class_name="NistschemaSvIvListNmtokenWhiteSpace1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
def test_list_nmtoken_white_space_nistxml_sv_iv_list_nmtoken_white_space_1_5(mode, save_output, output_format):
"""
Type list/NMTOKEN is restricted by facet whiteSpace with value
collapse.
"""
assert_bindings(
schema="nistData/list/NMTOKEN/Schema+Instance/NISTSchema-SV-IV-list-NMTOKEN-whiteSpace-1.xsd",
instance="nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-whiteSpace-1-5.xml",
class_name="NistschemaSvIvListNmtokenWhiteSpace1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
match
"""
matches = {}
name = item.data.get("name")
if not name:
return matches
db = current.db
s3db = current.s3db
table = item.table
# Search by name
lower_name = s3_unicode(name).lower()
query = (table.name.lower() == lower_name) & \
(table.deleted == False)
rows = db(query).select(table.id, table.name)
if not rows and current.deployment_settings.get_L10n_translate_org_organisation():
# Search by local name
ltable = s3db.org_organisation_name
query = (ltable.name_l10n.lower() == lower_name) & \
(ltable.organisation_id == table.id) & \
(ltable.deleted != True)
rows = db(query).select(table.id, table.name)
if rows:
# Get the parents for all matches
matches = dict((row.id, {"name": row.name}) for row in rows)
btable = s3db.org_organisation_branch
query = (btable.branch_id.belongs(matches.keys())) & \
(btable.deleted != True)
links = db(query).select(btable.organisation_id,
btable.branch_id,
)
for link in links:
matches[link.branch_id]["parent"] = link.organisation_id
return matches
# -------------------------------------------------------------------------
@classmethod
def parent(cls, item):
"""
Find the parent for the given import item
@param item: the import item
@return: a tuple (id, uid, item) for the parent
"""
parent_id = parent_uid = parent_item = None
is_key = lambda fk, name: fk == name or \
isinstance(fk, (tuple, list)) and \
fk[1] == name
all_items = item.job.items
for uid, link_item in all_items.items():
if link_item.tablename == "org_organisation_branch":
references = link_item.references
parent = branch = None
for reference in references:
fk = reference.field
if is_key(fk, "branch_id"):
branch = reference.entry
elif is_key(fk, "organisation_id"):
parent = reference.entry
if parent and branch:
break
if parent and branch and branch.item_id == item.item_id:
parent_id = parent.id
parent_uid = parent.uid
parent_item = all_items.get(parent.item_id)
break
return parent_id, parent_uid, parent_item
# =============================================================================
class org_AssignMethod(S3Method):
"""
Custom Method to allow organisations to be assigned to something
e.g. Organisation Group
"""
def __init__(self, component, types=None):
"""
@param component: the Component in which to create records
"""
self.component = component
def apply_method(self, r, **attr):
"""
Apply method.
@param r: the S3Request
@param attr: controller options for this request
"""
component = self.component
components = r.resource.components
for c in components:
if c == component:
component = components[c]
break
try:
if component.link:
component = component.link
except:
current.log.error("Invalid Component!")
raise
tablename = component.tablename
# Requires permission to create component
authorised = current.auth.s3_has_permission("create", tablename)
if not authorised:
r.unauthorised()
T = current.T
s3db = current.s3db
get_vars = r.get_vars
response = current.response
if r.http == "POST":
added = 0
post_vars = r.post_vars
if all([n in post_vars for n in ("assign", "selected", "mode")]):
fkey = component.fkey
record = r.record
if fkey in record:
# SuperKey
record_id = r.record[fkey]
else:
record_id = r.id
selected = post_vars.selected
if selected:
selected = selected.split(",")
else:
selected = []
db = current.db
table = s3db[tablename]
if selected:
# Handle exclusion filter
if post_vars.mode == "Exclusive":
if "filterURL" in post_vars:
filters = S3URLQuery.parse_url(post_vars.filterURL)
else:
filters = None
query = ~(FS("id").belongs(selected))
hresource = s3db.resource("org_organisation",
filter=query, vars=filters)
rows = hresource.select(["id"], as_rows=True)
selected = [str(row.id) for row in rows]
query = (table.organisation_id.belongs(selected)) & \
(table[fkey] == record_id) & \
(table.deleted != True)
rows = db(query).select(table.id)
rows = dict((row.id, row) for row in rows)
onaccept = component.get_config("create_onaccept",
component.get_config("onaccept", None))
for organisation_id in selected:
try:
org_id = int(organisation_id.strip())
except ValueError:
continue
if org_id not in rows:
link = Storage(organisation_id = organisation_id)
link[fkey] = record_id
_id = table.insert(**link)
if onaccept:
link["id"] = _id
form = Storage(vars=link)
onaccept(form)
added += 1
current.session.confirmation = T("%(number)s assigned") % \
dict(number=added)
if added > 0:
redirect(URL(args=[r.id, "organisation"], vars={}))
else:
redirect(URL(args=r.args, vars={}))
elif r.http == "GET":
# Filter widgets
filter_widgets = []
# List fields
list_fields = ["id",
"name",
]
# Data table
resource = s3db.resource("org_organisation")
totalrows = resource.count()
if "pageLength" in get_vars:
display_length = get_vars["pageLength"]
if display_length == "None":
display_length = None
else:
display_length = int(display_length)
else:
display_length = 25
if display_length:
limit = 4 * display_length
else:
limit = None
filter, orderby, left = resource.datatable_filter(list_fields, get_vars)
resource.add_filter(filter)
data = resource.select(list_fields,
start=0,
limit=limit,
orderby=orderby,
left=left,
count=True,
represent=True)
filteredrows = data["numrows"]
dt = S3DataTable(data["rfields"], data["rows"])
dt_id = "datatable"
# Bulk actions
dt_bulk_actions = [(T("Add"), "assign")]
if r.representation == "html":
# Page load
resource.configure(deletable = False)
profile_url = URL(c = "org",
f = "organisation",
args = ["[id]", "profile"])
S3CRUD.action_buttons(r,
deletable = False,
read_url = profile_url,
update_url = profile_url)
response.s3.no_formats = True
# Data table (items)
items = dt.html(totalrows,
filteredrows,
dt_id,
dt_ajax_url=URL(args = r.args,
extension="aadata",
vars={},
),
dt_bulk_actions=dt_bulk_actions,
dt_pageLength=display_length,
dt_pagination="true",
dt_searching="false",
)
# Filter form
if filter_widgets:
# Where to retrieve filtered data from:
_vars = resource.crud._remove_filters(r.get_vars)
filter_submit_url = r.url(vars=_vars)
# Where to retrieve updated filter options from:
filter_ajax_url = URL(f="human_resource",
args=["filter.options"],
vars={})
get_config = resource.get_config
filter_clear = get_config("filter_clear", True)
filter_formstyle = get_config("filter_formstyle", None)
filter_submit = get_config("filter_submit", True)
filter_form = S3FilterForm(filter_widgets,
clear=filter_clear,
formstyle=filter_formstyle,
submit=filter_submit,
ajax=True,
url=filter_submit_url,
ajaxurl=filter_ajax_url,
_class="filter-form",
_id="datatable-filter-form",
)
fresource = current.s3db.resource(resource.tablename)
alias = resource.alias if r.component else None
ff = filter_form.html(fresource,
r.get_vars,
target="datatable",
alias=alias)
else:
ff = ""
output = dict(items = items,
title = T("Add Organization"),
list_filter_form = ff)
response.view = "list_filter.html"
return output
elif r.representation == "aadata":
# Ajax refresh
if "draw" in get_vars:
echo = int(get_vars.draw)
else:
echo = None
items = dt.json(totalrows,
filteredrows,
dt_id,
echo,
dt_bulk_actions=dt_bulk_actions)
response.headers["Content-Type"] = "application/json"
return items
else:
r.error(415, current.ERROR.BAD_FORMAT)
else:
r.error(405, current.ERROR.BAD_METHOD)
# =============================================================================
class org_CapacityReport(S3Method):
"""
Custom Report Method for Organisation Capacity Assessment Data
"""
def apply_method(self, r, **attr):
"""
Apply method.
@param r: the S3Request
@param attr: controller options for this request
"""
if r.http == "GET":
if r.representation == "html":
T = current.T
output = dict(title = T("Branch Organisational Capacity Assessment"))
current.response.view = "org/capacity_report.html"
# Maintain RHeader for consistency
if attr.get("rheader"):
rheader = attr["rheader"](r)
if rheader:
output["rheader"] = rheader
data = self._extract(r)
if data is None:
output["items"] = current.response.s3.crud_strings["org_capacity_assessment"].msg_list_empty
return output
indicators, orgs, consolidated = data
# Build the output table
rows = []
rappend = rows.append
section = None
for i in indicators:
if i.section != section:
section = i.section
rappend(TR(TD(section), _class="odd"))
title = TD("%s. %s" % (i.number, i.name))
row = TR(title)
append = row.append
indicator_id = i.id
values = consolidated[indicator_id]
for v in ("A", "B", "C", "D", "E", "F"):
append(TD(values[v]))
for o in orgs:
rating = orgs[o].get(indicator_id, "")
append(TD(rating))
rappend(row)
orepresent = org_OrganisationRepresent(parent=False,
acronym=False)
orgs = [TH(orepresent(o)) for o in orgs]
items = TABLE(THEAD(TR(TH("TOPICS", _rowspan=2),
TH("Consolidated Ratings", _colspan=6),
),
TR(TH("A"),
TH("B"),
TH("C"),
TH("D"),
TH("E"),
TH("F"),
*orgs
),
),
TBODY(*rows),
)
output["items"] = items
return output
elif r.representation == "xls":
data = self._extract(r)
if data is None:
current.session.error = current.response.s3.crud_strings["org_capacity_assessment"].msg_list_empty
redirect(URL(f="capacity_assessment", extension=""))
return self._xls(data)
else:
r.error(415, current.ERROR.BAD_FORMAT)
else:
r.error(405, current.ERROR.BAD_METHOD)
# -------------------------------------------------------------------------
@staticmethod
def _extract(r):
"""
Method to read the data
@param r: the S3Request
"""
# Read all the permitted data
resource = r.resource
resource.load()
rows = resource._rows
if not len(rows):
return None
db = current.db
s3db = current.s3db
# Read all the Indicators
itable = s3db.org_capacity_indicator
indicators = db(itable.deleted == False).select(itable.id,
itable.number,
itable.section,
itable.name,
orderby = itable.number,
)
# Find all the Assessments
assessments = [row.assessment_id for row in rows]
atable = s3db.org_capacity_assessment
assessments = db(atable.id.belongs(assessments)).select(atable.id,
atable.organisation_id,
#atable.date,
# We will just include the most recent for each organisation
orderby = ~atable.date,
)
# Find all the Organisations and the Latest Assessments
latest_assessments = {}
orgs = {}
for a in assessments:
o = a.organisation_id
if o not in orgs:
latest_assessments[a.id] = o
orgs[o] = {}
# Calculate the Consolidated Ratings & populate the individual ratings
consolidated = {}
for i in indicators:
consolidated[i.id] = {"A": 0,
"B": 0,
"C": 0,
"D": 0,
"E": 0,
"F": 0,
}
for row in rows:
a = row.assessment_id
if a in latest_assessments:
indicator = row.indicator_id
rating = row.rating
# Update the consolidated ratings
consolidated[indicator][rating] += 1
# Look up which org this data belongs to
o = latest_assessments[a]
# Populate the individual rating
orgs[o][indicator] = rating
return indicators, orgs, consolidated
# -------------------------------------------------------------------------
@staticmethod
def _xls(data):
"""
Method to output as XLS
@ToDo: Finish & use HTML2XLS method in XLS codec to be DRY & reusable
"""
try:
import xlwt
except ImportError:
if current.auth.permission.format in S3Request.INTERACTIVE_FORMATS:
current.session.error = S3XLS.ERROR.XLWT_ERROR
redirect(URL(extension=""))
else:
error = S3XLS.ERROR.XLWT_ERROR
current.log.error(error)
return error
indicators, orgs, consolidated = data
# | |
from __future__ import print_function
from skimage.util.dtype import img_as_float32
from tensorflow.keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
import numpy as np
import os
import glob
import skimage.io as io
import skimage.transform as trans
from skimage.util import img_as_uint
from pathlib import Path
import cv2
from sklearn.utils import class_weight
from PIL import Image, ImageOps, ImageDraw, ImageFont
from tensorflow.python.keras.preprocessing.image import DirectoryIterator
from tensorflow.keras import backend as K
import matplotlib.pyplot as plt
class Data():
sky = [128, 128, 128] # gray
building = [128, 0, 0] # red
pole = [192, 192, 128] # beige
road = [128, 64, 128] # purple
pavement = [60, 40, 222] # blue
tree = [128, 128, 0] # Olive
sign_symbol = [192, 128, 128] # almost red
fence = [64, 64, 128] # blue
car = [64, 0, 128] # dark purple
pedestrian = [64, 64, 0] # dark yellow (olive-like)
bicyclist = [0, 128, 192] # dark washed azure
unlabelled = [0, 0, 0] # black
predict_suffix = '_predict.png'
COLOR_DICT = np.array(
[
sky,
building,
pole,
road,
pavement,
tree,
sign_symbol,
fence,
car,
pedestrian,
bicyclist,
unlabelled,
]
)
@staticmethod
def adjust_data(img, mask, flag_multi_class, num_class):
if flag_multi_class:
img = img / 255
mask = mask[:, :, :, 0] if (len(mask.shape) == 4) else mask[:, :, 0]
new_mask = np.zeros(mask.shape + (num_class,))
for i in range(num_class):
# for one pixel in the image, find the class in mask and convert it into one-hot vector
# index = np.where(mask == i)
# index_mask = (index[0],index[1],index[2],np.zeros(len(index[0]),dtype = np.int64) + i) if (len(mask.shape) == 4) else (index[0],index[1],np.zeros(len(index[0]),dtype = np.int64) + i)
# new_mask[index_mask] = 1
new_mask[mask == i, i] = 1
new_mask = (
np.reshape(
new_mask,
(
new_mask.shape[0],
new_mask.shape[1] * new_mask.shape[2],
new_mask.shape[3],
),
)
if flag_multi_class
else np.reshape(
new_mask, (new_mask.shape[0] * new_mask.shape[1], new_mask.shape[2])
)
)
mask = new_mask
elif np.max(img) > 1:
img = img.astype('float32')
img = img / 255
mask = mask.astype('float32')
mask = mask / 255
mask[mask >= 0.5] = 1
mask[mask < 0.5] = 0
return (img, mask)
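# Illustrative sketch (not part of the pipeline) of how adjust_data
# one-hot encodes a multi-class mask; the shapes are assumptions chosen
# small for readability:
#   img = np.zeros((1, 4, 4, 3))                      # one RGB image
#   mask = np.random.randint(0, 3, (1, 4, 4, 1))      # labels 0..2
#   img, one_hot = Data.adjust_data(img, mask, True, num_class=3)
#   # one_hot.shape == (1, 16, 3): one channel per class, spatially flattened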
@staticmethod
def data_generator(
batch_size,
data_path,
image_folder,
mask_folder,
aug_dict,
image_color_mode="rgb",
mask_color_mode="rgb",
image_save_prefix="image",
mask_save_prefix="mask",
flag_multi_class=False,
num_class=2,
save_to_dir=None,
target_size=(256, 256),
seed=1,
class_mode=None,
):
"""
can generate image and mask at the same time
use the same seed for image_datagen and mask_datagen to ensure the transformation for image and mask is the same
if you want to visualize the results of generator, set save_to_dir = "your path"
"""
image_datagen = ImageDataGenerator(**aug_dict)
mask_datagen = ImageDataGenerator(**aug_dict)
image_generator = image_datagen.flow_from_directory(
data_path,
classes=[image_folder],
class_mode=class_mode,
color_mode=image_color_mode,
target_size=target_size,
batch_size=batch_size,
save_to_dir=save_to_dir,
save_prefix=image_save_prefix,
seed=seed,
save_format='jpg'
)
mask_generator = mask_datagen.flow_from_directory(
data_path,
classes=[mask_folder],
class_mode=None,
color_mode=mask_color_mode,
target_size=target_size,
batch_size=batch_size,
save_to_dir=save_to_dir,
save_prefix=mask_save_prefix,
seed=seed,
save_format='jpg'
)
generator = zip(image_generator, mask_generator)
for (img, mask) in generator:
img, mask = Data.adjust_data(img, mask, flag_multi_class, num_class)
yield (img, mask)
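# Minimal usage sketch (assumes a directory layout of
# data/images/... and data/masks/... with matching file names):
#   aug = dict(horizontal_flip=True, zoom_range=0.05)
#   gen = Data.data_generator(2, "data", "images", "masks", aug)
#   img_batch, mask_batch = next(gen)  # both augmented with the same seed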
@staticmethod
def test_generator(
imgs,
path,
ext=".JPG",
num_image=30,
target_size=(256, 256),
flag_multi_class=False,
as_gray=False
):
for item in imgs:
img = io.imread(path + item, as_gray=as_gray)
img = img.astype('float32')
img = img / 255
img = trans.resize(img, target_size)
img = np.reshape(img, (1,) + img.shape)
yield img
@staticmethod
def gene_data_npy(
data_path,
flag_multi_class=False,
num_class=2,
image_folder="images",
mask_folder="masks",
image_as_gray=False,
mask_as_gray=False,
):
image_path = os.path.join(data_path, image_folder)
mask_path = os.path.join(data_path, mask_folder)
image_name_arr = glob.glob(os.path.join(image_path, "*.JPG"))
image_arr = []
mask_arr = []
for index, item in enumerate(image_name_arr):
img = io.imread(item, as_gray=image_as_gray)
img = np.reshape(img, img.shape + (1,)) if image_as_gray else img
mask = io.imread(
item.replace(image_path, mask_path).replace(image_folder, mask_folder),
as_gray=mask_as_gray,
)
mask = np.reshape(mask, mask.shape + (1,)) if mask_as_gray else mask
img, mask = Data.adjust_data(img, mask, flag_multi_class, num_class)
image_arr.append(img)
mask_arr.append(mask)
if index % 50 == 0:
print(f"generate data - {index}/{len(image_name_arr)}")
image_arr = np.array(image_arr)
mask_arr = np.array(mask_arr)
return image_arr, mask_arr
@staticmethod
def label_visualize(num_class, color_dict, img):
img = img[:, :, 0] if len(img.shape) == 3 else img
img_out = np.zeros(img.shape + (3,))
for i in range(num_class):
img_out[img == i, :] = color_dict[i]
img_out = img_out.astype('float32')
return img_out / 255
@staticmethod
def save_result(save_path, npyfile, imgs, flag_multi_class=False, num_class=2):
img_path = save_path + "images"
save_path = save_path + "results"
Path(save_path).mkdir(parents=True, exist_ok=True)
for i, item in enumerate(npyfile):
if flag_multi_class:
img = Data.label_visualize(num_class, Data.COLOR_DICT, item)
io.imsave(os.path.join(save_path, imgs[i] + Data.predict_suffix), img)
else:
img = item[:, :, :]
prediction_binary = np.abs(np.mean(img, axis=2) > 0.5) * 255
prediction_binary = prediction_binary.astype(np.uint8)
# generate only the original image with the mask overlaid on it below.
#io.imsave(os.path.join(save_path, imgs[i] + Data.predict_suffix), prediction_binary)
original_img = io.imread(os.path.join(img_path, imgs[i]), as_gray=False)
original_img = Image.fromarray(original_img).convert("RGBA")
original_img = original_img.resize((1008,752), Image.ANTIALIAS)
prediction_img = Image.fromarray(prediction_binary).convert("RGBA")
prediction_arr = np.array(prediction_img)
white_mask = (prediction_arr[:, :, 0:3] == [255,255,255]).all(2)
black_mask = (prediction_arr[:, :, 0:3] == [0,0,0]).all(2)
# Paint background (black-mask) pixels semi-transparent green
prediction_arr[black_mask] = (124, 252, 0, 70) # green, alpha 70 (~27% opacity)
# Paint foreground (white-mask) pixels semi-transparent yellow
alpha = 127
color = (255, 215, 0, alpha) # yellow, ~50% opacity
prediction_arr[white_mask] = color
prediction_img = Image.fromarray(prediction_arr)
composed_image = Image.new('RGBA', original_img.size, color="gray")
composed_image.paste(original_img, (0,0), original_img)
composed_image.paste(prediction_img, (0,0), mask=prediction_img)
#composed_image.show()
composed_image.save(os.path.join(save_path, imgs[i] + "_original" + Data.predict_suffix))
@staticmethod
def compare_result(base_path, imgs, font_path = 'assets/arial-unicode.ttf'):
save_path = base_path + "diff/"
Path(save_path).mkdir(parents=True, exist_ok=True)
font = ImageFont.truetype(font_path, 18)
gsd = 1.73 # 1.73cm/px
for i, file_name in enumerate(imgs):
image_data = Data.load_images_to_compare(base_path, file_name)
images = image_data[0]
images_masks_size = image_data[1]
border = columns = 4
subtitle_size = 20
rows = 2
total_width, max_height, border, img_mais_larga, img_mais_alta = Data.get_sizes(images, border, columns, rows, subtitle_size)
composed_img = Image.new('RGBA', (total_width, max_height), color="gray")
x_offset = border
y_offset = border + subtitle_size
total_imgs = (len(images) if rows * columns > len(images) else rows * columns)
current_row = 0
current_column = 0
for j in range(0, total_imgs):
current_column += 1
img = images[j]
mask_size = images_masks_size[j]
Data.add_image(composed_img, img, current_column, x_offset, y_offset, subtitle_size, border, font, mask_size, gsd)
x_offset += (img_mais_larga + border)
if(current_column == columns):
current_column = 0
current_row += 1
x_offset = border
y_offset = (subtitle_size + border) * (current_row + 1) + img_mais_alta
composed_img.save(os.path.join(save_path, imgs[i] + ".png"))
# if(i >= 5):
# break
@staticmethod
def load_images_to_compare(base_path, file_name):
original = io.imread(os.path.join(base_path, "images", file_name), as_gray=False)
mask = io.imread(os.path.join(base_path, "masks", file_name), as_gray=False)
predict = io.imread(os.path.join(base_path, "results", file_name + Data.predict_suffix), as_gray=False)
mask[mask >= 128] = 255
mask[mask < 128] = 0
mask_predict_diff = ((np.abs(mask[:,:,0] - predict) > 0.5) * 255).astype(np.uint8)
original_rgba = Image.fromarray(original).convert("RGBA")
original_filter_mask, mask_pixels = Data.parse_mask_to_rgba(mask, 0)
original_filter_predict, predict_mask_pixels = Data.parse_mask_to_rgba(predict, 1)
original_filter_diff, diff_mask_pixels = Data.parse_mask_to_rgba(mask_predict_diff, 2)
original = Image.fromarray(original)
image_data = [
[
original,
Image.fromarray(mask).convert('RGB'),
Image.fromarray(predict).convert('RGB'),
Image.fromarray(mask_predict_diff).convert('RGB'),
original,
Image.alpha_composite(original_rgba, original_filter_mask),
Image.alpha_composite(original_rgba, original_filter_predict),
Image.alpha_composite(original_rgba, original_filter_diff),
],
[
0,
0,
0,
0,
0,
mask_pixels,
predict_mask_pixels,
diff_mask_pixels
]
]
return image_data
@staticmethod
def get_sizes(images, border, columns, rows, subtitle_size):
widths, heights = zip(*(img.size for img in images))
img_mais_larga = max(widths) # widest image
img_mais_alta = max(heights) # tallest image
total_width = (img_mais_larga * columns) + ((columns + 1) * border)
max_height = (img_mais_alta * rows) + ((rows + 1) * (border + subtitle_size))
return total_width, max_height, border, img_mais_larga, img_mais_alta
@staticmethod
def add_image(composed_img, img, column_type, x_offset, y_offset, subtitle_size, border, font, mask_size, gsd):
composed_img.paste(img, (x_offset, y_offset))
draw = ImageDraw.Draw(composed_img)
subtitle_text = Data.get_image_subtitle_text(column_type, mask_size, gsd)
draw.text((x_offset, y_offset - subtitle_size - border), subtitle_text, (255, 255, 255), font=font)
@staticmethod
def get_image_subtitle_text(column_type, mask_size, gsd):
if(column_type == 1):
return f'original{Data.get_size(mask_size, gsd)}'
elif (column_type == 2):
return f'ground-truth{Data.get_size(mask_size, gsd)}'
elif (column_type == 3):
return f'predict{Data.get_size(mask_size, gsd)}'
else:
return f'diff gt/predict{Data.get_size(mask_size, gsd)}'
@staticmethod
def get_size(mask_size, gsd):
if(mask_size == 0 or gsd == 0):
return ''
total = mask_size * gsd
if(total / 100 > 1):
return f': {mask_size}px - {round(total / 10000, 2)}m²'
else:
return f': {mask_size}px - {round(total, 2)}cm'
@staticmethod
def parse_mask_to_rgba(mask, mask_type):
mask = Image.fromarray(mask)
if(mask.mode != 'RGBA' and mask.mode != 'RGB'):
mask = mask.convert('RGB')
# PIL's size is (width, height); reverse it to match the (height, width) array
RGBA = np.dstack((mask, np.full(mask.size[::-1], 255, dtype=np.uint8)))
# Make mask of black or white pixels too. - mask is True where image has the right color
black_mask = (RGBA[:, :, 0:3] == [0,0,0]).all(2)
white_mask = (RGBA[:, :, 0:3] == [255,255,255]).all(2)
# Make all pixels matched by mask into transparent ones
RGBA[black_mask] = (0,0,0,0)
# Make | |
# Avoid negative density
raise ValueError("Static density {}kg/m**3 cannot be negative".format(static_density))
else:
rho = static_density
qi = 0.5*rho*vel**2
return qi
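# Worked check (illustrative): sea-level air with rho = 1.225 kg/m**3
# at vel = 50 m/s gives qi = 0.5*1.225*50**2 = 1531.25 Pa.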
def dynamic_pressure_from_statpress(self, mach, static_pressure, static_temperature=None):
"""
Calculates the dynamic pressure of the flow, provided its static pressure
and the Mach number.
Note that if the flow can be considered incompressible (i.e. the Mach
number of the flow is less than 0.3), then the stagnation pressure
can be obtained as the static pressure plus the dynamic pressure:
if M<0.3, then pt = p + qi
Inputs:
-------
mach: float. Mach number of the fluid [non-dimensional]
static_pressure: float. Local static pressure [Pa]
static_temperature: float. Static temperature of the gas [K]
If no static temperature is provided T_ref=298.15K is
taken if fluid is Perfect gas, otherwise a valid static
temperature must be provided.
Outputs:
--------
qi: float. Dynamic pressure of the flow [Pa]
"""
if static_temperature is None:
if not isinstance(self.fluid, PerfectIdealGas):
# Semiperfect gas: a static temperature is required to evaluate gamma(T)
raise ValueError("A valid static temperature must be provided")
else:
# Perfect gas: gamma is constant, no temperature needed
gamma = self.fluid.gamma()
else:
# Gamma evaluated at the provided static temperature
gamma = self.fluid.gamma(static_temperature)
if not 0<static_pressure:
# Avoid negative pressure
raise ValueError("Static pressure {}Pa cannot be negative".format(static_pressure))
else:
p = static_pressure
qi = gamma/2 * p * mach**2
return qi
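# Worked check (illustrative): air with gamma = 1.4 at p = 101325 Pa
# and M = 0.5 gives qi = 0.7*101325*0.25 = 17731.875 Pa.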
def vel_from_stag_temp(self, stagnation_temperature, static_temperature):
"""
Flow velocity from the kinetic energy of the fluid, as the difference
between the stagnation and static temperatures.
Inputs:
-------
stagnation_temperature: float. Stagnation temperature of the gas [K]
static_temperature: float. Static temperature of the gas [K]
Outputs:
--------
vel: float. Flow velocity [m/s]
"""
if not (0<static_temperature<6000 and 0<stagnation_temperature<6000):
# Avoid negative or unrealistic temperatures
raise ValueError("Valid temperatures in the range (0, 6000)K must be provided.")
elif not stagnation_temperature>static_temperature:
# Avoid negative sqrt
raise ValueError("Stagnation temperature must be greater than static temperature.")
else:
T = static_temperature
Tt = stagnation_temperature
#difcpT = (self.fluid.cp(Tt)*Tt - self.fluid.cp(T)*T)
#vel = np.sqrt(2*difcpT)
vel = np.sqrt(2*self.fluid.cp(T)*(Tt-T))
return vel
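# Worked check (illustrative): with cp ~ 1004.5 J/(kg*K) for air,
# Tt = 300 K and T = 290 K give vel = sqrt(2*1004.5*10) ~ 141.7 m/s.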
def vel_from_mach(self, mach, static_temperature):
"""
Flow velocity from the Mach number of the fluid.
Inputs:
-------
mach: float. Mach number [dimensionless]
static_temperature: float. Static temperature of the gas [K]
Outputs:
--------
vel: float. Flow velocity [m/s]
"""
if not 0<static_temperature<6000:
# Avoid negative temperature
raise ValueError("Static temperature {}K out of bounds [0-6000]K".format(static_temperature))
else:
T = static_temperature
a = self.sound_speed(T)
vel = a*mach
return vel
def stag_density_from_mach(self, mach, static_density, static_temperature=None):
"""
Calculates the stagnation density given the Mach number and the local
static density.
In case a semiperfect gas is used (gamma is a function of the temperature)
a temperature must be provided to gather the heat capacity ratio.
Inputs:
-------
mach: float. Mach number of the fluid [non-dimensional]
static_density: float. Local static density [kg/m**3]
static_temperature: float. Static temperature of the gas [K]
If no static temperature is provided T_ref=298.15K is
taken if fluid is Perfect gas, otherwise a valid static
temperature must be provided.
Outputs:
--------
rhot: float. Stagnation density [kg/m**3]
"""
if static_temperature is None:
if not isinstance(self.fluid, PerfectIdealGas):
# Semiperfect gas: a static temperature is required to evaluate gamma(T)
raise ValueError("A valid static temperature must be provided")
else:
# Perfect gas: gamma is constant, no temperature needed
gamma = self.fluid.gamma()
T = None
else:
# Gamma evaluated at the provided static temperature
gamma = self.fluid.gamma(static_temperature)
if not 0<static_temperature<6000:
# Avoid negative temperatures
raise ValueError("Static temperature {}K out of bounds [0-6000]K".format(static_temperature))
else:
T = static_temperature
if not 0<static_density:
# Avoid negative density
raise ValueError("Static density {}kg/m**3 cannot be negative".format(static_density))
else:
rho = static_density
Tt_T = self.stagnation_static_rel(mach, T)
rhot = rho * (Tt_T)**(1/(gamma-1))
return rhot
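# Worked check (illustrative): a perfect gas with gamma = 1.4 at M = 1
# has Tt/T = 1.2, so rhot = rho*1.2**(1/0.4) = rho*1.2**2.5 ~ 1.577*rho.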
def stat_temp_from_mach(self, mach, stagnation_temperature):
"""
Calculates the static temperature of an isentropic flow given the
Mach number and the stagnation temperature of the flow.
The solution is iterated to obtain the gamma(T) value corresponding to
the resulting static temperature.
Inputs:
-------
mach: float. Mach number of the fluid [dimensionless]
stagnation_temperature: float. Stagnation temperature of the gas [K]
Outputs:
--------
T: float. static temperature [K]
"""
if not 0<stagnation_temperature<6000:
# Avoid negative temperature
raise ValueError("Stagnation temperature {}K out of bounds [0-6000]K".format(stagnation_temperature))
else:
Tt = stagnation_temperature
## Iterate solution:
# Iteration flag and counter
iterate = True
niter = 0
# Assume static and stagnation temperatures initially have the same value to get a valid gamma(T) value
T = Tt
# Iterator:
while iterate:
niter += 1
# Store old T value and get a new one assuming gamma(T) is valid
T_0 = T
Tt_T = self.stagnation_static_rel(mach=mach, static_temperature=T)
T = Tt/Tt_T
# Get residual between old and new value of T
residual = (T-T_0)/T_0
if np.abs(residual)<self.iter_tolerance:
# If difference below tolerance, T is a valid solution
iterate = False
elif niter==self.niter_max:
# If maximum iterations are reached, a warning is displayed and the loop stops
warnings.warn("Warning: Maximum iterations ({}) exceeded at stat_temp_from_mach (pyturb.gas_models.isentropic_flow)".format(self.niter_max), UserWarning)
iterate = False
return T
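# Fixed-point sketch (illustrative, assuming the usual isentropic
# relation Tt/T = 1 + (gamma-1)/2*M**2): each pass evaluates
#   T_next = Tt / (1 + (gamma(T)-1)/2 * mach**2)
# For a perfect gas gamma is constant and the loop converges on the
# second iteration; for a semiperfect gas gamma(T) is re-evaluated
# until the relative change in T falls below iter_tolerance.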
def stat_temp_from_vel(self, vel, stagnation_temperature, mean_cp='False'):
"""
Calculates the static temperature of an isentropic flow given the
velocity and the stagnation temperature of the flow.
The static temperature is obtained from the stagnation temperature and
the velocity with:
ht = h + 0.5*v**2 --> T = (cp(Tt)*Tt - 0.5*v**2)/cp(T)
A simplification may be made by assuming cp(Tt)/cp(T) is close to 1. This
assumption is equivalent to calculating the static temperature with
stat_temp_from_mach.
In any case the solution is iterated to obtain the cp(T) corresponding to
the static temperature.
Inputs:
-------
vel: float. Velocity of the fluid [m/s]
stagnation_temperature: float. Stagnation temperature of the gas [K]
mean_cp: str. By default "False". If "False", the simplified
T = Tt - 0.5*v**2/cp(T) is calculated. If "True",
T = (cp(Tt)*Tt - 0.5*v**2)/cp(T) is used.
Outputs:
--------
T: float. static temperature [K]
"""
if not 0<stagnation_temperature<6000:
# Avoid negative temperature
raise ValueError("Stagnation temperature {}K out of bounds [0-6000]K".format(stagnation_temperature))
else:
Tt = stagnation_temperature
# cp value at Tt:
if mean_cp.lower()=='false':
use_mean_cp = False
elif mean_cp.lower()=='true':
cpt = self.fluid.cp(Tt)
use_mean_cp = True
else:
warnings.warn('Unknown mean_cp option: {}. Changing to mean_cp=False'.format(mean_cp), UserWarning)
use_mean_cp = False
## Iterate solution:
# Iteration flag and counter
iterate = True
niter = 0
# Assume static and stagnation temperatures initially have the same value to get a valid cp(T) value
T = Tt
# Iterator:
while iterate:
niter += 1
# Store old T value and get a new one assuming gamma(T) is valid
T_0 = T
cp0 = self.fluid.cp(T)
if use_mean_cp:
T = cpt/cp0*Tt - 0.5*vel**2/cp0
else:
T = Tt - 0.5*vel**2/cp0
# Get residual between old and new value of T
residual = (T-T_0)/T_0
if np.abs(residual)<self.iter_tolerance:
# If difference below tolerance, T is a valid solution
iterate = False
elif niter==self.niter_max:
# If maximum iterations are reached, a warning is displayed and the loop stops
warnings.warn("Warning: Maximum iterations ({}) exceeded at stat_temp_from_vel (pyturb.gas_models.isentropic_flow)".format(self.niter_max), UserWarning)
iterate = False
return T
def stat_pressure_from_mach(self, mach, stagnation_pressure, stagnation_temperature=None):
"""
Calculates the static pressure of an isentropic flow given the
Mach number and the stagnation pressure of the flow.
The solution is iterated to obtain the gamma(T) value corresponding to
the static temperature.
Inputs:
-------
mach: float. Mach number of the fluid [dimensionless]
stagnation_pressure: float. Stagnation pressure of the gas [Pa]
stagnation_temperature: float. Stagnation temperature of the gas [K]
Outputs:
--------
p: float. static pressure [Pa]
"""
if isinstance(self.fluid, PerfectIdealGas):
# Perfect gas
is_perfect_gas = True
gamma = self.fluid.gamma()
else:
# Semiperfect gas
is_perfect_gas = False
if stagnation_temperature is None:
raise ValueError("A valid stagnation temperature must be provided")
else:
if not 0<stagnation_temperature<6000:
# Avoid negative temperature
raise ValueError("Stagnation temperature {}K out of bounds [0-6000]K".format(stagnation_temperature))
else:
Tt = stagnation_temperature
if not 0<stagnation_pressure:
# Avoid negative pressure
raise ValueError("Static pressure {}Pa cannot be negative".format(stagnation_pressure))
else:
pt = stagnation_pressure
if is_perfect_gas:
# Perfect gas solution:
Tt_T = self.stagnation_static_rel(mach=mach, static_temperature=None)
p = pt/(Tt_T**(gamma/(gamma-1)))
else:
## Iterate solution:
# Iteration flag and counter
iterate = True
niter = 0
# Assume static and stagnation temperatures initially have the
areas " + \
validTime
phraseType = "NEW"
#
# Phrase for EXT
#
elif eachHazard['act'] == 'EXT':
phraseType = "NEW"
#prevExpPhrase = self.getHourAMPMTimeZone(\
# eachHazard['previousEnd'], eachHazard['id'])
prevRec = copy.deepcopy(eachHazard)
prevRec['endTime'] = eachHazard['previousEnd']
prevExpPhrase = self.getTimingPhrase(prevRec, issuanceTime)
attrPhrase = watchName + ", previously in effect " +\
prevExpPhrase + ", is now in effect " + \
validTime + " for the following areas"
#
# Generic Phrase...should never reach this point
#
else:
startingPhrase = "The National Weather Service" + \
" has issued |* watch type *| |* watch number *|" + \
" until |* watch end time *| for the following areas"
attrPhrase = startingPhrase
phraseType = "NEW"
#
# Add phrase to forecast
#
fcst = fcst + attrPhrase + '\n\n'
# Get the phrasing set up for the type of event
if phraseType == "NEW":
county1 = "In {area} this watch includes {number} {placeType}"
county2 = "In {area} this watch includes {number} {placeTypes}"
indepCity1 = "In {area} this watch includes {number} " + \
"independent city"
indepCity2 = "In {area} this watch includes {number} " + \
"independent cities"
marine = "This watch includes the following adjacent coastal waters"
elif phraseType == "CANCEL":
county1 = "In {area} this cancels {number} {placeType}"
county2 = "In {area} this cancels {number} {placeTypes}"
indepCity1 = "In {area} this cancels {number} INDEPENDENT CITY"
indepCity2 = "In {area} this cancels {number} INDEPENDENT CITIES"
marine = "This cancels the following adjacent coastal waters"
elif phraseType == "EXPIRE":
county1 = "In {area} this allows to expire {number} {placeType}"
county2 = "In {area} this allows to expire {number} {placeTypes}"
indepCity1 = "In {area} this allows to expire {number} " +\
"independent city"
indepCity2 = "In {area} this allows to expire {number} " +\
"independent cities"
marine = "This allows to expire the following adjacent coastal waters"
elif phraseType == "REPLACE":
county1 = "In {area} the new watch includes {number} {placeType}"
county2 = "In {area} the new watch includes {number} {placeTypes}"
indepCity1 = "In {area} the new watch includes {number} " + \
"independent city"
indepCity2 = "In {area} the new watch includes {number} " + \
"independent cities"
marine = "The new watch includes the following adjacent coastal waters"
else:
raise Exception, "Illegal phraseType in WCN formatter. " +\
"Expected NEW, CANCEL, EXPIRE, or REPLACE. Got " + phraseType
# Add the list of counties
countyTuple = self._getFilteredAreaList(
segmentAreas, mode="COUNTY", areaDictName=self._areaDictionary)
fcst = fcst + self._makeTextFromCountyTuple(countyTuple,
mainFormatSingular = county1, mainFormatPlural = county2,
mode=self._statePartMode)
# Add the lists of independent cities
countyTuple = self._getFilteredAreaList(
segmentAreas, mode="CITY", areaDictName=self._areaDictionary)
fcst = fcst + self._makeTextFromCountyTuple(countyTuple,
mainFormatSingular = indepCity1, mainFormatPlural = indepCity2,
mode=self._statePartMode)
# Add the lists of marine zones
countyTuple = self._getFilteredAreaList(
segmentAreas, mode="ZONE", areaDictName=self._areaDictionary)
fcst = fcst + self._makeTextFromMarineTuple(countyTuple,
mainFormat = marine)
# Add the lists of cities
fcst = fcst + "\n\n" + self.getCityList(
segmentAreas, areaDictName = self._areaDictionary, addPeriod=True,
forceAlphaSort=True)
#
# Line Wrap
#
fcst = self.endline(fcst, linelength=self._lineLength, breakStr=[" ", "...", "-"])
#
# Finished
#
return fcst
def _postProcessArea(self, fcst, segmentAreas, argDict):
fcst = fcst + "$$\n\n"
return fcst
def _countFilteredAreaList(self, countyTuples, index):
# Returns a dictionary keyed by the 'index' element of each tuple,
# with a count of how many records share that key.
counts = {}
for values in countyTuples:
key = values[index]
counts[key] = counts.get(key, 0) + 1
return counts
def _getFilteredAreaList(self, areaList, areaDictName="AreaDictionary",
mode="COUNTY"):
#returns list of sorted tuples:
# [(state, partOfState, partOfState State, zonename)]
#mode='COUNTY','ZONE','CITY'
# Access the UGC information for the area(s) if available
areaDict = ModuleAccessor.ModuleAccessor().variable(areaDictName,
"AreaDictionary")
if areaDict is None:
return []
# sort by zone name
if mode == "ZONE":
areaList.sort()
# Make a list of (state, partOfStateAndState, county) tuples
countyList = []
for areaName in areaList:
if areaDict.has_key(areaName):
entry = areaDict[areaName]
else:
entry = {}
LogStream.logProblem(\
"AreaDictionary missing definition for [" + areaName + "].")
if mode == "COUNTY":
if len(areaName) == 6 and areaName[2] != "C": #not ssCnnn
continue #not a county fips
if entry.has_key("independentCity") and \
entry["independentCity"] == 1:
continue #independent city, when in county mode
elif mode == "CITY":
if len(areaName) == 6 and areaName[2] != "C": #not ssCnnn
continue #not a county/city fips
if not entry.has_key("independentCity") or \
entry["independentCity"] == 0:
continue #not independent city, when in city mode
elif mode == "ZONE":
if len(areaName) == 6 and areaName[2] != "Z": #not ssZnnn
continue #not a zone code
else:
raise Exception, "Illegal mode specified " + mode
if entry.has_key("ugcName") and len(entry['ugcName']):
# Get fullStateName
state = areaName[0:2]
if entry.has_key("fullStateName") and \
len(entry['fullStateName']):
state = entry["fullStateName"]
else:
state = "<fullStateName for " + state + " missing>"
LogStream.logProblem("AreaDictionary does not contain " +\
'fullStateName definition for ', areaName)
# Get part-of-state information with state (not for Zones)
if mode == "ZONE": #marine
partOfState = ""
else:
if entry.has_key("partOfState") and \
len(entry['partOfState']):
partOfState = entry["partOfState"] + ' ' + state
else:
partOfState = "<partOfState> " + state
LogStream.logProblem(\
"AreaDictionary does not contain " +\
'partOfState definition for ', areaName)
# Get county name
county = entry["ugcName"]
# Eliminate the name County and others, if in the name
if mode == "COUNTY":
val = ['County','Counties','Parish','Parishes']
for v in val:
county = county.replace(" " + v, "")
countyList.append((state, partOfState, county))
#missing ugcName
else:
countyList.append(("<ugcName>", "<ugcName>", areaName))
LogStream.logProblem("AreaDictionary does not contain " +\
'ugcName definition for ', areaName)
# Sort by state, part of state, then county
if mode != "ZONE":
countyList.sort() #state, partOfState, county
return countyList
def _makeTextFromMarineTuple(self, countyTuple, lineLength=66, colWidth=22,
mainFormat="This watch includes the following adjacent coastal waters"):
#countyTuple: (state, partOfStateAndState, name)
#extract out the marine zones
mzones = []
for state, partOfState, name in countyTuple:
mzones.append(name)
if len(mzones) == 0:
return ""
return mainFormat + "\n\n" + \
self.formatCountyColumns(mzones, colWidth, lineLength) + '\n\n'
def _makeTextFromCountyTuple(self, countyTuple, lineLength=66, colWidth=22,
mainFormatSingular="In {area} this watch includes {number} {placeType}",
mainFormatPlural="In {area} this watch includes {number} {placeTypes}",
subFormat="In {area}", mode="byState"):
#countyTuple: (state, partOfStateAndState, name)
#The type of text depends upon the mode: "byState" or "byPart"
# "byState" formatting:
# mainFormatSingular/mainFormatPlural (for each state)
# subFormat (for each partOfState)
# column formatting of names
#
# "byPart" formatting:
# (subFormat not used):
# mainFormatSingular/mainFormatPlural (for each partOfState State)
# column formatting of names
# Format
if mode == "byState":
return self._byStateTextFromCountyTuple(countyTuple, lineLength,
colWidth, mainFormatSingular, mainFormatPlural, subFormat)
elif mode == "byPart":
return self._byPartTextFromCountyTuple(countyTuple, lineLength,
colWidth, mainFormatSingular, mainFormatPlural)
else:
raise Exception, "Illegal mode in makeTextFromCountyTuple(): " +\
`mode`
def _byStateTextFromCountyTuple(self, countyTuple, lineLength,
colWidth, mainFormatSingular, mainFormatPlural, subFormat):
#Determine counts for each area
counts = self._countFilteredAreaList(countyTuple, 0)
# Convert countyTuple into format that follows the text format
# byState: [(state, [(partOfStateAndState, [names])]]
geoList = []
geoPList = []
names = []
curState = None
curPart = None
for state, partState, name in countyTuple:
if curState == state:
if curPart == partState:
names.append(name)
else:
if len(names):
geoPList.append((curPart, names))
names = [name]
curPart = partState
else:
if len(names):
geoPList.append((curPart, names))
if len(geoPList):
geoList.append((curState, geoPList))
geoPList = []
names = [name]
curPart = partState
curState = state
if len(names):
geoPList.append((curPart, names))
geoList.append((curState, geoPList))
# Now Format the text
result = ''
for state, partStateNames in geoList:
#special District of Columbia, no parts of state descriptors
if state == "District of Columbia":
result = result + "The District of Columbia\n\n"
continue
ccount = counts.get(state, 0)
if ccount > 1:
header = mainFormatPlural
else:
header = mainFormatSingular
header = string.replace(header, '{area}', state)
header = string.replace(header, '{number}', str(ccount))
if state == "Louisiana":
header = string.replace(header, '{placeType}', "parish")
header = string.replace(header, '{placeTypes}', "parishes")
else:
header = string.replace(header, '{placeType}', "county")
header = string.replace(header, '{placeTypes}', "counties")
result = result + header + '\n\n'
for partState, names | |
# heat/core/tests/test_manipulations.py
import numpy as np
import torch
import heat as ht
from .test_suites.basic_test import TestCase
class TestManipulations(TestCase):
def test_column_stack(self):
# test local column_stack, 2-D arrays
a = np.arange(10, dtype=np.float32).reshape(5, 2)
b = np.arange(15, dtype=np.float32).reshape(5, 3)
np_cstack = np.column_stack((a, b))
ht_a = ht.array(a)
ht_b = ht.array(b)
ht_cstack = ht.column_stack((ht_a, ht_b))
self.assertTrue((np_cstack == ht_cstack.numpy()).all())
# 2-D and 1-D arrays
c = np.arange(5, dtype=np.float32)
np_cstack = np.column_stack((a, b, c))
ht_c = ht.array(c)
ht_cstack = ht.column_stack((ht_a, ht_b, ht_c))
self.assertTrue((np_cstack == ht_cstack.numpy()).all())
# 2-D and 1-D arrays, distributed
c = np.arange(5, dtype=np.float32)
np_cstack = np.column_stack((a, b, c))
ht_a = ht.array(a, split=1)
ht_b = ht.array(b, split=1)
ht_c = ht.array(c, split=0)
ht_cstack = ht.column_stack((ht_a, ht_b, ht_c))
self.assertTrue((ht_cstack.numpy() == np_cstack).all())
self.assertTrue(ht_cstack.split == 1)
# 1-D arrays, distributed, different dtypes
d = np.arange(10).astype(np.float32)
e = np.arange(10)
np_cstack = np.column_stack((d, e))
ht_d = ht.array(d, split=0)
ht_e = ht.array(e, split=0)
ht_cstack = ht.column_stack((ht_d, ht_e))
self.assertTrue((ht_cstack.numpy() == np_cstack).all())
self.assertTrue(ht_cstack.dtype == ht.float32)
self.assertTrue(ht_cstack.split == 0)
# test exceptions
f = ht.random.randn(5, 4, 2, split=1)
with self.assertRaises(ValueError):
ht.column_stack((a, b, f))
def test_concatenate(self):
# cases to test:
# Matrices / Vectors
# s0 s1 axis
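# (s0 = split axis of x, s1 = split axis of y, axis = concatenation
#  axis; "None" means the tensor is not distributed)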
# None None 0
x = ht.zeros((16, 15), split=None)
y = ht.ones((16, 15), split=None)
res = ht.concatenate((x, y), axis=0)
self.assertEqual(res.gshape, (32, 15))
self.assertEqual(res.dtype, ht.float)
_, _, chk = res.comm.chunk((32, 15), res.split)
lshape = [0, 0]
for i in range(2):
lshape[i] = chk[i].stop - chk[i].start
self.assertEqual(res.lshape, tuple(lshape))
# None None 1
res = ht.concatenate((x, y), axis=1)
self.assertEqual(res.gshape, (16, 30))
self.assertEqual(res.dtype, ht.float)
_, _, chk = res.comm.chunk((16, 30), res.split)
lshape = [0, 0]
for i in range(2):
lshape[i] = chk[i].stop - chk[i].start
self.assertEqual(res.lshape, tuple(lshape))
# =============================================
# None 0 0
x = ht.zeros((16, 15), split=None)
y = ht.ones((16, 15), split=0)
res = ht.concatenate((x, y), axis=0)
self.assertEqual(res.gshape, (32, 15))
self.assertEqual(res.dtype, ht.float)
_, _, chk = res.comm.chunk((32, 15), res.split)
lshape = [0, 0]
for i in range(2):
lshape[i] = chk[i].stop - chk[i].start
self.assertEqual(res.lshape, tuple(lshape))
# None 0 1
res = ht.concatenate((x, y), axis=1)
self.assertEqual(res.gshape, (16, 30))
self.assertEqual(res.dtype, ht.float)
_, _, chk = res.comm.chunk((16, 30), res.split)
lshape = [0, 0]
for i in range(2):
lshape[i] = chk[i].stop - chk[i].start
self.assertEqual(res.lshape, tuple(lshape))
# =============================================
# None 1 1
x = ht.zeros((16, 15), split=None)
y = ht.ones((16, 15), split=1)
res = ht.concatenate((x, y), axis=1)
self.assertEqual(res.gshape, (16, 30))
self.assertEqual(res.dtype, ht.float)
_, _, chk = res.comm.chunk((16, 30), res.split)
lshape = [0, 0]
for i in range(2):
lshape[i] = chk[i].stop - chk[i].start
self.assertEqual(res.lshape, tuple(lshape))
#
# None 1 0
x = ht.zeros((16, 15), split=None)
y = ht.ones((16, 15), split=1)
res = ht.concatenate((x, y), axis=0)
self.assertEqual(res.gshape, (32, 15))
self.assertEqual(res.dtype, ht.float)
_, _, chk = res.comm.chunk((32, 15), res.split)
lshape = [0, 0]
for i in range(2):
lshape[i] = chk[i].stop - chk[i].start
self.assertEqual(res.lshape, tuple(lshape))
# # =============================================
# # 0 None 0
x = ht.zeros((16, 15), split=0)
y = ht.ones((16, 15), split=None)
res = ht.concatenate((x, y), axis=0)
self.assertEqual(res.gshape, (32, 15))
self.assertEqual(res.dtype, ht.float)
_, _, chk = res.comm.chunk((32, 15), res.split)
lshape = [0, 0]
for i in range(2):
lshape[i] = chk[i].stop - chk[i].start
self.assertEqual(res.lshape, tuple(lshape))
# # 0 None 1
res = ht.concatenate((x, y), axis=1)
self.assertEqual(res.gshape, (16, 30))
self.assertEqual(res.dtype, ht.float)
_, _, chk = res.comm.chunk((16, 30), res.split)
lshape = [0, 0]
for i in range(2):
lshape[i] = chk[i].stop - chk[i].start
self.assertEqual(res.lshape, tuple(lshape))
# =============================================
# 1 None 0
x = ht.zeros((16, 15), split=1)
y = ht.ones((16, 15), split=None)
res = ht.concatenate((x, y), axis=0)
self.assertEqual(res.gshape, (32, 15))
self.assertEqual(res.dtype, ht.float)
_, _, chk = res.comm.chunk((32, 15), res.split)
lshape = [0, 0]
for i in range(2):
lshape[i] = chk[i].stop - chk[i].start
self.assertEqual(res.lshape, tuple(lshape))
# 1 None 1
res = ht.concatenate((x, y), axis=1)
self.assertEqual(res.gshape, (16, 30))
self.assertEqual(res.dtype, ht.float)
_, _, chk = res.comm.chunk((16, 30), res.split)
lshape = [0, 0]
for i in range(2):
lshape[i] = chk[i].stop - chk[i].start
self.assertEqual(res.lshape, tuple(lshape))
# =============================================
x = ht.zeros((16, 15), split=0)
y = ht.ones((16, 15), split=0)
# # 0 0 0
res = ht.concatenate((x, y), axis=0)
self.assertEqual(res.gshape, (32, 15))
self.assertEqual(res.dtype, ht.float)
_, _, chk = res.comm.chunk((32, 15), res.split)
lshape = [0, 0]
for i in range(2):
lshape[i] = chk[i].stop - chk[i].start
self.assertEqual(res.lshape, tuple(lshape))
# 0 0 1
res = ht.concatenate((x, y), axis=1)
self.assertEqual(res.gshape, (16, 30))
self.assertEqual(res.dtype, ht.float)
_, _, chk = res.comm.chunk((16, 30), res.split)
lshape = [0, 0]
for i in range(2):
lshape[i] = chk[i].stop - chk[i].start
self.assertEqual(res.lshape, tuple(lshape))
# =============================================
x = ht.zeros((16, 15), split=1)
y = ht.ones((16, 15), split=1)
# 1 1 0
res = ht.concatenate((x, y), axis=0)
self.assertEqual(res.gshape, (32, 15))
self.assertEqual(res.dtype, ht.float)
_, _, chk = res.comm.chunk((32, 15), res.split)
lshape = [0, 0]
for i in range(2):
lshape[i] = chk[i].stop - chk[i].start
self.assertEqual(res.lshape, tuple(lshape))
# # 1 1 1
res = ht.concatenate((x, y), axis=1)
self.assertEqual(res.gshape, (16, 30))
self.assertEqual(res.dtype, ht.float)
_, _, chk = res.comm.chunk((16, 30), res.split)
lshape = [0, 0]
for i in range(2):
lshape[i] = chk[i].stop - chk[i].start
self.assertEqual(res.lshape, tuple(lshape))
# =============================================
x = ht.zeros((16, 15, 14), split=2)
y = ht.ones((16, 15, 14), split=2)
# 2 2 0
res = ht.concatenate((x, y), axis=0)
self.assertEqual(res.gshape, (32, 15, 14))
self.assertEqual(res.dtype, ht.float)
_, _, chk = res.comm.chunk((32, 15, 14), res.split)
lshape = [0, 0, 0]
for i in range(3):
lshape[i] = chk[i].stop - chk[i].start
self.assertEqual(res.lshape, tuple(lshape))
# 2 2 1
res = ht.concatenate((x, y), axis=1)
self.assertEqual(res.gshape, (16, 30, 14))
self.assertEqual(res.dtype, ht.float)
_, _, chk = res.comm.chunk((16, 30, 14), res.split)
lshape = [0, 0, 0]
for i in range(3):
lshape[i] = chk[i].stop - chk[i].start
self.assertEqual(res.lshape, tuple(lshape))
# # 2 2 2
res = ht.concatenate((x, y), axis=2)
self.assertEqual(res.gshape, (16, 15, 28))
self.assertEqual(res.dtype, ht.float)
_, _, chk = res.comm.chunk((16, 15, 28), res.split)
lshape = [0, 0, 0]
for i in range(3):
lshape[i] = chk[i].stop - chk[i].start
self.assertEqual(res.lshape, tuple(lshape))
#
# =============================================
y = ht.ones((16, 15, 14), split=None)
# 2 None 1
res = ht.concatenate((x, y), axis=1)
self.assertEqual(res.gshape, (16, 30, 14))
self.assertEqual(res.dtype, ht.float)
_, _, chk = res.comm.chunk((16, 30, 14), res.split)
lshape = [0, 0, 0]
for i in range(3):
lshape[i] = chk[i].stop - chk[i].start
self.assertEqual(res.lshape, tuple(lshape))
# 2 None 2
res = ht.concatenate((x, y), axis=2)
self.assertEqual(res.gshape, (16, 15, 28))
self.assertEqual(res.dtype, ht.float)
_, _, chk = res.comm.chunk((16, 15, 28), res.split)
lshape = [0, 0, 0]
for i in range(3):
lshape[i] = chk[i].stop - chk[i].start
self.assertEqual(res.lshape, tuple(lshape))
res = ht.concatenate((x, y), axis=-1)
self.assertEqual(res.gshape, (16, 15, 28))
self.assertEqual(res.dtype, ht.float)
_, _, chk = res.comm.chunk((16, 15, 28), res.split)
lshape = [0, 0, 0]
for i in range(3):
lshape[i] = chk[i].stop - chk[i].start
self.assertEqual(res.lshape, tuple(lshape))
# =============================================
x = ht.zeros((16, 15, 14), split=None)
y = ht.ones((16, 15, 14), split=2)
# None 2 0
res = ht.concatenate((x, y), axis=0)
self.assertEqual(res.gshape, (32, 15, 14))
self.assertEqual(res.dtype, ht.float)
_, _, chk = res.comm.chunk((32, 15, 14), res.split)
lshape = [0, 0, 0]
for i in range(3):
lshape[i] = chk[i].stop - chk[i].start
self.assertEqual(res.lshape, tuple(lshape))
x = ht.zeros((16, 15, 14), split=None)
y = ht.ones((16, 15, 14), split=2)
# None 2 0
res = ht.concatenate((x, y, y), axis=0)
self.assertEqual(res.gshape, (32 + 16, 15, 14))
self.assertEqual(res.dtype, ht.float)
_, _, chk = res.comm.chunk((32 + 16, 15, 14), res.split)
lshape = [0, 0, 0]
for i in range(3):
lshape[i] = chk[i].stop - chk[i].start
self.assertEqual(res.lshape, tuple(lshape))
# None 2 2
res = ht.concatenate((x, y), axis=2)
self.assertEqual(res.gshape, (16, 15, 28))
self.assertEqual(res.dtype, ht.float)
_, _, chk = res.comm.chunk((16, 15, 28), res.split)
lshape = [0, 0, 0]
for i in range(3):
lshape[i] = chk[i].stop - chk[i].start
self.assertEqual(res.lshape, tuple(lshape))
# vectors
# None None 0
x = ht.zeros((16,), split=None)
y = ht.ones((16,), split=None)
res = ht.concatenate((x, y), axis=0)
self.assertEqual(res.gshape, (32,))
self.assertEqual(res.dtype, ht.float)
# None 0 0
y = ht.ones((16,), split=0)
res = ht.concatenate((x, y), axis=0)
self.assertEqual(res.gshape, (32,))
self.assertEqual(res.dtype, ht.float)
_, _, chk = res.comm.chunk((32,), res.split)
lshape = [0]
lshape[0] = chk[0].stop - chk[0].start
self.assertEqual(res.lshape, tuple(lshape))
# 0 0 0
x = ht.ones((16,), split=0, dtype=ht.float64)
res = ht.concatenate((x, y), axis=0)
self.assertEqual(res.gshape, (32,))
self.assertEqual(res.dtype, ht.float64)
_, _, chk = res.comm.chunk((32,), res.split)
lshape = [0]
lshape[0] = chk[0].stop - chk[0].start
self.assertEqual(res.lshape, tuple(lshape))
# 0 None 0
x = ht.ones((16,), split=0)
y = ht.ones((16,), | |
"""
Created on 2019-08-23
Updated on 2019-08-23
Company: DOSIsoft
Author: Jiacheng
"""
# =================Library==============================
# ====Basic libraries=======
import time
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
# =====For Neural Network====
import torch
import torch.nn as nn
from torch.autograd import Variable
# =====For Random Forest=====
from sklearn import preprocessing
from sklearn.ensemble import RandomForestRegressor
from sklearn.externals import joblib
# =================Parameters===========================
# =====For Neural Network=======
PATH_PARAMETERS_IMRT = './IMRT/checkpoint_lr5e-05_Epoch80_lambda0.0001.pth.tar'
PATH_PARAMETERS_VMAT = './VMAT/checkpoint_InNode9lr5e-05_Epoch80_lambda0.0001_VMAT_.pth.tar'
PATH_PARAMETERS_STATIC = './Static/checkpoint_lr5e-05_Epoch80_lambda0.0001.pth.tar'
# =====For Random Forest========
PATH_PARAMETERS_STATIC_RF = './RandomForest/RandomForest_static_1,3,5,6,7_depth_26_estimator_19_features_11.pkl'
PATH_PARAMETERS_EXACT_RF = './RandomForest/RandomForest_depth_26_estimator_19_features_11.pkl'
# ================ Basic function =======================
def normalization(arr):
"""normalize the array
Args:
arr: array
Return:
(array): normalization of the array
"""
return (arr - np.min(arr)) / (np.max(arr) - np.min(arr))
def standarization(arr):
"""standardize the array
Args:
arr: array
Return:
(array): standard of the array
"""
return (arr - np.mean(arr)) / np.std(arr)
def colorbar(image):
"""draw the colorbar of an image
Args:
image: image array
Returns:
color bar
"""
ax = image.axes
fig = ax.figure
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
return fig.colorbar(image, cax=cax)
def getPixelSize(Path):
_, pixel_size = read_information(Path)
return pixel_size
def getMaxvalue(image):
return np.max(image)
def getMinvalue(image):
return np.min(image)
# =============== Generate the image ==================
def read_information(Path):
"""Read the information of the image
Args:
Path : the path of the image
Returns:
ImPred : matrix of the image
pixel_size : pixel size of the image, [0] is pixel size on x axis, [1] is pixel size on y axis
"""
file_in = open(Path, "r")
# define a little-endian int32 type
dt = np.dtype('<i4')
# get the number of the pixel of image
DataInt = np.fromfile(file_in, dtype=dt, count=2)
Nrows = DataInt[0]
Ncols = DataInt[1]
# get the width and height of the image
Size = np.fromfile(file_in, dtype='<f', count=4)
width = Size[2] - Size[0]
height = Size[3] - Size[1]
pixel_size_x = float(width / Ncols)
pixel_size_y = float(height / Nrows)
pixel_size = [pixel_size_x, pixel_size_y]
# Read all the intensity of the image
ImData = np.fromfile(file_in, dtype='<f')
file_in.close()
# Resize to an image
ImPred = np.reshape(ImData, (Ncols, Nrows))
return ImPred, pixel_size
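# File layout assumed by read_information, as implied by the reads above:
#   2 x int32 (little-endian): Nrows, Ncols
#   4 x float32: bounding box (x0, y0, x1, y1), giving width = x1 - x0
#                and height = y1 - y0
#   Nrows * Ncols x float32: pixel intensities
# Hypothetical usage (the file name is an assumption):
#   image, (px, py) = read_information("./some_image.bin")
#   plt.imshow(image); plt.show()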
def get_image(Path, isInput=True):
""" Read the information of the images
Args:
Path : the path of the image
isInput : whether it is the input of Neural Network
Returns:
ImPred : matrix of the image
pixel_size : pixel size of the image, [0] is pixel size on x axis, [1] is pixel size on y axis
Raises:
IOError: An error occurred if it can't read the file
"""
path = []
try:
for root, dirs, files in os.walk(Path):
for file in files:
path.append(os.path.join(root, file))
# simulation is Y (output), acquired is X (input)
if path[0].split('/')[-1].split('.')[0].split('_')[-1] == 'simulation':
path_X = path[1]
path_Y = path[0]
else:
path_X = path[0]
path_Y = path[1]
if isInput:
path = path_X
else:
path = path_Y
except IOError:
print("Error: Can't find the file!")
else:
ImPred, pixel_size = read_information(path)
return ImPred, pixel_size
# =====Pour Neural Network==========
def generate(Path, isNormalize=False):
"""Generate all the input variables -- (9 features)
Args:
Path : the path of the image input and output
isNormalize: Normalize the input data or not
Returns:
X: input of the Neural Network
"""
Img_X, _ = read_information(Path=Path)
# Padding the Img X
# minimum value in X like zero padding
minimum = np.min(Img_X)
Img_X = np.pad(Img_X, ((1, 1), (1, 1)), 'constant', constant_values=(minimum, minimum))
if isNormalize:
Data = normalization(Img_X)
else:
Data = Img_X
# Calculate the dimension of X and Y
Ncols = Data.shape[1] - 2
Nrows = Data.shape[0] - 2
n = Ncols * Nrows
m = 9
# Initialize input X
X = np.zeros((n, m), dtype=float)
# store the position, intensities
for i in range(n):
pos_i = int(i / Ncols + 1)
pos_j = int(i % Ncols + 1)
X[i][0] = Data[pos_i][pos_j] # X(i,j)
X[i][1] = Data[pos_i - 1][pos_j - 1] # X(i-1,j-1)
X[i][2] = Data[pos_i - 1][pos_j] # X(i-1,j)
X[i][3] = Data[pos_i - 1][pos_j + 1] # X(i-1,j+1)
X[i][4] = Data[pos_i][pos_j - 1] # X(i,j-1)
X[i][5] = Data[pos_i][pos_j + 1] # X(i,j+1)
X[i][6] = Data[pos_i + 1][pos_j - 1] # X(i+1,j-1)
X[i][7] = Data[pos_i + 1][pos_j] # X(i+1,j)
X[i][8] = Data[pos_i + 1][pos_j + 1] # X(i+1,j+1)
return X
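# Index mapping sketch: row i of X corresponds to pixel
# (pos_i, pos_j) = (i // Ncols + 1, i % Ncols + 1) in the padded image, so
# X[i] holds the pixel itself plus its 8 neighbours (a 3x3 window).
# Hypothetical usage (file name is an assumption):
#   X = generate("./some_image.bin")   # X.shape == (Nrows * Ncols, 9)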
# ======Pour Random Forest==============
def generate_RF(Path, isNormalize=False):
"""Generate all the input variables -- (18 features)
Args:
Path : the path of the image input and output
isNormalize: Normalize the input data or not
Returns:
X: input of Random Forest
"""
Img_X, _ = read_information(Path=Path)
# Padding the Img X
# minimum value in X like zero padding
minimum = np.min(Img_X)
Img_X = np.pad(Img_X, ((1, 1), (1, 1)), 'constant', constant_values=(minimum, minimum))
if isNormalize:
Data = normalization(Img_X)
else:
Data = Img_X
# Calculate the dimension of X and Y
Ncols = Data.shape[1] - 2
Nrows = Data.shape[0] - 2
n = Ncols * Nrows
m = 18
# Initialize input X
X = np.zeros((n, m), dtype=float)
# store the position, intensities
for i in range(n):
pos_i = int(i / Ncols + 1)
pos_j = int(i % Ncols + 1)
X[i][0] = Data[pos_i][pos_j] # X(i,j)
X[i][1] = Data[pos_i - 1][pos_j - 1] # X(i-1,j-1)
X[i][2] = Data[pos_i - 1][pos_j] # X(i-1,j)
X[i][3] = Data[pos_i - 1][pos_j + 1] # X(i-1,j+1)
X[i][4] = Data[pos_i][pos_j - 1] # X(i,j-1)
X[i][5] = Data[pos_i][pos_j + 1] # X(i,j+1)
X[i][6] = Data[pos_i + 1][pos_j - 1] # X(i+1,j-1)
X[i][7] = Data[pos_i + 1][pos_j] # X(i+1,j)
X[i][8] = Data[pos_i + 1][pos_j + 1] # X(i+1,j+1)
X[i][9] = X[i][0] ** 2 # X(i,j)
X[i][10] = X[i][1] ** 2 # X(i-1,j-1)
X[i][11] = X[i][2] ** 2 # X(i-1,j)
X[i][12] = X[i][3] ** 2 # X(i-1,j+1)
X[i][13] = X[i][4] ** 2 # X(i,j-1)
X[i][14] = X[i][5] ** 2 # X(i,j+1)
X[i][15] = X[i][6] ** 2 # X(i+1,j-1)
X[i][16] = X[i][7] ** 2 # X(i+1,j)
X[i][17] = X[i][8] ** 2
return X
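# generate_RF extends the 9 raw intensities with their squares
# (features 9..17), giving 18 features per pixel. Hedged usage sketch with
# the pre-trained forest (file name is an assumption):
#   X = generate_RF("./some_image.bin")       # X.shape == (n, 18)
#   rf = joblib.load(PATH_PARAMETERS_EXACT_RF)
#   y_pred = rf.predict(X)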
# ===============================Neural Network=====================================================
# ========Parametres Basic==============
INPUT_NODE = 9
HIDDEN_LAYER1_NODE = 30
HIDDEN_LAYER2_NODE = 5
HIDDEN_LAYER3_NODE = 1
OUTPUT_NODE = 1
# =================Class of Neural Network==============
class Neural_Network(nn.Module):
def __init__(self, input_dim, hidden1_dim, hidden2_dim, hidden3_dim, output_dim):
super(Neural_Network, self).__init__()
self.ANN = nn.Sequential(
# 1
nn.Linear(input_dim, hidden1_dim),
nn.Tanh(),
# 2
nn.Linear(hidden1_dim, hidden2_dim),
nn.Tanh(),
# 3
nn.Linear(hidden2_dim, hidden3_dim),
nn.Sigmoid(),
)
# Linear function for increasing value: 1 --> 1
self.out = nn.Linear(hidden3_dim, output_dim)
def forward(self, X):
y = self.ANN(X)
# Increasing the value
out = self.out(y)
return out
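# Minimal sketch of how the network is instantiated and applied (mirrors
# Conversion_ANN below; the random input is illustrative only):
#   net = Neural_Network(INPUT_NODE, HIDDEN_LAYER1_NODE,
#                        HIDDEN_LAYER2_NODE, HIDDEN_LAYER3_NODE, OUTPUT_NODE)
#   y = net(torch.randn(4, INPUT_NODE))   # -> tensor of shape (4, 1)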
# ================== Conversion by Neural Network =======================
def Conversion_ANN(X, isStatic=False, isVMAT=False):
"""Test for other image
Args:
X: input of the image
isStatic: is it the image static
isVMAT: is it the image VMAT
Returns:
prediction: the predict image dosimétrique of the input X
Raises:
Exception: can't find the model of Neural Network
"""
# Tensor Processing
X = torch.from_numpy(X)
# Model Basic
model = Neural_Network(INPUT_NODE, HIDDEN_LAYER1_NODE, HIDDEN_LAYER2_NODE, HIDDEN_LAYER3_NODE, OUTPUT_NODE)
# Check whether there is a model
if isStatic:
PATH_PARAMETERS = PATH_PARAMETERS_STATIC
elif isVMAT:
PATH_PARAMETERS = PATH_PARAMETERS_VMAT
else:
PATH_PARAMETERS = PATH_PARAMETERS_IMRT
IsExists = os.path.exists(PATH_PARAMETERS)
if IsExists:
print("Model exists, begin test!!!")
# Get the Parameters of Model
checkpoint = torch.load(PATH_PARAMETERS)
model.load_state_dict(checkpoint['model_state_dict'])
else:
print("No model, try to find it!!!")
return None
# Predict the Target
prediction = model(X.float())
return prediction.detach().numpy()
def get_results_ANN(path, isStatic=False, isVMAT=False):
"""Get all the result of the test
Args:
path: path of the image EPID
isStatic: conversion for image static (true or false)
isStatic: conversion for image VMAT (true or false)
Returns:
Accuracy: a text file storing the gamma index of all the test images
Comparision Image: the predict image, the gamma index immage and the origin image
Raises:
IOError: An error occurred if it can't read the file
"""
# Basic for Normalization of the test image
if isStatic:
Init_Array = np.load('./Static/Init.npz')
elif isVMAT:
Init_Array = np.load('./VMAT/Init_InNode9lr5e-05_Epoch80_lambda0.0001_VMAT_.npz')
else:
Init_Array = np.load('./IMRT/Init_lr5e-05_Epoch80_lambda0.0001_9_10_11_14_21_22_.npz')
ranges = Init_Array['Ranges']
minValues = Init_Array['MinValues']
X = generate(Path=path, isNormalize=False)
X = (X - minValues) / ranges
# Prediction with Model
Y_pre = Conversion_ANN(X, isStatic, isVMAT)
return Y_pre
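# End-to-end sketch (hedged): predictions come back flattened as (n, 1);
# reshaping them into an image is an assumption based on generate()'s
# row-major pixel ordering, not something this file does itself:
#   y = get_results_ANN("./patient_folder/image.bin", isVMAT=True)
#   dose_image = y.reshape(Nrows, Ncols)  # Nrows/Ncols from read_information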
# ================================================ Random Forest =======================================================
# =================Conversion by Random Forest===============
def Conversion_RF(X, isStatic=False):
"""Test for other image
Args:
X: input of the image
Returns:
y_pred: the predict image y of the input X
Raises:
Exception: can't find the model of Neural Network
"""
# Check whether there is a model
if isStatic:
PATH_PARAMETERS
import logging
from logging.handlers import QueueHandler
from multiprocessing import Queue
import numpy as np
import os
import signal
from time import time
from utils.analyzer import VideoAnalyzer
from utils.signalstateanalyzer import SignalVideoAnalyzer
from utils.event import Trip
from utils.io import IO
from utils.timestamp import Timestamp
path = os.path
def configure_logger(log_level, log_queue):
root_logger = logging.getLogger(__name__)
if root_logger.hasHandlers(): # Clear any handlers to avoid duplicate entries
root_logger.handlers.clear()
root_logger.setLevel(log_level)
queue_handler = QueueHandler(log_queue)
root_logger.addHandler(queue_handler)
def should_crop(frame_width, frame_height, do_crop, crop_width, crop_height,
crop_x, crop_y):
if do_crop:
if all(
[frame_width >= crop_width > 0, frame_height >= crop_height > 0,
frame_width > crop_x >= 0, frame_height > crop_y >= 0]):
logging.info(
'video frames will be cropped from [w={}:h={}:x={}:y={}]'.format(
crop_width, crop_height, crop_x, crop_y))
return True
else:
raise ValueError(
'video frames cannot be cropped from [w={}:h={}:x={}:y={}] because the '
'video dimensions are [w={}:h={}]'.format(
crop_width, crop_height, crop_x, crop_y, frame_width, frame_height))
else:
logging.debug('video frames will not be cropped')
return False
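# Hypothetical usage (values are made up): validate a 320x240 crop at the
# top-left of a 1920x1080 frame before building the ffmpeg pipeline.
# Returns False without validation when do_crop is False.
#   should_crop(1920, 1080, True, 320, 240, 0, 0)    # -> True
#   should_crop(1920, 1080, True, 4000, 240, 0, 0)   # raises ValueError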
def should_extract_timestamps(
frame_width, frame_height, do_extract_timestamps, timestamp_max_width,
timestamp_height, timestamp_x, timestamp_y):
if do_extract_timestamps:
if all([frame_width >= timestamp_x + timestamp_max_width > 0,
frame_height >= timestamp_y + timestamp_height > 0]):
logging.info(
'timestamps will be extracted from [w={}:h={}:x={}:y={}]'.format(
timestamp_max_width, timestamp_height, timestamp_x, timestamp_y))
return True
else:
raise ValueError(
'timestamps cannot be extracted from [w={}:h={}:x={}:y={}] because the '
'video dimensions are [w={}:h={}]'.format(
timestamp_max_width, timestamp_height, timestamp_x,
timestamp_y, frame_width, frame_height))
else:
logging.debug('timestamps will not be extracted')
return False
def process_video(
video_file_path, output_dir_path, class_name_map, model_name,
model_signature_name, model_server_host, model_input_size,
return_code_queue, log_queue, log_level, ffmpeg_path, ffprobe_path,
do_crop, crop_width, crop_height, crop_x, crop_y, do_extract_timestamps,
timestamp_max_width, timestamp_height, timestamp_x, timestamp_y,
do_deinterlace, num_channels, batch_size, do_smooth_probs,
smoothing_factor, do_binarize_probs, do_write_inference_reports,
do_write_event_reports, max_threads, processor_mode):
configure_logger(log_level, log_queue)
interrupt_queue = Queue()
# Create an output subdirectory for the current mode
output_dir_path = path.join(output_dir_path, processor_mode)
def interrupt_handler(signal_number, _):
logging.warning('received interrupt signal {}.'.format(signal_number))
interrupt_queue.put_nowait('_')
# TODO: cancel timestamp/report generation when an interrupt is signalled
# logging.debug('instructing inference pipeline to halt.')
# child_interrupt_queue.put_nowait('_')
signal.signal(signal.SIGINT, interrupt_handler)
video_file_name = path.basename(video_file_path)
video_file_name, _ = path.splitext(video_file_name)
logging.info('preparing to analyze {}'.format(video_file_path))
output_files = []
try:
start = time()
frame_width, frame_height, num_frames, _ = IO.get_video_dimensions(
video_file_path, ffprobe_path)
end = time() - start
processing_duration = IO.get_processing_duration(
end, 'read video dimensions in')
logging.info(processing_duration)
except Exception as e:
logging.error('encountered an unexpected error while fetching video '
'dimensions')
logging.error(e)
logging.debug(
'will exit with code: exception and value get_video_dimensions')
log_queue.put(None)
log_queue.close()
return_code_queue.put(
{'return_code': 'exception', 'return_value': 'get_video_dimensions'})
return_code_queue.close()
return
try:
do_crop = should_crop(frame_width, frame_height, do_crop, crop_width,
crop_height, crop_x, crop_y)
except Exception as e:
logging.error(e)
logging.debug('will exit with code: exception and value should_crop')
log_queue.put(None)
log_queue.close()
return_code_queue.put(
{'return_code': 'exception', 'return_value': 'should_crop'})
return_code_queue.close()
return
logging.debug('Constructing ffmpeg command')
ffmpeg_command = [ffmpeg_path, '-i', video_file_path]
if do_deinterlace:
ffmpeg_command.append('-deinterlace')
ffmpeg_command.extend(
['-vcodec', 'rawvideo', '-pix_fmt', 'rgb24', '-vsync', 'vfr',
'-hide_banner', '-loglevel', '0', '-f', 'image2pipe', 'pipe:1'])
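# With '-pix_fmt rgb24' and '-f image2pipe', each decoded frame arrives on
# stdout as exactly frame_width * frame_height * num_channels raw bytes; the
# analyzer is assumed to slice the pipe into frames using that fixed size.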
try:
do_extract_timestamps = should_extract_timestamps(
frame_width, frame_height, do_extract_timestamps, timestamp_max_width,
timestamp_height, timestamp_x, timestamp_y)
except Exception as e:
logging.error(e)
logging.debug(
'will exit with code: exception and value should_extract_timestamps')
log_queue.put(None)
log_queue.close()
return_code_queue.put(
{'return_code': 'exception', 'return_value': 'should_extract_timestamps'})
return_code_queue.close()
return
frame_shape = [frame_height, frame_width, num_channels]
logging.debug('FFmpeg output frame shape == {}'.format(frame_shape))
#TODO parameterize tf serving values
analyzer = VideoAnalyzer(
frame_shape, num_frames, len(class_name_map), batch_size, model_name,
model_signature_name, model_server_host, model_input_size,
do_extract_timestamps, timestamp_x, timestamp_y, timestamp_height,
timestamp_max_width, do_crop, crop_x, crop_y, crop_width, crop_height,
ffmpeg_command, max_threads)
try:
start = time()
num_analyzed_frames, probability_array, timestamp_array = analyzer.run()
end = time()
analysis_duration = end - start
processing_duration = IO.get_processing_duration(
analysis_duration, 'processed {} frames in'.format(num_analyzed_frames))
logging.info(processing_duration)
if num_analyzed_frames != num_frames:
if interrupt_queue.empty():
raise AssertionError('num_analyzed_frames ({}) != num_frames '
'({})'.format(num_analyzed_frames, num_frames))
else:
raise InterruptedError('num_analyzed_frames ({}) != num_frames '
'({})'.format(num_analyzed_frames, num_frames))
except InterruptedError as ae:
logging.error(ae)
logging.debug('will exit with code: interrupt and value: analyze_video')
log_queue.put(None)
log_queue.close()
return_code_queue.put({'return_code': 'interrupt',
'return_value': 'analyze_video'})
return_code_queue.close()
return
except AssertionError as ae:
logging.error(ae)
logging.debug(
'will exit with code: assertion error and value: analyze_video')
log_queue.put(None)
log_queue.close()
return_code_queue.put({'return_code': 'assertion error',
'return_value': 'analyze_video'})
return_code_queue.close()
return
except Exception as e:
logging.error('encountered an unexpected error while analyzing {}'.format(
video_file_name))
logging.error(e)
logging.debug(
'will exit with code: exception and value: analyze_video')
log_queue.put(None)
log_queue.close()
return_code_queue.put({'return_code': 'exception',
'return_value': 'analyze_video'})
return_code_queue.close()
return
logging.debug('converting timestamp images to strings')
if do_extract_timestamps:
try:
start = time()
timestamp_object = Timestamp(timestamp_height, timestamp_max_width)
timestamp_strings, qa_flags = \
timestamp_object.stringify_timestamps(timestamp_array)
end = time() - start
processing_duration = IO.get_processing_duration(
end, 'timestamp strings converted in')
logging.info(processing_duration)
except Exception as e:
logging.error('process {} encountered an unexpected error while converting '
'timestamp image crops to strings'.format(os.getpid()))
logging.error(e)
logging.debug(
'will exit with code: exception and value: stringify_timestamps')
log_queue.put(None)
log_queue.close()
return_code_queue.put({'return_code': 'exception',
'return_value': 'stringify_timestamps'})
return_code_queue.close()
return
else:
timestamp_strings = None
qa_flags = None
logging.debug('attempting to generate reports')
if do_write_inference_reports:
try:
start = time()
inf_report = IO.write_inference_report(
video_file_name, output_dir_path, analyzer.prob_array, class_name_map,
timestamp_strings, qa_flags, do_smooth_probs, smoothing_factor,
do_binarize_probs)
output_files.append(inf_report)
end = time() - start
processing_duration = IO.get_processing_duration(
end, 'generated inference reports in')
logging.info(processing_duration)
except Exception as e:
logging.error(
'encountered an unexpected error while generating inference report.')
logging.error(e)
logging.debug(
'will exit with code: exception and value: write_inference_report')
log_queue.put(None)
log_queue.close()
return_code_queue.put({'return_code': 'exception',
'return_value': 'write_inference_report'})
return_code_queue.close()
return
try:
start = time()
if do_smooth_probs:
probability_array = IO.smooth_probs(
probability_array, smoothing_factor)
frame_numbers = list(range(1, len(probability_array) + 1))
if timestamp_strings is not None:
timestamp_strings = timestamp_strings.astype(np.int32)
trip = Trip(frame_numbers, timestamp_strings, qa_flags, probability_array,
class_name_map)
if processor_mode == "weather":
if len(trip.feature_sequence) > 0:
logging.info('{} weather events were found in {}'.format(
len(trip.feature_sequence), video_file_name))
if do_write_event_reports:
weather_rep = IO.write_weather_report(video_file_name, output_dir_path, trip.feature_sequence)
output_files.append(weather_rep)
else:
events = trip.find_work_zone_events()
if len(events) > 0:
logging.info('{} work zone events were found in {}'.format(
len(events), video_file_name))
if do_write_event_reports:
event_rep = IO.write_event_report(video_file_name, output_dir_path, events)
output_files.append(event_rep)
else:
logging.info(
'No work zone events were found in {}'.format(video_file_name))
end = time() - start
processing_duration = IO.get_processing_duration(
end, 'generated event reports in')
logging.info(processing_duration)
except Exception as e:
logging.error(
'encountered an unexpected error while generating event report.')
logging.error(e)
logging.debug(
'will exit with code: exception and value: write_event_report')
log_queue.put(None)
log_queue.close()
return_code_queue.put({'return_code': 'exception',
'return_value': 'write_event_report'})
return_code_queue.close()
return
logging.debug(
'will exit with code: success and value: {}'.format(num_analyzed_frames))
log_queue.put(None)
log_queue.close()
return_code_queue.put({'return_code': 'success',
'return_value': num_analyzed_frames,
'analysis_duration': analysis_duration,
'output_locations': str(output_files)})
return_code_queue.close()
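# Hedged usage sketch: process_video is designed to run in a child process,
# reporting through the two queues. Argument values here are placeholders
# and the full argument list is elided.
#   from multiprocessing import Process, Queue
#   rc_queue, lg_queue = Queue(), Queue()
#   p = Process(target=process_video, args=(...))  # full arg list omitted
#   p.start(); p.join()
#   result = rc_queue.get()   # e.g. {'return_code': 'success', ...}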
def process_video_signalstate(
video_file_path, output_dir_path, class_name_map, model_name,
model_signature_name, model_server_host, model_input_size,
return_code_queue, log_queue, log_level, ffmpeg_path, ffprobe_path,
do_crop, crop_width, crop_height, crop_x, crop_y, do_extract_timestamps,
timestamp_max_width, timestamp_height, timestamp_x, timestamp_y,
do_deinterlace, num_channels, batch_size, do_smooth_probs,
smoothing_factor, do_binarize_probs, do_write_bbox_reports,
do_write_event_reports, max_threads, processor_mode):
configure_logger(log_level, log_queue)
interrupt_queue = Queue()
# Create an output subdirectory for the current mode
output_dir_path = path.join(output_dir_path, processor_mode)
output_files = []
def interrupt_handler(signal_number, _):
logging.warning('received interrupt signal {}.'.format(signal_number))
interrupt_queue.put_nowait('_')
# TODO: cancel timestamp/report generation when an interrupt is signalled
# logging.debug('instructing inference pipeline to halt.')
# child_interrupt_queue.put_nowait('_')
signal.signal(signal.SIGINT, interrupt_handler)
video_file_name = path.basename(video_file_path)
video_file_name, _ = path.splitext(video_file_name)
logging.info('preparing to signalstate analyze {}'.format(video_file_path))
try:
start = time()
# For signal state, we use duration as num_frames, as we will only grab one frame per second
frame_width, frame_height, num_frames, duration = IO.get_video_dimensions(
video_file_path, ffprobe_path)
num_frames = duration
end = time() - start
processing_duration = IO.get_processing_duration(
end, 'read video dimensions in')
logging.info(processing_duration)
except Exception as e:
logging.error('encountered an unexpected error while fetching video '
'dimensions')
logging.error(e)
logging.debug(
'will exit with code: exception and value get_video_dimensions')
log_queue.put(None)
log_queue.close()
return_code_queue.put(
{'return_code': 'exception', 'return_value': 'get_video_dimensions'})
return_code_queue.close()
return
try:
do_crop = should_crop(frame_width, frame_height, do_crop, crop_width,
crop_height, crop_x, crop_y)
except Exception as e:
logging.error(e)
logging.debug('will exit with code: exception and value should_crop')
log_queue.put(None)
log_queue.close()
return_code_queue.put(
{'return_code': 'exception', 'return_value': 'should_crop'})
return_code_queue.close()
return
logging.debug('Constructing ffmpeg command')
ffmpeg_command = [ffmpeg_path, '-i', video_file_path]
if do_deinterlace:
ffmpeg_command.append('-deinterlace')
ffmpeg_command.extend(
['-vcodec', 'rawvideo', '-pix_fmt', 'rgb24', '-vsync', 'vfr',
'-hide_banner', '-loglevel', '0', '-r', '1', '-f', 'image2pipe', 'pipe:1'])
try:
do_extract_timestamps = should_extract_timestamps(
frame_width, frame_height, do_extract_timestamps, timestamp_max_width,
timestamp_height, timestamp_x, timestamp_y)
except Exception as e:
logging.error(e)
logging.debug(
'will exit with code: exception and value should_extract_timestamps')
log_queue.put(None)
log_queue.close()
return_code_queue.put(
{'return_code': 'exception', 'return_value': 'should_extract_timestamps'})
return_code_queue.close()
return
frame_shape = [frame_height, frame_width, num_channels]
logging.debug('FFmpeg output frame shape == {}'.format(frame_shape))
analyzer = SignalVideoAnalyzer(
frame_shape, num_frames, len(class_name_map), batch_size, model_name,
model_signature_name, model_server_host, model_input_size,
do_extract_timestamps, timestamp_x, timestamp_y, timestamp_height,
timestamp_max_width, do_crop, crop_x, crop_y, crop_width, crop_height,
ffmpeg_command, max_threads)
try:
start = time()
num_analyzed_frames, frame_map_array, timestamp_array = analyzer.run()
end = time()
analysis_duration = end - start
processing_duration = IO.get_processing_duration(
analysis_duration, 'processed {} frames in'.format(num_analyzed_frames))
logging.info(processing_duration)
except InterruptedError as ae:
logging.error(ae)
logging.debug('will exit with code: interrupt and value: analyze_video')
log_queue.put(None)
log_queue.close()
return_code_queue.put({'return_code': 'interrupt',
'return_value': 'analyze_video'})
return_code_queue.close()
return
except AssertionError as ae:
logging.error(ae)
logging.debug(
'will exit with code: assertion error and value: analyze_video')
log_queue.put(None)
log_queue.close()
return_code_queue.put({'return_code': 'assertion error',
'return_value': 'analyze_video'})
return_code_queue.close()
return
except Exception
= "#FFE2E2")
pid_label.grid(row = 0, column = 0, padx = 5, pady = 3, sticky = W)
name_label.grid(row = 1, column = 0, padx = 5, pady = 3, sticky = W)
age_label.grid(row = 2, column = 0, padx = 5, pady = 3, sticky = W)
gender_label.grid(row = 3, column = 0, padx = 5, pady = 3, sticky = W)
contact_label.grid(row = 4, column = 0, padx = 5, pady = 3, sticky = W)
address_label.grid(row = 5, column = 0, padx = 5, pady = 3, sticky = W)
blood_group_label.grid(row = 6, column = 0, padx = 5, pady = 3, sticky = W)
for i in range(7):
colon = Label(old_details_frame, text = ":", font = "consolas 20 bold", bg = "#FFE2E2")
colon.grid(row = i, column = 1, sticky = W)
# old details
display_old_pid = Button(old_details_frame, text = old_details[0], font = "consolas 19 bold",
width = 27, padx = 10, borderwidth = 1, bg = "#FFE2E2", relief = GROOVE, anchor = W)
display_name_pid = Button(old_details_frame, text = old_details[1], font = "consolas 19 bold",
width = 27, padx = 10, borderwidth = 1, bg = "#FFE2E2", relief = GROOVE, anchor = W)
display_age_pid = Button(old_details_frame, text = old_details[2], font = "consolas 19 bold",
width = 27, padx = 10, borderwidth = 1, bg = "#FFE2E2", relief = GROOVE, anchor = W)
display_gender_pid = Button(old_details_frame, text = old_details[3], font = "consolas 19 bold",
width = 27, padx = 10, borderwidth = 1, bg = "#FFE2E2", relief = GROOVE, anchor = W)
display_contact_pid = Button(old_details_frame, text = old_details[4], font = "consolas 19 bold",
width = 27, padx = 10, borderwidth = 1, bg = "#FFE2E2", relief = GROOVE, anchor = W)
display_address_pid = Button(old_details_frame, text = old_details[5], font = "consolas 19 bold",
width = 27, padx = 10, borderwidth = 1, bg = "#FFE2E2", relief = GROOVE, anchor = W)
display_blood_group_pid = Button(old_details_frame, text = old_details[6],
font = "consolas 19 bold", width = 27, padx = 10, borderwidth = 1,
bg = "#FFE2E2", relief = GROOVE, anchor = W)
display_old_pid.grid(row = 0, column = 2, columnspan = 3, padx = 20, ipadx = 120)
display_name_pid.grid(row = 1, column = 2, columnspan = 3, padx = 20, ipadx = 120)
display_age_pid.grid(row = 2, column = 2, columnspan = 3, padx = 20, ipadx = 120)
display_gender_pid.grid(row = 3, column = 2, columnspan = 3, padx = 20, ipadx = 120)
display_contact_pid.grid(row = 4, column = 2, columnspan = 3, padx = 20, ipadx = 120)
display_address_pid.grid(row = 5, column = 2, columnspan = 3, padx = 20, ipadx = 120)
display_blood_group_pid.grid(row = 6, column = 2, columnspan = 3, padx = 20, ipadx = 120)
# update details
field_to_update_label = Label(update_frame, text = "Field", font = "consolas 19 bold", bg = "#FFE2E2")
updated_detail_label = Label(update_frame, text = "New Details", font = "consolas 19 bold", bg = "#FFE2E2")
field_to_update_label.grid(row = 0, column = 0, padx = 5, sticky = W)
updated_detail_label.grid(row = 1, column = 0, padx = 5, sticky = W)
for i in range(2):
colon = Label(update_frame, text = ":", font = "consolas 20 bold", bg = "#FFE2E2")
colon.grid(row = i, column = 1, sticky = W)
field_to_update_options = ["Name", "Age", "Gender", "Contact", "Address", "Blood Group"]
field_to_update = StringVar()
field_to_update.set("Name")
field_to_update_menu = OptionMenu(update_frame, field_to_update, *field_to_update_options)
field_to_update_menu.config(width = 55, height = 1, font = "consolas 15 bold")
field_to_update_menu.grid(row = 0, column = 2, columnspan = 3, padx = 15, pady = 5, sticky = W)
updated_detail_entry = Entry(update_frame, font = "consolas 17")
updated_detail_entry.grid(row = 1, column = 2, ipadx = 193, ipady = 5, padx = 15, pady = 10)
# update button
submit_button = Button(update_frame, text = "Update", font = "consolas 17 bold", bg = "#FF847C",
borderwidth = 7, padx = 10, pady = 4, command = modify_submit)
submit_button.grid(row = 5, column = 0, columnspan = 4, padx = 10, pady = 54, ipadx = 360)
submit_button.update()
speak("Please select the field that you want to change")
# about us window
def about_window():
global about
global display_profile_pic1
global display_profile_pic2
global display_profile_pic3
global final_img1
global final_img2
global final_img3
clean_right_frame()
# highlight clicked button
about.grid_forget()
about = Button(left_frame, text = "About", font = "consolas 19 bold", width = 27, padx = 10, pady = 4,
borderwidth = 7, bg = "#2A363B", fg = "#FF847C", anchor = CENTER, relief = SUNKEN,
command = about_window)
about.grid(row = 6, column = 0, pady = 5, padx = 20)
# title
about_title = Label(right_frame, text = "About Us", font = "evogria 26 bold",
width = 44, anchor = CENTER, bg = "#FFE2E2")
about_title.grid(row = 0, column = 0, padx = 5, pady = 5)
# main content
details_1 = str(
"Talking about the features of this Hospital Management System, " +
"this project is aimed for a completely " +
"computerised management of our fictional hospital CHASE HOSPITALS. " +
"A patient can register themselves, view their details " +
"and modify their details as well. They can see the Details of Doctors, " +
"view the Services offered by the hospital. " +
"They can also make an appointment to a particular doctor."
)
details_2 = str(
"This project is created by <NAME> and <NAME> " +
"as part of their 12th CS project 2020 - 2021, under " +
"the able and very helpful guidance of PGT Mr. ML <NAME>, " +
"Kendriya Vidyalaya No. 2 Delhi Cantt. All codes in this file " +
"is completely written by <NAME> and <NAME> only. " +
"All images and icons used under CC license."
)
about_project_label = Label(right_frame, text = "About Project",
font = "evogria 18", bg = "#FFE2E2")
about_project_label.grid(row = 1, column = 0, pady = 5, padx = 10, sticky = W)
display_details_1 = Label(right_frame, text = details_1, font = "consolas 14",
bg = "#FFE2E2", wraplength = 850, justify = LEFT)
display_details_1.grid(row = 2, column = 0, rowspan = 4, sticky = W, pady = 10, padx = 10)
about_developers_label = Label(right_frame, text = "About Developers",
font = "evogria 18", bg = "#FFE2E2")
about_developers_label.grid(row = 6, column = 0, pady = 10, padx = 10, sticky = W)
display_details_2 = Label(right_frame, text = details_2, font = "consolas 14",
bg = "#FFE2E2", wraplength = 850, justify = LEFT)
display_details_2.grid(row = 7, column = 0, rowspan = 4, sticky = W, padx = 10, pady = 10)
img3 = Image.open(r"Data\\Images\\Profile Pics\\meena_sir.jpg")
img1 = Image.open(r"Data\\Images\\Profile Pics\\lucifer.jpg")
img2 = Image.open(r"Data\\Images\\Profile Pics\\vats.jpg")
resized_img1 = img1.resize((175, 175), Image.LANCZOS)  # ANTIALIAS is removed in Pillow >= 10
resized_img2 = img2.resize((175, 175), Image.LANCZOS)
resized_img3 = img3.resize((175, 175), Image.LANCZOS)
final_img1 = ImageTk.PhotoImage(resized_img1)
final_img2 = ImageTk.PhotoImage(resized_img2)
final_img3 = ImageTk.PhotoImage(resized_img3)
display_profile_pic1 = Label(right_frame, image = final_img1, bd = 2, relief = SOLID)
display_profile_pic1.grid(row = 11, column = 0, padx = 30, pady = 5, sticky = W)
display_profile_pic2 = Label(right_frame, image = final_img2, bd = 2, relief = SOLID)
display_profile_pic2.grid(row = 11, column = 0, pady = 5)
display_profile_pic3 = Label(right_frame, image = final_img3, bd = 2, relief = SOLID)
display_profile_pic3.grid(row = 11, column = 0, padx = 60, pady = 5, sticky = E)
about_title_lucifer = Label(right_frame, text = "<NAME>",
font = "evogria 16", anchor = CENTER, bg = "#FFE2E2")
about_title_lucifer.grid(row = 12, column = 0, padx = 70, sticky = W)
about_title_vats = Label(right_frame, text = "<NAME>",
font = "evogria 16", anchor = CENTER, bg = "#FFE2E2")
about_title_vats.grid(row = 12, column = 0, padx = 5)
about_title_meena_sir = Label(right_frame, text = "<NAME>", font = "evogria 16", anchor = CENTER,
bg = "#FFE2E2")
about_title_meena_sir.grid(row = 12, column = 0, padx = 70, sticky = E)
about_title_meena_sir.update()
speak("This project is made by Udit and Robin under the guidance of <NAME> PGT(Computer Science)")
#
somename in group.items():
usename = somename
offset = 0.0
stratigraphic = False
if somename in strat:
logger.info("Found <%s> in stratigraphy", somename)
usename = strat[somename].get("name", somename)
stratigraphic = strat[somename].get("stratigraphic", False)
offset = rel[item].get("offset", 0.0)
else:
logger.error("Did not find <%s> in stratigraphy input", somename)
raise ValueError(f"Cannot find {somename} in stratigraphy input")
meta[item] = OrderedDict()
meta[item]["name"] = usename
meta[item]["stratigraphic"] = stratigraphic
meta[item]["offset"] = offset
def _data_process_content(self):
"""Process the content block (within data block) which can be complex."""
logger.info("Evaluate content")
content = self.dataio.content
logger.debug("content is %s of type %s", str(content), type(content))
meta = self.dataio.metadata4data
usecontent = "unset"
useextra = None
if content is None:
warnings.warn(
"The <content> is not provided which defaults to 'depth'. "
"It is strongly recommended that content is given explicitly!",
UserWarning,
)
usecontent = "depth"
elif isinstance(content, str):
if content in CONTENTS_REQUIRED:
raise ValidationError(f"content {content} requires additional input")
usecontent = content
elif isinstance(content, dict):
usecontent = (list(content.keys()))[0]
useextra = content[usecontent]
else:
raise ValidationError("content must be string or dict")
if usecontent not in ALLOWED_CONTENTS.keys():
raise ValidationError(
f"Invalid content: <{usecontent}>! "
f"Valid content: {', '.join(ALLOWED_CONTENTS.keys())}"
)
meta["content"] = usecontent
logger.debug("outgoing content is set to %s", usecontent)
if useextra:
self._data_process_content_validate(usecontent, useextra)
meta[usecontent] = useextra
else:
logger.debug("content has no extra information")
logger.debug("content was %s", content)
def _data_process_parent(self):
"""Process the parent block within data block.
A parent is only required for a few datatypes, in particular a GridProperty
which will need a grid geometry name.
"""
logger.info("Evaluate parent")
parent = self.parent
meta = self.dataio.metadata4data
if self.classname == "cpgrid_property" and parent is None:
raise ValidationError("Input 'parent' is required for GridProperty!")
else:
if parent is None:
return
# evaluate 'parent' which can be a str or a dict
if isinstance(parent, str):
meta["parent"] = {"name": parent}
self.parent = parent
else:
if "name" not in parent:
raise ValidationError("Input 'parent' shall have a 'name' attribute!")
meta["parent"] = parent
self.parent = parent["name"]
@staticmethod
def _data_process_content_validate(name, fields):
logger.debug("starting staticmethod _data_process_content_validate")
valid = ALLOWED_CONTENTS.get(name, None)
if valid is None:
raise ValidationError(f"Cannot validate content for <{name}>")
logger.info("name: %s", name)
for key, dtype in fields.items():
if key in valid.keys():
wanted_type = valid[key]
if not isinstance(dtype, wanted_type):
raise ValidationError(
f"Invalid type for <{key}> with value <{dtype}>, not of "
f"type <{wanted_type}>"
)
else:
raise ValidationError(f"Key <{key}> is not valid for <{name}>")
required = CONTENTS_REQUIRED.get(name, None)
if isinstance(required, dict):
rlist = list(required.items())
logger.info("rlist is %s", rlist)
logger.info("fields is %s", fields)
rkey, status = rlist.pop()
logger.info("rkey not in fields.keys(): %s", str(rkey not in fields.keys()))
logger.info("rkey: %s", rkey)
logger.info("fields.keys(): %s", str(fields.keys()))
if rkey not in fields.keys() and status is True:
raise ValidationError(
f"The subkey <{rkey}> is required for content <{name}> "
"but is not found"
)
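# Hedged example of the two checks above: with a content dict such as
# {"seismic": {"attribute": "amplitude"}}, every provided subkey must appear
# in ALLOWED_CONTENTS["seismic"] with the expected type, and any subkey
# flagged True in CONTENTS_REQUIRED["seismic"] must be present. The
# "seismic" name and its fields here are illustrative; the authoritative
# definitions live in ALLOWED_CONTENTS and CONTENTS_REQUIRED.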
def _data_process_timedata(self):
"""Process the time subfield and also construct self.times."""
# first detect if timedata is given, then process it
# timedata may be like:
# None
# [["20220101", "monitor"], ["20200101", "base"]]
# [["20220101", None], ["20200101", None]]
# [["20220101", "any"], None]
logger.info("Evaluate data:name attribute")
meta = self.dataio.metadata4data
datelimits = (18140517, 33000101)
if self.timedata is None:
return
# this is used in file name construction:
self.times = [] # e.g. ["20211102", "20231101"] or ["20211102", None]
# normally self.timedata (input) has two entries, but one may be accepted,
# implicitly meaning the second item is None
usetimedata = deepcopy(self.timedata)
if len(usetimedata) == 1:
usetimedata.append(None)
for xtime in usetimedata:
if xtime is None:
self.times.append(None)
continue
if isinstance(xtime[0], int):
if xtime[0] < datelimits[0] or xtime[0] > datelimits[1]:
raise ValidationError(
"Integer date input seems to be outside reasonable "
f"limits: {datelimits}"
)
tdate = str(xtime[0])
tlabel = None
if len(xtime) > 1:
tlabel = xtime[1]
tdate = tdate.replace("-", "") # 2021-04-23 --> 20210403
if tdate and (int(tdate) < datelimits[0] or int(tdate) > datelimits[1]):
raise ValidationError(
f"Date input outside reasonable limits: {datelimits}"
)
tdate = datetime.strptime(tdate, "%Y%m%d")
self.times.append(tdate)
tdate = tdate.strftime("%Y-%m-%dT%H:%M:%S")
if "time" not in meta:
meta["time"] = list()
usetime = OrderedDict()
usetime["value"] = tdate
if tlabel:
usetime["label"] = tlabel
meta["time"].append(usetime)
def _data_process_description(self):
"""Process the data.description item.
Description is described as an array in the schema. But intuitively it is
provided as a string. Also need to maintain backwards compatibility for
string as input.
If description is not given, return without action
If description is array, stringify all items
If description is string, convert to single-item array
"""
meta = self.dataio.metadata4data
if self.description is None:
return
if isinstance(self.description, list):
meta["description"] = [str(item) for item in self.description]
elif isinstance(self.description, str):
meta["description"] = [self.description]
def _data_process_various(self):
"""Process "all the rest" of the generic items.
i.e.::
unit,
vertical_domain
depth_reference
properties (as tmp)
grid_model
is_prediction
is_observation
"""
logger.info("Process various general items in data block")
meta = self.dataio.metadata4data
meta["unit"] = self.unit
(meta["vertical_domain"], meta["depth_reference"],) = list(
self.dataio.vertical_domain.items()
)[0]
meta["is_prediction"] = self.dataio.is_prediction
meta["is_observation"] = self.dataio.is_observation
# tmp:
meta["grid_model"] = None
def _data_process_object(self):
"""Process data fields which are object dependent.
I.e::
layout
spec
bbox
Note that 'format' field will be added in _item_to_file
"""
if self.subtype == "RegularSurface":
self._data_process_object_regularsurface()
elif self.subtype == "RegularCube":
self._data_process_object_regularcube()
elif self.subtype == "CPGrid":
self._data_process_cpgrid()
elif self.subtype == "CPGridProperty":
self._data_process_cpgridproperty()
elif self.subtype == "Polygons":
self._data_process_object_polygons()
elif self.subtype == "Points":
self._data_process_object_points()
elif self.subtype == "DataFrame":
self._data_process_object_dataframe()
elif self.subtype == "ArrowTable":
self._data_process_object_arrowtable()
def _data_process_cpgrid(self):
"""Process/collect the data items for Corner Point Grid"""
logger.info("Process data metadata for CP Grid")
dataio = self.dataio
grid = self.obj
meta = dataio.metadata4data # shortform
meta["layout"] = "cornerpoint"
# define spec record
specs = grid.metadata.required
newspecs = OrderedDict()
for spec, val in specs.items():
if isinstance(val, (np.float32, np.float64)):
val = float(val)
newspecs[spec] = val
meta["spec"] = newspecs
geox = grid.get_geometrics(cellcenter=False, allcells=True, return_dict=True)
meta["bbox"] = OrderedDict()
meta["bbox"]["xmin"] = round(float(geox["xmin"]), 4)
meta["bbox"]["xmax"] = round(float(geox["xmax"]), 4)
meta["bbox"]["ymin"] = round(float(geox["ymin"]), 4)
meta["bbox"]["ymax"] = round(float(geox["ymax"]), 4)
meta["bbox"]["zmin"] = round(float(geox["zmin"]), 4)
meta["bbox"]["zmax"] = round(float(geox["zmax"]), 4)
logger.info("Process data metadata for Grid... done!!")
def _data_process_cpgridproperty(self):
"""Process/collect the data items for Corner Point GridProperty"""
logger.info("Process data metadata for CPGridProperty")
dataio = self.dataio
gridprop = self.obj
meta = dataio.metadata4data # shortform
meta["layout"] = "cornerpoint_property"
# define spec record
specs = OrderedDict()
specs["ncol"] = gridprop.ncol
specs["nrow"] = gridprop.nrow
specs["nlay"] = gridprop.nlay
meta["spec"] = specs
logger.info("Process data metadata for GridProperty... done!!")
def _data_process_object_regularsurface(self):
"""Process/collect the data items for RegularSurface"""
logger.info("Process data metadata for RegularSurface")
dataio = self.dataio
regsurf = self.obj
meta = dataio.metadata4data # shortform
meta["layout"] = "regular"
# define spec record
specs = regsurf.metadata.required
newspecs = OrderedDict()
for spec, val in specs.items():
if isinstance(val, (np.float32, np.float64)):
val = float(val)
newspecs[spec] = val
meta["spec"] = newspecs
meta["spec"]["undef"] = 1.0e30 # irap binary undef
meta["bbox"] = OrderedDict()
meta["bbox"]["xmin"] = float(regsurf.xmin)
meta["bbox"]["xmax"] = float(regsurf.xmax)
meta["bbox"]["ymin"] = float(regsurf.ymin)
meta["bbox"]["ymax"] = float(regsurf.ymax)
meta["bbox"]["zmin"] = float(regsurf.values.min())
meta["bbox"]["zmax"] = float(regsurf.values.max())
logger.info("Process data metadata for RegularSurface... done!!")
def _data_process_object_regularcube(self):
"""Process/collect the data items for RegularCube"""
logger.info("Process data metadata for RegularCube")
dataio = self.dataio
cube = self.obj
meta = dataio.metadata4data # shortform
meta["layout"] = "regular"
# define spec record
specs = cube.metadata.required
newspecs = OrderedDict()
for spec, val in specs.items():
if isinstance(val, (np.float32, np.float64)):
val = float(val)
newspecs[spec] = val
meta["spec"] = newspecs
meta["bbox"] = OrderedDict()
# current xtgeo is missing xmin, xmax etc attributes for cube, so need
# to compute (simplify when xtgeo has this):
xmin = 1.0e23
ymin = xmin
xmax = -1 * xmin
ymax = -1 * ymin
for corner in ((1, 1), (1, cube.nrow), (cube.ncol, 1), (cube.ncol, cube.nrow)):
xco, yco = cube.get_xy_value_from_ij(*corner)
xmin = xco if xco < xmin else xmin
xmax = xco if xco > xmax else xmax
ymin = yco if yco < ymin else ymin
ymax = yco if yco > ymax else ymax
meta["bbox"]["xmin"] = xmin
meta["bbox"]["xmax"] = xmax
meta["bbox"]["ymin"] = ymin
meta["bbox"]["ymax"] = ymax
meta["bbox"]["zmin"] = float(cube.zori)
meta["bbox"]["zmax"] = float(cube.zori + cube.zinc * (cube.nlay - 1))
logger.info("Process data metadata for RegularCube... done!!")
def _data_process_object_polygons(self):
"""Process/collect the data items for Polygons"""
logger.info("Process data metadata for Polygons/Polylines")
dataio = self.dataio
poly = self.obj
meta = dataio.metadata4data # shortform
meta["spec"] = OrderedDict()
# number of polygons:
meta["spec"]["npolys"] = np.unique(poly.dataframe[poly.pname].values).size
xmin, xmax, ymin, ymax, zmin, zmax
# szndaogen/data_access/manager_base.py
import typing
from ..tools.log import Logger
from .db import DBI
from .model_base import ModelBase
from ..config import Config
class ManagerException(BaseException):
pass
class ViewManagerBase:
MODEL_CLASS = ModelBase
def __init__(self, dbi: DBI = None):
"""
Init function of base model manager class
:param dbi: Instance of the database connector. If empty, it will be created automatically. An instance of DBI is usually used in combination with the transaction wrapper @DBI.transaction("dbi")
"""
self.dbi = DBI() if dbi is None else dbi
self.bulk_insert_buffer_size = 50
self.bulk_insert_sql_statement = ""
self.bulk_insert_values_buffer = []
@classmethod
def create_model_instance(cls, init_data: dict = None) -> ModelBase:
if init_data is None:
init_data = {}
return cls.MODEL_CLASS(init_data)
def select_one(
self,
*args,
condition: str = "1",
condition_params: typing.Tuple = (),
projection: typing.Tuple = (),
order_by: typing.Tuple = (),
) -> ModelBase:
"""
Select one row from DB table or View
:param projection: sql projection - default *
:param args: Primary keys or condition and condition_params if there are no primary keys
:param condition: SQL Condition (Will be used if there are no positional args from primary keys)
:param condition_params: Positional params for SQL condition
(Will be used if there are no positional args from primary keys)
:param order_by: Params for SQL order by statement
"""
base_condition = self.MODEL_CLASS.Meta.SQL_STATEMENT_WHERE_BASE
if args:
condition = self._prepare_primary_sql_condition()
condition_params = args
projection_statement = ", ".join(projection) if projection else "*"
order_by_sql_format = ", ".join(order_by)
limit = 1
if base_condition == "1":
where_statement = f"WHERE ({condition})" if condition else ""
else:
where_statement = f"WHERE {base_condition} AND ({condition})" if condition else f"WHERE {base_condition}"
order_by_statement = f"ORDER BY {order_by_sql_format}" if order_by else ""
limit_statement = f"LIMIT {limit}" if limit else ""
sql = self.MODEL_CLASS.Meta.SQL_STATEMENT.format(
PROJECTION=projection_statement,
WHERE=where_statement,
ORDER_BY=order_by_statement,
LIMIT=limit_statement,
OFFSET="",
)
Logger.log.info("ViewManagerBase.select_one.sql", manager=self.__class__.__name__)
result = self.dbi.fetch_one(sql, condition_params)
Logger.log.info("ViewManagerBase.select_one.result", result=result, manager=self.__class__.__name__)
if Config.MANAGER_AUTO_MAP_MODEL_ATTRIBUTES:
return self.MODEL_CLASS(result).map_model_attributes() if result else None
return self.MODEL_CLASS(result) if result else None
def select_all(
self,
condition: str = "1",
condition_params: typing.Tuple = (),
projection: typing.Tuple = (),
order_by: typing.Tuple = (),
limit: int = 0,
offset: int = 0,
) -> typing.List[ModelBase]:
"""
Select all rows matching the condition
:param offset: SQL offset
:param projection: sql projection - default *
:param condition: SQL condition
:param condition_params: Positional params for SQL condition
:param order_by: Params for SQL order by statement
:param limit: Params for SQL limit statement
"""
base_condition = self.MODEL_CLASS.Meta.SQL_STATEMENT_WHERE_BASE
projection_statement = ", ".join(projection) if projection else "*"
if base_condition == "1":
where_statement = f"WHERE ({condition})" if condition else ""
else:
where_statement = f"WHERE {base_condition} AND ({condition})" if condition else f"WHERE {base_condition}"
order_by_sql_format = ", ".join(order_by)
if len(order_by) > 0:
order_by_statement = f"ORDER BY {order_by_sql_format}"
else:
if self.MODEL_CLASS.Meta.SQL_STATEMENT_ORDER_BY_DEFAULT:
order_by_statement = f"ORDER BY {self.MODEL_CLASS.Meta.SQL_STATEMENT_ORDER_BY_DEFAULT}"
else:
order_by_statement = ""
limit_statement = f"LIMIT {limit}" if limit else ""
offset_statement = f"OFFSET {offset}" if offset else ""
sql = self.MODEL_CLASS.Meta.SQL_STATEMENT.format(
PROJECTION=projection_statement,
WHERE=where_statement,
ORDER_BY=order_by_statement,
LIMIT=limit_statement,
OFFSET=offset_statement,
)
Logger.log.info("ViewManagerBase.select_all.sql", manager=self.__class__.__name__)
results = self.dbi.fetch_all(sql, condition_params)
Logger.log.info("ViewManagerBase.select_all.result", result=results, manager=self.__class__.__name__)
if Config.MANAGER_AUTO_MAP_MODEL_ATTRIBUTES:
Logger.log.debug("ViewManagerBase.select_all.result.list.automapped")
return [self.MODEL_CLASS(result).map_model_attributes() for result in results]
Logger.log.debug("ViewManagerBase.select_all.result.list")
return [self.MODEL_CLASS(result) for result in results]
@staticmethod
def models_into_dicts(result: typing.List[ModelBase]) -> typing.List[typing.Dict]:
"""
Convert result of select_all into list of dicts
:param result: List of models
"""
return [item.to_dict() for item in result]
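# Hedged usage sketch (the manager subclass and column names are hypothetical):
#   manager = UserManager()   # a subclass of TableManagerBase
#   rows = manager.select_all(condition="age > %s", condition_params=(18,),
#                             order_by=("name",), limit=10)
#   payload = ViewManagerBase.models_into_dicts(rows)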
@classmethod
def _prepare_primary_sql_condition(cls):
args = ["{} = %s".format(primary_key) for primary_key in cls.MODEL_CLASS.Meta.PRIMARY_KEYS]
return " AND ".join(args)
@classmethod
def _prepare_primary_sql_condition_params(cls, model_instance: ModelBase):
return [model_instance.__getattribute__(attribute_name) for attribute_name in cls.MODEL_CLASS.Meta.PRIMARY_KEYS]
class TableManagerBase(ViewManagerBase):
def update_one(self, model_instance: ModelBase, exclude_none_values: bool = False, exclude_columns: list = None) -> int:
"""
Update one database record based on model attributes
:param model_instance: Model instance
:param exclude_none_values: You can exclude columns with None value from update statement
:param exclude_columns: You can exclude columns names from update statement
:return: Number of affected rows
"""
exclude_columns = exclude_columns or []
if not self.MODEL_CLASS.Meta.PRIMARY_KEYS:
raise ManagerException("Can't update record based on model instance. There are no primary keys specified.")
set_prepare = []
set_prepare_params = []
for attribute_name in self.MODEL_CLASS.Meta.ATTRIBUTE_LIST:
value = model_instance.__getattribute__(attribute_name)
if (exclude_none_values and value is None) or attribute_name in exclude_columns:
continue
set_prepare.append("`{}` = %s".format(attribute_name))
set_prepare_params.append(value)
condition_prepare = self._prepare_primary_sql_condition()
condition_prepare_params = self._prepare_primary_sql_condition_params(model_instance)
sql = "UPDATE `{}` SET {} WHERE {} LIMIT 1".format(
self.MODEL_CLASS.Meta.TABLE_NAME, ", ".join(set_prepare), condition_prepare
)
Logger.log.info("TableManagerBase.update_one.sql", manager=self.__class__.__name__)
result = self.dbi.execute(sql, set_prepare_params + condition_prepare_params)
Logger.log.info("TableManagerBase.update_one.result", result=result, manager=self.__class__.__name__)
return result
def insert_one(
self,
model_instance: ModelBase,
exclude_none_values: bool = False,
exclude_columns: list = None,
use_on_duplicate_update_statement: bool = False,
use_insert_ignore_statement: bool = False,
) -> int:
"""
Insert one record into database based on model attributes
:param model_instance: Model instance
:param exclude_none_values: You can exclude columns with None value from insert statement
:param exclude_columns: You can exclude columns names from insert statement
:param use_on_duplicate_update_statement: Use ON DUPLICATE KEY UPDATE statement
:param use_insert_ignore_statement: Use INSERT IGNORE statement
:return: Last inserted id if it is possible
"""
exclude_columns = exclude_columns or []
insert_prepare = []
insert_prepare_values = []
insert_prepare_params = []
update_prepare = []
for attribute_name in self.MODEL_CLASS.Meta.ATTRIBUTE_LIST:
value = model_instance.__getattribute__(attribute_name)
if (exclude_none_values and value is None) or attribute_name in exclude_columns:
continue
insert_prepare.append("`{}`".format(attribute_name))
insert_prepare_values.append("%s")
insert_prepare_params.append(value)
if use_on_duplicate_update_statement:
update_prepare.append("`{0}` = VALUES(`{0}`)".format(attribute_name))
if use_on_duplicate_update_statement:
sql = "INSERT INTO `{}` ({}) VALUES ({}) ON DUPLICATE KEY UPDATE {}".format(
self.MODEL_CLASS.Meta.TABLE_NAME,
", ".join(insert_prepare),
", ".join(insert_prepare_values),
", ".join(update_prepare),
)
elif use_insert_ignore_statement:
sql = "INSERT IGNORE INTO `{}` ({}) VALUES ({})".format(
self.MODEL_CLASS.Meta.TABLE_NAME, ", ".join(insert_prepare), ", ".join(insert_prepare_values)
)
else:
sql = "INSERT INTO `{}` ({}) VALUES ({})".format(
self.MODEL_CLASS.Meta.TABLE_NAME, ", ".join(insert_prepare), ", ".join(insert_prepare_values)
)
Logger.log.info("TableManagerBase.insert_one.sql", manager=self.__class__.__name__)
result = self.dbi.execute(sql, insert_prepare_params)
# set primary key value
if (
result
and len(self.MODEL_CLASS.Meta.PRIMARY_KEYS) == 1
and self.MODEL_CLASS.Meta.ATTRIBUTE_TYPES[self.MODEL_CLASS.Meta.PRIMARY_KEYS[0]] == int
):
model_instance.__setattr__(self.MODEL_CLASS.Meta.PRIMARY_KEYS[0], result)
Logger.log.info("TableManagerBase.insert_one.result", result=result, manager=self.__class__.__name__)
return result
def insert_one_bulk(
self,
model_instance: ModelBase,
exclude_none_values: bool = False,
exclude_columns: list = None,
use_on_duplicate_update_statement: bool = False,
use_insert_ignore_statement: bool = False,
auto_flush: bool = True,
) -> int:
"""
Insert more records in one bulk.
:param model_instance: Model instance
:param exclude_none_values: You can exclude columns with None value from insert statement
:param exclude_columns: You can exclude columns names from insert statement
:param use_on_duplicate_update_statement: Use ON DUPLICATE KEY UPDATE statement
:param use_insert_ignore_statement: Use INSERT IGNORE statement
:param auto_flush: Auto flush bulks from buffer after N records (defined in self.bulk_insert_buffer_size)
:return: Number of items in buffer
"""
exclude_columns = exclude_columns or []
insert_prepare = []
insert_prepare_values = []
insert_prepare_params = []
update_prepare = []
for attribute_name in self.MODEL_CLASS.Meta.ATTRIBUTE_LIST:
value = model_instance.__getattribute__(attribute_name)
if (exclude_none_values and value is None) or attribute_name in exclude_columns:
continue
insert_prepare.append("`{}`".format(attribute_name))
insert_prepare_values.append("%s")
insert_prepare_params.append(value)
if use_on_duplicate_update_statement:
update_prepare.append("`{0}` = VALUES(`{0}`)".format(attribute_name))
if not self.bulk_insert_sql_statement:
if use_on_duplicate_update_statement:
self.bulk_insert_sql_statement = "INSERT INTO `{}` ({}) VALUES ({}) ON DUPLICATE KEY UPDATE {}".format(
self.MODEL_CLASS.Meta.TABLE_NAME,
", ".join(insert_prepare),
", ".join(insert_prepare_values),
", ".join(update_prepare),
)
elif use_insert_ignore_statement:
self.bulk_insert_sql_statement = "INSERT IGNORE INTO `{}` ({}) VALUES ({})".format(
self.MODEL_CLASS.Meta.TABLE_NAME, ", ".join(insert_prepare), ", ".join(insert_prepare_values)
)
else:
self.bulk_insert_sql_statement = "INSERT INTO `{}` ({}) VALUES ({})".format(
self.MODEL_CLASS.Meta.TABLE_NAME, ", ".join(insert_prepare), ", ".join(insert_prepare_values)
)
self.bulk_insert_values_buffer.append(insert_prepare_params)
buffer_len = len(self.bulk_insert_values_buffer)
if auto_flush and buffer_len >= self.bulk_insert_buffer_size:
self.insert_bulk_flush()
return buffer_len
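# Hedged bulk-insert sketch (model construction is illustrative): buffer rows
# with insert_one_bulk, then flush whatever remains in the buffer explicitly.
#   for data in incoming_rows:   # hypothetical iterable of dicts
#       manager.insert_one_bulk(manager.create_model_instance(data))
#   manager.insert_bulk_flush()  # flush the tail of the buffer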
def insert_bulk_flush(self) -> int:
"""
Flush prepared inserts from buffer
:return: Number of inserted rows
"""
result = None
if self.bulk_insert_values_buffer:
result = self.dbi.execute_many(self.bulk_insert_sql_statement, self.bulk_insert_values_buffer)
Logger.log.info(
"TableManagerBase.insert_one_bulk_flush.result",
result=result,
inserted_count=len(self.bulk_insert_values_buffer),
manager=self.__class__.__name__,
)
self.bulk_insert_sql_statement = ""
self.bulk_insert_values_buffer = []
return result
def delete_one(self, model_instance: ModelBase) -> int:
"""
Delete one row matching primary key condition.
:param model_instance: Instance of model
:return: Number of affected rows
"""
condition_prepare = self._prepare_primary_sql_condition()
condition_prepare_params = self._prepare_primary_sql_condition_params(model_instance)
sql_statement = "DELETE FROM `{}` WHERE {} LIMIT 1"
sql = sql_statement.format(self.MODEL_CLASS.Meta.TABLE_NAME, condition_prepare)
Logger.log.info("TableManagerBase.delete_one.sql", manager=self.__class__.__name__)
result = self.dbi.execute(sql, condition_prepare_params)
Logger.log.info(f"TableManagerBase.delete_one.result", result=result, manager=self.__class__.__name__)
return result
def delete_all(
self, condition: str, condition_params: typing.Tuple = (), order_by: typing.Tuple = (), limit: int = 0
) -> int:
"""
Delete all table rows matching condition.
:param condition: SQL condition statement
:param condition_params: SQL condition position params
:param order_by: SQL order statement
:param limit: SQL limit statement
:return: Number of affected rows
"""
where_statement = f"WHERE {condition}"
order_by_sql_format = ", ".join(order_by)
order_by_statement = f"ORDER BY {order_by_sql_format}" if order_by else ""
limit_statement = f"LIMIT {limit}" if limit else ""
sql_statement = "DELETE FROM `{TABLE}` {WHERE} {ORDER_BY} {LIMIT}"
sql = sql_statement.format(
TABLE=self.MODEL_CLASS.Meta.TABLE_NAME,
WHERE=where_statement,
ORDER_BY=order_by_statement,
LIMIT=limit_statement,
)
)
self.buttons["add_domain"] = self._add_button(
"Add selected",
action=self._add_domain,
width=self._width,
tooltip="Add a selected mesh as a domain",
)
self.buttons["delete_domain"] = self._add_button(
"Delete",
action=self._delete_domain,
width=self._width,
tooltip="Delete a selected domain",
)
self.buttons["tetrahedralize"] = self._add_button(
"Tetrahedralize",
action=self.tetrahedralize,
width=self._width,
tooltip="Tetrahedralize selected mesh",
)
self.tetparams["format_menu"] = self._addElemt(
name="Mesh format:",
value=self.tetmesh_options,
width=self._width,
height=self._height,
variable=self.addVariable("int", 1),
type="pullMenu",
)
self.tetparams["dihedral_angle"] = self._add_float(
"Min dihed deg:",
mini=0.0001,
action=None,
maxi=20.0,
init=10.0,
tooltip="The minimal dihedral angle",
step=0.5,
precision=1.0,
width=self._width,
)
self.tetparams["aspect_ratio"] = self._add_float(
"Min aspec rat:",
mini=1.0,
maxi=5.0,
action=None,
init=1.3,
tooltip="The minimal aspect ratio",
step=0.1,
precision=1.0,
width=self._width,
)
# Declare UI for exiting
self.buttons["exit"] = self._add_button(
"Exit", action=self.exit_event, tooltip="Exit GAMer mesh improvements"
)
# Add labels for sliders
for operation in ["coarse_dense", "coarse_flat", "smooth"]:
self.labels[operation] = {}
for key, elemt in self.gparams[operation].items():
self.labels[operation][key] = self._add_text(
elemt["name"], width=elemt["width"]
)
for key, elemt in self.mparams.items():
if key == "name":
self.labels[key] = self._add_text("Name:", width=elemt["width"])
else:
self.labels[key] = self._add_text(elemt["name"], width=elemt["width"])
for key, elemt in self.tetparams.items():
self.labels[key] = self._add_text(elemt["name"], width=elemt["width"])
self.labels["empty"] = self._add_text("")
self.labels["small_empty"] = self._add_text("", width=self._height)
self.labels["large_empty"] = self._add_text("", width=self._width * 1.2)
def surface_mesh_generation_frame(self):
if len(self._layout) > self._surface_mesh_generation_index:
collapse = self._layout[self._surface_mesh_generation_index]["collapse"]
else:
collapse = True
frame_info = []
frame_info.append([self.mesh_params["surface_mesh_menu"]])
# Build panel based on what menu item is selected
mesh_type = self.mesh_menu_items[
self.getLong(self.mesh_params["surface_mesh_menu"])
]
if mesh_type == "Lattice":
pass
# FIXME: Add stuff!
elif mesh_type == "Sphere":
frame_info.append([self.mesh_params["Sphere_divisions"]])
elif mesh_type == "PDB (Gauss)":
frame_info.append([self.mesh_params["Blobbyness"]])
frame_info.append([self.mesh_params["Iso_value"]])
elif mesh_type == "Lattice":
myprint("Not supported...")
frame_info.append([self.buttons["import_surface_mesh"]])
return self._addLayout(
name="Surface mesh import", elems=frame_info, collapse=collapse
)
def mesh_improvement_frame(self):
        # Inherit the collapse value of the frame if it exists
frame_info = []
frame_info.append(
[
self.labels["empty"],
self.labels["coarse_dense"]["rate"],
self.labels["coarse_dense"]["numiter"],
]
)
frame_info.append(
[
self.buttons["coarse_dense"],
self.gparams["coarse_dense"]["rate"],
self.gparams["coarse_dense"]["numiter"],
]
)
frame_info.append(
[self.buttons["coarse_flat"], self.gparams["coarse_flat"]["rate"]]
)
frame_info.append(
[
self.labels["empty"],
self.labels["smooth"]["max_min_angle"],
self.labels["smooth"]["max_iter"],
]
)
frame_info.append(
[
self.buttons["smooth"],
self.gparams["smooth"]["max_min_angle"],
self.gparams["smooth"]["max_iter"],
]
)
frame_info.append([self.gparams["smooth"]["preserve_ridges"]])
frame_info.append([self.labels["empty"]])
frame_info.append([self.buttons["normal_smooth"]])
frame_info.append([self.labels["empty"]])
frame_info.append([self.gparams["create_new_mesh"]])
frame_info.append([self.buttons["repair"], self.buttons["centralize"]])
frame_info.append(
[self.buttons["delete_faces"], self.buttons["triangulate_holes"]]
)
return self._addLayout(name="Surface Mesh Improvements", elems=frame_info)
def boundary_marking_frame(self):
frame_info = []
frame_info.append(
[self.labels["name"], self.mparams["name"], self.mparams["boundary_menu"]]
)
frame_info.append(
[self.labels["marker"], self.mparams["marker"], self.mparams["color"]]
)
frame_info.append(
[self.buttons["create_boundary"], self.buttons["delete_boundary"]]
)
frame_info.append(
[self.buttons["select_boundary"], self.buttons["deselect_boundary"]]
)
        # FIXME: These functions do not work...
        # FIXME: And they produce large memory leaks...
# frame_info.append([self.buttons["hide_boundary"],\
# self.buttons["unhide_boundary"]])
frame_info.append([self.buttons["assign_boundary"]])
frame_info.append([self.buttons["calculate_areas"]])
return self._addLayout(name="Boundary marking", elems=frame_info)
def tetrahedralization_frame(self, domain=True):
if len(self._layout) > self._tetrahedarlization_index:
collapse = self._layout[self._tetrahedarlization_index]["collapse"]
else:
collapse = True
frame_info = []
frame_info.append([self.labels["domain_menu"], self.buttons["add_domain"]])
# If domain is passed
if domain:
frame_info.append(
[self.tetparams["domain_menu"], self.buttons["delete_domain"]]
)
frame_info.append([self.tetparams["domain_as_hole"]])
# if not domain["as_hole"]:
frame_info.append(
[self.labels["domainmarker"], self.tetparams["domainmarker"]]
)
frame_info.append(
[self.labels["volume_constraint"], self.tetparams["volume_constraint"]]
)
frame_info.append([self.tetparams["use_volume_constraint"]])
frame_info.append([self.labels["empty"]])
frame_info.append(
[self.labels["dihedral_angle"], self.labels["aspect_ratio"]]
)
frame_info.append(
[self.tetparams["dihedral_angle"], self.tetparams["aspect_ratio"]]
)
frame_info.append([self.labels["empty"], self.labels["format_menu"]])
frame_info.append(
[self.buttons["tetrahedralize"], self.tetparams["format_menu"]]
)
return self._addLayout(
name="Tetrahedralization", elems=frame_info, collapse=collapse
)
def _update_mesh_import(self):
self._layout[
self._surface_mesh_generation_index
] = self.surface_mesh_generation_frame()
self.updateViewer()
def import_surface_mesh(self):
file_type = self.mesh_menu_items[
self.getLong(self.mesh_params["surface_mesh_menu"])
]
# myprint("Importing file_type ", file_type)
# Import surface mesh from file
if file_type in ["OFF", "PDB (Surface)", "PDB (Gauss)", "Lattice"]:
self.save_to_registry()
if "PDB" in file_type:
suffix = "*.pdb"
elif "OFF" in file_type:
suffix = "*.off"
else:
suffix = "*.lat"
self.fileDialog(
label="Import",
callback=self.import_surface_mesh_from_file_action,
suffix=suffix,
)
elif file_type == "Sphere":
divisions = self.getLong(self.mesh_params["Sphere_divisions"])
gmesh = gamer.SurfaceMesh(divisions)
self.gamer_to_host(gmesh, {}, "Sphere", switch_layer=False)
def update_min_max_angle(self, *args):
self.setReal(
self.gparams["smooth"]["min_max_angle"],
180 - 2 * self.getVal(self.gparams["smooth"]["max_min_angle"]),
)
def exit_event(self, *args):
self.save_to_registry()
self.close()
def _get_next_marker(self, marker, boundaries=None):
if boundaries is None:
obj = self._get_selected_mesh()
if obj is None:
return marker
boundaries = self.helper.getProperty(obj, "boundaries")
if boundaries is None:
return marker
markers = [b["marker"] for b in list(boundaries.values())]
while marker in markers:
marker += 1
return marker
def _attach_suffix_to_str(self, name):
        suffices = re.findall(r"\.([0-9]*)", name)
if suffices:
num = int(suffices[0]) + 1
name = name.replace("." + suffices[0], "")
else:
num = 1
return "%s.%03d" % (name, num)
def _new_boundary(self):
"Create a boundary using default name, marker and color"
# Get selected mesh
obj = self._get_selected_mesh()
if obj is None:
return
name = self.getVal(self.mparams["name"])
if name == "":
name = "Boundary"
boundaries = self.helper.getProperty(obj, "boundaries")
if not boundaries:
self.helper.setProperty(obj, "boundaries", {})
boundaries = self.helper.getProperty(obj, "boundaries")
while name in boundaries:
name = self._attach_suffix_to_str(name)
marker = self._get_next_marker(self.getVal(self.mparams["marker"]), boundaries)
color = self.getVal(self.mparams["color"])
        # Always a new entity
boundaries[name] = dict(
marker=marker, r=color[0], g=color[1], b=color[2], faces={}
)
names = self.menu_items.get_names()
self.setVal(self.mparams["boundary_menu"], names.index(name))
self._update_boundary_menu()
# self._assign_boundary()
def _repaint_boundaries(self, obj):
# Grab mesh name
boundaries = self.helper.getProperty(obj, "boundaries")
if not boundaries:
return
# Ensure editmode is off
editmode = self.helper.toggleEditMode()
# Paint boundaries
items = (
boundaries.items if hasattr(boundaries, "items") else boundaries.iteritems
)
for name, boundary in items():
faces = self._get_boundary_faces(boundary)
self.helper.changeColor(
obj,
(boundary["r"], boundary["g"], boundary["b"]),
facesSelection=faces,
faceMaterial=False,
)
# Restore editmode
self.helper.restoreEditMode(editmode)
def _get_boundary_faces(self, boundary):
if not "faces" in boundary:
return []
all_faces = []
for faces in list(boundary["faces"].values()):
all_faces.extend(faces)
return all_faces
def _set_boundary_faces(self, boundary, faces):
"Set faces in boundary props"
if not "faces" in boundary:
return
assert isinstance(faces, list)
# Maximal indices in a array prop in Blender is 10000
max_ind = 10000
num_sub_arrays = len(faces) / max_ind + 1
# If the faces allready excist delete it and re attach it
if "faces" in boundary:
for key in boundary["faces"]:
del boundary["faces"][key]
del boundary["faces"]
boundary["faces"] = {}
for ind in range(num_sub_arrays):
boundary["faces"]["F%d" % ind] = faces[
ind * max_ind : min((ind + 1) * max_ind, len(faces))
]
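    # Example (illustrative): 25000 faces with max_ind = 10000 are stored as
    # three sub-arrays, F0 (10000), F1 (10000) and F2 (5000), since Blender
    # array properties are capped at 10000 entries each.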
def _empty_menu_action(self):
self.setVal(self.mparams["boundary_menu"], 0) # why is it comments
self.setVal(self.mparams["name"], "")
self.setVal(self.mparams["marker"], 1)
self.setVal(self.mparams["color"], gray)
def _delete_boundary(self):
if len(self.menu_items) == 0:
return
name = self.menu_items[self.getLong(self.mparams["boundary_menu"])]
obj = self._get_selected_mesh()
if obj is None:
return
boundaries = self.helper.getProperty(obj, "boundaries")
if name not in boundaries:
return
names = self.menu_items.get_names()
if not names:
return
is_empty = len(names) == 1
is_last = names[-1] == name
need_repaint = bool(self._get_boundary_faces(boundaries[name]))
if need_repaint:
# Ensure editmode is off
editmode = self.helper.toggleEditMode()
faces = self._get_boundary_faces(boundaries[name])
self.helper.changeColor(obj, gray, facesSelection=faces, faceMaterial=False)
# Restore editmode
self.helper.restoreEditMode(editmode)
# Update value of menu
if is_empty:
self._empty_menu_action()
elif is_last:
self.setVal(self.mparams["boundary_menu"], len(names) - 2)
        # Do the actual deletion
        for key in ["marker", "r", "g", "b"]:
            del boundaries[name][key]
        for key in list(boundaries[name]["faces"]):
            del boundaries[name]["faces"][key]
        del boundaries[name]["faces"]
del boundaries[name]
self._update_boundary_menu()
def _select_boundary(self, select=True):
if len(self.menu_items) == 0:
return
boundary = self._get_boundary(
self.menu_items[self.getLong(self.mparams["boundary_menu"])]
)
if not boundary:
return
faces = self._get_boundary_faces(boundary)
obj = self._get_selected_mesh()
if obj is None:
return
self.helper.selectFaces(obj, faces, select)
def _deselect_boundary(self):
self._select_boundary(False)
def _hide_boundary(self, hide=True):
if len(self.menu_items) == 0:
return
boundary = self._get_boundary(
self.menu_items[self.getLong(self.mparams["boundary_menu"])]
)
if not boundary:
return
faces = self._get_boundary_faces(boundary)
obj = self._get_selected_mesh()
if obj is None:
return
self.helper.hideFaces(obj, faces, hide)
def _unhide_boundary(self):
self._hide_boundary(False)
def _calculate_area(self):
if len(self.menu_items) == 0:
return
name = self.menu_items[self.getLong(self.mparams["boundary_menu"])]
boundary = self._get_boundary(name)
if not boundary:
return
obj = self._get_selected_mesh()
if obj is None:
return
faces = self._get_boundary_faces(boundary)
area = self.helper.faceArea(obj, faces)
if area > 0:
print("Area of boundary '%s' %.2e" % (name, area))
def calculate_areas(self):
"Calculate areas of all boundaries"
try:
from cPickle import dump
        except ImportError:
from pickle import dump
if len(self.menu_items) == 0:
return
obj = self._get_selected_mesh()
if obj is None:
return
areas = {}
for boundary_name in self.menu_items:
boundary = self._get_boundary(boundary_name)
if not boundary:
continue
faces = self._get_boundary_faces(boundary)
area = self.helper.faceArea(obj, faces)
if area > 0:
areas[boundary_name] = area
myprint("Area of boundary '%s' %.2e" % (boundary_name, area))
        with open(self.helper.getName(obj) + ".cpickle", "wb") as areas_file:
            dump(areas, areas_file)
def _assign_boundary(self):
# Get selected mesh
if len(self.menu_items) == 0:
return
obj = self._get_selected_mesh()
if obj is None:
return
boundary_name = self.menu_items[self.getLong(self.mparams["boundary_menu"])]
boundaries = self.helper.getProperty(obj, "boundaries")
if not boundaries:
return
if boundary_name not in boundaries:
return
boundary = boundaries[boundary_name]
# Get all faces and indices of all selected faces
faces, faces_selected_indice = self.helper.getMeshFaces(obj, selected=True)
# If no faces were selected
if not faces_selected_indice:
return
# Ensure editmode is off
editmode = self.helper.toggleEditMode()
for bound_name in list(boundaries.keys()):
if bound_name == boundary_name:
continue
bound = boundaries[bound_name]
other_faces = set(self._get_boundary_faces(bound))
if not other_faces.isdisjoint(faces_selected_indice):
other_faces.difference_update(faces_selected_indice)
self._set_boundary_faces(bound, list(other_faces))
# Set the selected faces
self._set_boundary_faces(boundary, faces_selected_indice)
# Restore editmode
self.helper.restoreEditMode(editmode)
# Repaint
self._repaint_boundaries(obj)
def _get_boundary(self, name):
# Get selected mesh
obj = self._get_selected_mesh()
if obj is None:
return
boundaries = self.helper.getProperty(obj, "boundaries")
if not boundaries:
return
if name not in boundaries:
return
return boundaries[name]
def _boundary_to_dict(self, boundary):
pass
def _update_boundary_menu(self):
if len(self.menu_items) == 0:
return
name = self.menu_items[self.getLong(self.mparams["boundary_menu"])]
boundary = self._get_boundary(name)
myprint("update menu boundary ", | |
import logging
import os
import warnings
import typing
from collections import defaultdict, namedtuple
from typing import Any, Dict, List, Optional, Text, Tuple, Union
import rasa.utils.io as io_utils
from rasa.core.domain import Domain
from rasa.nlu.constants import (
EXTRACTOR,
ENTITY_ATTRIBUTE_VALUE,
ENTITY_ATTRIBUTE_TEXT,
ENTITY_ATTRIBUTE_START,
ENTITY_ATTRIBUTE_END,
ENTITY_ATTRIBUTE_TYPE,
)
from rasa.constants import RESULTS_FILE, PERCENTAGE_KEY
from rasa.core.utils import pad_lists_to_size
from rasa.core.events import ActionExecuted, UserUttered
from rasa.nlu.training_data.formats.markdown import MarkdownWriter
from rasa.core.trackers import DialogueStateTracker
from rasa.utils.io import DEFAULT_ENCODING
if typing.TYPE_CHECKING:
from rasa.core.agent import Agent
from rasa.core.processor import MessageProcessor
CONFUSION_MATRIX_STORIES_FILE = "story_confusion_matrix.png"
REPORT_STORIES_FILE = "story_report.json"
FAILED_STORIES_FILE = "failed_stories.md"
SUCCESSFUL_STORIES_FILE = "successful_stories.md"
logger = logging.getLogger(__name__)
StoryEvaluation = namedtuple(
"StoryEvaluation",
[
"evaluation_store",
"failed_stories",
"successful_stories",
"action_list",
"in_training_data_fraction",
],
)
class EvaluationStore:
"""Class storing action, intent and entity predictions and targets."""
def __init__(
self,
action_predictions: Optional[List[Text]] = None,
action_targets: Optional[List[Text]] = None,
intent_predictions: Optional[List[Text]] = None,
intent_targets: Optional[List[Text]] = None,
entity_predictions: Optional[List[Dict[Text, Any]]] = None,
entity_targets: Optional[List[Dict[Text, Any]]] = None,
) -> None:
self.action_predictions = action_predictions or []
self.action_targets = action_targets or []
self.intent_predictions = intent_predictions or []
self.intent_targets = intent_targets or []
self.entity_predictions = entity_predictions or []
self.entity_targets = entity_targets or []
def add_to_store(
self,
action_predictions: Optional[Union[Text, List[Text]]] = None,
action_targets: Optional[Union[Text, List[Text]]] = None,
intent_predictions: Optional[Union[Text, List[Text]]] = None,
intent_targets: Optional[Union[Text, List[Text]]] = None,
entity_predictions: Optional[List[Dict[Text, Any]]] = None,
entity_targets: Optional[List[Dict[Text, Any]]] = None,
) -> None:
"""Add items or lists of items to the store"""
for k, v in locals().items():
if k != "self" and v:
attr = getattr(self, k)
if isinstance(v, list):
attr.extend(v)
else:
attr.append(v)
def merge_store(self, other: "EvaluationStore") -> None:
"""Add the contents of other to self"""
self.add_to_store(
action_predictions=other.action_predictions,
action_targets=other.action_targets,
intent_predictions=other.intent_predictions,
intent_targets=other.intent_targets,
entity_predictions=other.entity_predictions,
entity_targets=other.entity_targets,
)
def has_prediction_target_mismatch(self) -> bool:
return (
self.intent_predictions != self.intent_targets
or self.entity_predictions != self.entity_targets
or self.action_predictions != self.action_targets
)
def serialise(self) -> Tuple[List[Text], List[Text]]:
"""Turn targets and predictions to lists of equal size for sklearn."""
targets = (
self.action_targets
+ self.intent_targets
+ [
MarkdownWriter.generate_entity_md(gold.get("text"), gold)
for gold in self.entity_targets
]
)
predictions = (
self.action_predictions
+ self.intent_predictions
+ [
MarkdownWriter.generate_entity_md(predicted.get("text"), predicted)
for predicted in self.entity_predictions
]
)
# sklearn does not cope with lists of unequal size, nor None values
return pad_lists_to_size(targets, predictions, padding_value="None")
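# Illustrative: pad_lists_to_size makes both lists the same length, e.g.
#   pad_lists_to_size(["a", "b", "c"], ["a"], padding_value="None")
#   -> (["a", "b", "c"], ["a", "None", "None"])
# so sklearn metrics can compare targets and predictions element-wise.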
class WronglyPredictedAction(ActionExecuted):
"""The model predicted the wrong action.
Mostly used to mark wrong predictions and be able to
dump them as stories."""
type_name = "wrong_action"
def __init__(
self, correct_action, predicted_action, policy, confidence, timestamp=None
) -> None:
self.predicted_action = predicted_action
super().__init__(correct_action, policy, confidence, timestamp=timestamp)
def as_story_string(self) -> Text:
return f"{self.action_name} <!-- predicted: {self.predicted_action} -->"
class EndToEndUserUtterance(UserUttered):
"""End-to-end user utterance.
Mostly used to print the full end-to-end user message in the
`failed_stories.md` output file."""
def as_story_string(self, e2e: bool = True) -> Text:
return super().as_story_string(e2e=True)
class WronglyClassifiedUserUtterance(UserUttered):
"""The NLU model predicted the wrong user utterance.
Mostly used to mark wrong predictions and be able to
dump them as stories."""
type_name = "wrong_utterance"
def __init__(self, event: UserUttered, eval_store: EvaluationStore) -> None:
if not eval_store.intent_predictions:
self.predicted_intent = None
else:
self.predicted_intent = eval_store.intent_predictions[0]
self.predicted_entities = eval_store.entity_predictions
intent = {"name": eval_store.intent_targets[0]}
super().__init__(
event.text,
intent,
eval_store.entity_targets,
event.parse_data,
event.timestamp,
event.input_channel,
)
def as_story_string(self, e2e: bool = True) -> Text:
from rasa.core.events import md_format_message
correct_message = md_format_message(self.text, self.intent, self.entities)
predicted_message = md_format_message(
self.text, self.predicted_intent, self.predicted_entities
)
return (
f"{self.intent.get('name')}: {correct_message} <!-- predicted: "
f"{self.predicted_intent}: {predicted_message} -->"
)
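# Illustrative output line (hypothetical intents and message):
#   greet: hello there <!-- predicted: goodbye: hello there -->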
async def _generate_trackers(
resource_name: Text,
agent: "Agent",
max_stories: Optional[int] = None,
use_e2e: bool = False,
) -> List[Any]:
from rasa.core.training.generator import TrainingDataGenerator
from rasa.core import training
story_graph = await training.extract_story_graph(
resource_name, agent.domain, agent.interpreter, use_e2e
)
g = TrainingDataGenerator(
story_graph,
agent.domain,
use_story_concatenation=False,
augmentation_factor=0,
tracker_limit=max_stories,
)
return g.generate()
def _clean_entity_results(
text: Text, entity_results: List[Dict[Text, Any]]
) -> List[Dict[Text, Any]]:
"""Extract only the token variables from an entity dict."""
cleaned_entities = []
for r in tuple(entity_results):
cleaned_entity = {ENTITY_ATTRIBUTE_TEXT: text}
for k in (
ENTITY_ATTRIBUTE_START,
ENTITY_ATTRIBUTE_END,
ENTITY_ATTRIBUTE_TYPE,
ENTITY_ATTRIBUTE_VALUE,
):
if k in set(r):
if k == ENTITY_ATTRIBUTE_VALUE and EXTRACTOR in set(r):
# convert values to strings for evaluation as
# target values are all of type string
r[k] = str(r[k])
cleaned_entity[k] = r[k]
cleaned_entities.append(cleaned_entity)
return cleaned_entities
def _collect_user_uttered_predictions(
event: UserUttered,
partial_tracker: DialogueStateTracker,
fail_on_prediction_errors: bool,
) -> EvaluationStore:
user_uttered_eval_store = EvaluationStore()
intent_gold = event.parse_data.get("true_intent")
predicted_intent = event.parse_data.get("intent", {}).get("name")
if not predicted_intent:
predicted_intent = [None]
user_uttered_eval_store.add_to_store(
intent_predictions=predicted_intent, intent_targets=intent_gold
)
entity_gold = event.parse_data.get("true_entities")
predicted_entities = event.parse_data.get("entities")
if entity_gold or predicted_entities:
user_uttered_eval_store.add_to_store(
entity_targets=_clean_entity_results(event.text, entity_gold),
entity_predictions=_clean_entity_results(event.text, predicted_entities),
)
if user_uttered_eval_store.has_prediction_target_mismatch():
partial_tracker.update(
WronglyClassifiedUserUtterance(event, user_uttered_eval_store)
)
if fail_on_prediction_errors:
raise ValueError(
"NLU model predicted a wrong intent. Failed Story:"
" \n\n{}".format(partial_tracker.export_stories())
)
else:
end_to_end_user_utterance = EndToEndUserUtterance(
event.text, event.intent, event.entities
)
partial_tracker.update(end_to_end_user_utterance)
return user_uttered_eval_store
def _emulate_form_rejection(partial_tracker: DialogueStateTracker) -> None:
from rasa.core.events import ActionExecutionRejected
rejected_action_name: Text = partial_tracker.active_loop["name"]
partial_tracker.update(ActionExecutionRejected(rejected_action_name))
def _collect_action_executed_predictions(
processor: "MessageProcessor",
partial_tracker: DialogueStateTracker,
event: ActionExecuted,
fail_on_prediction_errors: bool,
circuit_breaker_tripped: bool,
) -> Tuple[EvaluationStore, Optional[Text], Optional[float]]:
from rasa.core.policies.form_policy import FormPolicy
action_executed_eval_store = EvaluationStore()
gold = event.action_name
if circuit_breaker_tripped:
predicted = "circuit breaker tripped"
policy = None
confidence = None
else:
action, policy, confidence = processor.predict_next_action(partial_tracker)
predicted = action.name()
if (
policy
and predicted != gold
and _form_might_have_been_rejected(
processor.domain, partial_tracker, predicted
)
):
# Wrong action was predicted,
# but it might be Ok if form action is rejected.
_emulate_form_rejection(partial_tracker)
# try again
action, policy, confidence = processor.predict_next_action(partial_tracker)
# Even if the prediction is also wrong, we don't have to undo the emulation
# of the action rejection as we know that the user explicitly specified
# that something else than the form was supposed to run.
predicted = action.name()
action_executed_eval_store.add_to_store(
action_predictions=predicted, action_targets=gold
)
if action_executed_eval_store.has_prediction_target_mismatch():
partial_tracker.update(
WronglyPredictedAction(
gold, predicted, event.policy, event.confidence, event.timestamp
)
)
if fail_on_prediction_errors:
error_msg = (
"Model predicted a wrong action. Failed Story: "
"\n\n{}".format(partial_tracker.export_stories())
)
            if policy and FormPolicy.__name__ in policy:
error_msg += (
"FormAction is not run during "
"evaluation therefore it is impossible to know "
"if validation failed or this story is wrong. "
"If the story is correct, add it to the "
"training stories and retrain."
)
raise ValueError(error_msg)
else:
partial_tracker.update(event)
return action_executed_eval_store, policy, confidence
def _form_might_have_been_rejected(
domain: Domain, tracker: DialogueStateTracker, predicted_action_name: Text
) -> bool:
return (
tracker.active_loop.get("name") == predicted_action_name
and predicted_action_name in domain.form_names
)
def _predict_tracker_actions(
tracker: DialogueStateTracker,
agent: "Agent",
fail_on_prediction_errors: bool = False,
use_e2e: bool = False,
) -> Tuple[EvaluationStore, DialogueStateTracker, List[Dict[Text, Any]]]:
processor = agent.create_processor()
tracker_eval_store = EvaluationStore()
events = list(tracker.events)
partial_tracker = DialogueStateTracker.from_events(
tracker.sender_id,
events[:1],
agent.domain.slots,
sender_source=tracker.sender_source,
)
tracker_actions = []
should_predict_another_action = True
num_predicted_actions = 0
for event in events[1:]:
if isinstance(event, ActionExecuted):
circuit_breaker_tripped = processor.is_action_limit_reached(
num_predicted_actions, should_predict_another_action
)
(
action_executed_result,
policy,
confidence,
) = _collect_action_executed_predictions(
processor,
partial_tracker,
event,
fail_on_prediction_errors,
circuit_breaker_tripped,
)
tracker_eval_store.merge_store(action_executed_result)
tracker_actions.append(
{
"action": action_executed_result.action_targets[0],
"predicted": action_executed_result.action_predictions[0],
"policy": policy,
"confidence": confidence,
}
)
should_predict_another_action = processor.should_predict_another_action(
action_executed_result.action_predictions[0]
)
num_predicted_actions += 1
elif use_e2e and isinstance(event, UserUttered):
user_uttered_result = _collect_user_uttered_predictions(
event, partial_tracker, fail_on_prediction_errors
)
tracker_eval_store.merge_store(user_uttered_result)
else:
partial_tracker.update(event)
if isinstance(event, UserUttered):
num_predicted_actions = 0
return tracker_eval_store, partial_tracker, tracker_actions
def _in_training_data_fraction(action_list: List[Dict[Text, Any]]) -> float:
"""Given a list of action items, returns the fraction of actions
that were predicted using one of the Memoization policies."""
from rasa.core.policies.ensemble import SimplePolicyEnsemble
in_training_data = [
a["action"]
for a in action_list
if a["policy"] and not SimplePolicyEnsemble.is_not_memo_policy(a["policy"])
]
return len(in_training_data) / len(action_list)
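# Illustrative: if three of four executed actions were predicted by a
# memoization policy, _in_training_data_fraction returns 0.75.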
def _collect_story_predictions(
completed_trackers: List["DialogueStateTracker"],
agent: "Agent",
fail_on_prediction_errors: bool = False,
use_e2e: bool = False,
) -> Tuple[StoryEvaluation, int]:
"""Test the stories from a file, running them through the stored model."""
from rasa.test import get_evaluation_metrics
from tqdm import tqdm
story_eval_store = EvaluationStore()
failed = []
success = []
correct_dialogues = []
number_of_stories = len(completed_trackers)
logger.info(f"Evaluating {number_of_stories} stories\nProgress:")
action_list = []
for tracker in tqdm(completed_trackers):
tracker_results, predicted_tracker, tracker_actions = _predict_tracker_actions(
tracker, agent, fail_on_prediction_errors, use_e2e
)
story_eval_store.merge_store(tracker_results)
action_list.extend(tracker_actions)
if tracker_results.has_prediction_target_mismatch():
# there is at least one wrong prediction
failed.append(predicted_tracker)
correct_dialogues.append(0)
else:
correct_dialogues.append(1)
success.append(predicted_tracker)
logger.info("Finished collecting predictions.")
with warnings.catch_warnings():
from sklearn.exceptions import UndefinedMetricWarning
warnings.simplefilter("ignore", UndefinedMetricWarning)
report, precision, f1, accuracy = get_evaluation_metrics(
[1] * len(completed_trackers), correct_dialogues
)
in_training_data_fraction = _in_training_data_fraction(action_list)
_log_evaluation_table(
[1] * len(completed_trackers),
"END-TO-END" if use_e2e else "CONVERSATION",
report,
precision,
f1,
accuracy,
in_training_data_fraction,
include_report=False,
)
return (
StoryEvaluation(
evaluation_store=story_eval_store,
failed_stories=failed,
successful_stories=success,
action_list=action_list,
in_training_data_fraction=in_training_data_fraction,
),
number_of_stories,
)
def _log_stories(
stories: List[DialogueStateTracker], filename: Text, out_directory: Text
) -> None:
"""Write given stories to the given file."""
if not out_directory:
return
with open(
os.path.join(out_directory, filename), "w", encoding=DEFAULT_ENCODING
) as f:
if not stories:
f.write("<!-- No stories found. -->")
        for story in stories:
            # completion of a truncated excerpt: dump each story via its tracker
            f.write(story.export_stories())
            f.write("\n\n")
# Repository: gyang21/tailor
#!/usr/bin/python
#
# Copyright (C) 2020 ByteDance Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
counter = {}
def verify(reader):
LOGGER('HEADER', 'protocol: %s' % reader.read(19).decode('ascii'))
    LOGGER('HEADER', 'identifier size: %d' % int.from_bytes(reader.read(4), byteorder='big', signed=False))
LOGGER('HEADER', 'timestamp: %d' % int.from_bytes(reader.read(8), byteorder='big', signed=False))
length = os.path.getsize(reader.name)
while reader.tell() < length:
tag = int.from_bytes(reader.read(1), byteorder='big', signed=False)
if tag == 0x01: # STRING
verify_STRING(reader)
elif tag == 0x02: # LOAD_CLASS
verify_LOAD_CLASS(reader)
elif tag == 0x03: # UNLOAD_CLASS
raise Exception('Not supported tag: %d' % tag)
elif tag == 0x04: # STACK_FRAME
raise Exception('Not supported tag: %d' % tag)
elif tag == 0x05: # STACK_TRACE
verify_STACK_TRACE(reader)
elif tag == 0x06: # ALLOC_SITES
raise Exception('Not supported tag: %d' % tag)
elif tag == 0x07: # HEAP_SUMMARY
raise Exception('Not supported tag: %d' % tag)
elif tag == 0x0A: # START_THREAD
raise Exception('Not supported tag: %d' % tag)
elif tag == 0x0B: # END_THREAD
raise Exception('Not supported tag: %d' % tag)
elif tag == 0x0C: # HEAP_DUMP
verify_HEAP_DUMP_SEGMENT(reader)
elif tag == 0x0D: # CPU_SAMPLES
raise Exception('Not supported tag: %d' % tag)
elif tag == 0x0E: # CONTROL_SETTINGS
raise Exception('Not supported tag: %d' % tag)
elif tag == 0x1C: # HEAP_DUMP_SEGMENT
verify_HEAP_DUMP_SEGMENT(reader)
elif tag == 0x2C: # HEAP_DUMP_END
verify_HEAP_DUMP_END(reader)
else:
raise Exception('Not supported tag: %d, length: %d' % (tag, reader.tell()))
def verify_STRING(reader):
COUNTER('STRING')
LOGGER('STRING', 'tag: 0x01')
LOGGER('STRING', 'timestamp: %d ' % int.from_bytes(reader.read(4), byteorder='big', signed=False))
length = int.from_bytes(reader.read(4), byteorder='big', signed=False) - 4
LOGGER('STRING', 'length: %d' % length)
LOGGER('STRING', 'ID for this string: 0x%s ' % reader.read(4).hex())
LOGGER('STRING', 'value: %s' % reader.read(length).decode('ascii'))
def verify_LOAD_CLASS(reader):
COUNTER('LOAD_CLASS')
LOGGER('LOAD-CLASS', 'tag: 0x02')
LOGGER('LOAD-CLASS', 'timestamp: %d ' % int.from_bytes(reader.read(4), byteorder='big', signed=False))
LOGGER('LOAD-CLASS', 'length: %d ' % int.from_bytes(reader.read(4), byteorder='big', signed=False))
LOGGER('LOAD-CLASS', 'class serial number: 0x%s ' % reader.read(4).hex())
LOGGER('LOAD-CLASS', 'class object ID: 0x%s ' % reader.read(4).hex())
LOGGER('LOAD-CLASS', 'stack trace serial number: 0x%s ' % reader.read(4).hex())
LOGGER('LOAD-CLASS', 'class name string ID: 0x%s ' % reader.read(4).hex())
def verify_STACK_TRACE(reader):
COUNTER('STACK_TRACE')
LOGGER('STACK-TRACE', 'tag: 0x05')
LOGGER('STACK-TRACE', 'timestamp: %d ' % int.from_bytes(reader.read(4), byteorder='big', signed=False))
LOGGER('STACK-TRACE', 'length: %d ' % int.from_bytes(reader.read(4), byteorder='big', signed=False))
LOGGER('STACK-TRACE', 'serial number: 0x%s ' % reader.read(4).hex())
LOGGER('STACK-TRACE', 'thread serial number: 0x%s ' % reader.read(4).hex())
length = int.from_bytes(reader.read(4), byteorder='big', signed=False)
LOGGER('STACK-TRACE', 'number of frames: %d ' % length)
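    # The frame IDs themselves are not needed here; skip 4 bytes per frame.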
reader.seek(4 * length, 1)
def verify_HEAP_DUMP_SEGMENT(reader):
COUNTER('HEAP_DUMP_SEGMENT')
LOGGER('HEAP-DUMP-SEGMENT', 'tag: 0x1C')
LOGGER('HEAP-DUMP-SEGMENT', 'timestamp: %d ' % int.from_bytes(reader.read(4), byteorder='big', signed=False))
length = int.from_bytes(reader.read(4), byteorder='big', signed=False)
LOGGER('HEAP-DUMP-SEGMENT', 'length: %d ' % length)
available = length
while available > 0:
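        # `available` counts the bytes left in this segment: the position
        # before each sub-record is added here and the position after it is
        # subtracted below, so the net effect is subtracting the record size.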
available += reader.tell()
tag = int.from_bytes(reader.read(1), byteorder='big', signed=False)
reader.seek(-1, 1)
if tag == 0x01: # ROOT_JNI_GLOBAL
verify_ROOT_JNI_GLOBAL(reader)
elif tag == 0x02: # ROOT_JNI_LOCAL
verify_ROOT_JNI_LOCAL(reader)
elif tag == 0x03: # ROOT_JAVA_FRAME
verify_ROOT_JAVA_FRAME(reader)
elif tag == 0x04: # ROOT_NATIVE_STACK
verify_ROOT_NATIVE_STACK(reader)
elif tag == 0x05: # ROOT_STICKY_CLASS
verify_ROOT_STICKY_CLASS(reader)
elif tag == 0x06: # ROOT_THREAD_BLOCK
verify_ROOT_THREAD_BLOCK(reader)
elif tag == 0x07: # ROOT_MONITOR_USED
verify_ROOT_MONITOR_USED(reader)
elif tag == 0x08: # ROOT_THREAD_OBJECT
verify_ROOT_THREAD_OBJECT(reader)
        elif tag == 0x20: # CLASS_DUMP
verify_CLASS_DUMP(reader)
        elif tag == 0x21: # INSTANCE_DUMP
verify_INSTANCE_DUMP(reader)
elif tag == 0x22: # OBJECT_ARRAY_DUMP
verify_OBJECT_ARRAY_DUMP(reader)
elif tag == 0x23: # PRIMITIVE_ARRAY_DUMP
verify_PRIMITIVE_ARRAY_DUMP(reader)
elif tag == 0x89: # ROOT_INTERNED_STRING
verify_ROOT_INTERNED_STRING(reader)
elif tag == 0x8A: # ROOT_FINALIZING
verify_ROOT_FINALIZING(reader)
elif tag == 0x8B: # ROOT_DEBUGGER
verify_ROOT_DEBUGGER(reader)
elif tag == 0x8C: # ROOT_REFERENCE_CLEANUP
verify_ROOT_REFERENCE_CLEANUP(reader)
elif tag == 0x8D: # ROOT_VM_INTERNAL
verify_ROOT_VM_INTERNAL(reader)
elif tag == 0x8E: # ROOT_JNI_MONITOR
verify_ROOT_JNI_MONITOR(reader)
elif tag == 0x90: # ROOT_UNREACHABLE
raise Exception('verify_HEAP_DUMP_SEGMENT >>> Not supported tag: %d' % tag)
elif tag == 0xC3: # ROOT_PRIMITIVE_ARRAY_NODATA
raise Exception('verify_HEAP_DUMP_SEGMENT >>> Not supported tag: %d' % tag)
        elif tag == 0xFE: # HEAP_DUMP_INFO
verify_HEAP_DUMP_INFO(reader)
elif tag == 0xFF: # ROOT_UNKNOWN
verify_ROOT_UNKNOWN(reader)
else:
            raise Exception('Not supported tag: %d, length: %d' % (tag, reader.tell()))
available -= reader.tell()
def verify_HEAP_DUMP_END(reader):
reader.seek(8, 1)
global counter
print(counter)
print('COMPLETE: %d -> %d ' % (reader.tell(), os.path.getsize(reader.name)))
def verify_ROOT_JNI_GLOBAL(reader):
COUNTER('ROOT_JNI_GLOBAL')
LOGGER('ROOT-JNI-GLOBAL', 'tag: 0x%s ' % reader.read(1).hex())
LOGGER('ROOT-JNI-GLOBAL', 'object ID: 0x%s ' % reader.read(4).hex())
LOGGER('ROOT-JNI-GLOBAL', 'JNI global ref ID: 0x%s ' % reader.read(4).hex())
def verify_ROOT_JNI_LOCAL(reader):
COUNTER('ROOT_JNI_LOCAL')
LOGGER('ROOT-JNI-LOCAL', 'tag: 0x%s ' % reader.read(1).hex())
LOGGER('ROOT-JNI-LOCAL', 'object ID: 0x%s ' % reader.read(4).hex())
LOGGER('ROOT-JNI-LOCAL', 'thread serial number: 0x%s ' % reader.read(4).hex())
LOGGER('ROOT-JNI-LOCAL', 'frame number in stack trace: 0x%s ' % reader.read(4).hex())
def verify_ROOT_JAVA_FRAME(reader):
COUNTER('ROOT_JAVA_FRAME')
LOGGER('ROOT-JAVA-FRAME', 'tag: 0x%s ' % reader.read(1).hex())
LOGGER('ROOT-JAVA-FRAME', 'object ID: 0x%s ' % reader.read(4).hex())
LOGGER('ROOT-JAVA-FRAME', 'thread serial number: 0x%s ' % reader.read(4).hex())
LOGGER('ROOT-JAVA-FRAME', 'frame number in stack trace: 0x%s ' % reader.read(4).hex())
def verify_ROOT_NATIVE_STACK(reader):
COUNTER('ROOT_NATIVE_STACK')
LOGGER('ROOT-NATIVE-STACK', 'tag: 0x%s ' % reader.read(1).hex())
LOGGER('ROOT-NATIVE-STACK', 'object ID: 0x%s ' % reader.read(4).hex())
LOGGER('ROOT-NATIVE-STACK', 'thread serial number: 0x%s ' % reader.read(4).hex())
def verify_ROOT_STICKY_CLASS(reader):
COUNTER('ROOT_STICKY_CLASS')
LOGGER('ROOT-STICKY-CLASS', 'tag: 0x%s ' % reader.read(1).hex())
LOGGER('ROOT-STICKY-CLASS', 'object ID: 0x%s ' % reader.read(4).hex())
def verify_ROOT_THREAD_BLOCK(reader):
COUNTER('ROOT_THREAD_BLOCK')
LOGGER('ROOT-THREAD-BLOCK', 'tag: 0x%s ' % reader.read(1).hex())
LOGGER('ROOT-THREAD-BLOCK', 'object ID: 0x%s ' % reader.read(4).hex())
LOGGER('ROOT-THREAD-BLOCK', 'thread serial number: 0x%s ' % reader.read(4).hex())
def verify_ROOT_MONITOR_USED(reader):
COUNTER('ROOT_MONITOR_USED')
LOGGER('ROOT-MONITOR-USED', 'tag: 0x%s ' % reader.read(1).hex())
LOGGER('ROOT-MONITOR-USED', 'object ID: 0x%s ' % reader.read(4).hex())
def verify_ROOT_THREAD_OBJECT(reader):
COUNTER('ROOT_THREAD_OBJECT')
LOGGER('ROOT-THREAD-OBJECT', 'tag: 0x%s ' % reader.read(1).hex())
LOGGER('ROOT-THREAD-OBJECT', 'thread object ID: 0x%s ' % reader.read(4).hex())
LOGGER('ROOT-THREAD-OBJECT', 'thread serial number: 0x%s ' % reader.read(4).hex())
LOGGER('ROOT-THREAD-OBJECT', 'stack trace serial number: 0x%s ' % reader.read(4).hex())
def verify_CLASS_DUMP(reader):
COUNTER('CLASS_DUMP')
LOGGER('CLASS-DUMP', 'tag: 0x%s ' % reader.read(1).hex())
LOGGER('CLASS-DUMP', 'class object ID: 0x%s ' % reader.read(4).hex())
LOGGER('CLASS-DUMP', 'stack trace serial number: 0x%s ' % reader.read(4).hex())
LOGGER('CLASS-DUMP', 'super class object ID: 0x%s ' % reader.read(4).hex())
LOGGER('CLASS-DUMP', 'class loader object ID: 0x%s ' % reader.read(4).hex())
LOGGER('CLASS-DUMP', 'signers object ID: 0x%s ' % reader.read(4).hex())
LOGGER('CLASS-DUMP', 'protection domain object ID: 0x%s ' % reader.read(4).hex())
LOGGER('CLASS-DUMP', 'reserved: 0x%s ' % reader.read(4).hex())
LOGGER('CLASS-DUMP', 'reserved: 0x%s ' % reader.read(4).hex())
instance_size = int.from_bytes(reader.read(4), byteorder='big', signed=False)
LOGGER('CLASS-DUMP', 'instance size (in bytes): %d ' % instance_size)
constant_fields_count = int(reader.read(2).hex(), 16)
LOGGER('CLASS-DUMP', 'constant fields: %d, %s ' % (constant_fields_count, verify_CLASS_CONSTANT_FIELDS(reader, constant_fields_count)))
static_fields_count = int(reader.read(2).hex(), 16)
LOGGER('CLASS-DUMP', 'static fields: %d, %s ' % (static_fields_count, verify_CLASS_STATIC_FIELDS(reader, static_fields_count)))
instance_fields_count = int(reader.read(2).hex(), 16)
LOGGER('CLASS-DUMP', 'instance fields: %d, %s ' % (instance_fields_count, verify_CLASS_INSTANCE_FIELDS(reader, instance_fields_count)))
def verify_INSTANCE_DUMP(reader):
COUNTER('INSTANCE_DUMP')
LOGGER('INSTANCE-DUMP', 'tag: 0x%s ' % reader.read(1).hex())
LOGGER('INSTANCE-DUMP', 'object ID: 0x%s ' % reader.read(4).hex())
LOGGER('INSTANCE-DUMP', 'stack trace serial number: 0x%s ' % reader.read(4).hex())
LOGGER('INSTANCE-DUMP', 'class object ID: 0x%s ' % reader.read(4).hex())
bytes_followed = int.from_bytes(reader.read(4), byteorder='big', signed=False)
LOGGER('INSTANCE-DUMP', 'number of bytes that followed: %d ' % bytes_followed)
reader.seek(bytes_followed, 1)
def verify_OBJECT_ARRAY_DUMP(reader):
COUNTER('OBJECT_ARRAY_DUMP')
LOGGER('OBJECT-ARRAY-DUMP', 'tag: 0x%s ' % reader.read(1).hex())
LOGGER('OBJECT-ARRAY-DUMP', 'array object ID: 0x%s ' % reader.read(4).hex())
LOGGER('OBJECT-ARRAY-DUMP', 'stack trace serial number: 0x%s ' % reader.read(4).hex())
length = int.from_bytes(reader.read(4), byteorder='big', signed=False)
LOGGER('OBJECT-ARRAY-DUMP', 'number of elements: %d ' % length)
LOGGER('OBJECT-ARRAY-DUMP', 'array class object ID: 0x%s ' % reader.read(4).hex())
LOGGER('OBJECT-ARRAY-DUMP', 'elements: %s ' % verify_OBJECT_ARRAY_ELEMENTS(reader, length))
def verify_PRIMITIVE_ARRAY_DUMP(reader):
COUNTER('PRIMITIVE_ARRAY_DUMP')
LOGGER('PRIMITIVE-ARRAY-DUMP', 'tag: 0x%s ' % reader.read(1).hex())
LOGGER('PRIMITIVE-ARRAY-DUMP', 'array object ID: 0x%s ' % reader.read(4).hex())
LOGGER('PRIMITIVE-ARRAY-DUMP', 'stack trace serial number: 0x%s ' % reader.read(4).hex())
length = int.from_bytes(reader.read(4), byteorder='big', signed=False)
LOGGER('PRIMITIVE-ARRAY-DUMP', 'number of elements: %d ' % length)
type = int.from_bytes(reader.read(1), byteorder='big', signed=False)
LOGGER('PRIMITIVE-ARRAY-DUMP', 'element type: %d ' % type)
LOGGER('PRIMITIVE-ARRAY-DUMP', 'elements: %s ' % verify_PRIMITIVE_ARRAY_ELEMENTS(reader, type, length))
def verify_ROOT_INTERNED_STRING(reader):
COUNTER('ROOT_INTERNED_STRING')
LOGGER('ROOT-INTERNED-STRING', 'tag: 0x%s ' % reader.read(1).hex())
LOGGER('ROOT-INTERNED-STRING', 'object ID: 0x%s ' % reader.read(4).hex())
def verify_ROOT_FINALIZING(reader):
COUNTER('ROOT_FINALIZING')
LOGGER('ROOT_FINALIZING', 'tag: 0x%s ' % reader.read(1).hex())
LOGGER('ROOT_FINALIZING', 'object ID: 0x%s ' % reader.read(4).hex())
def verify_ROOT_DEBUGGER(reader):
COUNTER('ROOT_DEBUGGER')
LOGGER('ROOT-DEBUGGER', 'tag: 0x%s ' % reader.read(1).hex())
LOGGER('ROOT-DEBUGGER', 'object ID: 0x%s ' % reader.read(4).hex())
def verify_ROOT_REFERENCE_CLEANUP(reader):
COUNTER('ROOT_REFERENCE_CLEANUP')
LOGGER('ROOT_REFERENCE_CLEANUP', 'tag: 0x%s ' % reader.read(1).hex())
LOGGER('ROOT_REFERENCE_CLEANUP', 'object ID: 0x%s ' % reader.read(4).hex())
def verify_ROOT_VM_INTERNAL(reader):
COUNTER('ROOT_VM_INTERNAL')
LOGGER('ROOT-VM-INTERNAL', 'tag: 0x%s ' % reader.read(1).hex())
LOGGER('ROOT-VM-INTERNAL', 'object ID: 0x%s ' % reader.read(4).hex())
def verify_ROOT_JNI_MONITOR(reader):
COUNTER('ROOT_JNI_MONITOR')
LOGGER('ROOT-JNI-MONITOR', 'tag: 0x%s ' % reader.read(1).hex())
LOGGER('ROOT-JNI-MONITOR', 'string id: 0x%s ' % reader.read(4).hex())
LOGGER('ROOT-JNI-MONITOR', 'thread serial number: 0x%s ' % reader.read(4).hex())
LOGGER('ROOT-JNI-MONITOR', 'stack trace serial number: 0x%s ' % reader.read(4).hex())
def verify_HEAP_DUMP_INFO(reader):
COUNTER('HEAP_DUMP_INFO')
LOGGER('HEAP-DUMP-INFO', 'tag: 0x%s ' % reader.read(1).hex())
LOGGER('HEAP-DUMP-INFO', 'heap ID: 0x%s ' % reader.read(4).hex())
LOGGER('HEAP-DUMP-INFO', 'heap name ID: 0x%s ' % reader.read(4).hex())
def verify_ROOT_UNKNOWN(reader):
COUNTER('ROOT_UNKNOWN')
LOGGER('ROOT-UNKNOWN', 'tag: 0x%s ' % reader.read(1).hex())
LOGGER('ROOT-UNKNOWN', 'object ID: 0x%s ' % reader.read(4).hex())
def verify_OBJECT_ARRAY_ELEMENTS(reader, length):
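    # Element object IDs (4 bytes each in this dump) are skipped, not decoded.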
reader.seek(4 * length, 1)
return []
def verify_PRIMITIVE_ARRAY_ELEMENTS(reader, type, length):
if type >= 12 or type == 3 or type <= 1:
raise Exception('verify_PRIMITIVE_ARRAY_ELEMENTS >>> Not supported type: %d ' % type)
elif type == 2: # object
reader.seek(4 * length, 1)
elif type == 4: # boolean
reader.seek(1 * length, 1)
elif type == 5: # char
reader.seek(2 * length, 1)
elif type == 6: # float
reader.seek(4 * length, 1)
elif type == 7: # double
reader.seek(8 * length, 1)
elif type == 8: # byte
reader.seek(1 * length, 1)
elif type == 9: # short
reader.seek(2 * length, 1)
elif type == 10: # int
reader.seek(4 * length, 1)
elif type == 11: # long
reader.seek(8 * length, 1)
else:
raise Exception('verify_PRIMITIVE_ARRAY_ELEMENTS >>> Not supported type: %d ' % type)
return []
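# HPROF basic-type codes used by the skip logic above: 2 = object, 4 = boolean,
# 5 = char, 6 = float, 7 = double, 8 = byte, 9 = short, 10 = int, 11 = long.
# This dump uses 4-byte identifiers, so object references are 4 bytes wide.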
def verify_CLASS_CONSTANT_FIELDS(reader, count):
while count > 0:
count -= 1
reader.seek(2, 1)
type = int.from_bytes(reader.read(1), byteorder='big', signed=False)
if type >= 12 or type == 3 or type <= 1:
            raise Exception('verify_CLASS_CONSTANT_FIELDS() not supported type %d' % type)
elif type == 2: # object
reader.seek(4, 1)
elif type == 4: # boolean
reader.seek(1, 1)
elif type == 5: # char
reader.seek(2, 1)
elif type == 6: # float
reader.seek(4, 1)
elif type == 7: # double
reader.seek(8, 1)
elif type == 8: # byte
reader.seek(1, 1)
elif type == 9: # short
reader.seek(2, 1)
elif type == 10: # int
reader.seek(4, 1)
elif type == 11: # long
reader.seek(8, 1)
else:
            raise Exception('verify_CLASS_CONSTANT_FIELDS() not supported type %d' % type)
return []
def verify_CLASS_STATIC_FIELDS(reader, count):
    # Completion of a truncated excerpt: mirrors the value-skipping logic of
    # verify_CLASS_CONSTANT_FIELDS above, except each static field starts with
    # a 4-byte name string ID instead of a 2-byte constant pool index.
    sizes = {2: 4, 4: 1, 5: 2, 6: 4, 7: 8, 8: 1, 9: 2, 10: 4, 11: 8}
    while count > 0:
        count -= 1
        reader.seek(4, 1)  # static field name string ID
        type = int.from_bytes(reader.read(1), byteorder='big', signed=False)
        if type not in sizes:
            raise Exception('verify_CLASS_STATIC_FIELDS() not supported type %d' % type)
        reader.seek(sizes[type], 1)
    return []
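# The LOGGER and COUNTER helpers used throughout are truncated out of this
# excerpt; a minimal sketch consistent with their call sites (both bodies are
# assumptions, not the original implementation):
#
# def COUNTER(tag):
#     counter[tag] = counter.get(tag, 0) + 1
#
# def LOGGER(tag, message):
#     print('[%s] %s' % (tag, message))
#
# if __name__ == '__main__':
#     parser = argparse.ArgumentParser(description='Verify an HPROF heap dump')
#     parser.add_argument('hprof', help='path to the .hprof file')
#     args = parser.parse_args()
#     with open(args.hprof, 'rb') as reader:
#         verify(reader)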
'reverse_domain': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reverse_ptr_set'", 'blank': 'True', 'to': "orm['cyder.Domain']"}),
'ttl': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3600', 'null': 'True', 'blank': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cyder.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'cyder.range': {
'Meta': {'unique_together': "(('start_upper', 'start_lower', 'end_upper', 'end_lower'),)", 'object_name': 'Range', 'db_table': "'range'"},
'allow': ('django.db.models.fields.CharField', [], {'default': "'l'", 'max_length': '1'}),
'allow_voip_phones': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'dhcp_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'dhcpd_raw_include': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Domain']", 'null': 'True', 'blank': 'True'}),
'end_lower': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'end_str': ('django.db.models.fields.CharField', [], {'max_length': '39'}),
'end_upper': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_type': ('django.db.models.fields.CharField', [], {'default': "'4'", 'max_length': '1'}),
'is_reserved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'network': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Network']"}),
'range_type': ('django.db.models.fields.CharField', [], {'default': "'st'", 'max_length': '2'}),
'range_usage': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'start_lower': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'start_str': ('django.db.models.fields.CharField', [], {'max_length': '39'}),
'start_upper': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cyder.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'cyder.rangeav': {
'Meta': {'unique_together': "(('entity', 'attribute'),)", 'object_name': 'RangeAV', 'db_table': "'range_av'"},
'attribute': ('cyder.base.eav.fields.EAVAttributeField', [], {'to': "orm['cyder.Attribute']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Range']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'value': ('cyder.base.eav.fields.EAVValueField', [], {'attribute_field': "''", 'max_length': '255'})
},
'cyder.site': {
'Meta': {'unique_together': "(('name', 'parent'),)", 'object_name': 'Site', 'db_table': "'site'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Site']", 'null': 'True', 'blank': 'True'})
},
'cyder.siteav': {
'Meta': {'unique_together': "(('entity', 'attribute'),)", 'object_name': 'SiteAV', 'db_table': "'site_av'"},
'attribute': ('cyder.base.eav.fields.EAVAttributeField', [], {'to': "orm['cyder.Attribute']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Site']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'value': ('cyder.base.eav.fields.EAVValueField', [], {'attribute_field': "''", 'max_length': '255'})
},
'cyder.soa': {
'Meta': {'object_name': 'SOA', 'db_table': "'soa'"},
'contact': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'dns_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'expire': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1209600'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_signed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'minimum': ('django.db.models.fields.PositiveIntegerField', [], {'default': '180'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'primary': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'refresh': ('django.db.models.fields.PositiveIntegerField', [], {'default': '180'}),
'retry': ('django.db.models.fields.PositiveIntegerField', [], {'default': '86400'}),
'root_domain': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'root_of_soa'", 'unique': 'True', 'to': "orm['cyder.Domain']"}),
'serial': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1420739755'}),
'ttl': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3600', 'null': 'True', 'blank': 'True'})
},
'cyder.soaav': {
'Meta': {'unique_together': "(('entity', 'attribute'),)", 'object_name': 'SOAAV', 'db_table': "'soa_av'"},
'attribute': ('cyder.base.eav.fields.EAVAttributeField', [], {'to': "orm['cyder.Attribute']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.SOA']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'value': ('cyder.base.eav.fields.EAVValueField', [], {'attribute_field': "''", 'max_length': '255'})
},
'cyder.srv': {
'Meta': {'unique_together': "(('label', 'domain', 'target', 'port'),)", 'object_name': 'SRV', 'db_table': "'srv'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'ctnr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Ctnr']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Domain']"}),
'fqdn': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '63', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'port': ('django.db.models.fields.PositiveIntegerField', [], {}),
'priority': ('django.db.models.fields.PositiveIntegerField', [], {}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'ttl': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3600', 'null': 'True', 'blank': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cyder.View']", 'symmetrical': 'False', 'blank': 'True'}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'cyder.sshfp': {
'Meta': {'unique_together': "(('domain', 'label'),)", 'object_name': 'SSHFP', 'db_table': "'sshfp'"},
'algorithm_number': ('django.db.models.fields.PositiveIntegerField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'ctnr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Ctnr']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Domain']"}),
'fingerprint_type': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'fqdn': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '63', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'ttl': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3600', 'null': 'True', 'blank': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cyder.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'cyder.staticinterface': {
'Meta': {'unique_together': "(('ip_upper', 'ip_lower'), ('label', 'domain'))", 'object_name': 'StaticInterface', 'db_table': "'static_interface'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'ctnr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Ctnr']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'dhcp_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'dns_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Domain']"}),
'expire': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'fqdn': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_lower': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'ip_str': ('django.db.models.fields.CharField', [], {'max_length': '39'}),
'ip_type': ('django.db.models.fields.CharField', [], {'default': "'4'", 'max_length': '1'}),
'ip_upper': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '63', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'mac': ('cyder.base.fields.MacAddrField', [], {'max_length': '17', 'null': 'True', 'dhcp_enabled': "'dhcp_enabled'"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'reverse_domain': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'reverse_staticintr_set'", 'null': 'True', 'to': "orm['cyder.Domain']"}),
'system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.System']"}),
'ttl': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3600', 'null': 'True', 'blank': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cyder.View']", 'symmetrical': 'False', 'blank': 'True'}),
'workgroup': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Workgroup']", 'null': 'True', 'blank': 'True'})
},
'cyder.staticinterfaceav': {
'Meta': {'unique_together': "(('entity', 'attribute'),)", 'object_name': 'StaticInterfaceAV', 'db_table': "'static_interface_av'"},
'attribute': ('cyder.base.eav.fields.EAVAttributeField', [], {'to': "orm['cyder.Attribute']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.StaticInterface']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'value': ('cyder.base.eav.fields.EAVValueField', [], {'attribute_field': "''", 'max_length': '255'})
},
'cyder.system': {
'Meta': {'object_name': 'System', 'db_table': "'system'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cyder.systemav': {
'Meta': {'unique_together': "(('entity', 'attribute'),)", 'object_name': 'SystemAV', 'db_table': "'system_av'"},
'attribute': ('cyder.base.eav.fields.EAVAttributeField', [], {'to': "orm['cyder.Attribute']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.System']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'value': ('cyder.base.eav.fields.EAVValueField', [], {'attribute_field': "''", 'max_length': '255'})
},
'cyder.task': {
'Meta': {'ordering': "['task']", 'object_name': 'Task', 'db_table': "u'task'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ttype': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cyder.token': {
'Meta': {'object_name': 'Token'},
'can_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'purpose': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'cyder.txt': {
'Meta': {'object_name': 'TXT', 'db_table': "'txt'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'ctnr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Ctnr']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyder.Domain']"}),
'fqdn': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '63', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'ttl': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3600', 'null': 'True', 'blank': 'True'}),
'txt_data': ('django.db.models.fields.TextField', [], {}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cyder.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'cyder.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_user_profile'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'default_ctnr': ('django.db.models.fields.related.ForeignKey', [], {'default': '2', 'to': "orm['cyder.Ctnr']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'phone_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'cyder.view': {
'Meta': {'unique_together': "(('name',),)", 'object_name': 'View', 'db_table': "'view'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cyder.vlan': {
'Meta': {'unique_together': "(('name', 'number'),)", 'object_name': 'Vlan', 'db_table': "'vlan'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'number': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'cyder.vlanav': {
'Meta': {'unique_together': "(('entity', 'attribute'),)", 'object_name': 'VlanAV', 'db_table': "'vlan_av'"},
'attribute': ('cyder.base.eav.fields.EAVAttributeField', [], {'to': "orm['cyder.Attribute']"}),
"silent",
"qdbus",
"sse2",
"sse3",
"sse4.1",
"sse4.2",
"ssse3",
"static",
"static-runtime",
"strip",
"syncqt",
"sysroot",
"testcocoon",
"use-gold-linker",
"warnings-are-errors",
"Werror",
"widgets",
"xplatform",
"zlib",
"eventfd",
"glib",
"icu",
"inotify",
"journald",
"pcre",
"posix-ipc",
"pps",
"slog2",
"syslog",
}
if sinput in skip_inputs:
print(f" **** Skipping input {sinput}: masked.")
return
dtype = data
if isinstance(data, dict):
dtype = data["type"]
if dtype == "boolean":
print(f" **** Skipping boolean input {sinput}: masked.")
return
if dtype == "enum":
values_line = " ".join(data["values"])
cm_fh.write(f"# input {sinput}\n")
cm_fh.write(f'set(INPUT_{featureName(sinput)} "undefined" CACHE STRING "")\n')
cm_fh.write(
f"set_property(CACHE INPUT_{featureName(sinput)} PROPERTY STRINGS undefined {values_line})\n\n"
)
return
print(f" XXXX UNHANDLED INPUT TYPE {dtype} in input description")
return
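# For reference: a hypothetical enum input named "opengl" with values
# ["no", "yes", "dynamic"] would make the enum branch above emit CMake along
# these lines (assuming featureName() returns the name unchanged):
#     # input opengl
#     set(INPUT_opengl "undefined" CACHE STRING "")
#     set_property(CACHE INPUT_opengl PROPERTY STRINGS undefined no yes dynamic)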
def get_library_usage_for_compile_test(library):
result = {}
mapped_library = find_3rd_party_library_mapping(library)
if not mapped_library:
result["fixme"] = f"# FIXME: use: unmapped library: {library}\n"
return result
if mapped_library.test_library_overwrite:
target_name = mapped_library.test_library_overwrite
else:
target_name = mapped_library.targetName
result["target_name"] = target_name
result["package_name"] = mapped_library.packageName
result["extra"] = mapped_library.extra
return result
# Handles config.test/foo/foo.pro projects.
def write_standalone_compile_test(cm_fh, ctx, data, config_test_name, is_library_test):
rel_test_project_path = f"{ctx['test_dir']}/{config_test_name}"
if posixpath.exists(f"{ctx['project_dir']}/{rel_test_project_path}/CMakeLists.txt"):
label = ""
libraries = []
packages = []
if "label" in data:
label = data["label"]
if is_library_test and config_test_name in data["libraries"]:
if "label" in data["libraries"][config_test_name]:
label = data["libraries"][config_test_name]["label"]
# If a library entry in configure.json has a test, and
# the test uses a config.tests standalone project, we
# need to get the package and target info for the
# library, and pass it to the test so compiling and
# linking succeeds.
library_usage = get_library_usage_for_compile_test(config_test_name)
if "target_name" in library_usage:
libraries.append(library_usage["target_name"])
if "package_name" in library_usage:
find_package_arguments = []
find_package_arguments.append(library_usage["package_name"])
if "extra" in library_usage:
find_package_arguments.extend(library_usage["extra"])
package_line = "PACKAGE " + " ".join(find_package_arguments)
packages.append(package_line)
cm_fh.write(
f"""
qt_config_compile_test("{config_test_name}"
LABEL "{label}"
PROJECT_PATH "${{CMAKE_CURRENT_SOURCE_DIR}}/{rel_test_project_path}"
"""
)
if libraries:
libraries_string = " ".join(libraries)
cm_fh.write(f" LIBRARIES {libraries_string}\n")
if packages:
packages_string = " ".join(packages)
cm_fh.write(f" PACKAGES {packages_string}")
cm_fh.write(")\n")
def write_compile_test(
ctx, name, details, data, cm_fh, manual_library_list=None, is_library_test=False
):
if manual_library_list is None:
manual_library_list = []
inherited_test_name = details["inherit"] if "inherit" in details else None
inherit_details = None
if inherited_test_name and is_library_test:
inherit_details = data["libraries"][inherited_test_name]["test"]
if not inherit_details:
print(f" XXXX Failed to locate inherited library test {inherited_test_name}")
if isinstance(details, str):
write_standalone_compile_test(cm_fh, ctx, data, details, is_library_test)
return
def resolve_head(detail):
head = detail.get("head", "")
if isinstance(head, list):
head = "\n".join(head)
return head
head = ""
if inherit_details:
head += resolve_head(inherit_details)
head += resolve_head(details)
sourceCode = head + "\n"
def resolve_include(detail, keyword):
include = detail.get(keyword, "")
if isinstance(include, list):
include = "#include <" + ">\n#include <".join(include) + ">"
elif include:
include = f"#include <{include}>"
return include
include = ""
if is_library_test:
if inherit_details:
inherited_lib_data = data["libraries"][inherited_test_name]
include += resolve_include(inherited_lib_data, "headers")
this_lib_data = data["libraries"][name]
include += resolve_include(this_lib_data, "headers")
else:
if inherit_details:
include += resolve_include(inherit_details, "include")
include += resolve_include(details, "include")
sourceCode += include + "\n"
def resolve_tail(detail):
tail = detail.get("tail", "")
if isinstance(tail, list):
tail = "\n".join(tail)
return tail
tail = ""
if inherit_details:
tail += resolve_tail(inherit_details)
tail += resolve_tail(details)
sourceCode += tail + "\n"
sourceCode += "int main(int argc, char **argv)\n"
sourceCode += "{\n"
sourceCode += " (void)argc; (void)argv;\n"
sourceCode += " /* BEGIN TEST: */\n"
def resolve_main(detail):
main = detail.get("main", "")
if isinstance(main, list):
main = "\n".join(main)
return main
main = ""
if inherit_details:
main += resolve_main(inherit_details)
main += resolve_main(details)
sourceCode += main + "\n"
sourceCode += " /* END TEST: */\n"
sourceCode += " return 0;\n"
sourceCode += "}\n"
sourceCode = sourceCode.replace('"', '\\"')
librariesCmakeName = ""
languageStandard = ""
compileOptions = ""
qmakeFixme = ""
cm_fh.write(f"# {name}\n")
if "qmake" in details: # We don't really have many so we can just enumerate them all
if details["qmake"] == "unix:LIBS += -lpthread":
librariesCmakeName = featureName(name) + "_TEST_LIBRARIES"
cm_fh.write("if (UNIX)\n")
cm_fh.write(" set(" + librariesCmakeName + " pthread)\n")
cm_fh.write("endif()\n")
elif details["qmake"] == "linux: LIBS += -lpthread -lrt":
librariesCmakeName = featureName(name) + "_TEST_LIBRARIES"
cm_fh.write("if (LINUX)\n")
cm_fh.write(" set(" + librariesCmakeName + " pthread rt)\n")
cm_fh.write("endif()\n")
elif details["qmake"] == "!winrt: LIBS += runtimeobject.lib":
librariesCmakeName = featureName(name) + "_TEST_LIBRARIES"
cm_fh.write("if (NOT WINRT)\n")
cm_fh.write(" set(" + librariesCmakeName + " runtimeobject)\n")
cm_fh.write("endif()\n")
elif details["qmake"] == "CONFIG += c++11":
# do nothing; we're always in C++11 mode
pass
elif details["qmake"] == "CONFIG += c++11 c++14":
languageStandard = "CXX_STANDARD 14"
elif details["qmake"] == "CONFIG += c++11 c++14 c++17":
languageStandard = "CXX_STANDARD 17"
elif details["qmake"] == "CONFIG += c++11 c++14 c++17 c++2a":
languageStandard = "CXX_STANDARD 20"
elif details["qmake"] == "QMAKE_CXXFLAGS += -fstack-protector-strong":
compileOptions = details["qmake"][18:]
else:
qmakeFixme = f"# FIXME: qmake: {details['qmake']}\n"
library_list = []
test_libraries = manual_library_list
if "use" in data:
test_libraries += data["use"].split(" ")
for library in test_libraries:
if len(library) == 0:
continue
adjusted_library = get_compile_test_dependent_library_mapping(name, library)
library_usage = get_library_usage_for_compile_test(adjusted_library)
if "fixme" in library_usage:
qmakeFixme += library_usage["fixme"]
continue
else:
library_list.append(library_usage["target_name"])
cm_fh.write(f"qt_config_compile_test({featureName(name)}\n")
cm_fh.write(lineify("LABEL", data.get("label", "")))
if librariesCmakeName != "" or len(library_list) != 0:
cm_fh.write(" LIBRARIES\n")
if librariesCmakeName != "":
cm_fh.write(lineify("", "${" + librariesCmakeName + "}"))
if len(library_list) != 0:
cm_fh.write(" ")
cm_fh.write("\n ".join(library_list))
cm_fh.write("\n")
if compileOptions != "":
cm_fh.write(f" COMPILE_OPTIONS {compileOptions}\n")
cm_fh.write(" CODE\n")
cm_fh.write('"' + sourceCode + '"')
if qmakeFixme != "":
cm_fh.write(qmakeFixme)
if languageStandard != "":
cm_fh.write(f"\n {languageStandard}\n")
cm_fh.write(")\n\n")
# "tests": {
# "cxx11_future": {
# "label": "C++11 <future>",
# "type": "compile",
# "test": {
# "include": "future",
# "main": [
# "std::future<int> f = std::async([]() { return 42; });",
# "(void)f.get();"
# ],
# "qmake": "unix:LIBS += -lpthread"
# }
# },
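# For the "cxx11_future" entry above, write_compile_test would emit CMake
# roughly like this (a sketch, assuming featureName() returns the name
# unchanged; CODE carries the escaped test source assembled above):
#     # cxx11_future
#     if (UNIX)
#         set(cxx11_future_TEST_LIBRARIES pthread)
#     endif()
#     qt_config_compile_test(cxx11_future
#         LABEL "C++11 <future>"
#         LIBRARIES
#             ${cxx11_future_TEST_LIBRARIES}
#         CODE
#     "...")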
def write_compiler_supports_flag_test(
ctx, name, details, data, cm_fh, manual_library_list=None, is_library_test=False
):
cm_fh.write(f"qt_config_compiler_supports_flag_test({featureName(name)}\n")
cm_fh.write(lineify("LABEL", data.get("label", "")))
cm_fh.write(lineify("FLAG", data.get("flag", "")))
cm_fh.write(")\n\n")
def write_linker_supports_flag_test(
ctx, name, details, data, cm_fh, manual_library_list=None, is_library_test=False
):
cm_fh.write(f"qt_config_linker_supports_flag_test({featureName(name)}\n")
cm_fh.write(lineify("LABEL", data.get("label", "")))
cm_fh.write(lineify("FLAG", data.get("flag", "")))
cm_fh.write(")\n\n")
def parseTest(ctx, test, data, cm_fh):
skip_tests = {
"c11",
"c99",
"gc_binaries",
"precompile_header",
"reduce_exports",
"libinput_axis_api",
"wayland-scanner",
"xlib",
}
if test in skip_tests:
print(f" **** Skipping features {test}: masked.")
return
if data["type"] == "compile":
knownTests.add(test)
if "test" in data:
details = data["test"]
else:
details = test
write_compile_test(ctx, test, details, data, cm_fh)
if data["type"] == "compilerSupportsFlag":
knownTests.add(test)
if "test" in data:
details = data["test"]
else:
details = test
write_compiler_supports_flag_test(ctx, test, details, data, cm_fh)
if data["type"] == "linkerSupportsFlag":
knownTests.add(test)
if "test" in data:
details = data["test"]
else:
details = test
write_linker_supports_flag_test(ctx, test, details, data, cm_fh)
elif data["type"] == "libclang":
knownTests.add(test)
cm_fh.write(f"# {test}\n")
lib_clang_lib = find_3rd_party_library_mapping("libclang")
cm_fh.write(generate_find_package_info(lib_clang_lib))
cm_fh.write(
dedent(
"""
if(TARGET WrapLibClang::WrapLibClang)
set(TEST_libclang "ON" CACHE BOOL "Required libclang version found." FORCE)
endif()
"""
)
)
cm_fh.write("\n")
elif data["type"] == "x86Simd":
knownTests.add(test)
label = data["label"]
cm_fh.write(f"# {test}\n")
cm_fh.write(f'qt_config_compile_test_x86simd({test} "{label}")\n')
cm_fh.write("\n")
elif data["type"] == "machineTuple":
knownTests.add(test)
label = data["label"]
cm_fh.write(f"# {test}\n")
cm_fh.write(f'qt_config_compile_test_machine_tuple("{label}")\n')
cm_fh.write("\n")
# "features": {
# "android-style-assets": {
# "label": "Android Style Assets",
# "condition": "config.android",
# "output": [ "privateFeature" ],
# "comment": "This belongs into gui, but the license check needs it here already."
# },
else:
print(f" XXXX UNHANDLED TEST TYPE {data['type']} in test description")
def get_feature_mapping():
# This is *before* the feature name gets normalized! So keep - and + chars, etc.
feature_mapping = {
"alloc_h": None, # handled by alloc target
"alloc_malloc_h": None,
"alloc_stdlib_h": None,
"build_all": None,
"ccache": {"autoDetect": "1", "condition": "QT_USE_CCACHE"},
"compiler-flags": None,
"cross_compile": {"condition": "CMAKE_CROSSCOMPILING"},
"debug_and_release": {
"autoDetect": "1", # Setting this to None has weird effects...
"condition": "QT_GENERATOR_IS_MULTI_CONFIG",
},
"debug": {
"autoDetect": "ON",
"condition": "CMAKE_BUILD_TYPE STREQUAL Debug OR Debug IN_LIST CMAKE_CONFIGURATION_TYPES",
},
"dlopen": {"condition": "UNIX"},
"force_debug_info": {
"autoDetect": "CMAKE_BUILD_TYPE STREQUAL RelWithDebInfo OR RelWithDebInfo IN_LIST CMAKE_CONFIGURATION_TYPES"
},
"framework": {
"condition": "APPLE AND BUILD_SHARED_LIBS AND NOT CMAKE_BUILD_TYPE STREQUAL Debug"
},
"gc_binaries": {"condition": "NOT QT_FEATURE_shared"},
"gcc-sysroot": None,
"gcov": None,
"GNUmake": None,
"host-dbus": None,
"iconv": {
"condition": "NOT QT_FEATURE_icu AND QT_FEATURE_textcodec AND NOT WIN32 AND NOT QNX AND NOT ANDROID AND NOT APPLE AND WrapIconv_FOUND",
},
"incredibuild_xge": None,
"ltcg": {
"autoDetect": "ON",
"cmakePrelude": """set(__qt_ltcg_detected FALSE)
if(CMAKE_INTERPROCEDURAL_OPTIMIZATION)
set(__qt_ltcg_detected TRUE)
else()
foreach(config ${CMAKE_BUILD_TYPE} ${CMAKE_CONFIGURATION_TYPES})
string(TOUPPER "${config}" __qt_uc_config)
if(CMAKE_INTERPROCEDURAL_OPTIMIZATION_${__qt_uc_config})
set(__qt_ltcg_detected TRUE)
break()
endif()
endforeach()
unset(__qt_uc_config)
endif()""",
"condition": "__qt_ltcg_detected",
},
"msvc_mp": None,
"simulator_and_device": {"condition": "UIKIT AND NOT QT_UIKIT_SDK"},
"pkg-config": {"condition": "PKG_CONFIG_FOUND"},
"precompile_header": {"condition": "BUILD_WITH_PCH"},
"profile": None,
"qmakeargs": None,
"qpa_default_platform": None, # Not a bool!
"qreal": {
"condition": 'DEFINED QT_COORD_TYPE AND NOT QT_COORD_TYPE STREQUAL "double"',
"output": [
{"type": "define", "name": "QT_COORD_TYPE", "value": "${QT_COORD_TYPE}",},
{
"type": "define",
"name": "QT_COORD_TYPE_STRING",
"value": '\\"${QT_COORD_TYPE}\\"',
},
],
},
"reduce_exports": {"condition": "NOT MSVC",},
"release": None,
"release_tools": None,
"rpath": {
"autoDetect": "1",
"condition": "BUILD_SHARED_LIBS AND UNIX AND NOT WIN32 AND NOT ANDROID",
| |
'''Module containing core functions to perform the P-GPFA fit.
.. module:: engine
:synopsis: Core functions for fitting the Poisson-GPFA model via EM.
.. moduleauthor:: <NAME> <<EMAIL>>
'''
import inference
import learning
import util
import numpy as np
import scipy.io as sio
import scipy.optimize as op
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
import copy
import time
import sys
import pdb
class PPGPFAfit():
'''
Poisson-GPFA model fit given a neural population spike data.
Input Attributes:
=================
* experiment : (util.dataset object), required
- A dataset object with the following attributes:
experiment.data - A list of dictionaries in the following format:
experiment.data[trial]['Y'] - numpy array of shape (# neurons, # time bins)
experiment.T - number of time bins, all trials must have the same length
experiment.trialDur - duration of each trial in ms
experiment.binSize - size of bin in ms
* initParams : (dict), required: initial parameter.
- Has the following fields:
initParams['C'] - a numpy array of shape (#neurons, #latent dimension to fit)
initParams['d'] - a numpy array of shape (#neurons)
initParams['tau'] - a numpy array of shape (#latent dimension), in seconds
* inferenceMethod : (str), optional
- Specifies the posterior Gaussian approximation method used in inference. Defaults to 'laplace'.
inferenceMethod = 'laplace' - uses laplace approximation (mean ~= mode)
inferenceMethod = 'variational' - uses variational inference
* maxEMiter : (int), optional
- maximum number of EM iterations; defaults to 50.
* EMmode : (str), optional
- If EMmode = 'Batch', performs batch EM, where inference is performed on all available trials.
- If EMmode = 'Online', performs online EM, where inference is performed only on a smaller number of
subsampled trials. The user can specify further details of online EM via the init attributes
onlineParamUpdateMethod and priorCovOpts.
* onlineParamUpdateMethod : (str)
- If 'balancingGamma', parameters are updated according to
params_{n+1} = (gamma[n])*params_{n} + (1-gamma[n])*argmax_{params}(M_step_cost_function(params)).
- If 'sequentialAverage', parameters are updated according to
params_{n+1} = (params_{n} + argmax_{params}(M_step_cost_function(params)))/2.
- If 'fullyUpdateAll', parameters are updated according to
params_{n+1} = argmax_{params}(M_step_cost_function(params)).
- If 'gradientDescent', parameters are updated according to
params_{n+1} = params_{n} + stepSize*inv(Hessian_{params_{n}})*Gradient_{params_{n}}.
- If 'fullyUpdateWithPrior', parameters are updated according to
params_{n+1} = argmax_{params}(M_step_cost_function_with_prior(params, prior)).
prior is specified by the attribute priorCovOpts.
-- gamma is a linearly spaced increasing sequence of length maxEMiter ranging from 0 to 1.
* forceMaxIter : (bool), optional
- If True, EM iterations continue even after convergence criteria are met. Defaults to False.
Effective only if self.EMmode = 'Batch'.
* verbose : (bool), optional
- If True, the fitting process is printed in the console.
Resulting Attributes:
=====================
* optimParams - (dict), optimal parameter found
* paramSeq - (list), a list containing the parameters found in each EM iteration
* infRes - (dict), contains the information about inferred latent trajectories.
infRes['post_mean'][tr] - a numpy array of shape (xdim,T).
The inferred latent trajectory of trial tr.
infRes['post_cov'][tr] - a numpy array of shape (xdim*T,xdim*T).
The covariance of the inferred latent trajectory of trial tr.
* posteriorLikelihood - (list), posterior likelihood at each EM iteration.
* variationalLowerBound - (list), variational lower bound at each EM iteration.
This attribute only exists if inferenceMethod = 'variational'.
Resulting Methods:
==================
* plotTrajectory(tr) - plots the inferred trajectory and spike counts of trial tr.
* plotTrajectories() - plots the inferred trajectory of all trials.
* plotParamSeq() - plots some information about how the parameters change through EM iter.
* plotOptimParams() - plots the optimal parameters found.
* plotFitDetails() - plots some information about the fitting process as functions of EM iter.
'''
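# Example usage (a sketch; the exact `util.dataset` constructor arguments are
# an assumption and depend on how the data was prepared):
#     experiment = util.dataset(...)                        # load spike data
#     fit = PPGPFAfit(experiment, xdim=2, EMmode='Batch', maxEMiter=50)
#     fit.plotTrajectory(0)                                 # inspect trial 0
#     optimal_params = fit.optimParams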
# np.seterr(all='ignore')
def __init__(self,
experiment,
initParams = None,
xdim = 2,
inferenceMethod = 'laplace',
maxEMiter = 50,
optimLogLamb = False,
CdOptimMethod = 'TNC',
tauOptimMethod = 'TNC',
verbose = False,
EMmode = 'Online',
batchSize = 5,
onlineParamUpdateMethod = 'diag',
hessTol = None,
stepPow = 0.75,
updateCdJointly = True,
fullyUpdateTau = False,
extractAllTraj = False,
extractAllTraj_trueParams = False,
getPredictionErr = False,
CdMaxIter = None,
tauMaxIter = None):
self.experiment = experiment
ydim, T = np.shape(experiment.data[0]['Y'])
trialDur = experiment.trialDur
numTrials = len(experiment.data)
binSize = experiment.binSize
if initParams is None:
initParams = util.initializeParams(xdim, ydim, experiment)
else:
_,xdim = np.shape(initParams['C'])
posteriorLikelihood = []
variationalLowerBound = []
learningDetails = []
params = initParams
paramSeq = []
paramSeq.append(initParams)
learningTime = [] # for profiling
inferenceTime = []
#!BatchEM
if EMmode == 'Batch':
print('+-------------------- Fit Options --------------------+')
util.Printer.stdout((str(xdim)+' |').rjust(int(55)))
util.Printer.stdout('| Dimensionality of Latent State: ')
sys.stdout.flush()
print()
util.Printer.stdout((str(ydim)+' |').rjust(int(55)))
util.Printer.stdout('| Dimensionality of Observed State (# neurons): ')
sys.stdout.flush()
print()
util.Printer.stdout((str(EMmode)+' |').rjust(int(55)))
util.Printer.stdout('| EM mode: ')
sys.stdout.flush()
print()
util.Printer.stdout((str(maxEMiter)+' |').rjust(int(55)))
util.Printer.stdout('| Max EM iterations: ')
sys.stdout.flush()
print()
util.Printer.stdout((str(inferenceMethod)+' |').rjust(int(55)))
util.Printer.stdout('| Inference Method: ')
sys.stdout.flush()
print()
print('+-----------------------------------------------------+')
# EM loop
for i in range(maxEMiter):
# E step
before = time.time()
if inferenceMethod == 'laplace':
# if/else to use previous optimization result as initialization in inference
if i == 0:
infRes, nll, lapOptimRes = inference.laplace(
experiment = experiment,
params = params,
prevOptimRes = None,
verbose = verbose)
else:
infRes, nll, lapOptimRes = inference.laplace(
experiment = experiment,
params = params,
prevOptimRes = lapOptimRes,
verbose = verbose)
posteriorLikelihood.append(nll)
if inferenceMethod == 'variational':
# if/else to use previous optimization result as initialization for next iteration
if i == 0:
infRes, nll, vlb, varOptimRes = inference.dualVariational(
experiment = experiment,
params = params,
optimizeLogLambda = optimLogLamb,
prevOptimRes = None,
verbose = verbose)
else:
infRes, nll, vlb, varOptimRes = inference.dualVariational(
experiment = experiment,
params = params,
optimizeLogLambda = optimLogLamb,
prevOptimRes = varOptimRes,
verbose = verbose)
posteriorLikelihood.append(nll)
variationalLowerBound.append(vlb)
after = time.time()
inferenceTime.append(after-before)
# M step
before = time.time()
params, learnDet = learning.updateParams(
oldParams = params,
infRes = infRes,
experiment = experiment,
CdOptimMethod = CdOptimMethod)
after = time.time()
learningTime.append(after-before)
learningDetails.append(learnDet)
paramSeq.append(params)
# print message
if inferenceMethod == 'laplace':
output = 'Iteration: %3d of %3d, nPLL = %.4f'%(i+1,maxEMiter,nll)
if inferenceMethod == 'variational':
output = 'Iteration: %3d of %3d, nPLL = %.4f, VLB = %.4f'\
%(i+1,maxEMiter,nll,vlb)
util.Printer(output)
#!endBatchEM
#!onlineEM
if EMmode == 'Online':
print('+-------------------- Fit Options --------------------+')
util.Printer.stdout((str(xdim)+' |').rjust(int(55)))
util.Printer.stdout('| Dimensionality of Latent State: ')
sys.stdout.flush()
print()
util.Printer.stdout((str(ydim)+' |').rjust(int(55)))
util.Printer.stdout('| Dimensionality of Observed State (# neurons): ')
sys.stdout.flush()
print()
util.Printer.stdout((str(EMmode)+' |').rjust(int(55)))
util.Printer.stdout('| EM mode: ')
sys.stdout.flush()
print()
util.Printer.stdout((str(maxEMiter)+' |').rjust(int(55)))
util.Printer.stdout('| Max EM iterations: ')
sys.stdout.flush()
print()
util.Printer.stdout((str(inferenceMethod)+' |').rjust(int(55)))
util.Printer.stdout('| Inference Method: ')
sys.stdout.flush()
print()
util.Printer.stdout(('`'+str(onlineParamUpdateMethod)+'`'+' |').rjust(int(55)))
util.Printer.stdout('| Online Param Update Method: ')
sys.stdout.flush()
print()
util.Printer.stdout((str(batchSize)+' |').rjust(int(55)))
util.Printer.stdout('| Batch size (trials): ')
sys.stdout.flush()
print()
print('+-----------------------------------------------------+')
gamma = np.linspace(0,1,maxEMiter)
regularizer_stepsize_Cd = 1/(np.arange(maxEMiter)+1)**(stepPow)
regularizer_stepsize_tau = 1/(np.arange(maxEMiter)+1)**(stepPow)
grad_descent_stepsize = 1/(np.arange(maxEMiter)+1)**stepPow
self.invPriorCovs = []
self.cumHess = []
# depending on whether updating C,d jointly or not, the size of the hessian to store differs
if updateCdJointly: self.invPriorCovs.append(np.diag(np.ones(xdim*ydim+ydim)))
else: self.invPriorCovs.append(np.diag(np.ones(xdim*ydim)))
if updateCdJointly: self.cumHess.append(np.diag(np.ones(xdim*ydim+ydim)))
else: self.cumHess.append(np.diag(np.ones(xdim*ydim)))
# EM Loop
seenTrialIdx = []
for n in range(maxEMiter):
# E-step
# stochasticity in online learning comes from subsampling trials
subsampledDat = util.subsampleTrials(experiment, batchSize)
seenTrialIdx.append(subsampledDat.batchTrIdx)
seenDat = util.seenTrials(experiment, seenTrialIdx)
before = time.time()
if inferenceMethod == 'laplace':
infRes, nll, lapOptimRes = inference.laplace(
experiment = subsampledDat,
params = params,
verbose = verbose)
posteriorLikelihood.append(nll)
if inferenceMethod == 'variational':
infRes, nll, vlb, varOptimRes = inference.dualVariational(
experiment = subsampledDat,
params = params,
optimizeLogLambda = optimLogLamb,
verbose = verbose)
posteriorLikelihood.append(nll)
variationalLowerBound.append(vlb)
after = time.time()
inferenceTime.append(after-before)
# M-step
before = time.time()
# Variants of naive online learning methods
if onlineParamUpdateMethod == 'balancingGamma':
newParams, learnDet = learning.updateParams(
oldParams = params,
infRes = infRes,
experiment = subsampledDat,
CdOptimMethod = CdOptimMethod,
CdMaxIter = CdMaxIter,
tauMaxIter = None,
verbose = verbose)
nextParams = newParams
nextParams['C'] = (gamma[n])*params['C'] + (1-gamma[n])*newParams['C']
nextParams['d'] = (gamma[n])*params['d'] + (1-gamma[n])*newParams['d']
nextParams['tau'] = (gamma[n])*params['tau'] + (1-gamma[n])*newParams['tau']
if onlineParamUpdateMethod == 'sequentialAverage':
newParams, learnDet = learning.updateParams(
oldParams = params,
infRes = infRes,
experiment = subsampledDat,
CdOptimMethod = CdOptimMethod,
CdMaxIter = CdMaxIter,
tauMaxIter = None,
verbose | |
of form:
obs_modality1: dict
feature_dimension: int
core_class: str
core_kwargs: dict
...
...
obs_randomizer_class: str
obs_randomizer_kwargs: dict
...
...
obs_modality2: dict
...
"""
# parameters specific to GMM actor
self.num_modes = num_modes
self.min_std = min_std
self.low_noise_eval = low_noise_eval
self.use_tanh = use_tanh
# Define activations to use
self.activations = {
"softplus": F.softplus,
"exp": torch.exp,
}
assert std_activation in self.activations, \
"std_activation must be one of: {}; instead got: {}".format(self.activations.keys(), std_activation)
self.std_activation = std_activation
super(RNNGMMActorNetwork, self).__init__(
obs_shapes=obs_shapes,
ac_dim=ac_dim,
mlp_layer_dims=mlp_layer_dims,
rnn_hidden_dim=rnn_hidden_dim,
rnn_num_layers=rnn_num_layers,
rnn_type=rnn_type,
rnn_kwargs=rnn_kwargs,
goal_shapes=goal_shapes,
encoder_kwargs=encoder_kwargs,
)
def _get_output_shapes(self):
"""
Tells @MIMO_MLP superclass about the output dictionary that should be generated
at the last layer. Network outputs parameters of GMM distribution.
"""
return OrderedDict(
mean=(self.num_modes, self.ac_dim),
scale=(self.num_modes, self.ac_dim),
logits=(self.num_modes,),
)
def forward_train(self, obs_dict, goal_dict=None, rnn_init_state=None, return_state=False):
"""
Return full GMM distribution, which is useful for computing
quantities necessary at train-time, like log-likelihood, KL
divergence, etc.
Args:
obs_dict (dict): batch of observations
goal_dict (dict): if not None, batch of goal observations
rnn_init_state: rnn hidden state, initialize to zero state if set to None
return_state (bool): whether to return hidden state
Returns:
dists (Distribution): sequence of GMM distributions over the timesteps
rnn_state: return rnn state at the end if return_state is set to True
"""
if self._is_goal_conditioned:
assert goal_dict is not None
# repeat the goal observation in time to match dimension with obs_dict
mod = list(obs_dict.keys())[0]
goal_dict = TensorUtils.unsqueeze_expand_at(goal_dict, size=obs_dict[mod].shape[1], dim=1)
outputs = RNN_MIMO_MLP.forward(
self, obs=obs_dict, goal=goal_dict, rnn_init_state=rnn_init_state, return_state=return_state)
if return_state:
outputs, state = outputs
else:
state = None
means = outputs["mean"]
scales = outputs["scale"]
logits = outputs["logits"]
# apply tanh squashing to mean if not using tanh-GMM to ensure means are in [-1, 1]
if not self.use_tanh:
means = torch.tanh(means)
if self.low_noise_eval and (not self.training):
# low-noise for all Gaussian dists
scales = torch.ones_like(means) * 1e-4
else:
# post-process the scale accordingly
scales = self.activations[self.std_activation](scales) + self.min_std
# mixture components - make sure that `batch_shape` for the distribution is equal
# to (batch_size, timesteps, num_modes) since MixtureSameFamily expects this shape
component_distribution = D.Normal(loc=means, scale=scales)
component_distribution = D.Independent(component_distribution, 1) # shift action dim to event shape
# unnormalized logits to categorical distribution for mixing the modes
mixture_distribution = D.Categorical(logits=logits)
dists = D.MixtureSameFamily(
mixture_distribution=mixture_distribution,
component_distribution=component_distribution,
)
if self.use_tanh:
# Wrap distribution with Tanh
dists = TanhWrappedDistribution(base_dist=dists, scale=1.)
if return_state:
return dists, state
else:
return dists
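# Typical train-time use of the returned distribution (a sketch; the `net`
# and `batch` names are assumptions, not part of this class):
#     dists = net.forward_train(batch["obs"], goal_dict=batch.get("goal_obs"))
#     log_probs = dists.log_prob(batch["actions"])   # shape (B, T)
#     loss = -log_probs.mean()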
def forward(self, obs_dict, goal_dict=None, rnn_init_state=None, return_state=False):
"""
Samples actions from the policy distribution.
Args:
obs_dict (dict): batch of observations
goal_dict (dict): if not None, batch of goal observations
Returns:
action (torch.Tensor): batch of actions from policy distribution
"""
out = self.forward_train(obs_dict=obs_dict, goal_dict=goal_dict, rnn_init_state=rnn_init_state, return_state=return_state)
if return_state:
ad, state = out
return ad.sample(), state
return out.sample()
def forward_train_step(self, obs_dict, goal_dict=None, rnn_state=None):
"""
Unroll RNN over single timestep to get action GMM distribution, which
is useful for computing quantities necessary at train-time, like
log-likelihood, KL divergence, etc.
Args:
obs_dict (dict): batch of observations. Should not contain
time dimension.
goal_dict (dict): if not None, batch of goal observations
rnn_state: rnn hidden state, initialize to zero state if set to None
Returns:
ad (Distribution): GMM action distributions
state: updated rnn state
"""
obs_dict = TensorUtils.to_sequence(obs_dict)
ad, state = self.forward_train(
obs_dict, goal_dict, rnn_init_state=rnn_state, return_state=True)
# to squeeze time dimension, make another action distribution
assert ad.component_distribution.base_dist.loc.shape[1] == 1
assert ad.component_distribution.base_dist.scale.shape[1] == 1
assert ad.mixture_distribution.logits.shape[1] == 1
component_distribution = D.Normal(
loc=ad.component_distribution.base_dist.loc.squeeze(1),
scale=ad.component_distribution.base_dist.scale.squeeze(1),
)
component_distribution = D.Independent(component_distribution, 1)
mixture_distribution = D.Categorical(logits=ad.mixture_distribution.logits.squeeze(1))
ad = D.MixtureSameFamily(
mixture_distribution=mixture_distribution,
component_distribution=component_distribution,
)
return ad, state
def forward_step(self, obs_dict, goal_dict=None, rnn_state=None):
"""
Unroll RNN over single timestep to get sampled actions.
Args:
obs_dict (dict): batch of observations. Should not contain
time dimension.
goal_dict (dict): if not None, batch of goal observations
rnn_state: rnn hidden state, initialize to zero state if set to None
Returns:
acts (torch.Tensor): batch of actions - does not contain time dimension
state: updated rnn state
"""
obs_dict = TensorUtils.to_sequence(obs_dict)
acts, state = self.forward(
obs_dict, goal_dict, rnn_init_state=rnn_state, return_state=True)
assert acts.shape[1] == 1
return acts[:, 0], state
def _to_string(self):
"""Info to pretty print."""
msg = "action_dim={}, std_activation={}, low_noise_eval={}, num_nodes={}, min_std={}".format(
self.ac_dim, self.std_activation, self.low_noise_eval, self.num_modes, self.min_std)
return msg
class VAEActor(Module):
"""
A VAE that models a distribution of actions conditioned on observations.
The VAE prior and decoder are used at test-time as the policy.
"""
def __init__(
self,
obs_shapes,
ac_dim,
encoder_layer_dims,
decoder_layer_dims,
latent_dim,
device,
decoder_is_conditioned=True,
decoder_reconstruction_sum_across_elements=False,
latent_clip=None,
prior_learn=False,
prior_is_conditioned=False,
prior_layer_dims=(),
prior_use_gmm=False,
prior_gmm_num_modes=10,
prior_gmm_learn_weights=False,
prior_use_categorical=False,
prior_categorical_dim=10,
prior_categorical_gumbel_softmax_hard=False,
goal_shapes=None,
encoder_kwargs=None,
):
"""
Args:
obs_shapes (OrderedDict): a dictionary that maps modality to
expected shapes for observations.
ac_dim (int): dimension of action space.
goal_shapes (OrderedDict): a dictionary that maps modality to
expected shapes for goal observations.
encoder_kwargs (dict or None): If None, results in default encoder_kwargs being applied. Otherwise, should
be nested dictionary containing relevant per-modality information for encoder networks.
Should be of form:
obs_modality1: dict
feature_dimension: int
core_class: str
core_kwargs: dict
...
...
obs_randomizer_class: str
obs_randomizer_kwargs: dict
...
...
obs_modality2: dict
...
"""
super(VAEActor, self).__init__()
self.obs_shapes = obs_shapes
self.ac_dim = ac_dim
action_shapes = OrderedDict(action=(self.ac_dim,))
# ensure VAE decoder will squash actions into [-1, 1]
output_squash = ['action']
output_scales = OrderedDict(action=1.)
self._vae = VAE(
input_shapes=action_shapes,
output_shapes=action_shapes,
encoder_layer_dims=encoder_layer_dims,
decoder_layer_dims=decoder_layer_dims,
latent_dim=latent_dim,
device=device,
condition_shapes=self.obs_shapes,
decoder_is_conditioned=decoder_is_conditioned,
decoder_reconstruction_sum_across_elements=decoder_reconstruction_sum_across_elements,
latent_clip=latent_clip,
output_squash=output_squash,
output_scales=output_scales,
prior_learn=prior_learn,
prior_is_conditioned=prior_is_conditioned,
prior_layer_dims=prior_layer_dims,
prior_use_gmm=prior_use_gmm,
prior_gmm_num_modes=prior_gmm_num_modes,
prior_gmm_learn_weights=prior_gmm_learn_weights,
prior_use_categorical=prior_use_categorical,
prior_categorical_dim=prior_categorical_dim,
prior_categorical_gumbel_softmax_hard=prior_categorical_gumbel_softmax_hard,
goal_shapes=goal_shapes,
encoder_kwargs=encoder_kwargs,
)
def encode(self, actions, obs_dict, goal_dict=None):
"""
Args:
actions (torch.Tensor): a batch of actions
obs_dict (dict): a dictionary that maps modalities to torch.Tensor
batches. These should correspond to the observation modalities
used for conditioning in either the decoder or the prior (or both).
goal_dict (dict): a dictionary that maps modalities to torch.Tensor
batches. These should correspond to goal modalities.
Returns:
posterior params (dict): dictionary with the following keys:
mean (torch.Tensor): posterior encoder means
logvar (torch.Tensor): posterior encoder logvars
"""
inputs = OrderedDict(action=actions)
return self._vae.encode(inputs=inputs, conditions=obs_dict, goals=goal_dict)
def decode(self, obs_dict=None, goal_dict=None, z=None, n=None):
"""
Thin wrapper around @VaeNets.VAE implementation.
Args:
obs_dict (dict): a dictionary that maps modalities to torch.Tensor
batches. Only needs to be provided if @decoder_is_conditioned
or @z is None (since the prior will require it to generate z).
goal_dict (dict): a dictionary that maps modalities to torch.Tensor
batches. These should correspond to goal modalities.
z (torch.Tensor): if provided, these latents are used to generate
reconstructions from the VAE, and the prior is not sampled.
n (int): this argument is used to specify the number of samples to
generate from the prior. Only required if @z is None - i.e.
sampling takes place
Returns:
recons (dict): dictionary of reconstructed inputs (this will be a dictionary
with a single "action" key)
"""
return self._vae.decode(conditions=obs_dict, goals=goal_dict, z=z, n=n)
def sample_prior(self, obs_dict=None, goal_dict=None, n=None):
"""
Thin wrapper around @VaeNets.VAE implementation.
Args:
n (int): this argument is used to specify the number
of samples to generate from the prior.
obs_dict (dict): a dictionary that maps modalities to torch.Tensor
batches. Only needs to be provided if @prior_is_conditioned.
goal_dict (dict): a dictionary that maps modalities to torch.Tensor
batches. These should correspond to goal modalities.
Returns:
z (torch.Tensor): latents sampled from the prior
"""
return self._vae.sample_prior(n=n, conditions=obs_dict, goals=goal_dict)
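# Test-time policy sketch (an illustration of the class docstring, not an API
# defined here): sample a latent from the (possibly conditioned) prior, then
# decode it into an action.
#     z = actor.sample_prior(n=1, obs_dict=obs)
#     action = actor.decode(obs_dict=obs, z=z)["action"]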
def set_gumbel_temperature(self, temperature):
"""
Used by external algorithms to schedule Gumbel-Softmax temperature,
which is used during reparametrization at train-time. Should only be
used if @prior_use_categorical is True.
"""
self._vae.set_gumbel_temperature(temperature)
def get_gumbel_temperature(self):
"""
Return current Gumbel-Softmax temperature. Should only be used if
@prior_use_categorical is True.
"""
return self._vae.get_gumbel_temperature()
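# A possible external annealing schedule (an assumption, not something this
# module prescribes), applied once per epoch when prior_use_categorical=True:
#     actor.set_gumbel_temperature(max(0.5, 1.0 * (0.97 ** epoch)))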
def output_shape(self, input_shape=None):
"""
This implementation is required by the Module superclass, but is unused since we
never chain this module to other ones.
"""
return [self.ac_dim]
def forward_train(self, actions, obs_dict, goal_dict=None, freeze_encoder=False):
"""
A full pass through the VAE network used during training to construct KL
and reconstruction losses. See @VAE class for more info.
Args:
actions (torch.Tensor): a batch of actions
obs_dict (dict): a dictionary that maps modalities to torch.Tensor
batches. These should correspond to the observation modalities
used for conditioning in | |
+ m.b17 - m.b62 <= 0)
m.c2339 = Constraint(expr= - m.b2 + m.b18 - m.b63 <= 0)
m.c2340 = Constraint(expr= - m.b2 + m.b19 - m.b64 <= 0)
m.c2341 = Constraint(expr= - m.b2 + m.b20 - m.b65 <= 0)
m.c2342 = Constraint(expr= - m.b2 + m.b21 - m.b66 <= 0)
m.c2343 = Constraint(expr= - m.b2 + m.b22 - m.b67 <= 0)
m.c2344 = Constraint(expr= - m.b2 + m.b23 - m.b68 <= 0)
m.c2345 = Constraint(expr= - m.b2 + m.b24 - m.b69 <= 0)
m.c2346 = Constraint(expr= - m.b3 + m.b4 - m.b70 <= 0)
m.c2347 = Constraint(expr= - m.b3 + m.b5 - m.b71 <= 0)
m.c2348 = Constraint(expr= - m.b3 + m.b6 - m.b72 <= 0)
m.c2349 = Constraint(expr= - m.b3 + m.b7 - m.b73 <= 0)
m.c2350 = Constraint(expr= - m.b3 + m.b8 - m.b74 <= 0)
m.c2351 = Constraint(expr= - m.b3 + m.b9 - m.b75 <= 0)
m.c2352 = Constraint(expr= - m.b3 + m.b10 - m.b76 <= 0)
m.c2353 = Constraint(expr= - m.b3 + m.b11 - m.b77 <= 0)
m.c2354 = Constraint(expr= - m.b3 + m.b12 - m.b78 <= 0)
m.c2355 = Constraint(expr= - m.b3 + m.b13 - m.b79 <= 0)
m.c2356 = Constraint(expr= - m.b3 + m.b14 - m.b80 <= 0)
m.c2357 = Constraint(expr= - m.b3 + m.b15 - m.b81 <= 0)
m.c2358 = Constraint(expr= - m.b3 + m.b16 - m.b82 <= 0)
m.c2359 = Constraint(expr= - m.b3 + m.b17 - m.b83 <= 0)
m.c2360 = Constraint(expr= - m.b3 + m.b18 - m.b84 <= 0)
m.c2361 = Constraint(expr= - m.b3 + m.b19 - m.b85 <= 0)
m.c2362 = Constraint(expr= - m.b3 + m.b20 - m.b86 <= 0)
m.c2363 = Constraint(expr= - m.b3 + m.b21 - m.b87 <= 0)
m.c2364 = Constraint(expr= - m.b3 + m.b22 - m.b88 <= 0)
m.c2365 = Constraint(expr= - m.b3 + m.b23 - m.b89 <= 0)
m.c2366 = Constraint(expr= - m.b3 + m.b24 - m.b90 <= 0)
m.c2367 = Constraint(expr= - m.b4 + m.b5 - m.b91 <= 0)
m.c2368 = Constraint(expr= - m.b4 + m.b6 - m.b92 <= 0)
m.c2369 = Constraint(expr= - m.b4 + m.b7 - m.b93 <= 0)
m.c2370 = Constraint(expr= - m.b4 + m.b8 - m.b94 <= 0)
m.c2371 = Constraint(expr= - m.b4 + m.b9 - m.b95 <= 0)
m.c2372 = Constraint(expr= - m.b4 + m.b10 - m.b96 <= 0)
m.c2373 = Constraint(expr= - m.b4 + m.b11 - m.b97 <= 0)
m.c2374 = Constraint(expr= - m.b4 + m.b12 - m.b98 <= 0)
m.c2375 = Constraint(expr= - m.b4 + m.b13 - m.b99 <= 0)
m.c2376 = Constraint(expr= - m.b4 + m.b14 - m.b100 <= 0)
m.c2377 = Constraint(expr= - m.b4 + m.b15 - m.b101 <= 0)
m.c2378 = Constraint(expr= - m.b4 + m.b16 - m.b102 <= 0)
m.c2379 = Constraint(expr= - m.b4 + m.b17 - m.b103 <= 0)
m.c2380 = Constraint(expr= - m.b4 + m.b18 - m.b104 <= 0)
m.c2381 = Constraint(expr= - m.b4 + m.b19 - m.b105 <= 0)
m.c2382 = Constraint(expr= - m.b4 + m.b20 - m.b106 <= 0)
m.c2383 = Constraint(expr= - m.b4 + m.b21 - m.b107 <= 0)
m.c2384 = Constraint(expr= - m.b4 + m.b22 - m.b108 <= 0)
m.c2385 = Constraint(expr= - m.b4 + m.b23 - m.b109 <= 0)
m.c2386 = Constraint(expr= - m.b4 + m.b24 - m.b110 <= 0)
m.c2387 = Constraint(expr= - m.b5 + m.b6 - m.b111 <= 0)
m.c2388 = Constraint(expr= - m.b5 + m.b7 - m.b112 <= 0)
m.c2389 = Constraint(expr= - m.b5 + m.b8 - m.b113 <= 0)
m.c2390 = Constraint(expr= - m.b5 + m.b9 - m.b114 <= 0)
m.c2391 = Constraint(expr= - m.b5 + m.b10 - m.b115 <= 0)
m.c2392 = Constraint(expr= - m.b5 + m.b11 - m.b116 <= 0)
m.c2393 = Constraint(expr= - m.b5 + m.b12 - m.b117 <= 0)
m.c2394 = Constraint(expr= - m.b5 + m.b13 - m.b118 <= 0)
m.c2395 = Constraint(expr= - m.b5 + m.b14 - m.b119 <= 0)
m.c2396 = Constraint(expr= - m.b5 + m.b15 - m.b120 <= 0)
m.c2397 = Constraint(expr= - m.b5 + m.b16 - m.b121 <= 0)
m.c2398 = Constraint(expr= - m.b5 + m.b17 - m.b122 <= 0)
m.c2399 = Constraint(expr= - m.b5 + m.b18 - m.b123 <= 0)
m.c2400 = Constraint(expr= - m.b5 + m.b19 - m.b124 <= 0)
m.c2401 = Constraint(expr= - m.b5 + m.b20 - m.b125 <= 0)
m.c2402 = Constraint(expr= - m.b5 + m.b21 - m.b126 <= 0)
m.c2403 = Constraint(expr= - m.b5 + m.b22 - m.b127 <= 0)
m.c2404 = Constraint(expr= - m.b5 + m.b23 - m.b128 <= 0)
m.c2405 = Constraint(expr= - m.b5 + m.b24 - m.b129 <= 0)
m.c2406 = Constraint(expr= - m.b6 + m.b7 - m.b130 <= 0)
m.c2407 = Constraint(expr= - m.b6 + m.b8 - m.b131 <= 0)
m.c2408 = Constraint(expr= - m.b6 + m.b9 - m.b132 <= 0)
m.c2409 = Constraint(expr= - m.b6 + m.b10 - m.b133 <= 0)
m.c2410 = Constraint(expr= - m.b6 + m.b11 - m.b134 <= 0)
m.c2411 = Constraint(expr= - m.b6 + m.b12 - m.b135 <= 0)
m.c2412 = Constraint(expr= - m.b6 + m.b13 - m.b136 <= 0)
m.c2413 = Constraint(expr= - m.b6 + m.b14 - m.b137 <= 0)
m.c2414 = Constraint(expr= - m.b6 + m.b15 - m.b138 <= 0)
m.c2415 = Constraint(expr= - m.b6 + m.b16 - m.b139 <= 0)
m.c2416 = Constraint(expr= - m.b6 + m.b17 - m.b140 <= 0)
m.c2417 = Constraint(expr= - m.b6 + m.b18 - m.b141 <= 0)
m.c2418 = Constraint(expr= - m.b6 + m.b19 - m.b142 <= 0)
m.c2419 = Constraint(expr= - m.b6 + m.b20 - m.b143 <= 0)
m.c2420 = Constraint(expr= - m.b6 + m.b21 - m.b144 <= 0)
m.c2421 = Constraint(expr= - m.b6 + m.b22 - m.b145 <= 0)
m.c2422 = Constraint(expr= - m.b6 + m.b23 - m.b146 <= 0)
m.c2423 = Constraint(expr= - m.b6 + m.b24 - m.b147 <= 0)
m.c2424 = Constraint(expr= - m.b7 + m.b8 - m.b148 <= 0)
m.c2425 = Constraint(expr= - m.b7 + m.b9 - m.b149 <= 0)
m.c2426 = Constraint(expr= - m.b7 + m.b10 - m.b150 <= 0)
m.c2427 = Constraint(expr= - m.b7 + m.b11 - m.b151 <= 0)
m.c2428 = Constraint(expr= - m.b7 + m.b12 - m.b152 <= 0)
m.c2429 = Constraint(expr= - m.b7 + m.b13 - m.b153 <= 0)
m.c2430 = Constraint(expr= - m.b7 + m.b14 - m.b154 <= 0)
m.c2431 = Constraint(expr= - m.b7 + m.b15 - m.b155 <= 0)
m.c2432 = Constraint(expr= - m.b7 + m.b16 - m.b156 <= 0)
m.c2433 = Constraint(expr= - m.b7 + m.b17 - m.b157 <= 0)
m.c2434 = Constraint(expr= - m.b7 + m.b18 - m.b158 <= 0)
m.c2435 = Constraint(expr= - m.b7 + m.b19 - m.b159 <= 0)
m.c2436 = Constraint(expr= - m.b7 + m.b20 - m.b160 <= 0)
m.c2437 = Constraint(expr= - m.b7 + m.b21 - m.b161 <= 0)
m.c2438 = Constraint(expr= - m.b7 + m.b22 - m.b162 <= 0)
m.c2439 = Constraint(expr= - m.b7 + m.b23 - m.b163 <= 0)
m.c2440 = Constraint(expr= - m.b7 + m.b24 - m.b164 <= 0)
m.c2441 = Constraint(expr= - m.b8 + m.b9 - m.b165 <= 0)
m.c2442 = Constraint(expr= - m.b8 + m.b10 - m.b166 <= 0)
m.c2443 = Constraint(expr= - m.b8 + m.b11 - m.b167 <= 0)
m.c2444 = Constraint(expr= - m.b8 + m.b12 - m.b168 <= 0)
m.c2445 = Constraint(expr= - m.b8 + m.b13 - m.b169 <= 0)
m.c2446 = Constraint(expr= - m.b8 + m.b14 - m.b170 <= 0)
m.c2447 = Constraint(expr= - m.b8 + m.b15 - m.b171 <= 0)
m.c2448 = Constraint(expr= - m.b8 + m.b16 - m.b172 <= 0)
m.c2449 = Constraint(expr= - m.b8 + m.b17 - m.b173 <= 0)
m.c2450 = Constraint(expr= - m.b8 + m.b18 - m.b174 <= 0)
m.c2451 = Constraint(expr= - m.b8 + m.b19 - m.b175 <= 0)
m.c2452 = Constraint(expr= - m.b8 + m.b20 - m.b176 <= 0)
m.c2453 = Constraint(expr= - m.b8 + m.b21 - m.b177 <= 0)
m.c2454 = Constraint(expr= - m.b8 + m.b22 - m.b178 <= 0)
m.c2455 = Constraint(expr= - m.b8 + m.b23 - m.b179 <= 0)
m.c2456 = Constraint(expr= - m.b8 + m.b24 - m.b180 <= 0)
m.c2457 = Constraint(expr= - m.b9 + m.b10 - m.b181 <= 0)
m.c2458 = Constraint(expr= - m.b9 + m.b11 - m.b182 <= 0)
m.c2459 = Constraint(expr= - m.b9 + m.b12 - m.b183 <= 0)
m.c2460 = Constraint(expr= - m.b9 + m.b13 - m.b184 <= 0)
m.c2461 = Constraint(expr= - m.b9 + m.b14 - m.b185 <= 0)
m.c2462 = Constraint(expr= - m.b9 + m.b15 - m.b186 <= 0)
m.c2463 = Constraint(expr= - m.b9 + m.b16 - m.b187 <= 0)
m.c2464 = Constraint(expr= - m.b9 + m.b17 - m.b188 <= 0)
m.c2465 = Constraint(expr= - m.b9 + m.b18 - m.b189 <= 0)
m.c2466 = Constraint(expr= - m.b9 | |
mutation at nuc_pos
- codon_start_pos: 1based genome position of beginning of codon containing the mutation at nuc_pos
- codon_end_pos: 1based genome position of end of codon containing the mutation at nuc_pos
- ref_nuc_seq_dict: SeqIO dict
SeqIO Dict containing reference genomic sequence.
Should have format {"MN908947.3": SeqRecord of genome nucleotide sequence}
Returns:
==============
- valid_syn_df: pandas.DataFrame
Makes a copy of the input dataframe and adds new columns:
- codon_to: the mutated codon covering position nuc_pos
- aa_to: the amino acid translation of codon_to
valid_syn_df will only contain the rows in which the nucleotide position corresponds to a
synonymous mutation
"""
nuc_mut_trans_df = ( nuc_mut_df
.groupby(["seqHash",
"gene", "cds_num", "codon_start_pos", "codon_end_pos"])
.apply(get_mutated_codon, ref_nuc_seq_dict=ref_nuc_seq_dict)
)
valid_syn_df = nuc_mut_trans_df.loc[
(nuc_mut_trans_df["aa_from"] == nuc_mut_trans_df["aa_to_translated"]) &
(nuc_mut_trans_df["aa_to_translated"] != "")
].reset_index(drop=True)
valid_syn_df = valid_syn_df.rename(columns={"aa_to_translated": "aa_to"})
return valid_syn_df
def translate_snps(genes_tsv, ref_nuc_fasta_filename, ref_aa_fasta_filename,
nuc_mut_tsv, aa_mut_tsv,
snp_aa_link_tsv,
gene_overlap_tsv=None):
"""
Links SNPs to known amino acid substitutions from the output of
the grapevine variant pipeline.
The grapevine variant pipeline outputs SNPs and amino acid substitutions (synonymous and nonsynonymous)
in separate files. Although it derives the amino acid substitutions directly from the SNPs,
it never records the linkage, so we need to calculate it ourselves.
Parameters:
==============
- genes_tsv: str
Path to TSV of gene coordinates.
Should have columns:
- start: nucleotide start position of gene (CDS) coding sequence with respect to genome, 1 based
- end: nucleotide end position of gene (CDS) coding sequence with respect to genome, 1 based
- gene: gene name
- cds_num: position of the (CDS) coding sequence within the gene, 0-based.
A gene can have multiple coding sequences, and they can overlap each other, for
example if there is programmed ribosomal slippage that causes translation to frameshift backwards/forwards.
- ref_nuc_fasta_filename: str
Path to reference nucleotide fasta
- ref_aa_fasta_filename: str
Path to reference amino acid fasta
- nuc_mut_tsv: str
path to TSV of SNPs.
Expects that each SNP is on a separate line.
Columns should be: seqHash, SNP
For SNP, format should be "<nuc from><nuc pos><nuc to>"
- aa_mut_tsv: str
Path to TSV of amino acid substitutions.
Expects that each substitution is on a separate line.
Columns should be: seqHash, aa_mutation.
For aa_mutation:
- Synonymous substitutions will have format: synSNP:<nuc from><nuc pos><nuc to>
- Nonsynonymous substitutions will have format gene:<aa from><aa pos><aa to>
- snp_aa_link_tsv: str
Path to output TSV to write nucleotide to amino acid mutation links.
Will have columns: ["seqHash",
"genome_mutation.genome", "genome_mutation.pos", "genome_mutation.ref", "genome_mutation.alt",
"protein_mutation.gene", "protein_mutation.pos", "protein_mutation.ref", "protein_mutation.alt"]
- gene_overlap_tsv: str
path to input TSV of coordinates of gene overlap regions.
Expects columns to be: start, end, gene_cds
gene_cds column format should be: <gene>_cds<0 based cds number within gene>
Returns:
==============
tuple (link_mut_df, link_mut_ann_df)
- link_mut_df: pandas.DataFrame
Dataframe for the nucleotide to amino acid mutation linkage with the columns:
["seqHash",
"genome_mutation.genome", "genome_mutation.pos", "genome_mutation.ref", "genome_mutation.alt",
"protein_mutation.gene", "protein_mutation.pos", "protein_mutation.ref", "protein_mutation.alt"]
"""
ref_nuc_seq_dict = SeqIO.to_dict(SeqIO.parse(ref_nuc_fasta_filename, "fasta"))
ref_aa_seq_dict = SeqIO.to_dict(SeqIO.parse(ref_aa_fasta_filename, "fasta"))
if gene_overlap_tsv:
known_overlaps_df = pd.read_csv(gene_overlap_tsv, sep="\t", comment='#')
else:
known_overlaps_df = pd.DataFrame(columns=["start", "end", "gene_cds"])
gene_df = pd.read_csv(genes_tsv, sep="\t", comment="#")
gene_df["aa_length"] = (gene_df["end"] - gene_df["start"] + 1) / 3
# Check that distance between end and start is in multiples of 3
# ie check that start and end correspond to codon start and end
assert np.sum((gene_df["end"] - gene_df["start"] + 1) % 3 != 0) == 0
gene_df["aa_length"] = gene_df["aa_length"].astype(int)
# columns: seqHash, aa_mutation
aa_mut_df = pd.read_csv(aa_mut_tsv, sep="\t", comment="#")
# There might be samples with no amino acid mutations.
# We drop any samples with empty amino acid mutations to
# make merging easier
aa_mut_df = aa_mut_df.dropna()
if aa_mut_df.shape[0] < 1:
nonsyn_mut_df = pd.DataFrame(columns=["gene", "cds_num", "aa_mutation", "aa_from", "aa_pos", "aa_to"])
syn_mut_df = pd.DataFrame(columns=["nuc_from", "nuc_pos", "nuc_to",
"gene", "cds_num", "aa_mutation", "aa_from", "aa_pos", "aa_to"])
else:
# Split up the nonsynonymous and synonymous substitutions from the aa_mut_df,
# because we need to treat them differently
nonsyn_mut_df = aa_mut_df[~aa_mut_df["aa_mutation"].str.startswith("synSNP")].copy().reset_index(drop=True)
nonsyn_mut_df[["gene", "aa_from_pos_to"]] = nonsyn_mut_df["aa_mutation"].str.split(":", expand=True)
nonsyn_mut_df[["aa_from", "aa_pos", "aa_to"]] = (nonsyn_mut_df["aa_from_pos_to"]
.str.extract(r"([A-Z\*])([0-9]+)([A-Z\*]*)", expand=True))
# type int won't allow NA values but type Int64 will.
# But oddly, we need to cast to float before we can cast to int64
nonsyn_mut_df["aa_pos"] = nonsyn_mut_df["aa_pos"].astype('float').astype('Int64')
syn_mut_df = aa_mut_df[aa_mut_df["aa_mutation"].str.startswith("synSNP")].copy().reset_index(drop=True)
syn_mut_df[["consequence", "nuc_from_pos_to"]] = syn_mut_df["aa_mutation"].str.split(":", expand=True)
syn_mut_df[["nuc_from", "nuc_pos", "nuc_to"]] = syn_mut_df["nuc_from_pos_to"].str.extract(r"([A-Z])([0-9]+)([A-Z])", expand=True)
syn_mut_df["nuc_pos"] = syn_mut_df["nuc_pos"].astype(float).astype("Int64")
# Has columns: seqHash, aa_mutation, nuc_from, nuc_pos, nuc_to
# Also has throwaway columns: consequence, nuc_from_pos_to.
# Append columns: gene, cds_num, aa_pos, codon_start_pos, codon_end_pos
# Each row represents a SNP that we know should lead to a synonymous substitution (according to gofasta)
# If a SNP happens to cover multiple amino acid positions because it hits an overlapping gene region or overlapping coding region,
# we add another row to represent each SNP - amino acid position mapping.
syn_mut_df = convert_nuc_pos_to_aa_pos(gene_df=gene_df, nuc_mut_df=syn_mut_df)
syn_mut_df["aa_pos"] = syn_mut_df["aa_pos"].astype('float').astype('Int64')
# https://stackoverflow.com/questions/43196907/valueerror-wrong-number-of-items-passed-meaning-and-suggestions
# apply on empty dataframe borks:
if syn_mut_df.shape[0] > 0:
syn_mut_df["aa_from"] = syn_mut_df.apply(get_aa_at_gene_pos, axis="columns", ref_aa_seq_dict=ref_aa_seq_dict)
else:
syn_mut_df["aa_from"] = ""
# Has columns: seqHash, aa_mutation, nuc_from, nuc_pos, nuc_to, gene, cds_num, aa_pos, codon_start_pos, codon_end_pos
# Also has throwaway columns: consequence, nuc_from_pos_to.
# Append columns: codon_to, aa_to. codon_to is a throwaway column we won't use later.
# Cull the rows such that only SNP - amino acid position mappings
# that result in synonymous substitutions exist.
syn_mut_df = convert_nuc_mut_to_aa(ref_nuc_seq_dict=ref_nuc_seq_dict, nuc_mut_df=syn_mut_df)
# Columns: seqHash, SNP
nuc_mut_df = pd.read_csv(nuc_mut_tsv, sep="\t", comment="#")
# There might be samples with no SNPs
# We drop those samples to make merging easier
nuc_mut_df = nuc_mut_df.dropna()
if nuc_mut_df.shape[0] > 0:
nuc_mut_df[["nuc_from", "nuc_pos", "nuc_to"]] = nuc_mut_df["SNP"].str.extract(r"([A-Z]*)([0-9]+)([A-Z]*)", expand=True)
nuc_mut_df["nuc_pos"] = nuc_mut_df["nuc_pos"].astype(float).astype("Int64")
# Has Columns: seqHash, SNP, nuc_from, nuc_to, nuc_pos.
# Append columns: gene, cds_num, aa_pos, codon_start_pos, codon_end_pos
# We want each row to represent a SNP - amino acid position mapping.
# If a SNP happens to cover multiple amino acid positions because it hits an overlapping gene region or overlapping coding region,
# the SNP will be repeated in multiple rows, one for each amino acid position mapping.
nuc_mut_df = convert_nuc_pos_to_aa_pos(gene_df=gene_df, nuc_mut_df=nuc_mut_df)
# Has Columns: seqHash, SNP, nuc_from, nuc_to, nuc_pos, gene, cds_num, aa_pos, codon_start_pos, codon_end_pos
# Append: aa_from
nuc_mut_df["aa_from"] = nuc_mut_df.apply(get_aa_at_gene_pos, axis="columns", ref_aa_seq_dict=ref_aa_seq_dict)
# Now link nucleotide mutations with amino acid substitutions
if nuc_mut_df.shape[0] == 0 and (nonsyn_mut_df.shape[0] > 0 or syn_mut_df.shape[0] > 0):
raise ValueError("Invalid: We have nonsynonymous or synonymous AA substitutions, but no SNPs.")
# Handle situations in which samples have no mutations at all
if nuc_mut_df.shape[0] == 0 and nonsyn_mut_df.shape[0] == 0 and syn_mut_df.shape[0] == 0:
link_mut_df = pd.DataFrame(columns=[
"seqHash",
"nuc_from", "nuc_pos", "nuc_to", "SNP",
"aa_from", "aa_pos", "aa_to",
"gene", "cds_num", "aa_mutation"])
# Handle situations where samples only have SNPs in non-gene regions
elif nuc_mut_df.shape[0] > 0 and (nonsyn_mut_df.shape[0] == 0 and syn_mut_df.shape[0] == 0):
link_mut_df = nuc_mut_df.copy()
link_mut_df["aa_to"] = ""
# Handle situations in which samples have SNPs
# as well as nonsynonymous or synonymous substitutions
else:
# Multiple SNPs might contribute to a single nonsyn amino acid substitution.
# So there may be nonsyn_mut_df rows that get duplicated against multiple nuc_mut_df rows.
# A single SNP position may map to multiple AA positions if they are in overlapping genes or coding regions.
if nonsyn_mut_df.shape[0] == 0:
link_nonsyn_mut_df = pd.DataFrame(
columns=["seqHash", "gene", "nuc_from", "nuc_pos", "nuc_to", "SNP",
"aa_from", "aa_pos", "aa_to", "aa_mutation"])
else:
# Columns after merging (but not necessarily in that order):
# seqHash, SNP, nuc_from, nuc_to, nuc_pos,
# gene, cds_num, codon_start_pos, codon_end_pos,
# aa_pos, aa_from, aa_to, aa_mutation,
# nuc_from_pos_to, aa_from_pos_to
link_nonsyn_mut_df = nuc_mut_df.merge(nonsyn_mut_df, how="right",
left_on=["seqHash", "gene", "aa_from", "aa_pos"],
right_on=["seqHash", "gene", "aa_from", "aa_pos"])
# are there nonsyn mutations that don't have a corresponding SNP?
if np.sum(~link_nonsyn_mut_df["aa_mutation"].isna() & link_nonsyn_mut_df["SNP"].isna()) > 0:
print(link_nonsyn_mut_df)
print(link_nonsyn_mut_df[link_nonsyn_mut_df["SNP"].isna()])
raise ValueError("There are nonynonymous AA substitutions that don't have a corresponding SNP")
if syn_mut_df.shape[0] == 0:
link_syn_mut_df = pd.DataFrame(columns=[
"seqHash",
"SNP", "nuc_from", "nuc_pos", "nuc_to",
"aa_mutation", "gene", "aa_from", "aa_pos", "aa_to"])
else:
# Columns after merging (but not necessarily in that order):
# seqHash, SNP, nuc_from, nuc_to, nuc_pos,
# gene, cds_num, codon_start_pos, codon_end_pos,
# aa_pos, aa_from, aa_to, aa_mutation
import networkx as nx
import numpy as np
import scipy
from numba import jit
from scipy.sparse import isspmatrix
from scipy.special import comb
from . import comdet_functions as cd
from . import cp_functions as cp
def compute_neighbours(adj):
lista_neigh = []
for ii in np.arange(adj.shape[0]):
lista_neigh.append(adj[ii, :].nonzero()[0])
return lista_neigh
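# Note: for node ii this collects the column indices of the nonzero entries in
# row ii, i.e. its (out-)neighbours in the adjacency matrix.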
@jit(nopython=True)
def compute_cn(adjacency):
""" Computes common neighbours table, each entry i,j of this table is the
number of common neighbours between i and j.
:param adjacency: Adjacency matrix.
:type adjacency: numpy.ndarray
:return: Common neighbours table.
:rtype: numpy.ndarray
"""
cn_table = np.zeros_like(adjacency)
for i in np.arange(adjacency.shape[0]):
neighbour_i = (adjacency[i, :] + adjacency[:, i]).astype(np.bool_)
for j in np.arange(i + 1, adjacency.shape[0]):
neighbour_j = (adjacency[j, :] + adjacency[:, j]).astype(np.bool_)
cn_table[i, j] = cn_table[j, i] = np.multiply(neighbour_i,
neighbour_j).sum()
return cn_table
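# Minimal sanity check for compute_cn (illustrative, not part of the module):
# on the path graph 0-1-2-3, nodes 0 and 2 share exactly one neighbour (node 1),
# so entry (0, 2) of the table is 1.
#
#   adj = np.array([[0., 1., 0., 0.],
#                   [1., 0., 1., 0.],
#                   [0., 1., 0., 1.],
#                   [0., 0., 1., 0.]])
#   compute_cn(adj)[0, 2]  # -> 1.0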
@jit(nopython=True)
def common_neigh_init_guess_strong(adjacency):
"""Generates a preprocessed initial guess based on the common neighbours
of nodes. It makes a stronger aggregation of nodes based on
the common neighbours similarity.
:param adjacency: Adjacency matrix.
:type adjacency: numpy.ndarray
:return: Initial guess for nodes memberships.
:rtype: np.array
"""
cn_table = compute_cn(adjacency)
memberships = np.array(
[k for k in np.arange(adjacency.shape[0], dtype=np.int32)])
argsorted = np.argsort(adjacency.astype(np.bool_).sum(axis=1))[::-1]
for aux_node1 in argsorted:
aux_tmp = memberships == aux_node1
memberships[aux_tmp] = memberships[np.argmax(cn_table[aux_node1])]
return memberships
@jit(nopython=True)
def common_neigh_init_guess_weak(adjacency):
"""Generates a preprocessed initial guess based on the common neighbours
of nodes. It makes a weaker aggregation of nodes based on
the common neighbours similarity.
:param adjacency: Adjacency matrix.
:type adjacency: numpy.ndarray
:return: Initial guess for nodes memberships.
:rtype: np.array
"""
cn_table = compute_cn(adjacency)
memberships = np.array(
[k for k in np.arange(adjacency.shape[0], dtype=np.int32)])
degree = (adjacency.astype(np.bool_).sum(axis=1)
+ adjacency.astype(np.bool_).sum(axis=0))
avg_degree = np.mean(degree)
argsorted = np.argsort(degree)[::-1]
for aux_node1 in argsorted:
if degree[aux_node1] >= avg_degree:
aux_tmp = memberships == aux_node1
memberships[aux_tmp] = memberships[np.argmax(cn_table[aux_node1])]
return memberships
def eigenvector_init_guess(adjacency, is_directed):
"""Generates an initial guess for core periphery detection method: nodes
with higher eigenvector centrality are in the core.
:param adjacency: Adjacency matrix.
:type adjacency: np.ndarray
:param is_directed: True if the network is directed.
:type is_directed: bool
:return: Initial guess.
:rtype: np.ndarray
"""
# TODO: check how the weighted part works
n_nodes = adjacency.shape[0]
aux_nodes = int(np.ceil((n_nodes * 5) / 100))
if is_directed:
graph = nx.from_numpy_array(adjacency, create_using=nx.DiGraph)
centra = nx.eigenvector_centrality_numpy(graph)
centra1 = np.array([centra[key] for key in centra])
membership = np.ones_like(centra1, dtype=np.int32)
membership[np.argsort(centra1)[::-1][:aux_nodes]] = 0
else:
graph = nx.from_numpy_array(adjacency, create_using=nx.Graph)
centra = nx.eigenvector_centrality_numpy(graph)
centra1 = np.array([centra[key] for key in centra])
membership = np.ones_like(centra1, dtype=np.int32)
membership[np.argsort(centra1)[::-1][:aux_nodes]] = 0
return membership
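# Hypothetical usage sketch (assumes `adj` is a dense numpy adjacency matrix):
# the ~5% most eigenvector-central nodes are labelled 0 (core candidates),
# every other node is labelled 1 (periphery).
#
#   membership = eigenvector_init_guess(adj, is_directed=False)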
def fixed_clusters_init_guess_cn(adjacency, n_clust):
""" Generates an intial guess with a fixed number 'n' of clusters.
Nodes are organised in clusters based on the number of common neighbors.
The starting members of clusters are the 'n' nodes with higher
degrees/strengths.
:param adjacency: Adjacency matrix.
:type adjacency: numpy.ndarray
:param n_clust: Partitions number.
:type n_clust: int
:return: Initial guess.
:rtype: numpy.ndarray
"""
aux_memb = np.ones(adjacency.shape[0], dtype=np.int32) * (n_clust - 1)
cn = compute_cn(adjacency)
degree = adjacency.astype(np.bool_).sum(axis=1) + adjacency.astype(
np.bool_).sum(axis=0)
avg_degree = np.mean(degree)
degree_indices_g = np.nonzero(degree > 2)[0]
degree_indices_l = np.nonzero(degree <= 2)[0]
arg_max = np.argmax(degree[degree_indices_g])
clust_element = degree_indices_g[arg_max]
cluster_count = 0
while cluster_count != n_clust - 1:
aux_memb[clust_element] = cluster_count
degree_indices_g = np.delete(degree_indices_g, arg_max)
if len(degree_indices_g) == 0:
break
arg_max = np.argmin(cn[clust_element][degree_indices_g])
clust_element = degree_indices_g[arg_max]
cluster_count += 1
if np.unique(aux_memb).shape[0] < n_clust - 1:
cluster_count += 1
arg_max = np.argmax(degree[degree_indices_l])
clust_element = degree_indices_l[arg_max]
while cluster_count != n_clust - 1:
aux_memb[clust_element] = cluster_count
degree_indices_l = np.delete(degree_indices_l, arg_max)
if len(degree_indices_l) == 0:
raise ValueError(
"The number of clusters is higher thant the nodes number.")
arg_max = np.argmin(cn[clust_element][degree_indices_l])
clust_element = degree_indices_l[arg_max]
cluster_count += 1
aux = np.nonzero(aux_memb == n_clust - 1)[0]
np.random.shuffle(aux)
for node in aux:
if degree[node] < avg_degree:
continue
aux_list = np.nonzero(aux_memb != n_clust - 1)[0]
node_index = aux_list[np.argmax(cn[node, aux_list])]
if isinstance(node_index, np.ndarray):
node_index = np.random.choice(node_index)
aux_memb[node] = aux_memb[node_index]
return aux_memb
def compute_degree(a, is_directed):
"""Returns matrix *a* degree sequence.
:param a: Matrix.
:type a: numpy.ndarray
:param is_directed: True if the matrix is directed.
:type is_directed: bool
:return: Degree sequence.
:rtype: numpy.ndarray.
"""
# if the matrix is a numpy array
if is_directed:
if type(a) == np.ndarray:
return np.sum(a > 0, 0), np.sum(a > 0, 1)
# if the matrix is a scipy sparse matrix
elif isspmatrix(a):
return np.sum(a > 0, 0).A1, np.sum(a > 0, 1).A1
else:
if type(a) == np.ndarray:
return np.sum(a > 0, 1)
# if the matrix is a scipy sparse matrix
elif isspmatrix(a):
return np.sum(a > 0, 1).A1
def compute_strength(a, is_directed):
"""Returns matrix *a* strength sequence.
:param a: Matrix.
:type a: numpy.ndarray
:param is_directed: True if the matrix is directed.
:type is_directed: bool
:return: Strength sequence.
:rtype: numpy.ndarray
"""
if is_directed:
# if the matrix is a numpy array
if type(a) == np.ndarray:
return np.sum(a, 0), np.sum(a, 1)
# if the matrix is a scipy sparse matrix
elif isspmatrix(a):
return np.sum(a, 0).A1, np.sum(a, 1).A1
else:
# if the matrix is a numpy array
if type(a) == np.ndarray:
return np.sum(a, 1)
# if the matrix is a scipy sparse matrix
elif isspmatrix(a):
return np.sum(a, 1).A1
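# Illustrative contrast between the two helpers on a dense undirected matrix:
#
#   a = np.array([[0., 2.], [2., 0.]])
#   compute_degree(a, is_directed=False)    # -> array([1, 1])  (binary counts)
#   compute_strength(a, is_directed=False)  # -> array([2., 2.]) (weighted sums)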
def from_edgelist(edgelist, is_sparse, is_directed):
"""Returns np.ndarray or scipy.sparse matrix from edgelist.
:param edgelist: List of edges, each edge must be given as a 2-tuple
(u, v).
:type edgelist: list or numpy.ndarray
:param is_sparse: If true the returned matrix is sparse.
:type is_sparse: bool
:param is_directed: If true the graph is directed.
:type is_directed: bool
:return: Adjacency matrix.
:rtype: numpy.ndarray or scipy.sparse
"""
# TODO: check which sparse format this is
if is_directed:
g = nx.DiGraph()
else:
g = nx.Graph()
g.add_edges_from(edgelist)
if is_sparse:
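# note: networkx removed to_scipy_sparse_matrix in version 3.0; on newer
# releases use nx.to_scipy_sparse_array instead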
return nx.to_scipy_sparse_matrix(g)
else:
return nx.to_numpy_array(g)
def from_weighted_edgelist(edgelist, is_sparse, is_directed):
"""Returns np.ndarray or scipy.sparse matrix from edgelist.
:param edgelist: List of weighted edges, each edge must be given as a
3-tuple (u, v, w).
:type edgelist: list or numpy.ndarray
:param is_sparse: If true the returned matrix is sparse.
:type is_sparse: bool
:param is_directed: If true the graph is directed.
:type is_directed: bool
:return: Weighted adjacency matrix.
:rtype: numpy.ndarray or scipy.sparse
"""
if is_directed:
g = nx.DiGraph()
else:
g = nx.Graph()
g.add_weighted_edges_from(edgelist)
if is_sparse:
return nx.to_scipy_sparse_matrix(g)
else:
return nx.to_numpy_array(g)
def check_symmetric(a, is_sparse, rtol=1e-05, atol=1e-08):
"""Checks if the matrix is symmetric.
:param a: Matrix.
:type a: numpy.ndarray or scipy.sparse
:param is_sparse: If true the matrix is sparse.
:type is_sparse: bool
:param rtol: Tuning parameter, defaults to 1e-05.
:type rtol: float, optional
:param atol: Tuning parameter, defaults to 1e-08.
:type atol: float, optional
:return: True if the matrix is symmetric.
:rtype: bool
"""
if is_sparse:
return np.all(np.abs(a - a.T) < atol)
else:
return np.allclose(a, a.T, rtol=rtol, atol=atol)
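# Note: the sparse branch above compares entries against the absolute tolerance
# only; rtol is ignored for sparse inputs.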
def check_adjacency(adjacency, is_sparse, is_directed):
"""Functions checking the _validty_ of the adjacency matrix.
:param adjacency: Adjacency matrix.
:type adjacency: numpy.ndarray or scipy.sparse
:param is_sparse: If true the matrix is sparse.
:type is_sparse: bool
:param is_directed: True if the graph is directed.
:type is_directed: bool
:raises TypeError: Matrix not square.
:raises ValueError: Negative entries.
:raises TypeError: Matrix not symmetric.
"""
if adjacency.shape[0] != adjacency.shape[1]:
raise TypeError(
"Adjacency matrix must be square. If you are passing an edgelist"
" use the positional argument 'edgelist='.")
if np.sum(adjacency < 0):
raise ValueError(
"The adjacency matrix entries must be positive."
)
if (not check_symmetric(adjacency, is_sparse)) and (not is_directed):
raise TypeError(
"The adjacency matrix seems to be not symmetric, we suggest to use"
" 'DirectedGraphClass'.")
@jit(nopython=True, fastmath=True)
def sumLogProbabilities(nextlogp, logp):
if nextlogp == 0:
stop = True
else:
stop = False
if nextlogp > logp:
common = nextlogp
diffexponent = logp - common
else:
common = logp
diffexponent = nextlogp - common
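# Stable log-sum in base 10: logp becomes log10(10**logp + 10**nextlogp).
# Since np.log10(10) == 1, the division below is effectively a no-op.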
logp = common + ((np.log10(1 + 10 ** diffexponent)) / np.log10(10))
if (nextlogp - logp) > -4:
stop = True
return logp, stop
@jit(nopython=True, fastmath=True)
def logc(n, k):
if k == n:
return 0
elif (n > 1000) & (k > 1000): # Stirling's binomial coeff approximation
return logStirFac(n) - logStirFac(k) - logStirFac(n - k)
else:
t = n - k
if t < k:
t = k
logC = sumRange(t + 1, n) - sumFactorial(n - t)
return logC
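# Illustrative check, assuming sumRange and sumFactorial (defined elsewhere in
# this module) return the corresponding partial sums of base-10 log terms:
#
#   logc(5, 2)  # should be close to np.log10(comb(5, 2)) == 1.0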
@jit(nopython=True, fastmath=True)
def logStirFac(n):
if n <= 1:
return 1.0
else:
return -n + n * np.log10(n) + np.log10(
n * (1 + 4.0 * n * (1.0 + 2.0 * n))) / 6.0 + np.log10(np.pi) / 2.0
<gh_stars>0
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# the port is set to 7891
import numpy as np
from federatedml.ftl import mulmatOT
from arch.api.utils import log_utils
import time
import multiprocessing
LOGGER = log_utils.getLogger()
# from federatedml.optim.activation import sigmoid
class MulmatProcess(multiprocessing.Process):
def __init__(self, role,port,b,expandsA,ROW_B,COL_B, queue):
super().__init__()
self.role=role
self.port=port
self.b=b
self.expandsA=expandsA
self.ROW_B=ROW_B
self.COL_B=COL_B
self.queue = queue
def mulMat(self):
res=mulmatOT.mulmatOT(self.role,self.port,self.b,self.expandsA,self.ROW_B,self.COL_B)
# put the result into the queue
self.queue.put(res)
def run(self):
self.mulMat()
def sigmoid(x):
return 1. / (1. + np.exp(-x))
# New!! stretch a matrix into a row vector
def stretching(matA):
rowA=matA.shape[0]
colA=matA.shape[1]
matB=np.zeros((1,rowA*colA))
for i in range(0,rowA):
for j in range(0,colA):
matB[0][i*colA+j]=matA[i][j]
return matB
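# Note: this is equivalent to matA.reshape(1, rowA * colA) (row-major flatten).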
class PartyModelInterface(object):
def send_components(self):
pass
def receive_components(self, components):
pass
def send_gradients(self):
pass
def receive_gradients(self, gradients):
pass
def predict(self, X):
pass
class OTEncryptedFTLGuestModel(PartyModelInterface): #uB*phi, phi as a choice
def __init__(self, local_model, model_param, is_trace=False):
super(OTEncryptedFTLGuestModel, self).__init__()
self.localModel = local_model
self.feature_dim = local_model.get_encode_dim()
self.alpha = model_param.alpha
self.is_trace = is_trace
self.logger = LOGGER
self.expands=[] # new: record the expands and the sizes ROW, COL of every matrix taking part in the matrix operations
def set_batch(self, X, y, non_overlap_indexes=None, overlap_indexes=None):
self.X = X
self.y = y
self.non_overlap_indexes = non_overlap_indexes
self.overlap_indexes = overlap_indexes
self.phi = None
def __compute_phi(self, uA, y):
length_y = len(y)
return np.expand_dims(np.sum(y * uA, axis=0) / length_y, axis=0)
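# phi is the label-weighted mean of uA over all samples, shape (1, feature_dim)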
#======= for loss: here we only need y_overlap and phi; we need to calculate matmul(uB_overlap, phi.transpose()) by OT matrix multiplication, and uA_overlap * uB_overlap by OT float multiplication
#======= for gradient to host: we need y_overlap_2_phi_2, y_overlap_phi, mapping_comp_A
#======= for gradient to guest: we need y_overlap_2_phi and y_overlap; we need to calculate matmul(y_overlap_2_phi, uB_overlap_2) by OT matrix multiplication,
# and y_overlap * uB_overlap by OT float multiplication
def _compute_components(self):
self.uA = self.localModel.transform(self.X)
# phi has shape (1, feature_dim)
# phi_2 has shape (feature_dim, feature_dim)
self.phi = self.__compute_phi(self.uA, self.y)
self.phi_2 = np.matmul(self.phi.transpose(), self.phi)
# y_overlap and y_overlap_2 have shape (len(overlap_indexes), 1)
self.y_overlap = self.y[self.overlap_indexes]
self.y_overlap_2 = self.y_overlap * self.y_overlap
if self.is_trace:
self.logger.debug("phi shape" + str(self.phi.shape))
self.logger.debug("phi_2 shape" + str(self.phi_2.shape))
self.logger.debug("y_overlap shape" + str(self.y_overlap.shape))
self.logger.debug("y_overlap_2 shape" + str(self.y_overlap_2.shape))
# following two parameters will be sent to host
# y_overlap_2_phi_2 has shape (len(overlap_indexes), feature_dim, feature_dim)
# y_overlap_phi has shape (len(overlap_indexes), feature_dim)
self.y_overlap_2_phi_2 = 0.25 * np.expand_dims(self.y_overlap_2, axis=2) * self.phi_2
self.y_overlap_phi = -0.5 * self.y_overlap * self.phi
self.uA_overlap = self.uA[self.overlap_indexes]
# mapping_comp_A has shape (len(overlap_indexes), feature_dim)
self.mapping_comp_A = - self.uA_overlap / self.feature_dim
# self.special = - self.uA_overlap / self.feature_dim # send it over first and see -- New!!!
if self.is_trace:
self.logger.debug("y_overlap_2_phi_2 shape" + str(self.y_overlap_2_phi_2.shape))
self.logger.debug("y_overlap_phi shape" + str(self.y_overlap_phi.shape))
self.logger.debug("mapping_comp_A shape" + str(self.mapping_comp_A.shape))
# compute the expands of the matrices and send them to the host
uA_overlap = - self.uA_overlap / self.feature_dim
self.uA_overlap_expands=send_expand_matrix(len(self.overlap_indexes),self.feature_dim,uA_overlap)
phit=self.phi.transpose()
self.phit_expands=send_expand_matrix(self.feature_dim,1,phit)
self.y_overlap_2_phi_expands=[]
self.y_overlap_2_phi_2_expands=[]
matrix_y_overlap=np.zeros((len(self.overlap_indexes),self.feature_dim))
for i in range(0,len(self.overlap_indexes)): # the other way round
for j in range(0,self.feature_dim):
matrix_y_overlap[i][j]=self.y_overlap[i]
self.y_overlap_expands=send_expand_matrix(len(self.overlap_indexes),self.feature_dim,matrix_y_overlap)
y_overlap_2_phi = np.expand_dims(self.y_overlap_2 * self.phi, axis=1)
for i in range(0,len(self.overlap_indexes)):
expands=send_expand_matrix(1,self.feature_dim,y_overlap_2_phi[i])
self.y_overlap_2_phi_expands.append(expands)
for i in range(0,len(self.overlap_indexes)):
expands=send_expand_matrix(self.feature_dim,self.feature_dim,self.y_overlap_2_phi_2[i])
self.y_overlap_2_phi_2_expands.append(expands)
#new
# # Get the scaling matrix with precision 4; fit calls this function in a loop to send over the expands and sizes of every matrix that is needed.
# def send_expand_matrix(self,ROW,COL,matrix):
# matrixInt,matrixIntTimes=mulmatOT.ChaneToInt(ROW,COL,matrix)
# return [ROW,COL,matrixIntTimes]
# # Receive the collection of expands sent by the other party.
# def receive_expand_matrix(self,expandsReceived):
# return expandsReceived
# def compute_OT(self):
# #Loss_share1: matmul(uB_overlap,phi.transpose) ,guest as choice of OT,expands index=0
# uB_phi_share=mulmatOT.mulmatOT(1,phi.transpose,self.expands[0],self.feature_dim,1)
# loss_share1=-0.5 * np.sum(y_overlap*uB_phi_share)
def send_components(self):
# compute the variables we need
self._compute_components()
# compute the scaling matrices, ROW, COL, for the matrices taking part in the OT
# send the scaling matrices and ROW_B
return [self.y_overlap_2_phi_2, self.y_overlap_phi, self.mapping_comp_A, self.uA_overlap_expands,self.phit_expands,self.y_overlap_2_phi_expands,self.y_overlap_2_phi_2_expands,self.y_overlap_expands]# New!!! the components should all be replaced here: matrix information, etc.
# compute the OT matrix multiplications and OT float multiplications, obtain the shares, and add them up
# send the shares
def receive_components(self, components):
self.uB_overlap = components[0]
self.uB_overlap_2 = components[1]
self.uB_overlap_expands= components[2]
self.uB_overlap_2_expands = components[3]
self.uB_overlap_ex_expands = components[4]
self.mapping_comp_B = components[5]
# self._update_gradients()  # New!!! this has to be factored out
# self._update_loss()
def _update_gradients(self):
# y_overlap_2 have shape (len(overlap_indexes), 1),
# phi has shape (1, feature_dim),
# y_overlap_2_phi has shape (len(overlap_indexes), 1, feature_dim)
y_overlap_2_phi = np.expand_dims(self.y_overlap_2 * self.phi, axis=1)
# uB_overlap_2 has shape (len(overlap_indexes), feature_dim, feature_dim)
# loss_grads_const_part1 has shape (len(overlap_indexes), feature_dim)
loss_grads_const_part1 = 0.25 * np.squeeze(np.matmul(y_overlap_2_phi, self.uB_overlap_2), axis=1)
# loss_grads_const_part2 has shape (len(overlap_indexes), feature_dim)
loss_grads_const_part2 = self.y_overlap * self.uB_overlap
# print("reallossconst=-===")
# print(loss_grads_const_part1)
# print(loss_grads_const_part2)
if self.is_trace:
self.logger.debug("loss_grads_const_part1 shape" + str(loss_grads_const_part1.shape))
self.logger.debug("loss_grads_const_part2 shape" + str(loss_grads_const_part2.shape))
self.logger.debug("y_overlap shape" + str(self.y_overlap.shape))
self.logger.debug("uB_overlap shape" + str(self.uB_overlap.shape))
const = np.sum(loss_grads_const_part1, axis=0) - 0.5 * np.sum(loss_grads_const_part2, axis=0)
# grad_A_nonoverlap has shape (len(non_overlap_indexes), feature_dim)
# grad_A_overlap has shape (len(overlap_indexes), feature_dim)
grad_A_nonoverlap = self.alpha * const * self.y[self.non_overlap_indexes] / len(self.y)
grad_A_overlap = self.alpha * const * self.y_overlap / len(self.y) + self.mapping_comp_B
loss_grad_A = np.zeros((len(self.y), self.uB_overlap.shape[1]))
loss_grad_A[self.non_overlap_indexes, :] = grad_A_nonoverlap
loss_grad_A[self.overlap_indexes, :] = grad_A_overlap
loss_grads = loss_grad_A
# print("reallossgrads====")
# print(loss_grads)
#self.localModel.backpropogate(self.X, self.y, loss_grad_A)
#New!!!
def prepare_gradient(self):
# y_overlap_2 have shape (len(overlap_indexes), 1),
# phi has shape (1, feature_dim),
# y_overlap_2_phi has shape (len(overlap_indexes), 1, feature_dim)
y_overlap_2_phi = np.expand_dims(self.y_overlap_2 * self.phi, axis=1)
# uB_overlap_2 has shape (len(overlap_indexes), feature_dim, feature_dim)
# loss_grads_const_part1 has shape (len(overlap_indexes), feature_dim)
res1=np.zeros((len(self.overlap_indexes), 1, self.feature_dim))
processes=[]
queues=[]
for i in range(0,y_overlap_2_phi.shape[0]):
matrixB=y_overlap_2_phi[i]
# matrixA=self.uB_overlap_2[i]
# msgfromA=send_expand_matrix(self.feature_dim,self.feature_dim,matrixA)
# expandsA=receive_expand_matrix(msgfromA)
expandsA=self.uB_overlap_2_expands[i]
# temp=mulmatOT.mulmatOT(0,7891,matrixB,expandsA,1,self.feature_dim)
# res1[i]=temp
#speed up with process
queue=multiprocessing.Queue()
queues.append(queue)
p=MulmatProcess(0,7891+i,matrixB,expandsA,1,self.feature_dim,queue)
p.start()
processes.append(p)
for i,process in enumerate(processes):
process.join()
res1[i]=queues[i].get()
# p=Process(target=process_P,args=(0,7891+i,matrixB,expandsA,1,self.feature_dim,res1,i))
# p.start()
# processes.append(p)
# for process in processes:
# process.join()
loss_grads_const_part1_share=0.25 * np.squeeze(res1, axis=1)
# matrixA=np.zeros((1,1)) # could be optimized
# matrixB=np.zeros((1,1))
# res2=np.zeros((len(self.overlap_indexes),self.feature_dim))
# for i in range(0,self.uB_overlap.shape[0]): # the other way round
# for j in range(0,self.uB_overlap.shape[1]):
# matrixA[0][0]=self.y_overlap[i]
# matrixB[0][0]=self.uB_overlap[i][j]
# msgfromB=send_expand_matrix(1,1,matrixB)
# expandsB=receive_expand_matrix(msgfromB)
# temp=mulmatOT.mulmatOT(0,matrixA,expandsB,1,1)
# res2[i][j]=temp[0][0]
matrixA=np.zeros((len(self.overlap_indexes),self.feature_dim))
for i in range(0,len(self.overlap_indexes)): # the other way round
for j in range(0,self.feature_dim):
matrixA[i][j]=self.y_overlap[i]
# matrixB=self.uB_overlap
# msgfromB=send_expand_matrix(len(self.overlap_indexes),self.feature_dim,matrixB)
expandsB=self.uB_overlap_expands
res2=mulmatOT.mulmatOT_wise(0,7891,matrixA,expandsB,len(self.overlap_indexes),self.feature_dim)
loss_grads_const_part2_share=res2
# self.const_share = [loss_grads_const_part1_share,loss_grads_const_part2_share]
self.const_share=np.sum(loss_grads_const_part1_share, axis=0) - 0.5 * np.sum(loss_grads_const_part2_share, axis=0)
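# const_share is this party's additive secret share; compute_gradients below adds
# the host's share so that const reconstructs
# np.sum(part1, axis=0) - 0.5 * np.sum(part2, axis=0) as in _update_gradients.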
def compute_gradients(self,gradient_share_from_host):
# const_part1=self.const_share[0]+gradient_share_from_host[0]
# const_part2=self.const_share[1]+gradient_share_from_host[1]
# print("lossconst")
# print(const_part1)
# print(const_part2)
# const = np.sum(const_part1, axis=0) - 0.5 * np.sum(const_part2, axis=0)
const=self.const_share+gradient_share_from_host
# grad_A_nonoverlap has shape (len(non_overlap_indexes), feature_dim)
# grad_A_overlap has shape (len(overlap_indexes), feature_dim)
grad_A_nonoverlap = self.alpha * const * self.y[self.non_overlap_indexes] / len(self.y)
grad_A_overlap = self.alpha * const * self.y_overlap / len(self.y) + self.mapping_comp_B
loss_grad_A = np.zeros((len(self.y), self.feature_dim))
loss_grad_A[self.non_overlap_indexes, :] = grad_A_nonoverlap
loss_grad_A[self.overlap_indexes, :] = grad_A_overlap
self.loss_grads = loss_grad_A
# print("loss_grads")
# print(self.loss_grads)
self.localModel.backpropogate(self.X, self.y, loss_grad_A)
def send_gradient_shares(self):
self.assist_gradient()
return self.l1_grad_B_share
#assist gradient for host
def assist_gradient(self):
# uB_overlap_ex has shape (len(overlap_indexes), 1, feature_dim)
# uB_overlap_ex = np.expand_dims(self.uB_overlap, axis=1)
res1=np.zeros((len(self.overlap_indexes), 1, self.feature_dim))
processes=[]
queues=[]
for i in range(0,len(self.overlap_indexes)):
# matrixB=uB_overlap_ex[i]
matrixA=self.y_overlap_2_phi_2[i]
# msgfromB=send_expand_matrix(1,self.feature_dim,matrixB)
expandsB=self.uB_overlap_ex_expands[i]
# temp=mulmatOT.mulmatOT(1,7891,matrixA,expandsB,self.feature_dim,self.feature_dim)
# res1[i]=temp
#speed up with process
queue=multiprocessing.Queue()
queues.append(queue)
p=MulmatProcess(1,7891+i,matrixA,expandsB,self.feature_dim,self.feature_dim,queue)
p.start()
processes.append(p)
for i,process in enumerate(processes):
process.join()
res1[i]=queues[i].get()
# p=Process(target=process_P,args=(1,7891+i,matrixA,expandsB,self.feature_dim,self.feature_dim,res1,i))
# p.start()
# processes.append(p)
# for process in processes:
# process.join()
self.l1_grad_B_share=np.squeeze(res1, axis=1)+self.y_overlap_phi
def send_loss(self):
return self.loss
def receive_loss(self, loss):
self.loss = loss
def _update_loss(self):
uA_overlap = - self.uA_overlap / self.feature_dim
loss_overlap = np.sum(uA_overlap * self.uB_overlap)
# print("=======realLoss")
# print(loss_overlap)
loss_y = self.__compute_loss_y(self.uB_overlap, self.y_overlap, self.phi)
#self.loss = self.alpha * loss_y + loss_overlap
loss = self.alpha * loss_y + loss_overlap
# print("realloss:===")
# print(loss)
#New!!
def prepare_loss(self):
self.prepare_loss_part1()
self.prepare_loss_part2()
def prepare_loss_part1(self):
phit=self.phi.transpose()
# msgfromB=send_expand_matrix(len(self.overlap_indexes),self.feature_dim,self.uB_overlap)
expandsB=self.uB_overlap_expands
self.uB_phi_share=mulmatOT.mulmatOT(1,7891,phit,expandsB,self.feature_dim,1)
# print("------------------result------------------")
# print(self.uB_phi_share)
# print("------------------phi-------------------")
# print(phit)
# print("------------------uB-------------------")
# print(self.uB_overlap)
# outres=np.matmul(self.uB_overlap,phit)
# print("-=======================================-")
# print(outres)
def prepare_loss_part2(self):
uA_overlap = - self.uA_overlap / self.feature_dim # what is this?
# uB_overlap=self.uB_overlap
# matrixA=np.zeros((1,1))#可优化
# matrixB=np.zeros((1,1))
# self.loss_overlap_share=0
# for i in range(0,uA_overlap.shape[0]):
# for j in range(0,uA_overlap.shape[1]):
# matrixA[0][0]=uA_overlap[i][j]
# matrixB[0][0]=uB_overlap[i][j]
# msgfromB=send_expand_matrix(1,1,matrixB)
# expandsB=receive_expand_matrix(msgfromB)
# res=mulmatOT.mulmatOT(1,matrixA,expandsB,1,1)
# self.loss_overlap_share=self.loss_overlap_share+res[0][0]
# msgfromB=send_expand_matrix(len(self.overlap_indexes),self.feature_dim,uB_overlap)
expandsB=self.uB_overlap_expands
res=mulmatOT.mulmatOT_wise(1,7891,uA_overlap,expandsB,len(self.overlap_indexes),self.feature_dim)
self.loss_overlap_share=np.sum(res)
# print("lossA:===")
# print(self.loss_overlap_share)
def compute_loss(self,loss_share):
# print("lossshare:===")
# print(loss_share)
# uA_overlap = - self.uA_overlap / self.feature_dim
loss_overlap = self.loss_overlap_share+loss_share[1]
uB_phi=self.uB_phi_share+loss_share[0]
loss_y = (-0.5 * np.sum(self.y_overlap * uB_phi) + 1.0 / 8 * np.sum(uB_phi * uB_phi)) + len(self.y_overlap) * np.log(2)
loss2 = self.alpha * loss_y + loss_overlap
self.loss=loss2
# print("lossoverlap:===")
# print(loss_overlap)
# print("lossy:===")
# print(loss_y)
# print("loss:===")
# print(loss2)
def __compute_loss_y(self, uB_overlap, y_overlap, phi):
# uB_phi has shape (len(overlap_indexes), 1)
uB_phi = np.matmul(uB_overlap, phi.transpose())
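# second-order Taylor expansion of the logistic loss around 0:
# log(1 + exp(-x)) ~= log(2) - x/2 + x**2/8, applied with x = y * uB_phi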
loss_y = (-0.5 * np.sum(y_overlap * uB_phi) + 1.0 / 8 * np.sum(uB_phi * uB_phi)) + len(y_overlap) * np.log(2)
return loss_y
def get_loss_grads(self):
return self.loss_grads
def predict(self, uB):
if self.phi is None:
self.uA = self.localModel.transform(self.X)
self.phi = self.__compute_phi(self.uA, self.y)
return sigmoid(np.matmul(uB, self.phi.transpose()))
def restore_model(self, model_parameters):
self.localModel.restore_model(model_parameters)
def get_model_parameters(self):
return self.localModel.get_model_parameters()
class OTEncryptedFTLHostModel(PartyModelInterface):
def __init__(self, local_model, model_param, is_trace=False):
super(OTEncryptedFTLHostModel, self).__init__()
self.localModel = local_model
self.feature_dim = local_model.get_encode_dim()
0.20000000000000018),
(0.911600133406095, 2.805616723070703, 0.09999999999999964),
(0.9270509831248424, 2.8531695488854605, 0.0),
(0.9425018328435897, 2.900722374700218, 0.09999999999999964),
(0.9579526825623371, 2.948275200514976, 0.20000000000000018),
(0.9734035322810846, 2.995828026329734, 0.3000000000000007),
(0.9888543819998319, 3.0433808521444914, 0.40000000000000036),
(1.0043052317185792, 3.090933677959249, 0.5),
(1.0197560814373265, 3.1384865037740064, 0.5999999999999996),
(1.035206931156074, 3.1860393295887643, 0.7000000000000002),
(1.0506577808748214, 3.2335921554035223, 0.8000000000000007),
(1.0661086305935688, 3.28114498121828, 0.9000000000000004),
(1.081559480312316, 3.3286978070330373, 1),
(1.0970103300310634, 3.376250632847795, 1),
(1.112461179749811, 3.4238034586625528, 1),
(1.1279120294685583, 3.4713562844773107, 1),
(1.1433628791873056, 3.518909110292068, 1),
(1.158813728906053, 3.5664619361068257, 1),
(1.1742645786248003, 3.614014761921583, 1),
(1.1897154283435478, 3.661567587736341, 1),
(1.2051662780622951, 3.709120413551099, 1),
(1.2206171277810425, 3.7566732393658566, 1),
(1.2360679774997898, 3.804226065180614, 1),
(-0.20905692653530666, 1.9890437907365468, 1),
(-0.2142833496986893, 2.0387698855049603, 1),
(-0.21950977286207202, 2.088495980273374, 1),
(-0.22473619602545467, 2.1382220750417877, 1),
(-0.22996261918883734, 2.1879481698102015, 1),
(-0.23518904235222, 2.237674264578615, 1),
(-0.24041546551560264, 2.2874003593470285, 1),
(-0.24564188867898534, 2.3371264541154426, 1),
(-0.25086831184236796, 2.386852548883856, 1),
(-0.2560947350057507, 2.43657864365227, 1),
(-0.2613211581691333, 2.4863047384206833, 1),
(-0.266547581332516, 2.536030833189097, 0.9000000000000004),
(-0.27177400449589867, 2.5857569279575108, 0.7999999999999998),
(-0.27700042765928135, 2.6354830227259245, 0.7000000000000002),
(-0.282226850822664, 2.685209117494338, 0.5999999999999996),
(-0.28745327398604664, 2.734935212262752, 0.5),
(-0.2926796971494293, 2.784661307031165, 0.40000000000000036),
(-0.297906120312812, 2.8343874017995794, 0.2999999999999998),
(-0.30313254347619467, 2.8841134965679927, 0.20000000000000018),
(-0.30835896663957735, 2.933839591336407, 0.09999999999999964),
(-0.31358538980296, 2.98356568610482, 0.0),
(-0.31881181296634264, 3.033291780873234, 0.09999999999999964),
(-0.3240382361297253, 3.0830178756416475, 0.20000000000000018),
(-0.32926465929310805, 3.1327439704100617, 0.3000000000000007),
(-0.3344910824564907, 3.182470065178475, 0.40000000000000036),
(-0.33971750561987335, 3.2321961599468887, 0.5),
(-0.34494392878325597, 3.281922254715302, 0.5999999999999996),
(-0.35017035194663865, 3.331648349483716, 0.7000000000000002),
(-0.3553967751100214, 3.38137444425213, 0.8000000000000007),
(-0.360623198273404, 3.4311005390205436, 0.9000000000000004),
(-0.3658496214367867, 3.480826633788957, 1),
(-0.3710760446001693, 3.5305527285573706, 1),
(-0.37630246776355203, 3.5802788233257843, 1),
(-0.3815288909269347, 3.6300049180941985, 1),
(-0.3867553140903173, 3.6797310128626117, 1),
(-0.3919817372537, 3.7294571076310254, 1),
(-0.3972081604170826, 3.7791832023994387, 1),
(-0.40243458358046535, 3.828909297167853, 1),
(-0.40766100674384803, 3.8786353919362666, 1),
(-0.4128874299072307, 3.9283614867046803, 1),
(-0.41811385307061333, 3.9780875814730936, 1),
(-0.9999999999999996, 1.7320508075688774, 1),
(-1.0249999999999995, 1.7753520777580991, 1),
(-1.0499999999999996, 1.8186533479473213, 1),
(-1.0749999999999995, 1.8619546181365432, 1),
(-1.0999999999999996, 1.9052558883257653, 1),
(-1.1249999999999996, 1.948557158514987, 1),
(-1.1499999999999995, 1.991858428704209, 1),
(-1.1749999999999996, 2.035159698893431, 1),
(-1.1999999999999995, 2.078460969082653, 1),
(-1.2249999999999996, 2.121762239271875, 1),
(-1.2499999999999996, 2.165063509461097, 1),
(-1.2749999999999992, 2.2083647796503185, 0.9000000000000004),
(-1.2999999999999994, 2.2516660498395407, 0.7999999999999998),
(-1.3249999999999993, 2.2949673200287624, 0.7000000000000002),
(-1.3499999999999994, 2.3382685902179845, 0.5999999999999996),
(-1.3749999999999993, 2.3815698604072066, 0.5),
(-1.3999999999999992, 2.4248711305964283, 0.40000000000000036),
(-1.4249999999999994, 2.4681724007856505, 0.2999999999999998),
(-1.4499999999999993, 2.511473670974872, 0.20000000000000018),
(-1.4749999999999994, 2.5547749411640943, 0.09999999999999964),
(-1.4999999999999993, 2.598076211353316, 0.0),
(-1.5249999999999992, 2.6413774815425377, 0.09999999999999964),
(-1.5499999999999994, 2.68467875173176, 0.20000000000000018),
(-1.5749999999999995, 2.7279800219209824, 0.3000000000000007),
(-1.5999999999999994, 2.771281292110204, 0.40000000000000036),
(-1.6249999999999993, 2.814582562299426, 0.5),
(-1.6499999999999992, 2.8578838324886475, 0.5999999999999996),
(-1.6749999999999994, 2.9011851026778697, 0.7000000000000002),
(-1.6999999999999995, 2.944486372867092, 0.8000000000000007),
(-1.7249999999999994, 2.9877876430563135, 0.9000000000000004),
(-1.7499999999999991, 3.0310889132455356, 1),
(-1.774999999999999, 3.0743901834347573, 1),
(-1.7999999999999992, 3.1176914536239795, 1),
(-1.8249999999999993, 3.1609927238132016, 1),
(-1.8499999999999992, 3.2042939940024233, 1),
(-1.8749999999999991, 3.247595264191645, 1),
(-1.899999999999999, 3.290896534380867, 1),
(-1.9249999999999992, 3.3341978045700893, 1),
(-1.9499999999999993, 3.3774990747593114, 1),
(-1.9749999999999992, 3.420800344948533, 1),
(-1.9999999999999991, 3.464101615137755, 1),
(-1.6180339887498947, 1.1755705045849465, 1),
(-1.658484838468642, 1.20495976719957, 1),
(-1.6989356881873894, 1.234349029814194, 1),
(-1.7393865379061366, 1.2637382924288174, 1),
(-1.7798373876248843, 1.2931275550434413, 1),
(-1.8202882373436315, 1.3225168176580648, 1),
(-1.8607390870623788, 1.3519060802726883, 1),
(-1.9011899367811262, 1.3812953428873123, 1),
(-1.9416407864998735, 1.4106846055019358, 1),
(-1.9820916362186212, 1.4400738681165595, 1),
(-2.022542485937368, 1.4694631307311832, 1),
(-2.0629933356561154, 1.4988523933458067, 0.9000000000000004),
(-2.103444185374863, 1.5282416559604306, 0.7999999999999998),
(-2.1438950350936103, 1.557630918575054, 0.7000000000000002),
(-2.184345884812358, 1.5870201811896778, 0.5999999999999996),
(-2.2247967345311053, 1.6164094438043015, 0.5),
(-2.2652475842498525, 1.645798706418925, 0.40000000000000036),
(-2.3056984339686, 1.6751879690335487, 0.2999999999999998),
(-2.3461492836873474, 1.7045772316481724, 0.20000000000000018),
(-2.3866001334060947, 1.7339664942627961, 0.09999999999999964),
(-2.427050983124842, 1.7633557568774196, 0.0),
(-2.467501832843589, 1.7927450194920433, 0.09999999999999964),
(-2.507952682562337, 1.822134282106667, 0.20000000000000018),
(-2.5484035322810845, 1.851523544721291, 0.3000000000000007),
(-2.5888543819998318, 1.8809128073359145, 0.40000000000000036),
(-2.629305231718579, 1.910302069950538, 0.5),
(-2.6697560814373262, 1.9396913325651617, 0.5999999999999996),
(-2.7102069311560735, 1.9690805951797854, 0.7000000000000002),
(-2.750657780874821, 1.9984698577944093, 0.8000000000000007),
(-2.7911086305935684, 2.027859120409033, 0.9000000000000004),
(-2.8315594803123156, 2.0572483830236563, 1),
(-2.872010330031063, 2.08663764563828, 1),
(-2.9124611797498106, 2.1160269082529037, 1),
(-2.9529120294685582, 2.1454161708675277, 1),
(-2.9933628791873055, 2.174805433482151, 1),
(-3.0338137289060527, 2.2041946960967747, 1),
(-3.0742645786248, 2.233583958711398, 1),
(-3.114715428343547, 2.262973221326022, 1),
(-3.155166278062295, 2.292362483940646, 1),
(-3.195617127781042, 2.3217517465552695, 1),
(-3.2360679774997894, 2.351141009169893, 1))
target = (1.6, 2.6, 0)
interpolator = interp.interpolate.InterpAnisotropic()
pick_closest = True
interpolator.set_points(centerline, cross_sections, pick_closest)
sn_points = interpolator.get_interpolation_points()
expected_sn_points = (
(7.861695483191204, -0.9931736833190422, 1.0),
(7.8602657756205705, -0.9431941281363231, 1.0),
(7.858836068049938, -0.8932145729536034, 1.0),
(7.857406360479321, -0.8432350177708842, 1.0),
(7.855976652908689, -0.7932554625881644, 1.0),
(7.854546945338057, -0.7432759074054452, 1.0),
(7.85311723776744, -0.693296352222726, 1.0),
(7.851687530196807, -0.6433167970400063, 1.0),
(7.850257822626175, -0.5933372418572872, 1.0),
(7.848828115055558, -0.5433576866745674, 1.0),
(7.847398407484926, -0.4933781314918483, 1.0),
(7.845968699914293, -0.4433985763091291, 0.9000000000000004),
(7.844538992343677, -0.39341902112640936, 0.7999999999999998),
(7.8431092847730435, -0.3434394659436902, 0.7000000000000002),
(7.841679577202411, -0.29345991076097055, 0.5999999999999996),
(7.840249869631794, -0.24348035557825168, 0.5),
(7.838820162061162, -0.1935008003955325, 0.40000000000000036),
(7.83739045449053, -0.14352124521281284, 0.2999999999999998),
(7.835960746919913, -0.0935416900300936, 0.20000000000000018),
(7.834531039349265, -0.04356213484737399, 0.09999999999999964),
(7.833101331778632, 0.006417420335345209, 0.0),
(7.831671624208016, 0.05639697551806445, 0.09999999999999964),
(7.830241916637383, 0.1063765307007841, 0.20000000000000018),
(7.828812209066751, 0.15635608588350375, 0.3000000000000007),
(7.827382501496134, 0.20633564106622299, 0.40000000000000036),
(7.825952793925501, 0.25631519624894217, 0.5),
(7.824523086354869, 0.3062947514316614, 0.5999999999999996),
(7.823093378784252, 0.3562743066143811, 0.7000000000000002),
(7.82166367121362, 0.40625386179710077, 0.8000000000000007),
(7.820233963643003, 0.45623341697982, 0.9000000000000004),
(7.8188042560723705, 0.5062129721625391, 1.0),
(7.817374548501738, 0.5561925273452584, 1.0),
(7.815944840931121, 0.606172082527978, 1.0),
(7.814515133360489, 0.6561516377106977, 1.0),
(7.813085425789856, 0.706131192893417, 1.0),
(7.81165571821924, 0.7561107480761358, 1.0),
(7.8102260106486066, 0.8060903032588549, 1.0),
(7.808796303077974, 0.8560698584415747, 1.0),
(7.807366595507357, 0.9060494136242943, 1.0),
(7.805936887936725, 0.9560289688070135, 1.0),
(7.804507180366093, 1.0060085239897327, 1.0),
(6.596612046804048, -0.9927193678084792, 1.0),
(6.595658836240024, -0.9427284547379958, 1.0),
(6.594705625676, -0.8927375416675122, 1.0),
(6.593752415111976, -0.8427466285970288, 1.0),
(6.592799204547952, -0.7927557155265451, 1.0),
(6.591845993983928, -0.742764802456062, 1.0),
(6.590892783419904, -0.6927738893855788, 1.0),
(6.589939572855879, -0.6427829763150952, 1.0),
(6.588986362291855, -0.5927920632446115, 1.0),
(6.5880331517278305, -0.5428011501741281, 1.0),
(6.5870799411638075, -0.49281023710364497, 1.0),
(6.586126730599783, -0.4428193240331617, 0.9000000000000004),
(6.5851735200357595, -0.39282841096267773, 0.7999999999999998),
(6.5842203094717355, -0.3428374978921945, 0.7000000000000002),
(6.58326709890771, -0.2928465848217108, 0.5999999999999996),
(6.582313888343686, -0.24285567175122758, 0.5),
(6.581360677779662, -0.1928647586807443, 0.40000000000000036),
(6.580407467215638, -0.14287384561026065, 0.2999999999999998),
(6.579454256651614, -0.09288293253977742, 0.20000000000000018),
(6.57850104608759, -0.04289201946929378, 0.09999999999999964),
(6.577547835523566, 0.0070988936011898525, 0.0),
(6.576594624959541, 0.05708980667167308, 0.09999999999999964),
(6.575641414395517, 0.10708071974215637, 0.20000000000000018),
(6.574688203831493, 0.15707163281264008, 0.3000000000000007),
(6.573734993267469, 0.20706254588312364, 0.40000000000000036),
(6.572781782703445, 0.2570534589536069, 0.5),
(6.57182857213942, 0.3070443720240902, 0.5999999999999996),
(6.570875361575397, 0.35703528509457416, 0.7000000000000002),
(6.569922151011373, 0.40702619816505786, 0.8000000000000007),
(6.5689689404473475, 0.45701711123554073, 0.9000000000000004),
(6.5680157298833235, 0.5070080243060239, 1.0),
(6.5670625193192995, 0.5569989373765075, 1.0),
(6.5661093087552755, 0.6069898504469912, 1.0),
(6.565156098191252, 0.6569807635174749, 1.0),
(6.564202887627228, 0.7069716765879578, 1.0),
(6.563249677063203, 0.7569625896584413, 1.0),
(6.562296466499179, 0.8069535027289246, 1.0),
(6.561343255935155, 0.8569444157994082, 1.0),
(6.560390045371131, 0.9069353288698919, 1.0),
(6.559436834807107, 0.9569262419403755, 1.0),
(6.558483624243083, 1.0069171550108587, 1.0),
(5.331532075198482, -0.9924467619873838, 1.0),
(5.331055448260601, -0.942449033771373, 1.0),
(5.33057882132272, -0.8924513055553617, 1.0),
(5.330102194384839, -0.842453577339351, 1.0),
(5.329625567446957, -0.7924558491233403, 1.0),
(5.329148940509077, -0.7424581209073295, 1.0),
(5.328672313571195, -0.6924603926913188, 1.0),
(5.328195686633315, -0.6424626644753076, 1.0),
(5.327719059695434, -0.5924649362592969, 1.0),
(5.3272424327575525, -0.5424672080432857, 1.0),
(5.326765805819671, -0.4924694798272755, 1.0),
(5.326289178881791, -0.4424717516112648, 0.9000000000000004),
(5.325812551943909, -0.39247402339525334, 0.7999999999999998),
(5.325335925006029, -0.34247629517924255, 0.7000000000000002),
(5.324859298068148, -0.292478566963232, 0.5999999999999996),
(5.324382671130266, -0.24248083874722107, 0.5),
(5.323906044192386, -0.19248311053121042, 0.40000000000000036),
(5.323429417254505, -0.14248538231519933, 0.2999999999999998),
(5.322952790316624, -0.09248765409918873, 0.20000000000000018),
(5.322476163378743, -0.04248992588317764, 0.09999999999999964),
(5.321999536440861, 0.0075078023328332515, 0.0),
(5.32152290950298, 0.05750553054884372, 0.09999999999999964),
(5.321046282565099, 0.10750325876485511, 0.20000000000000018),
(5.320569655627218, 0.1575009869808662, 0.3000000000000007),
(5.320093028689337, 0.20749871519687632, 0.40000000000000036),
(5.319616401751456, 0.2574964434128874, 0.5),
(5.319139774813576, 0.30749417162889786, 0.5999999999999996),
(5.318663147875695, 0.357491899844909, 0.7000000000000002),
(5.318186520937814, 0.40748962806092004, 0.8000000000000007),
(5.3177098939999325, 0.45748735627693116, 0.9000000000000004),
(5.3172332670620515, 0.5074850844929413, 1.0),
(5.3167566401241695, 0.5574828127089518, 1.0),
(5.316280013186289, 0.6074805409249633, 1.0),
(5.315803386248408, 0.6574782691409743, 1.0),
(5.315326759310527, 0.7074759973569847, 1.0),
(5.314850132372646, 0.7574737255729953, 1.0),
(5.314373505434766, 0.807471453789006, 1.0),
(5.313896878496885, 0.8574691820050171, 1.0),
(5.313420251559004, 0.9074669102210281, 1.0),
(5.312943624621123, 0.9574646384370391, 1.0),
(5.3124669976832415, 1.0074623666530496, 1.0),
(4.066453836062427, -0.9923558906278168, 1.0),
(4.066453836062427, -0.942355890627817, 1.0),
(4.066453836062427, -0.8923558906278167, 1.0),
(4.066453836062427, -0.8423558906278171, 1.0),
(4.066453836062427, -0.7923558906278169, 1.0),
(4.066453836062427, -0.742355890627817, 1.0),
(4.066453836062427, -0.6923558906278172, 1.0),
(4.066453836062427, -0.642355890627817, 1.0),
(4.066453836062427, -0.5923558906278171, 1.0),
(4.066453836062427, -0.5423558906278168, 1.0),
(4.066453836062427, -0.492355890627817, 1.0),
(4.066453836062427, -0.4423558906278172, 0.9000000000000004),
(4.066453836062427, -0.3923558906278169, 0.7999999999999998),
(4.066453836062427, -0.34235589062781707, 0.7000000000000002),
(4.066453836062427, -0.2923558906278168, 0.5999999999999996),
(4.066453836062427, -0.24235589062781696, 0.5),
(4.066453836062427, -0.19235589062781713, 0.40000000000000036),
(4.066453836062427, -0.14235589062781687, 0.2999999999999998),
(4.066453836062427, -0.09235589062781704, 0.20000000000000018),
(4.066453836062427, -0.042355890627816764, 0.09999999999999964),
(4.066453836062427, 0.007644109372183057, 0.0),
(4.066453836062427, 0.05764410937218288, 0.09999999999999964),
(4.066453836062427, 0.10764410937218315, 0.20000000000000018),
(4.066453836062427, 0.15764410937218346, 0.3000000000000007),
(4.066453836062427, 0.20764410937218328, 0.40000000000000036),
(4.066453836062427, 0.2576441093721831, 0.5),
(4.066453836062427, 0.30764410937218295, 0.5999999999999996),
(4.066453836062427, 0.3576441093721832, 0.7000000000000002),
(4.066453836062427, 0.4076441093721835, 0.8000000000000007),
(4.066453836062427, 0.4576441093721833, 0.9000000000000004),
(4.066453836062427, 0.5076441093721831, 1.0),
(4.066453836062427, 0.5576441093721829, 1.0),
(4.066453836062427, 0.6076441093721833, 1.0),
(4.066453836062427, 0.6576441093721835, 1.0),
(4.066453836062427, 0.7076441093721834, 1.0),
(4.066453836062427, 0.7576441093721832, 1.0),
(4.066453836062427, 0.807644109372183, 1.0),
(4.066453836062427, 0.8576441093721833, 1.0),
(4.0664538360624265, 0.9076441093721832, 1.0),
(4.0664538360624265, 0.9576441093721829, 1.0),
(4.0664538360624265, 1.0076441093721826, 1.0),
(2.801375596926374, -0.9924467619873834, 1.0),
(2.801852223864255, -0.942449033771373, 1.0),
(2.802328850802136, -0.8924513055553619, 1.0),
(2.8028054777400166, -0.8424535773393513, 1.0),
(2.8032821046778977, -0.7924558491233402, 1.0),
(2.803758731615779, -0.7424581209073292, 1.0),
(2.80423535855366, -0.692460392691319, 1.0),
(2.804711985491541, -0.6424626644753074, 1.0),
(2.805188612429422, -0.5924649362592973, 1.0),
(2.8056652393673027, -0.5424672080432857, 1.0),
(2.806141866305184, -0.4924694798272752, 1.0),
(2.806618493243065, -0.44247175161126456, 0.9000000000000004),
(2.807095120180946, -0.3924740233952535, 0.7999999999999998),
(2.807571747118827, -0.34247629517924283, 0.7000000000000002),
(2.8080483740567077, -0.29247856696323177, 0.5999999999999996),
(2.808525000994589, -0.2424808387472207, 0.5),
(2.80900162793247, -0.19248311053121056, 0.40000000000000036),
(2.809478254870351, -0.14248538231519903, 0.2999999999999998),
(2.8099548818082316, -0.09248765409918885, 0.20000000000000018),
(2.8104315087461127, -0.04248992588317734, 0.09999999999999964),
(2.810908135683994, 0.007507802332833277, 0.0),
(2.811384762621875, 0.0575055305488439, 0.09999999999999964),
(2.811861389559756, 0.10750325876485498, 0.20000000000000018),
(2.812338016497637, 0.15750098698086604, 0.3000000000000007),
(2.8128146434355177, 0.20749871519687668, 0.40000000000000036),
(2.8132912703733988, 0.25749644341288774, 0.5),
(2.81376789731128, 0.3074941716288979, 0.5999999999999996),
(2.814244524249161, 0.3574918998449094, 0.7000000000000002),
(2.814721151187042, 0.40748962806092004, 0.8000000000000007),
(2.815197778124923, 0.4574873562769311, 0.9000000000000004),
(2.8156744050628038, 0.5074850844929417, 1.0),
(2.816151032000685, 0.5574828127089523, 1.0),
(2.816627658938566, 0.6074805409249634, 1.0),
(2.8171042858764466, 0.6574782691409745, 1.0),
(2.8175809128143277, 0.7074759973569851, 1.0),
(2.8180575397522087, 0.7574737255729962, 1.0),
(2.81853416669009, 0.8074714537890063, 1.0),
(2.819010793627971, 0.8574691820050179, 1.0),
(2.819487420565852, 0.9074669102210285, 1.0),
(2.8199640475037326, 0.9574646384370396, 1.0),
(2.8204406744416137, 1.0074623666530502, 1.0),
(1.5362956253208062, -0.9927193678084795, 1.0),
(1.5372488358848306, -0.9427284547379963, 1.0),
(1.5382020464488548, -0.8927375416675125, 1.0),
(1.5391552570128788, -0.8427466285970292, 1.0),
(1.540108467576903, -0.7927557155265457, 1.0),
(1.5410616781409272, -0.7427648024560625, 1.0),
(1.5420148887049514, -0.6927738893855792, 1.0),
(1.5429680992689756, -0.6427829763150953, 1.0),
(1.5439213098329998, -0.5927920632446118, 1.0),
(1.5448745203970238, -0.5428011501741281, 1.0),
(1.545827730961048, -0.4928102371036451, 1.0),
(1.5467809415250722, -0.44281932403316204, 0.9000000000000004),
(1.5477341520890961, -0.39282841096267807, 0.7999999999999998),
(1.5486873626531203, -0.342837497892195, 0.7000000000000002),
(1.5496405732171445, -0.2928465848217113, 0.5999999999999996),
(1.550593783781169, -0.2428556717512277, 0.5),
(1.551546994345193, -0.1928647586807446, 0.40000000000000036),
(1.5525002049092171, -0.1428738456102609, 0.2999999999999998),
(1.5534534154732413, -0.09288293253977782, 0.20000000000000018),
(1.5544066260372653, -0.042892019469293864, 0.09999999999999964),
(1.5553598366012897, 0.0070988936011893355, 0.0),
(1.5563130471653137, 0.05708980667167242, 0.09999999999999964),
(1.5572662577293377, 0.10708071974215638, 0.20000000000000018),
(1.558219468293362, 0.15707163281264044, 0.3000000000000007),
(1.559172678857386, 0.20706254588312353, 0.40000000000000036),
(1.56012588942141, 0.2570534589536066, 0.5),
(1.5610790999854345, 0.3070443720240898, 0.5999999999999996),
(1.5620323105494585, 0.3570352850945734, 0.7000000000000002),
(1.5629855211134827, 0.4070261981650571, 0.8000000000000007),
(1.5639387316775069, 0.45701711123554056, 0.9000000000000004),
(1.5648919422415313, 0.5070080243060238, 1.0),
(1.5658451528055553, 0.5569989373765072, 1.0),
(1.5667983633695797, 0.6069898504469909, 1.0),
(1.5677515739336036, 0.6569807635174745, 1.0),
(1.5687047844976276, 0.7069716765879576, 1.0),
(1.569657995061652, 0.7569625896584408, 1.0),
(1.570611205625676, 0.8069535027289246, 1.0),
(1.5715644161897, 0.8569444157994082, 1.0),
(1.5725176267537244, 0.9069353288698918, 1.0),
(1.5734708373177484, 0.956926241940375, 1.0),
(1.5744240478817724, 1.006917155010858, 1.0),
(0.27121218893364296, -0.9931736833190418, 1.0),
(0.27264189650427045, -0.9431941281363226, 1.0),
(0.274071604074898, -0.8932145729536032, 1.0),
(0.27550131164552544, -0.8432350177708837, 1.0),
(0.27693101921615293, -0.793255462588164, 1.0),
(0.27836072678677964, -0.7432759074054449, 1.0),
(0.27979043435740714, -0.6932963522227258, 1.0),
(0.28122014192803463, -0.6433167970400061, 1.0),
(0.2826498494986621, -0.593337241857287, 1.0),
(0.28407955706928967, -0.5433576866745674, 1.0),
(0.2855092646399171, -0.4933781314918481, 1.0),
(0.2869389722105446, -0.44339857630912904, 0.9000000000000004),
(0.2883686797811721, -0.39341902112640936, 0.7999999999999998),
(0.28979838735179964, -0.34343946594369, 0.7000000000000002),
(0.2912280949224263, -0.29345991076097033, 0.5999999999999996),
(0.29265780249305456, -0.24348035557825098, 0.5),
(0.29408751006368133, -0.19350080039553202, 0.40000000000000036),
(0.29551721763430877, -0.14352124521281223, 0.2999999999999998),
(0.29694692520493626, -0.09354169003009301, 0.20000000000000018),
(0.29837663277556375, -0.04356213484737381, 0.09999999999999964),
(0.2998063403461913, 0.006417420335345516, 0.0),
(0.30123604791681796, 0.05639697551806486, 0.09999999999999964),
(0.3026657554874447, 0.10637653070078455, 0.20000000000000018),
(0.30409546305807217, 0.15635608588350425, 0.3000000000000007),
(0.30552517062869966, 0.20633564106622335, 0.40000000000000036),
(0.3069548781993272, 0.25631519624894267, 0.5),
(0.30838458576995464, 0.30629475143166185, 0.5999999999999996),
(0.30981429334058214, 0.3562743066143811, 0.7000000000000002),
(0.31124400091120963, 0.40625386179710105, 0.8000000000000007),
import unittest
import saspy
import os
import pandas as pd
from IPython.utils.tempdir import TemporaryDirectory
from pandas.util.testing import assert_frame_equal
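# note: pandas deprecated pandas.util.testing in 1.0; newer versions expose
# assert_frame_equal under pandas.testing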
class TestSASdataObject(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.sas = saspy.SASsession(results='HTML') #cfgname='default')
#cls.assertIsInstance(cls.sas, saspy.SASsession, msg="sas = saspy.SASsession(...) failed")
@classmethod
def tearDownClass(cls):
if cls.sas:
cls.sas._endsas()
def test_SASdata(self):
#test sasdata method
cars = self.sas.sasdata('cars', libref='sashelp', results='text')
self.assertIsInstance(cars, saspy.SASdata, msg="cars = sas.sasdata(...) failed")
def test_SASdata_batch(self):
#test set_batch()
cars = self.sas.sasdata('cars', libref='sashelp', results='text')
self.sas.set_batch(True)
ll = cars.head()
self.assertIsInstance(ll, dict, msg="set_batch(True) didn't return dict")
def test_SASdata_head(self):
#test head()
cars = self.sas.sasdata('cars', libref='sashelp', results='text')
self.sas.set_batch(True)
ll = cars.head()
expected = ['1', 'Acura', 'MDX', 'SUV', 'Asia', 'All', '$36,945', '$33,337',
'3.5', '6', '265', '17', '23', '4451', '106', '189']
rows = ll['LST'].splitlines()
retrieved = []
for i in range(len(rows)):
retrieved.append(rows[i].split())
self.assertIn(expected, retrieved, msg="cars.head() result didn't contain row 1")
@unittest.skip("Test failes with extra header info")
def test_SASdata_tail(self):
#test tail()
cars = self.sas.sasdata('cars', libref='sashelp', results='text')
self.sas.set_batch(True)
ll = cars.tail()
expected = ['424', 'Volvo', 'C70', 'LPT', 'convertible', '2dr', 'Sedan', 'Europe', 'Front',
'$40,565', '$38,203', '2.4', '5', '197', '21', '28', '3450', '105', '186']
rows = ll['LST'].splitlines()
retrieved = []
for i in range(len(rows)):
retrieved.append(rows[i].split())
self.assertIn(expected, retrieved, msg="cars.tail() result didn't contain row 1")
def test_SASdata_tailPD(self):
#test tail()
cars = self.sas.sasdata('cars', libref='sashelp', results='pandas')
self.sas.set_batch(True)
ll = cars.tail()
self.assertEqual(ll.shape, (5,15), msg="wrong shape returned")
self.assertIsInstance(ll, pd.DataFrame, "Is return type correct")
def test_SASdata_contents(self):
#test contents()
cars = self.sas.sasdata('cars', libref='sashelp', results='text')
self.sas.set_batch(True)
ll = cars.contents()
expected = ['Data', 'Set', 'Name', 'SASHELP.CARS', 'Observations', '428']
rows = ll['LST'].splitlines()
retrieved = []
for i in range(len(rows)):
retrieved.append(rows[i].split())
self.assertIn(expected, retrieved, msg="cars.contents() result didn't contain expected result")
def test_SASdata_describe(self):
#test describe()
cars = self.sas.sasdata('cars', libref='sashelp', results='text')
self.sas.set_batch(True)
ll = cars.describe()
expected = ['MSRP', '428', '0', '27635', '32775', '19432', '10280', '20330', '27635']
rows = ll['LST'].splitlines()
retrieved = []
for i in range(len(rows)):
retrieved.append(rows[i].split())
self.assertIn(expected, retrieved, msg="cars.describe() result didn't contain expected result")
def test_SASdata_results(self):
#test set_results()
cars = self.sas.sasdata('cars', libref='sashelp', results='text')
self.sas.set_batch(True)
cars.set_results('HTML')
ll = cars.describe()
expected = '<!DOCTYPE html>'
row1 = ll['LST'].splitlines()[0]
self.assertEqual(expected, row1, msg="cars.set_results() results weren't HTML")
cars.set_results('TEXT')
ll = cars.describe()
row1 = ll['LST'].splitlines()[0]
self.assertNotEqual(expected, row1, msg="cars.set_results() results weren't TEXT")
def test_SASdata_hist(self):
#test hist()
cars = self.sas.sasdata('cars', libref='sashelp', results='text')
self.sas.set_batch(True)
cars.set_results('TEXT')
ll = cars.hist('MSRP')
expected = 'alt="The SGPlot Procedure" src="data:image/png;base64'
self.assertIsInstance(ll, dict, msg="cars.hist(...) didn't return dict")
self.assertGreater(len(ll['LST']), 40000, msg="cars.hist(...) results were too short")
self.assertIn(expected, ll['LST'], msg="cars.hist(...) results weren't what was expected")
cars.set_results('HTML')
def test_SASdata_series(self):
#test series()
self.sas.set_batch(True)
ll = self.sas.submit('''proc sql;
create table sales as
select month, sum(actual) as tot_sales, sum(predict) as predicted_sales
from sashelp.prdsale
group by 1
order by month ;quit;
''')
sales = self.sas.sasdata('sales')
ll = sales.series(y=['tot_sales','predicted_sales'], x='month', title='total vs. predicted sales')
expected = 'alt="The SGPlot Procedure" src="data:image/png;base64'
self.assertIsInstance(ll, dict, msg="cars.series(...) didn't return dict")
self.assertGreater(len(ll['LST']), 70000, msg="cars.series(...) result were too short")
self.assertIn(expected, ll['LST'], msg="cars.series(...) result weren't what was expected")
def test_SASdata_heatmap(self):
#test heatmap()
cars = self.sas.sasdata('cars', libref='sashelp', results='text')
self.sas.set_batch(True)
ll = cars.heatmap('MSRP','horsepower')
expected = 'alt="The SGPlot Procedure" src="data:image/png;base64'
self.assertIsInstance(ll, dict, msg="cars.heatmap(...) didn't return dict")
self.assertGreater(len(ll['LST']), 30000, msg="cars.heatmap(...) results were too short")
self.assertIn(expected, ll['LST'], msg="cars.heatmap(...) results weren't what was expected")
def test_SASdata_sort1(self):
# Create dataset in WORK
self.sas.submit("data cars; set sashelp.cars; id=_n_;run;")
wkcars = self.sas.sasdata('cars')
# Sort data in place by one variable
wkcars.sort('type')
self.assertIsInstance(wkcars, saspy.SASdata, msg="Sort didn't return SASdata Object")
def test_SASdata_sort2(self):
# Create dataset in WORK
self.sas.submit("data cars; set sashelp.cars; id=_n_;run;")
wkcars = self.sas.sasdata('cars')
# Sort data in place by multiple variables
wkcars.sort('type descending origin')
self.assertIsInstance(wkcars, saspy.SASdata, msg="Sort didn't return SASdata Object")
def test_SASdata_sort3(self):
# Create dataset in WORK
self.sas.submit("data cars; set sashelp.cars; id=_n_;run;")
wkcars = self.sas.sasdata('cars')
# create a second object pointing to the same data set
dup=wkcars.sort('type')
self.assertEqual(wkcars, dup, msg="Sort objects are not equal but should be")
def test_SASdata_sort4(self):
# Create dataset in WORK
self.sas.submit("data cars; set sashelp.cars; id=_n_;run;")
wkcars = self.sas.sasdata('cars')
# create a second object with a different sort order
diff=self.sas.sasdata('diff')
diff=wkcars.sort('origin',diff)
self.assertNotEqual(wkcars, diff, msg="Sort objects are equal but should not be")
def test_SASdata_sort5(self):
# Create dataset in WORK
self.sas.submit("data cars; set sashelp.cars; id=_n_;run;")
wkcars = self.sas.sasdata('cars')
# create object within call
wkcars.sort('type')
out1=wkcars.sort('origin', self.sas.sasdata('out1'))
self.assertIsInstance(out1, saspy.SASdata, msg="Sort didn't return new SASdata Object")
self.assertNotEqual(wkcars, out1, msg="Sort objects are equal but should not be")
def test_SASdata_sort6(self):
# Create dataset in WORK
self.sas.submit("data cars; set sashelp.cars; id=_n_;run;")
wkcars = self.sas.sasdata('cars')
# sort by missing variable
self.assertRaises(RuntimeError, lambda: wkcars.sort('foobar'))
def test_SASdata_score1(self):
# Create dataset in WORK
self.sas.submit("data cars; set sashelp.cars; id=_n_;run;")
wkcars = self.sas.sasdata('cars')
a = wkcars.columnInfo()
wkcars.score(code='P_originUSA = origin;')
b = wkcars.columnInfo()
self.assertNotEqual(a, b, msg="B should have an extra column P_originUSA")
def test_SASdata_score2(self):
# Create dataset in WORK
self.sas.submit("data cars; set sashelp.cars; id=_n_;run;")
wkcars = self.sas.sasdata('cars')
wkcars.set_results('PANDAS')
wkcars2 = self.sas.sasdata('cars2', 'work')
wkcars2.set_results('PANDAS')
a = wkcars.columnInfo()
wkcars.score(code='P_originUSA = origin;', out=wkcars2)
b = wkcars.columnInfo()
self.assertFalse(assert_frame_equal(a, b), msg="B should be identical to a")
self.assertIsInstance(wkcars2, saspy.sasbase.SASdata, "Does out dataset exist")
def test_SASdata_score3(self):
with TemporaryDirectory() as temppath:
with open(os.path.join(temppath, 'score.sas'), 'w') as f:
f.write('P_originUSA = origin;')
# Create dataset in WORK
self.sas.submit("data cars; set sashelp.cars; id=_n_;run;")
wkcars = self.sas.sasdata('cars')
wkcars.set_results('PANDAS')
wkcars2 = self.sas.sasdata('cars2', 'work')
wkcars2.set_results('PANDAS')
a = wkcars.columnInfo()
wkcars.score(file=f.name, out=wkcars2)
b = wkcars.columnInfo()
self.assertFalse(assert_frame_equal(a, b), msg="B should be identical to a")
self.assertIsInstance(wkcars2, saspy.sasbase.SASdata, "Does out dataset exist")
def test_SASdata_score4(self):
with TemporaryDirectory() as temppath:
with open(os.path.join(temppath, 'score.sas'), 'w') as f:
f.write('P_originUSA = origin;')
# Create dataset in WORK
self.sas.submit("data cars; set sashelp.cars; id=_n_;run;")
wkcars = self.sas.sasdata('cars')
a = wkcars.columnInfo()
wkcars.score(file=f.name)
b = wkcars.columnInfo()
self.assertNotEqual(a, b, msg="B should have an extra column P_originUSA")
def test_regScoreAssess(self):
stat = self.sas.sasstat()
self.sas.submit("""
data work.class;
set sashelp.class;
run;
""")
tr = self.sas.sasdata("class", "work")
tr.set_results('PANDAS')
with TemporaryDirectory() as temppath:
fname = os.path.join(temppath, 'hpreg_code.sas')
b = stat.hpreg(data=tr, model='weight=height', code=fname)
tr.score(file=os.path.join(temppath, 'hpreg_code.sas'))
            # check that P_Weight is in columnInfo
self.assertTrue('P_Weight' in tr.columnInfo()['Variable'].values, msg="Prediction Column not found")
res1 = tr.assessModel(target = 'weight', prediction='P_weight', nominal=False)
a = ['ASSESSMENTBINSTATISTICS', 'ASSESSMENTSTATISTICS', 'LOG']
            self.assertEqual(sorted(a), sorted(res1.__dir__()),
                             msg=u" model failed to return correct objects expected:{0:s} returned:{1:s}".format(
                                 str(a), str(sorted(res1.__dir__()))))
self.assertIsInstance(res1, saspy.SASresults, "Is return type correct")
def test_regScoreAssess2(self):
stat = self.sas.sasstat()
self.sas.submit("""
data work.class;
set sashelp.class;
run;
""")
tr = self.sas.sasdata("class", "work")
tr.set_results('PANDAS')
with TemporaryDirectory() as temppath:
fname = os.path.join(temppath, 'hplogistic_code.sas')
b = stat.hplogistic(data=tr, model='sex = weight height', code=fname)
tr.score(file=fname)
            # check that P_SexF is in columnInfo
self.assertTrue('P_SexF' in tr.columnInfo()['Variable'].values, msg="Prediction Column not found")
res1 = tr.assessModel(target = 'sex', prediction='P_SexF', nominal=True, event='F')
a = ['ASSESSMENTBINSTATISTICS', 'ASSESSMENTSTATISTICS', 'LOG', 'SGPLOT']
            self.assertEqual(sorted(a), sorted(res1.__dir__()),
                             msg=u" model failed to return correct objects expected:{0:s} returned:{1:s}".format(
                                 str(a), str(sorted(res1.__dir__()))))
self.assertIsInstance(res1, saspy.SASresults, "Is return type correct")
def test_partition1(self):
self.sas.submit("""
data work.class;
set sashelp.class;
run;
""")
tr = self.sas.sasdata("class", "work")
tr.set_results('PANDAS')
tr.partition(var='sex', fraction = .5, kfold=1, out=None, singleOut=True)
self.assertTrue('_PartInd_' in tr.columnInfo()['Variable'].values, msg="Partition Column not found")
def test_partition2(self):
self.sas.submit("""
data work.class;
set sashelp.class;
run;
""")
tr = self.sas.sasdata("class", "work")
tr.set_results('PANDAS')
tr.partition(var='sex', fraction = .5, kfold=2, out=None, singleOut=True)
self.assertTrue('_cvfold2' in tr.columnInfo()['Variable'].values, msg="Partition Column not found")
def test_partition3(self):
self.sas.submit("""
data work.class;
set sashelp.class;
run;
""")
tr = self.sas.sasdata("class", "work")
out = self.sas.sasdata("class2", "work")
tr.set_results('PANDAS')
out.set_results('PANDAS')
tr.partition(var='sex', fraction = .5, kfold=2, out=out, singleOut=True)
self.assertFalse('_cvfold1' in tr.columnInfo()['Variable'].values, msg="Writing to wrong table")
self.assertFalse('_PartInd_ ' in tr.columnInfo()['Variable'].values, msg="Writing to wrong table")
self.assertTrue('_cvfold2' in out.columnInfo()['Variable'].values, msg="Partition Column not found")
def test_partition4(self):
self.sas.submit("""
data work.class;
set sashelp.class;
run;
""")
tr = self.sas.sasdata("class", "work")
out = self.sas.sasdata("class2", "work")
tr.set_results('PANDAS')
out.set_results('PANDAS')
res1 = tr.partition(var='sex', fraction = .5, kfold=2, out=out, singleOut=False)
self.assertFalse('_cvfold1' in tr.columnInfo()['Variable'].values, msg="Writing to wrong table")
self.assertFalse('_PartInd_ ' in tr.columnInfo()['Variable'].values, msg="Writing to wrong table")
self.assertTrue('_cvfold2' in out.columnInfo()['Variable'].values, msg="Partition Column not found")
self.assertIsInstance(res1, list, "Is return type correct")
self.assertIsInstance(res1[0], tuple, "Is return type correct")
self.assertIsInstance(res1[0][1], saspy.SASdata, "Is return type correct")
def test_partition5(self):
self.sas.submit("""
data work.class;
set sashelp.class;
run;
""")
tr = self.sas.sasdata("class", "work")
tr.set_results('PANDAS')
tr.partition(fraction = .5, kfold=1, out=None, singleOut=True)
self.assertTrue('_PartInd_' in tr.columnInfo()['Variable'].values, msg="Partition Column not found")
def test_info1(self):
tr = self.sas.sasdata("class", "sashelp")
tr.set_results('Pandas')
res = tr.info()
self.assertIsInstance(res, pd.DataFrame, msg= 'Data frame not returned')
self.assertEqual(res.shape, (5, 4), msg="wrong shape returned")
def test_info2(self):
tr = self.sas.sasdata("class", "sashelp")
tr.set_results('text')
res = tr.info()
self.assertIsNone(res, msg = "only works with Pandas" )
def test_info3(self):
tr = self.sas.sasdata("class", "sashelp")
tr.set_results('html')
        res = tr.info()
        self.assertIsNone(res, msg="only works with Pandas")
relevant orbitals were generated and that no duplicates (2p and 2p_x) are "
"present"
)
kpoints_array = []
for ifilename, filename in enumerate(filenames):
with zopen(filename, "rt") as f:
contents = f.read().split("\n")
if ifilename == 0:
self.nbands = int(parameters[6])
self.number_kpts = kpoints_object.num_kpts - int(contents[1].split()[2]) + 1
if len(contents[1:]) == self.nbands + 2:
self.is_spinpolarized = False
elif len(contents[1:]) == self.nbands * 2 + 2:
self.is_spinpolarized = True
else:
linenumbers = []
for iline, line in enumerate(contents[1 : self.nbands * 2 + 4]):
if line.split()[0] == "#":
linenumbers.append(iline)
if ifilename == 0:
if len(linenumbers) == 2:
self.is_spinpolarized = True
else:
self.is_spinpolarized = False
if ifilename == 0:
eigenvals = {}
eigenvals[Spin.up] = [
[collections.defaultdict(float) for i in range(self.number_kpts)] for j in range(self.nbands)
]
if self.is_spinpolarized:
eigenvals[Spin.down] = [
[collections.defaultdict(float) for i in range(self.number_kpts)] for j in range(self.nbands)
]
p_eigenvals = {}
p_eigenvals[Spin.up] = [
[
{
str(e): {str(orb): collections.defaultdict(float) for orb in atom_orbital_dict[e]}
for e in atomnames
}
for i in range(self.number_kpts)
]
for j in range(self.nbands)
]
if self.is_spinpolarized:
p_eigenvals[Spin.down] = [
[
{
str(e): {str(orb): collections.defaultdict(float) for orb in atom_orbital_dict[e]}
for e in atomnames
}
for i in range(self.number_kpts)
]
for j in range(self.nbands)
]
ikpoint = -1
for iline, line in enumerate(contents[1:-1]):
if line.split()[0] == "#":
KPOINT = np.array(
[
float(line.split()[4]),
float(line.split()[5]),
float(line.split()[6]),
]
)
if ifilename == 0:
kpoints_array.append(KPOINT)
linenumber = 0
iband = 0
ikpoint += 1
if linenumber == self.nbands:
iband = 0
if line.split()[0] != "#":
if linenumber < self.nbands:
if ifilename == 0:
eigenvals[Spin.up][iband][ikpoint] = float(line.split()[1]) + self.efermi
p_eigenvals[Spin.up][iband][ikpoint][atomnames[ifilename]][orbital_names[ifilename]] = float(
line.split()[2]
)
if linenumber >= self.nbands and self.is_spinpolarized:
if ifilename == 0:
eigenvals[Spin.down][iband][ikpoint] = float(line.split()[1]) + self.efermi
p_eigenvals[Spin.down][iband][ikpoint][atomnames[ifilename]][orbital_names[ifilename]] = float(
line.split()[2]
)
linenumber += 1
iband += 1
self.kpoints_array = kpoints_array
self.eigenvals = eigenvals
self.p_eigenvals = p_eigenvals
label_dict = {}
for ilabel, label in enumerate(kpoints_object.labels[-self.number_kpts :], start=0):
if label is not None:
label_dict[label] = kpoints_array[ilabel]
self.label_dict = label_dict
def get_bandstructure(self):
"""
returns a LobsterBandStructureSymmLine object which can be plotted with a normal BSPlotter
"""
return LobsterBandStructureSymmLine(
kpoints=self.kpoints_array,
eigenvals=self.eigenvals,
lattice=self.lattice,
efermi=self.efermi,
labels_dict=self.label_dict,
structure=self.structure,
projections=self.p_eigenvals,
)
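# A minimal usage sketch: the enclosing reader class is truncated above, so its
# name and constructor are not shown in this excerpt. Assuming `reader` is an
# already-constructed instance, the returned LobsterBandStructureSymmLine plugs
# into pymatgen's standard band-structure plotter:
def _example_plot_bandstructure(reader):
    from pymatgen.electronic_structure.plotter import BSPlotter

    bs = reader.get_bandstructure()  # LobsterBandStructureSymmLine, see above
    return BSPlotter(bs).get_plot()  # matplotlib plot, per pymatgen's plotter API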
class Bandoverlaps:
"""
Class to read in bandOverlaps.lobster files. These files are not created during every Lobster run.
.. attribute: bandoverlapsdict is a dict of the following form:
{spin:{"kpoint as string": {"maxDeviation": float that describes the max deviation, "matrix": 2D
array of the size number of bands times number of bands including the overlap matrices with } }}
.. attribute: maxDeviation is a list of floats describing the maximal Deviation for each problematic kpoint
"""
def __init__(self, filename: str = "bandOverlaps.lobster"):
"""
Args:
filename: filename of the "bandOverlaps.lobster" file
"""
with zopen(filename, "rt") as f:
contents = f.read().split("\n")
self._read(contents)
def _read(self, contents: list):
"""
will read in all contents of the file
Args:
contents: list of strings
"""
self.bandoverlapsdict = {} # type: Dict
self.max_deviation = [] # type: List
# This has to be done like this because there can be different numbers of problematic k-points per spin
for line in contents:
if "Overlap Matrix (abs) of the orthonormalized projected bands for spin 0" in line:
spin = Spin.up
elif "Overlap Matrix (abs) of the orthonormalized projected bands for spin 1" in line:
spin = Spin.down
elif "k-point" in line:
kpoint = line.split(" ")
kpoint_array = []
for kpointel in kpoint:
if kpointel not in ["at", "k-point", ""]:
kpoint_array.append(str(kpointel))
elif "maxDeviation" in line:
if spin not in self.bandoverlapsdict:
self.bandoverlapsdict[spin] = {}
if not " ".join(kpoint_array) in self.bandoverlapsdict[spin]:
self.bandoverlapsdict[spin][" ".join(kpoint_array)] = {}
maxdev = line.split(" ")[2]
self.bandoverlapsdict[spin][" ".join(kpoint_array)]["maxDeviation"] = float(maxdev)
self.max_deviation.append(float(maxdev))
self.bandoverlapsdict[spin][" ".join(kpoint_array)]["matrix"] = []
else:
overlaps = []
for el in line.split(" "):
if el not in [""]:
overlaps.append(float(el))
self.bandoverlapsdict[spin][" ".join(kpoint_array)]["matrix"].append(overlaps)
def has_good_quality_maxDeviation(self, limit_maxDeviation: float = 0.1) -> bool:
"""
        will check if the maxDeviation from the ideal bandoverlap is smaller than or equal to limit_maxDeviation
Args:
limit_maxDeviation: limit of the maxDeviation
Returns:
Boolean that will give you information about the quality of the projection
"""
for deviation in self.max_deviation:
if deviation > limit_maxDeviation:
return False
return True
def has_good_quality_check_occupied_bands(
self,
number_occ_bands_spin_up: int,
number_occ_bands_spin_down: Optional[int] = None,
spin_polarized: bool = False,
limit_deviation: float = 0.1,
) -> bool:
"""
        will check if the deviation from the ideal bandoverlap of all occupied bands is smaller than or equal to
        limit_deviation
Args:
number_occ_bands_spin_up (int): number of occupied bands of spin up
number_occ_bands_spin_down (int): number of occupied bands of spin down
spin_polarized (bool): If True, then it was a spin polarized calculation
limit_deviation (float): limit of the maxDeviation
Returns:
Boolean that will give you information about the quality of the projection
"""
for matrix in self.bandoverlapsdict[Spin.up].values():
for iband1, band1 in enumerate(matrix["matrix"]):
for iband2, band2 in enumerate(band1):
if iband1 < number_occ_bands_spin_up and iband2 < number_occ_bands_spin_up:
if iband1 == iband2:
if abs(band2 - 1.0) > limit_deviation:
return False
else:
if band2 > limit_deviation:
return False
if spin_polarized:
for matrix in self.bandoverlapsdict[Spin.down].values():
for iband1, band1 in enumerate(matrix["matrix"]):
for iband2, band2 in enumerate(band1):
if number_occ_bands_spin_down is not None:
if iband1 < number_occ_bands_spin_down and iband2 < number_occ_bands_spin_down:
if iband1 == iband2:
if abs(band2 - 1.0) > limit_deviation:
return False
else:
if band2 > limit_deviation:
return False
else:
                            raise ValueError("number_occ_bands_spin_down has to be specified")
return True
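# A minimal quality-check sketch. The file name is Lobster's default; the band
# counts and limits below are illustrative values, not mandated defaults.
def _example_check_bandoverlaps():
    bo = Bandoverlaps("bandOverlaps.lobster")
    ok_overall = bo.has_good_quality_maxDeviation(limit_maxDeviation=0.1)
    ok_occupied = bo.has_good_quality_check_occupied_bands(
        number_occ_bands_spin_up=4,  # illustrative occupied-band counts
        number_occ_bands_spin_down=4,
        spin_polarized=True,
        limit_deviation=0.1,
    )
    return ok_overall and ok_occupied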
class Grosspop:
"""
Class to read in GROSSPOP.lobster files.
.. attribute: list_dict_grosspop
which is a list of dicts including all information about the grosspopulations, one sample dict looks like this:
{'element': 'O', 'Mulliken GP': {'2s': '1.80', '2p_y': '1.83', '2p_z': '1.79', '2p_x': '1.75', 'total': '7.18'},
'Loewdin GP': {'2s': '1.60', '2p_y': '1.82', '2p_z': '1.77', '2p_x': '1.73', 'total': '6.92'}}
    The 0th entry of the list refers to the first atom in GROSSPOP.lobster and so on.
"""
def __init__(self, filename: str = "GROSSPOP.lobster"):
"""
Args:
filename: filename of the "GROSSPOP.lobster" file
"""
# opens file
with zopen(filename, "rt") as f:
contents = f.read().split("\n")
self.list_dict_grosspop = [] # type: List[Any]
# transfers content of file to list of dict
for line in contents[3:]:
cleanline = [i for i in line.split(" ") if not i == ""]
if len(cleanline) == 5:
smalldict = {}
smalldict["element"] = cleanline[1]
smalldict["Mulliken GP"] = {}
smalldict["Loewdin GP"] = {}
smalldict["Mulliken GP"][cleanline[2]] = float(cleanline[3])
smalldict["Loewdin GP"][cleanline[2]] = float(cleanline[4])
elif len(cleanline) > 0:
smalldict["Mulliken GP"][cleanline[0]] = float(cleanline[1])
smalldict["Loewdin GP"][cleanline[0]] = float(cleanline[2])
if "total" in cleanline[0]:
self.list_dict_grosspop.append(smalldict)
def get_structure_with_total_grosspop(self, structure_filename: str) -> Structure:
"""
get a Structure with Mulliken and Loewdin total grosspopulations as site properties
Args:
structure_filename (str): filename of POSCAR
Returns:
Structure Object with Mulliken and Loewdin total grosspopulations as site properties
"""
struct = Structure.from_file(structure_filename)
site_properties = {} # type: Dict[str, Any]
mullikengp = []
loewdingp = []
for grosspop in self.list_dict_grosspop:
mullikengp.append(grosspop["Mulliken GP"]["total"])
loewdingp.append(grosspop["Loewdin GP"]["total"])
site_properties = {
"Total Mulliken GP": mullikengp,
"Total Loewdin GP": loewdingp,
}
new_struct = struct.copy(site_properties=site_properties)
return new_struct
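# A minimal usage sketch: attach total gross populations to a structure as site
# properties. "POSCAR" is an assumed file name and must describe the same atoms,
# in the same order, as GROSSPOP.lobster.
def _example_grosspop_site_properties():
    gp = Grosspop("GROSSPOP.lobster")
    struct = gp.get_structure_with_total_grosspop("POSCAR")
    return struct.site_properties["Total Mulliken GP"]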
class Wavefunction:
"""
Class to read in wave function files from Lobster and transfer them into an object of the type VolumetricData
.. attribute: grid
grid for the wave function [Nx+1,Ny+1,Nz+1]
.. attribute: points
list of points
.. attribute: real
list of real part of wave function
.. attribute: imaginary
list of imaginary part of wave function
.. attribute: distance
list of distance to first point in wave function file
"""
def __init__(self, filename, structure):
"""
Args:
filename: filename of wavecar file from Lobster
structure: Structure object (e.g., created by Structure.from_file(""))
"""
self.filename = filename
self.structure = structure
(
self.grid,
self.points,
self.real,
self.imaginary,
self.distance,
) = Wavefunction._parse_file(filename)
@staticmethod
def _parse_file(filename):
with zopen(filename, "rt") as f:
contents = f.read().split("\n")
points = []
distance = []
real = []
imaginary = []
splitline = contents[0].split()
grid = [int(splitline[7]), int(splitline[8]), int(splitline[9])]
for line in contents[1:]:
splitline = line.split()
if len(splitline) >= 6:
points.append([float(splitline[0]), float(splitline[1]), float(splitline[2])])
distance.append(float(splitline[3]))
real.append(float(splitline[4]))
imaginary.append(float(splitline[5]))
if not len(real) == grid[0] * grid[1] * grid[2]:
raise ValueError("Something went wrong while reading the file")
if not len(imaginary) == grid[0] * grid[1] * grid[2]:
raise ValueError("Something went wrong | |
"""
The process handler module implements the WorkerSupervisor class
Created on Jan 21, 2016
@author: <NAME>
"""
import datetime
import multiprocessing
import os
from multiprocessing import Process
import time
import queue
from bson.objectid import ObjectId
from of.common.logging import write_to_log, EC_BREAKIN, SEV_ERROR
from of.common.messaging.factory import log_process_state_message, reply_with_error_message, get_current_login
from plugins.optimalbpm.broker.messaging.factory import bpm_process_control, worker_process_control, get_current_login
from of.common.queue.handler import Handler
from plugins.optimalbpm.agent.lib.worker.run import run_worker_process
from of.schemas.constants import zero_object_id
import of.common.logging
__author__ = '<NAME>'
class WorkerSupervisor(Handler):
"""
The Worker supervisor is responsible for managing the worker processes and their messaging.
    It is also responsible for queueing incoming jobs.
"""
    #: A dictionary of all workers, keyed by their process_id.
workers = None
#: A list of available workers
available_workers = None
    #: A dictionary of the busy workers (those currently running BPM processes), keyed by their process_id.
busy_workers = None
#: A queue with processes that haven't been able to be run yet due to lack of available workers.
job_queue = None
#: The maximum allowed number of worker processes
max_worker_count = None
#: The base folder for all source repositories
repo_base_folder = None
#: The monitor of the queue that this handler gets its commands from.
monitor = None
#: The main agent message monitor
message_monitor = None
#: Statistic: Total jobs run
total_jobs_run = None
#: Log level
severity = None
def __init__(self, _process_id, _repo_base_folder, _message_monitor, _severity, _max_worker_count=None):
"""
        Initiates a WorkerSupervisor, initiates the queue and creates a function map.
:param _process_id: The id of the process
:param _repo_base_folder: The base folder for the repository
        :param _message_monitor: The main agent message monitor
        :param _severity: The log severity level
:param _max_worker_count: The maximum allowed number of worker processes
"""
super(WorkerSupervisor, self).__init__(_process_id)
self.busy_workers = {}
self.workers = {}
self.available_workers = []
self.severity = _severity
if _max_worker_count:
self.max_worker_count = _max_worker_count
else:
self.max_worker_count = 6
self.job_queue = queue.Queue()
self.schema_id__handler = {
"ref://of.message_bpm_process_start": self.handle_bpm_process_start
}
self.repo_base_folder = _repo_base_folder
self.message_monitor = _message_monitor
self.message_monitor.handler.process_handler = self
self.total_jobs_run = 0
def on_monitor_init(self, _monitor):
"""
Is called by the queue monitor that calls this handler when its queue gets new items
:param _monitor: A queue monitor
"""
        # Make the handler aware of the monitor it receives commands from
self.monitor = _monitor
def initialize_worker(self):
"""
Initialize a new worker process
"""
# Start a worker process
# TODO: Look at RunAs and similar to run in context (PROD-26)
_new_process_id = str(ObjectId())
_new_queue_manager = multiprocessing.Manager()
_new_queue = _new_queue_manager.Queue()
_new_process = Process(target=run_worker_process, daemon=False, # is in documentation, not skeleton
args=(self.process_id, _new_process_id, _new_queue,
self.monitor.queue, self.repo_base_folder,
self.severity))
self.write_dbg_info("Calling worker process start.")
_new_process.start()
self.write_dbg_info("Worker process pid: " + str(_new_process.pid))
_new_worker = {
"queue": _new_queue,
"pid": _new_process.pid,
"process": _new_process,
"processId": _new_process_id,
"spawnedWhen": str(datetime.datetime.utcnow()),
}
# Report worker process instance to server
self.message_monitor.queue.put([None, {
"_id": _new_process_id,
"parent_id": self.process_id,
"spawnedBy": get_current_login(),
"systemPid": _new_process.pid,
"spawnedWhen": _new_worker["spawnedWhen"],
"name": "<NAME>",
"schemaRef": "ref://of.process.system"
}])
self.workers[_new_process_id] = _new_worker
return _new_worker
def acquire_worker(self, _process_id):
"""
        Acquire an available worker process for executing a BPM process; if needed and possible, create a new one.
        If successful, the worker is added to busy_workers and returned; if not, None is returned.
        :param _process_id: The BPM process id
        :return: A worker if possible, otherwise None
"""
if len(self.available_workers) > 0:
# There are available workers, take one
_worker = self.available_workers.pop()
self.write_dbg_info("Process " + str(_process_id) + ": Found available worker.")
elif len(self.available_workers) + len(self.busy_workers) < self.max_worker_count:
# There are no available workers, and we have not yet reached the maximum number of allowed workers
_worker = self.initialize_worker()
self.write_dbg_info("Process " + str(_process_id) + ": Started new worker.")
else:
# There were no workers available
return None
self.busy_workers[_process_id] = _worker
# Update worker information
_worker["busy_since"] = datetime.datetime.utcnow()
_worker["bpm_process_id"] = _process_id
return _worker
def release_worker(self, _process_id):
"""
Release a worker that is not needed.
If there is any more work on the queue, re-use the worker.
:param _process_id: The BPM process id of the finished BPM process
"""
if _process_id in self.busy_workers:
_worker = self.busy_workers[_process_id]
else:
raise Exception(self.log_prefix + "release_worker - Invalid processId :" + _process_id)
self.total_jobs_run += 1
self.write_dbg_info("Releasing worker process " + _worker["processId"] + " from " + _process_id +
". Jobs run: " + str(self.total_jobs_run))
# Remove the worker from the list of busy workers
del self.busy_workers[_process_id]
# A worker has just become available, reuse for the next job.
try:
_next_job = self.job_queue.get(block=False)
# If there is a job run it
self.write_dbg_info("Re-using worker process for " + _next_job["processId"])
self.busy_workers[_next_job["processId"]] = _worker
_worker["queue"].put(_next_job)
except queue.Empty:
# There was no job on the queue, make the worker available.
self.available_workers.append(_worker)
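    # Taken together, acquire_worker()/release_worker() implement a simple
    # bounded worker pool: acquire reuses an idle worker, spawns a new one
    # while under max_worker_count, or returns None so the caller must queue
    # the job; release either feeds the freed worker the next queued job or
    # parks it in available_workers. handle_bpm_process_start() below is the
    # canonical caller of this pair.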
def handle_bpm_process_start(self, _message_data):
"""
Handle and act on BPM process start messages.
:param _message_data: The start message
"""
self.write_dbg_info("BPM Process(definition: " + _message_data[
"processDefinitionId"] + "): Looking for a worker.")
_worker = self.acquire_worker(_message_data["processId"])
if _worker:
# Add the message to its queue
self.write_dbg_info("BPM Process " + _message_data["processId"] +
": Putting the message on the workers' queue.")
_worker["queue"].put(_message_data)
else:
# There was no available worker, put the job on the queue
self.job_queue.put(_message_data)
self.message_monitor.queue.put([None,
log_process_state_message(
_changed_by=zero_object_id,
_state="queued",
_process_id=_message_data["processId"],
_reason="Queued until a worker becomes available")]
)
self.write_dbg_info("BPM Process(definition: " + _message_data[
"processDefinitionId"] + "): Queued for execution when a worker becomes available. "
"Max worker count limit reached.")
def handle(self, _item):
"""
Called when the monitor has gotten a message from a worker process
"""
if _item[0]:
# If the source websocket is set, it is not from a worker process, raise error.
raise Exception("The process handler only handles messages from worker processes.")
else:
# TODO: Should any filtering be done here? (PROD-27)
_message_data = _item[1]
if not isinstance(_message_data, dict):
write_to_log("A worker process sent message data that is not a dict, this might be a an attack.",
_category=EC_BREAKIN, _severity=SEV_ERROR)
raise Exception(self.log_prefix + "A worker process sent message data that is not a dict, this "
"might be a an attack: " + str(_message_data))
if "schemaRef" not in _message_data:
raise Exception(self.log_prefix + "Missing schemaRef: " + str(_message_data))
if _message_data["schemaRef"] == "ref://bpm.message.bpm.process.result":
# A process result message implies that the worker is done and available for new jobs
self.release_worker(_message_data["sourceProcessId"])
elif _message_data["schemaRef"] == "ref://of.log.process_state" and \
_message_data["processId"] in self.workers and \
_message_data["state"] in ["killed"]:
                # If a worker logs that it is being killed, it should be removed from the workers
self.write_dbg_info(self.log_prefix + "Worker " + _message_data["processId"] + " shut down, removing from workers.")
del self.workers[_message_data["processId"]]
self.write_dbg_info("Forwarding " + str(_message_data))
# Pass the message on to the message queue, heading for the last destination
self.message_monitor.queue.put([_item[0], _item[1]])
def kill_unresponsive_bpm_process(self, _bpm_process_id, _user_id):
"""
        Kill a worker process
:param _bpm_process_id: The process id of the BPM process
:param _user_id: The user id of the killer
"""
_worker = self.busy_workers[_bpm_process_id]
_worker["process"].terminate()
del self.busy_workers[_bpm_process_id]
del self.workers[_worker["processId"]]
# Send first a state message for the (logical) BPM process
self.message_monitor.queue.put([None, log_process_state_message(_changed_by=_user_id,
_state="killed",
_process_id=_bpm_process_id,
_reason="Unresponsive, killed.")])
# Then a state message for the actual worker process
self.message_monitor.queue.put([None, log_process_state_message(_changed_by=_user_id,
_state="killed",
_process_id=_worker["processId"],
_reason="Had an unresponsive BPM process")])
self.write_dbg_info(self.log_prefix + "Killed")
def forward_message(self, _message_data):
"""
        Forwards an incoming message to the proper worker process queue
"""
if _message_data["schemaRef"] == "ref://bpm.message.bpm.process.start":
# It is a process start message, start a process
self.handle_bpm_process_start(_message_data)
elif _message_data["schemaRef"] == "ref://bpm.message.bpm.process.command" and \
_message_data["command"] == "kill":
# It is a command to kill bpm process, i.e. also the worker, do so.
self.write_dbg_info(self.log_prefix + "Kill " + str(_message_data))
self.kill_unresponsive_bpm_process(_message_data["destinationProcessId"], _message_data["userId"])
elif "destinationProcessId" not in _message_data:
raise Exception("Missing destinationProcessId: " + str(_message_data))
elif _message_data["destinationProcessId"] in self.busy_workers:
# Route the data to its destination
# The mockup must reference in exactly the same way..
self.busy_workers[_message_data["destinationProcessId"]]["queue"].put(_message_data)
else:
raise Exception("Invalid destinationProcessId: " + _message_data["destinationProcessId"])
def shut_down(self, _user_id):
"""
Shuts down the worker handler and all jobs.
:param _user_id:
:return:
"""
try:
"""
            TODO: First tell broker and incoming message handler we are shutting down.
            Broker should then remove agent from destinations for messaging
            Move all available worker processes to temporary list, shut them all down
            All queued jobs should be unqueued and be returned to the broker to be run elsewhere
            Pause (meaning: do not run the next step of) all running jobs and send states to broker where they
# kairon/shared/account/processor.py
from datetime import datetime
from typing import Dict, Text
from loguru import logger as logging
from mongoengine.errors import DoesNotExist
from mongoengine.errors import ValidationError
from pydantic import SecretStr
from validators import ValidationFailure
from validators import email as mail_check
from kairon.exceptions import AppException
from kairon.shared.account.data_objects import Account, User, Bot, UserEmailConfirmation, Feedback, UiConfig, \
MailTemplates, SystemProperties, BotAccess
from kairon.shared.actions.data_objects import FormValidationAction, SlotSetAction, EmailActionConfig
from kairon.shared.data.constant import ACCESS_ROLES, ACTIVITY_STATUS
from kairon.shared.data.data_objects import BotSettings, ChatClientConfig, SlotMapping
from kairon.shared.utils import Utility
Utility.load_email_configuration()
class AccountProcessor:
@staticmethod
def add_account(name: str, user: str):
"""
adds a new account
:param name: account name
:param user: user id
:return: account id
"""
if Utility.check_empty_string(name):
raise AppException("Account Name cannot be empty or blank spaces")
Utility.is_exist(
Account,
exp_message="Account name already exists!",
name__iexact=name,
status=True,
)
license = {"bots": 2, "intents": 3, "examples": 20, "training": 3, "augmentation": 5}
return Account(name=name.strip(), user=user, license=license).save().to_mongo().to_dict()
@staticmethod
def get_account(account: int):
"""
fetch account object
:param account: account id
:return: account details
"""
try:
account = Account.objects().get(id=account).to_mongo().to_dict()
return account
        except Exception:
            raise DoesNotExist("Account does not exist")
@staticmethod
def add_bot(name: str, account: int, user: str, is_new_account: bool = False):
"""
add a bot to account
:param name: bot name
:param account: account id
:param user: user id
:param is_new_account: True if it is a new account
:return: bot id
"""
from kairon.shared.data.processor import MongoProcessor
from kairon.shared.data.data_objects import BotSettings
if Utility.check_empty_string(name):
raise AppException("Bot Name cannot be empty or blank spaces")
if Utility.check_empty_string(user):
raise AppException("user cannot be empty or blank spaces")
Utility.is_exist(
Bot,
exp_message="Bot already exists!",
name__iexact=name,
account=account,
status=True,
)
bot = Bot(name=name, account=account, user=user).save().to_mongo().to_dict()
bot_id = bot['_id'].__str__()
if not is_new_account:
AccountProcessor.allow_access_to_bot(bot_id, user, user, account, ACCESS_ROLES.ADMIN.value, ACTIVITY_STATUS.ACTIVE.value)
BotSettings(bot=bot_id, user=user).save()
processor = MongoProcessor()
config = processor.load_config(bot_id)
processor.add_or_overwrite_config(config, bot_id, user)
processor.add_default_fallback_data(bot_id, user, True, True)
return bot
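    # A minimal bootstrap sketch (the names used here are illustrative only):
    #
    #   account = AccountProcessor.add_account("acme", user="admin@acme.com")
    #   bot = AccountProcessor.add_bot("support-bot", account["_id"], user="admin@acme.com")
    #
    # With the default is_new_account=False, add_bot() also wires up admin
    # access, BotSettings, the default config and fallback data, so callers
    # normally do not repeat those steps.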
@staticmethod
def list_bots(account_id: int):
for bot in Bot.objects(account=account_id, status=True):
bot = bot.to_mongo().to_dict()
bot.pop('status')
bot['_id'] = bot['_id'].__str__()
yield bot
@staticmethod
def update_bot(name: Text, bot: Text):
if Utility.check_empty_string(name):
raise AppException('Name cannot be empty')
try:
bot_info = Bot.objects(id=bot, status=True).get()
bot_info.name = name
bot_info.save()
except DoesNotExist:
raise AppException('Bot not found')
@staticmethod
def delete_bot(bot: Text, user: Text):
from kairon.shared.data.data_objects import Intents, Responses, Stories, Configs, Endpoints, Entities, \
EntitySynonyms, Forms, LookupTables, ModelDeployment, ModelTraining, RegexFeatures, Rules, SessionConfigs, \
Slots, TrainingDataGenerator, TrainingExamples
from kairon.shared.test.data_objects import ModelTestingLogs
from kairon.shared.importer.data_objects import ValidationLogs
from kairon.shared.actions.data_objects import HttpActionConfig, ActionServerLogs, Actions
try:
bot_info = Bot.objects(id=bot, status=True).get()
bot_info.status = False
bot_info.save()
Utility.hard_delete_document([
Actions, BotAccess, BotSettings, Configs, ChatClientConfig, Endpoints, Entities, EmailActionConfig,
EntitySynonyms, Forms, FormValidationAction, HttpActionConfig, Intents, LookupTables, RegexFeatures,
Responses, Rules, SlotMapping, SlotSetAction, SessionConfigs, Slots, Stories, TrainingDataGenerator,
TrainingExamples, ActionServerLogs, ModelTraining, ModelTestingLogs, ModelDeployment, ValidationLogs
], bot, user=user)
AccountProcessor.remove_bot_access(bot)
except DoesNotExist:
raise AppException('Bot not found')
@staticmethod
def fetch_role_for_user(email: Text, bot: Text):
try:
return BotAccess.objects(accessor_email=email, bot=bot,
status=ACTIVITY_STATUS.ACTIVE.value).get().to_mongo().to_dict()
except DoesNotExist as e:
logging.error(e)
raise AppException('Access to bot is denied')
@staticmethod
def get_accessible_bot_details(account_id: int, email: Text):
shared_bots = []
account_bots = list(AccountProcessor.list_bots(account_id))
for bot in BotAccess.objects(accessor_email=email, bot_account__ne=account_id,
status=ACTIVITY_STATUS.ACTIVE.value):
bot_details = AccountProcessor.get_bot(bot['bot'])
bot_details.pop('status')
bot_details['_id'] = bot_details['_id'].__str__()
shared_bots.append(bot_details)
return {
'account_owned': account_bots,
'shared': shared_bots
}
@staticmethod
def allow_bot_and_generate_invite_url(bot: Text, email: Text, user: Text, bot_account: int,
role: ACCESS_ROLES = ACCESS_ROLES.TESTER.value):
bot_details = AccountProcessor.allow_access_to_bot(bot, email, user, bot_account, role)
if Utility.email_conf["email"]["enable"]:
token = Utility.generate_token(email)
link = f'{Utility.email_conf["app"]["url"]}/{bot}/invite/accept/{token}'
return bot_details['name'], link
@staticmethod
def allow_access_to_bot(bot: Text, accessor_email: Text, user: Text,
bot_account: int, role: ACCESS_ROLES = ACCESS_ROLES.TESTER.value,
activity_status: ACTIVITY_STATUS = ACTIVITY_STATUS.INVITE_NOT_ACCEPTED.value):
"""
Adds bot to a user account.
:param bot: bot id
:param accessor_email: email id of the new member
:param user: user adding the new member
:param bot_account: account where bot exists
:param activity_status: can be one of active, inactive or deleted.
:param role: can be one of admin, designer or tester.
"""
bot_details = AccountProcessor.get_bot(bot)
Utility.is_exist(BotAccess, 'User is already a collaborator', accessor_email=accessor_email, bot=bot,
status__ne=ACTIVITY_STATUS.DELETED.value)
BotAccess(
accessor_email=accessor_email,
bot=bot,
role=role,
user=user,
bot_account=bot_account,
status=activity_status
).save()
return bot_details
@staticmethod
def update_bot_access(bot: Text, accessor_email: Text, user: Text,
role: ACCESS_ROLES = ACCESS_ROLES.TESTER.value,
status: ACTIVITY_STATUS = ACTIVITY_STATUS.ACTIVE.value):
"""
        Updates a user's access to the bot.
:param bot: bot id
:param accessor_email: email id of the new member
:param user: user adding the new member
:param role: can be one of admin, designer or tester.
:param status: can be one of active, inactive or deleted.
"""
AccountProcessor.get_bot(bot)
try:
bot_access = BotAccess.objects(accessor_email=accessor_email, bot=bot).get()
if Utility.email_conf["email"]["enable"]:
if status != ACTIVITY_STATUS.DELETED.value and bot_access.status == ACTIVITY_STATUS.INVITE_NOT_ACCEPTED.value:
raise AppException('User is yet to accept the invite')
bot_access.role = role
bot_access.user = user
bot_access.status = status
bot_access.timestamp = datetime.utcnow()
bot_access.save()
except DoesNotExist:
raise AppException('User not yet invited to collaborate')
@staticmethod
def accept_bot_access_invite(token: Text, bot: Text):
"""
Activate user's access to bot.
:param token: token sent in the link
:param bot: bot id
"""
bot_details = AccountProcessor.get_bot(bot)
accessor_email = Utility.verify_token(token)
AccountProcessor.get_user_details(accessor_email)
try:
bot_access = BotAccess.objects(accessor_email=accessor_email, bot=bot,
status=ACTIVITY_STATUS.INVITE_NOT_ACCEPTED.value).get()
bot_access.status = ACTIVITY_STATUS.ACTIVE.value
bot_access.accept_timestamp = datetime.utcnow()
bot_access.save()
return bot_access.user, bot_details['name'], bot_access.accessor_email, bot_access.role
except DoesNotExist:
raise AppException('No pending invite found for this bot and user')
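    # Invite flow, in outline: allow_bot_and_generate_invite_url() stores a
    # BotAccess row in INVITE_NOT_ACCEPTED state and (when email is enabled)
    # returns a link embedding a token; accept_bot_access_invite() verifies
    # that token and flips the row to ACTIVE. While email is enabled,
    # update_bot_access() refuses role changes until the invite is accepted.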
@staticmethod
def remove_bot_access(bot: Text, **kwargs):
"""
        Removes bot access, either for all users or only for the user supplied.
:param bot: bot id
:param kwargs: can be either account or email.
"""
if kwargs:
if not Utility.is_exist(BotAccess, None, False, **kwargs, bot=bot, status__ne=ACTIVITY_STATUS.DELETED.value):
raise AppException('User not a collaborator to this bot')
active_bot_access = BotAccess.objects(**kwargs, bot=bot, status__ne=ACTIVITY_STATUS.DELETED.value)
else:
active_bot_access = BotAccess.objects(bot=bot, status__ne=ACTIVITY_STATUS.DELETED.value)
active_bot_access.update(set__status=ACTIVITY_STATUS.DELETED.value)
@staticmethod
def list_bot_accessors(bot: Text):
"""
List users who have access to bot.
:param bot: bot id
"""
for accessor in BotAccess.objects(bot=bot, status__ne=ACTIVITY_STATUS.DELETED.value):
accessor = accessor.to_mongo().to_dict()
accessor['_id'] = accessor['_id'].__str__()
yield accessor
@staticmethod
def get_bot(id: str):
"""
fetches bot details
:param id: bot id
:return: bot details
"""
try:
return Bot.objects().get(id=id).to_mongo().to_dict()
        except Exception:
            raise DoesNotExist("Bot does not exist!")
@staticmethod
def add_user(
email: str,
password: str,
first_name: str,
last_name: str,
account: int,
user: str,
is_integration_user=False
):
"""
adds new user to the account
:param email: user login id
:param password: <PASSWORD>
:param first_name: user firstname
:param last_name: user lastname
:param account: account id
:param user: user id
        :param is_integration_user: whether this is an auto-generated integration user
:return: user details
"""
if (
Utility.check_empty_string(email)
or Utility.check_empty_string(last_name)
or Utility.check_empty_string(first_name)
or Utility.check_empty_string(password)
):
raise AppException(
"Email, FirstName, LastName and password cannot be empty or blank spaces"
)
Utility.is_exist(
User,
exp_message="User already exists! try with different email address.",
email__iexact=email.strip(),
status=True,
)
return (
User(
email=email.strip(),
password=Utility.get_password_hash(password.strip()),
first_name=first_name.strip(),
last_name=last_name.strip(),
account=account,
user=user.strip(),
is_integration_user=is_integration_user,
)
.save()
.to_mongo()
.to_dict()
)
@staticmethod
def get_user(email: str):
"""
fetch user details
:param email: user login id
:return: user details
"""
try:
return User.objects().get(email=email).to_mongo().to_dict()
except Exception as e:
logging.error(e)
raise DoesNotExist("User does not exist!")
@staticmethod
def get_user_details(email: str):
"""
        fetches complete user details and checks whether the user or account is inactive
:param email: login id
:return: dict
"""
user = AccountProcessor.get_user(email)
if not user["is_integration_user"]:
AccountProcessor.check_email_confirmation(user["email"])
if not user["status"]:
raise ValidationError("Inactive User please contact admin!")
account = AccountProcessor.get_account(user["account"])
if not account["status"]:
raise ValidationError("Inactive Account Please contact system admin!")
return user
@staticmethod
def get_complete_user_details(email: str):
"""
fetches complete user details including account and bot
:param email: login id
:return: dict
"""
user = AccountProcessor.get_user(email)
account = AccountProcessor.get_account(user["account"])
bots = AccountProcessor.get_accessible_bot_details(user["account"], email)
user["account_name"] = account["name"]
user['bots'] = bots
user["_id"] = user["_id"].__str__()
user.pop('password')
return user
@staticmethod
def get_integration_user(bot: str, account: int):
"""
creates integration user if it does not exist
:param bot: bot id
:param account: account id
:return: dict
"""
email = f"{<EMAIL>"
if not Utility.is_exist(
User, raise_error=False, email=email, is_integration_user=True, status=True
):
password = <PASSWORD>()
user_details = AccountProcessor.add_user(
email=email,
password=password,
first_name=bot,
last_name=bot,
account=account,
user="auto_gen",
is_integration_user=True,
)
AccountProcessor.allow_access_to_bot(bot, email.strip(), "auto_gen", account,
ACCESS_ROLES.ADMIN.value, ACTIVITY_STATUS.ACTIVE.value)
return user_details
else:
return (
User.objects(email=email).get(is_integration_user=True).to_mongo().to_dict()
)
@staticmethod
async def account_setup(account_setup: Dict, user: Text):
"""
create new account
:param account_setup: dict of account details
:param user: user id
:return: dict user details, user email id, confirmation mail subject, mail body
"""
from kairon.shared.data.processor import MongoProcessor
account = None
bot = None
mail_to = None
email_enabled = Utility.email_conf["email"]["enable"]
link = None
try:
account = AccountProcessor.add_account(account_setup.get("account"), user)
bot = AccountProcessor.add_bot('Hi-Hello', account["_id"], user, True)
user_details = AccountProcessor.add_user(
email=account_setup.get("email"),
first_name=account_setup.get("first_name"),
last_name=account_setup.get("last_name"),
                password=account_setup.get("password").get_secret_value(),
account=account["_id"].__str__(),
user=user
)
AccountProcessor.allow_access_to_bot(bot["_id"].__str__(), account_setup.get("email"),
account_setup.get("email"), account['_id'],
ACCESS_ROLES.ADMIN.value, ACTIVITY_STATUS.ACTIVE.value)
await MongoProcessor().save_from_path(
"template/use-cases/Hi-Hello", bot["_id"].__str__(), user="sysadmin"
)
if email_enabled:
token = Utility.generate_token(account_setup.get("email"))
link = Utility.email_conf["app"]["url"] + '/verify/' + token
mail_to = account_setup.get("email")
except Exception as e:
if account and "_id" in account:
Account.objects().get(id=account["_id"]).delete()
            if bot and "_id" in bot:
                Bot.objects().get(id=bot["_id"]).delete()
    car = situation_object(CAR)
situation = HighLevelSemanticsSituation(
ontology=GAILA_PHASE_1_ONTOLOGY,
salient_objects=[baby, ball, car],
actions=[
Action(
action_type=TAKE, argument_roles_to_fillers=[(AGENT, baby), (THEME, ball)]
)
],
after_action_relations=[near(ball, car)],
)
assert generated_tokens(situation) == (
"a",
"baby",
"takes",
"a",
"ball",
"to",
"a",
"car",
)
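# generated_tokens() is defined earlier in this module, outside this excerpt.
# Judging from the assertions, it runs a situation through the phase-1 language
# generator and returns the token tuple of the single resulting linearization,
# roughly like the following sketch (not necessarily the exact helper):
#
#   def generated_tokens(situation):
#       return only(
#           GAILA_PHASE_1_LANGUAGE_GENERATOR.generate_language(
#               situation, chooser=RandomChooser.for_seed(0)
#           )
#       ).as_token_sequence()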
@pytest.mark.skip(
"Disabling because BABY is now a recognized particular, "
"and you can't have multiple recognized particulars in a situation"
)
def test_arguments_same_ontology_type():
baby_0 = situation_object(BABY)
baby_1 = situation_object(BABY)
cookie = situation_object(COOKIE)
for prefer_ditransitive in (True, False):
syntax_hints = [PREFER_DITRANSITIVE] if prefer_ditransitive else []
situation = HighLevelSemanticsSituation(
ontology=GAILA_PHASE_1_ONTOLOGY,
salient_objects=[baby_0, baby_1, cookie],
actions=[
Action(
action_type=GIVE,
argument_roles_to_fillers=[
(AGENT, baby_0),
(GOAL, baby_1),
(THEME, cookie),
],
)
],
syntax_hints=syntax_hints,
)
reference_tokens: Tuple[str, ...]
if prefer_ditransitive:
reference_tokens = ("a", "baby", "gives", "a", "baby", "a", "cookie")
else:
reference_tokens = ("a", "baby", "gives", "a", "cookie", "to", "a", "baby")
assert generated_tokens(situation) == reference_tokens
def test_bird_flies_over_dad():
bird = situation_object(BIRD)
dad = situation_object(DAD)
situation = HighLevelSemanticsSituation(
ontology=GAILA_PHASE_1_ONTOLOGY,
salient_objects=[bird, dad],
actions=[
Action(
FLY,
argument_roles_to_fillers=[(AGENT, bird)],
during=DuringAction(
at_some_point=[
Relation(
IN_REGION,
bird,
Region(
reference_object=dad,
distance=DISTAL,
direction=GRAVITATIONAL_UP,
),
)
]
),
)
],
)
assert generated_tokens(situation) == ("a", "bird", "flies", "over", "Dad")
def test_bird_flies_path_beside():
bird = situation_object(BIRD)
car = situation_object(CAR)
car_region = Region(
car,
distance=PROXIMAL,
direction=Direction(
positive=True, relative_to_axis=HorizontalAxisOfObject(car, index=0)
),
)
situation = HighLevelSemanticsSituation(
ontology=GAILA_PHASE_1_ONTOLOGY,
salient_objects=[bird, car],
actions=[
Action(
FLY,
argument_roles_to_fillers=[(AGENT, bird)],
during=DuringAction(
objects_to_paths=[
(
bird,
SpatialPath(
VIA,
reference_source_object=car_region,
reference_destination_object=car_region,
reference_axis=HorizontalAxisOfObject(car, index=0),
),
)
],
at_some_point=[Relation(IN_REGION, bird, car_region)],
),
)
],
)
assert generated_tokens(situation) == ("a", "bird", "flies", "beside", "a", "car")
def test_bird_flies_up():
bird = situation_object(BIRD)
ground = situation_object(GROUND)
situation = HighLevelSemanticsSituation(
ontology=GAILA_PHASE_1_ONTOLOGY,
salient_objects=[bird],
actions=[
Action(
FLY,
argument_roles_to_fillers=[(AGENT, bird)],
during=DuringAction(
objects_to_paths=[
(
bird,
SpatialPath(
operator=AWAY_FROM,
reference_source_object=ground,
reference_destination_object=Region(
ground, distance=DISTAL
),
),
)
]
),
)
],
syntax_hints=[USE_ADVERBIAL_PATH_MODIFIER],
)
assert generated_tokens(situation) == ("a", "bird", "flies", "up")
def test_jump_up():
dad = situation_object(DAD)
ground = situation_object(GROUND)
situation = HighLevelSemanticsSituation(
ontology=GAILA_PHASE_1_ONTOLOGY,
salient_objects=[dad],
actions=[
Action(
JUMP,
argument_roles_to_fillers=[(AGENT, dad)],
auxiliary_variable_bindings=[(JUMP_INITIAL_SUPPORTER_AUX, ground)],
)
],
syntax_hints=[USE_ADVERBIAL_PATH_MODIFIER],
)
assert generated_tokens(situation) == ("Dad", "jumps", "up")
def test_jumps_over():
dad = situation_object(DAD)
chair = situation_object(CHAIR)
ground = situation_object(GROUND)
situation = HighLevelSemanticsSituation(
ontology=GAILA_PHASE_1_ONTOLOGY,
salient_objects=[dad, chair],
actions=[
Action(
JUMP,
argument_roles_to_fillers=[(AGENT, dad)],
during=DuringAction(at_some_point=[strictly_above(dad, chair)]),
auxiliary_variable_bindings=[(JUMP_INITIAL_SUPPORTER_AUX, ground)],
)
],
)
assert generated_tokens(situation) == ("Dad", "jumps", "over", "a", "chair")
def test_mom_drinks_juice():
mom = situation_object(MOM)
juice = situation_object(JUICE)
cup = situation_object(CUP)
situation = HighLevelSemanticsSituation(
ontology=GAILA_PHASE_1_ONTOLOGY,
salient_objects=[mom, juice],
actions=[
Action(
DRINK,
argument_roles_to_fillers=[(AGENT, mom), (THEME, juice)],
auxiliary_variable_bindings=[(DRINK_CONTAINER_AUX, cup)],
)
],
)
assert generated_tokens(situation) == ("Mom", "drinks", "juice")
def test_mom_eats_cookie():
mom = situation_object(MOM)
cookie = situation_object(COOKIE)
situation = HighLevelSemanticsSituation(
ontology=GAILA_PHASE_1_ONTOLOGY,
salient_objects=[mom, cookie],
actions=[
Action(EAT, argument_roles_to_fillers=[(AGENT, mom), (PATIENT, cookie)])
],
)
assert generated_tokens(situation) == ("Mom", "eats", "a", "cookie")
def test_ball_fell_on_ground():
ball = situation_object(BALL)
ground = situation_object(GROUND)
situation = HighLevelSemanticsSituation(
ontology=GAILA_PHASE_1_ONTOLOGY,
salient_objects=[ball, ground],
actions=[Action(FALL, argument_roles_to_fillers=[(THEME, ball)])],
after_action_relations=[on(ball, ground)],
)
assert generated_tokens(situation) == ("a", "ball", "falls", "on", "the", "ground")
def test_mom_sits_on_a_table():
mom = situation_object(MOM)
table = situation_object(TABLE)
situation = HighLevelSemanticsSituation(
ontology=GAILA_PHASE_1_ONTOLOGY,
salient_objects=[mom, table],
actions=[
Action(
SIT,
argument_roles_to_fillers=[
(AGENT, mom),
(
GOAL,
Region(
table,
direction=GRAVITATIONAL_UP,
distance=EXTERIOR_BUT_IN_CONTACT,
),
),
],
)
],
)
assert generated_tokens(situation) == ("Mom", "sits", "on", "a", "table")
def test_you_give_me_a_cookie():
you = situation_object(DAD, properties=[IS_ADDRESSEE])
baby = situation_object(BABY, properties=[IS_SPEAKER])
cookie = situation_object(COOKIE)
situation_to = HighLevelSemanticsSituation(
ontology=GAILA_PHASE_1_ONTOLOGY,
salient_objects=[you, baby, cookie],
actions=[
Action(
GIVE,
argument_roles_to_fillers=[(AGENT, you), (GOAL, baby), (THEME, cookie)],
)
],
)
assert generated_tokens(situation_to) == ("you", "give", "a", "cookie", "to", "me")
situation_ditransitive = HighLevelSemanticsSituation(
ontology=GAILA_PHASE_1_ONTOLOGY,
salient_objects=[you, baby, cookie],
actions=[
Action(
GIVE,
argument_roles_to_fillers=[(AGENT, you), (GOAL, baby), (THEME, cookie)],
)
],
syntax_hints=[PREFER_DITRANSITIVE],
)
assert generated_tokens(situation_ditransitive) == (
"you",
"give",
"me",
"a",
"cookie",
)
def test_object_beside_object():
# HACK FOR AXES - See https://github.com/isi-vista/adam/issues/316
ball = situation_object(BALL)
table = situation_object(TABLE)
situation = HighLevelSemanticsSituation(
ontology=GAILA_PHASE_1_ONTOLOGY,
salient_objects=[ball, table],
always_relations=[
Relation(
IN_REGION,
ball,
Region(
table,
distance=PROXIMAL,
direction=Direction(
positive=True,
relative_to_axis=HorizontalAxisOfObject(table, index=0),
),
),
)
],
)
assert generated_tokens(situation) == ("a", "ball", "beside", "a", "table")
def test_object_behind_in_front_object():
# HACK FOR AXES - See https://github.com/isi-vista/adam/issues/316
box = situation_object(BOX)
table = situation_object(TABLE)
speaker = situation_object(MOM, properties=[IS_SPEAKER])
addressee = situation_object(DAD, properties=[IS_ADDRESSEE])
front_situation = HighLevelSemanticsSituation(
ontology=GAILA_PHASE_1_ONTOLOGY,
salient_objects=[box, table],
other_objects=[speaker, addressee],
always_relations=[
Relation(
IN_REGION,
box,
Region(
table,
distance=PROXIMAL,
direction=Direction(
positive=True, relative_to_axis=FacingAddresseeAxis(table)
),
),
)
],
axis_info=AxesInfo(
addressee=addressee,
axes_facing=[
(
addressee,
# TODO: fix this hack
HorizontalAxisOfObject(obj, index=1).to_concrete_axis( # type: ignore
None
),
)
for obj in [box, table, speaker, addressee]
if obj.axes
],
),
)
assert generated_tokens(front_situation) == ("a", "box", "in front of", "a", "table")
behind_situation = HighLevelSemanticsSituation(
ontology=GAILA_PHASE_1_ONTOLOGY,
salient_objects=[box, table],
other_objects=[speaker, addressee],
always_relations=[
Relation(
IN_REGION,
box,
Region(
table,
distance=PROXIMAL,
direction=Direction(
positive=False, relative_to_axis=FacingAddresseeAxis(table)
),
),
)
],
axis_info=AxesInfo(
addressee=addressee,
axes_facing=[
(
addressee,
# TODO: fix this hack
HorizontalAxisOfObject(obj, index=1).to_concrete_axis( # type: ignore
None
),
)
for obj in [box, table, speaker, addressee]
if obj.axes
],
),
)
assert generated_tokens(behind_situation) == ("a", "box", "behind", "a", "table")
def test_to_regions_as_goal():
goal_object = situation_object(BOX, properties=[HOLLOW])
assert generated_tokens(
region_as_goal_situation(Region(goal_object, distance=PROXIMAL), goal_object)
) == ("a", "dog", "goes", "to", "a", "box")
def test_in_region_as_goal():
goal_object = situation_object(BOX, properties=[HOLLOW])
assert generated_tokens(
region_as_goal_situation(Region(goal_object, distance=INTERIOR), goal_object)
) == ("a", "dog", "goes", "in", "a", "box")
def test_beside_region_as_goal():
goal_object = situation_object(BOX, properties=[HOLLOW])
# Beside
assert generated_tokens(
region_as_goal_situation(
Region(
goal_object,
distance=PROXIMAL,
direction=Direction(
positive=True,
relative_to_axis=HorizontalAxisOfObject(goal_object, index=0),
),
),
goal_object,
)
) == ("a", "dog", "goes", "beside", "a", "box")
    # Beside (negative direction along the same axis)
assert generated_tokens(
region_as_goal_situation(
Region(
goal_object,
distance=PROXIMAL,
direction=Direction(
positive=False,
relative_to_axis=HorizontalAxisOfObject(goal_object, index=0),
),
),
goal_object,
)
) == ("a", "dog", "goes", "beside", "a", "box")
def test_behind_region_as_goal():
goal_object = situation_object(BOX, properties=[HOLLOW])
# Behind
assert generated_tokens(
region_as_goal_situation(
Region(
goal_object,
distance=PROXIMAL,
direction=Direction(
positive=False, relative_to_axis=FacingAddresseeAxis(goal_object)
),
),
goal_object,
)
) == ("a", "dog", "goes", "behind", "a", "box")
def test_in_front_of_region_as_goal():
# In front of
goal_object = situation_object(BOX, properties=[HOLLOW])
assert generated_tokens(
region_as_goal_situation(
Region(
goal_object,
distance=PROXIMAL,
direction=Direction(
positive=True, relative_to_axis=FacingAddresseeAxis(goal_object)
),
),
goal_object,
)
) == ("a", "dog", "goes", "in front of", "a", "box")
def test_over_region_as_goal():
goal_object = situation_object(TABLE)
# Over
assert generated_tokens(
region_as_goal_situation(
Region(goal_object, distance=PROXIMAL, direction=GRAVITATIONAL_UP),
goal_object,
)
) == ("a", "dog", "goes", "over", "a", "table")
def test_under_region_as_goal():
goal_object = situation_object(TABLE)
    # Under
assert generated_tokens(
region_as_goal_situation(
Region(goal_object, distance=PROXIMAL, direction=GRAVITATIONAL_DOWN),
goal_object,
)
) == ("a", "dog", "goes", "under", "a", "table")
def test_region_without_addressee():
agent = situation_object(DOG)
goal_object = situation_object(BOX, properties=[HOLLOW])
with pytest.raises(RuntimeError):
generated_tokens(
HighLevelSemanticsSituation(
ontology=GAILA_PHASE_1_ONTOLOGY,
salient_objects=[agent, goal_object],
actions=[
Action(
GO,
argument_roles_to_fillers=[
(AGENT, agent),
(
GOAL,
Region(
goal_object,
distance=PROXIMAL,
direction=Direction(
positive=True,
relative_to_axis=FacingAddresseeAxis(goal_object),
),
),
),
],
)
],
)
)
def test_is_color_when_dynamic():
agent = situation_object(BALL, properties=[RED])
ground = situation_object(GROUND)
with pytest.raises(RuntimeError):
generated_tokens(
HighLevelSemanticsSituation(
ontology=GAILA_PHASE_1_ONTOLOGY,
salient_objects=[agent],
actions=[
Action(
ROLL,
argument_roles_to_fillers=[(AGENT, agent)],
auxiliary_variable_bindings=[(ROLL_SURFACE_AUXILIARY, ground)],
)
],
syntax_hints=[ATTRIBUTES_AS_X_IS_Y],
)
)
def test_is_property_none():
agent = situation_object(BALL, properties=[RED])
with pytest.raises(RuntimeError):
generated_tokens(
HighLevelSemanticsSituation(
ontology=GAILA_PHASE_1_ONTOLOGY,
salient_objects=[agent],
syntax_hints=[ATTRIBUTES_AS_X_IS_Y, IGNORE_COLORS],
)
)
def test_multiple_colors():
agent = situation_object(BALL, properties=[RED, BLACK])
with pytest.raises(RuntimeError):
generated_tokens(
HighLevelSemanticsSituation(
ontology=GAILA_PHASE_1_ONTOLOGY,
salient_objects=[agent],
syntax_hints=[ATTRIBUTES_AS_X_IS_Y],
)
)
def region_as_goal_situation(
goal: Region[SituationObject], goal_object: SituationObject
) -> HighLevelSemanticsSituation:
agent = situation_object(DOG)
learner = situation_object(LEARNER, properties=[IS_ADDRESSEE])
return HighLevelSemanticsSituation(
ontology=GAILA_PHASE_1_ONTOLOGY,
salient_objects=[agent, goal_object],
other_objects=[learner],
actions=[Action(GO, argument_roles_to_fillers=[(AGENT, agent), (GOAL, goal)])],
axis_info=AxesInfo(
addressee=learner,
axes_facing=[
(
learner,
# TODO: fix this hack
HorizontalAxisOfObject(obj, index=1).to_concrete_axis( # type: ignore
None
),
)
for obj in [agent, goal_object, learner]
if obj.axes
],
),
)
def test_more_than_one_action():
agent = situation_object(DOG)
box = situation_object(BOX)
situation = HighLevelSemanticsSituation(
salient_objects=[agent],
other_objects=[box],
actions=[
Action(GO, argument_roles_to_fillers=[(AGENT, agent), (GOAL, box)]),
Action(FALL, argument_roles_to_fillers=[(AGENT, box), (GOAL, agent)]),
],
ontology=GAILA_PHASE_1_ONTOLOGY,
)
with pytest.raises(RuntimeError):
generated_tokens(situation)
def test_multiple_has_relations():
agent = situation_object(MOM)
ball = situation_object(BALL)
cookie = situation_object(COOKIE)
situation = HighLevelSemanticsSituation(
salient_objects=[agent, ball],
always_relations=[has(agent, [ball, cookie])],
ontology=GAILA_PHASE_1_ONTOLOGY,
)
with pytest.raises(RuntimeError):
generated_tokens(situation)
def test_has_as_verb():
speaker = situation_object(MOM, properties=[IS_SPEAKER])
ball = situation_object(BALL)
box = situation_object(BOX)
speaker_has_ball = HighLevelSemanticsSituation(
salient_objects=[speaker, ball],
always_relations=[has(speaker, ball)],
ontology=GAILA_PHASE_1_ONTOLOGY,
)
speaker_has_ball_on_box = HighLevelSemanticsSituation(
salient_objects=[speaker, ball, box],
always_relations=flatten_relations([has(speaker, ball), on(ball, box)]),
ontology=GAILA_PHASE_1_ONTOLOGY,
)
assert ("I", "have", "my", "ball") == generated_tokens(speaker_has_ball)
assert ("I", "have", "my", "ball", "on", "a", "box") == generated_tokens(
speaker_has_ball_on_box
)
def test_multiple_posession():
speaker = situation_object(MOM, properties=[IS_SPEAKER])
addressee = situation_object(DAD, properties=[IS_ADDRESSEE])
ball = situation_object(BALL)
multiple_possession = HighLevelSemanticsSituation(
salient_objects=[speaker, addressee, ball],
always_relations=[has([speaker, addressee], ball)],
ontology=GAILA_PHASE_1_ONTOLOGY,
)
with pytest.raises(RuntimeError):
generated_tokens(multiple_possession)
def test_fail_relation():
mom = situation_object(MOM)
ball = situation_object(BALL)
ball_bigger_mom = HighLevelSemanticsSituation(
salient_objects=[mom, ball],
always_relations=[bigger_than(ball, mom)],
ontology=GAILA_PHASE_1_ONTOLOGY,
)
with pytest.raises(RuntimeError):
generated_tokens(ball_bigger_mom)
def test_multiple_action_heads():
mom = situation_object(MOM)
dad = situation_object(DAD)
box = situation_object(BOX)
mom_and_dad_go_to_box = HighLevelSemanticsSituation(
salient_objects=[mom, dad, box],
actions=[
Action(
GO, argument_roles_to_fillers=[(AGENT, mom), (AGENT, dad), (GOAL, box)]
)
],
ontology=GAILA_PHASE_1_ONTOLOGY,
)
with pytest.raises(RuntimeError):
generated_tokens(mom_and_dad_go_to_box)
def test_only_goal():
box = situation_object(BOX)
only_goal = HighLevelSemanticsSituation(
salient_objects=[box],
actions=[
Action(GO, argument_roles_to_fillers=[(GOAL, Region(box, distance=PROXIMAL))])
],
ontology=GAILA_PHASE_1_ONTOLOGY,
)
with pytest.raises(RuntimeError):
generated_tokens(only_goal)
def test_region_as_theme():
box = situation_object(BOX)
region_as_theme = HighLevelSemanticsSituation(
salient_objects=[box],
actions=[
Action(
FALL, argument_roles_to_fillers=[(THEME, Region(box, distance=PROXIMAL))]
)
],
ontology=GAILA_PHASE_1_ONTOLOGY,
)
with pytest.raises(RuntimeError):
        generated_tokens(region_as_theme)
# pirates/quest/DistributedQuestGiver.py (from ksmit799/POTCO-PS)
from panda3d.core import *
from direct.showbase.ShowBaseGlobal import *
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.distributed.ClockDelta import *
from direct.interval.IntervalGlobal import *
from direct.showbase.PythonUtil import getShortestRotation
from otp.avatar import Avatar
from otp.otpgui import OTPDialog
from pirates.quest import QuestMenuGUI, QuestConstants, QuestDB, QuestLadderDB
from pirates.quest import QuestParser
from pirates.distributed import InteractGlobals
from pirates.quest import QuestLadderDB
from pirates.piratesbase import PLocalizer
from pirates.piratesbase import Freebooter
from pirates.piratesgui import PDialog
from pirates.quest.QuestDetailGUI import QuestDetailGUI
from pirates.quest.QuestRewardGUI import QuestRewardGUI
from pirates.quest import BranchMenuGUI
from pirates.quest import QuestTaskDNA
from pirates.quest import QuestOffer
from pirates.reputation.ReputationGlobals import getLevelFromTotalReputation
from pirates.quest.DialogTree import *
from pirates.quest.DialogProcessMaster import *
#from libotp import CFSpeech, CFTimeout
# ^ Important?
class DistributedQuestGiver(Avatar.Avatar):
notify = directNotify.newCategory('DistributedQuestGiver')
NoOffer = 0
LadderOffer = 1
QuestOffer = 2
InteractOffer = 3
BranchOffer = 4
QuestIconWorkTexture = None
QuestIconStoryTexture = None
QuestIconProgressTexture = None
QuestIconCompleteTexture = None
QuestIconDontCare = 1
QuestIconStory = 2
QuestIconWork = 3
QuestIconNew = 1
QuestIconProgress = 2
QuestIconComplete = 3
def __init__(self):
self.playingQuestString = False
self.dialogOpen = False
self.newOffer = False
self.offers = None
self.offerType = self.NoOffer
self.dialogFlag = 0
self.firstDialog = True
self.newDialog = False
self.npcMoviePlayer = None
self.quitButton = 0
self.nametagIcon = None
self.nametagIconGlow = None
self.containerId = None
self.dialogAnimSet = None
self.animationIval = None
self.resetQuest = None
self.resetBranch = None
self.selectedOffer = None
self.dialogProcessMaster = None
self.dialogQuestOffer = None
def generate(self):
DistributedQuestGiver.notify.debug('generate(%s)' % self.doId)
self.questMenuGUI = None
self.questDetailGUI = None
self.questRewardGUI = None
self.branchMenuGUI = None
self.questDetailCamera = None
def announceGenerate(self):
DistributedQuestGiver.notify.debug('announceGenerate(%s)' % self.doId)
def disable(self):
DistributedQuestGiver.notify.debug('disable(%s)' % self.doId)
if self.npcMoviePlayer:
self.npcMoviePlayer.cleanup()
self.npcMoviePlayer = None
self.cleanUpQuestMenu()
self.cleanUpQuestDetails()
self.cleanUpBranchMenu()
self.ignore('endDialogNPCInteract')
self.ignore('lastSubtitlePage')
def cleanUpQuestMenu(self):
if self.questMenuGUI:
self.questMenuGUI.destroy()
self.questMenuGUI = None
def cleanUpBranchMenu(self):
self.resetBranch = None
self.selectedOffer = None
if self.branchMenuGUI:
self.branchMenuGUI.destroy()
self.branchMenuGUI = None
def cleanUpQuestDetails(self, hide = False):
if self.questDetailGUI:
if hide:
self.questDetailGUI.hidePanelAndDestroy()
else:
self.questDetailGUI.destroy()
self.questDetailGUI = None
if self.questRewardGUI:
if hide:
self.questRewardGUI.hidePanelAndDestroy()
else:
self.questRewardGUI.destroy()
self.questRewardGUI = None
if not hide and self.questDetailCamera:
self.questDetailCamera.finish()
self.questDetailCamera = None
def delete(self):
DistributedQuestGiver.notify.debug('delete(%s)' % self.doId)
def offerOptions(self):
self.notify.warning('offerOptions() needs override!')
def cancelInteraction(self, av):
self.notify.warning('cancelInteraction() needs override!')
def hasOpenGUI(self):
self.notify.warning('hasOpenGUI() needs override!')
return False
def hasQuestOffers(self):
AvailableQuests = []
inventory = localAvatar.getInventory()
prereqExcludes = base.config.GetString('exclude-prereq-quests', '')
for (questId, questDNA) in QuestDB.QuestDict.items():
if len(prereqExcludes):
if questId in prereqExcludes:
continue
prereqs = questDNA.getPrereqs()
passed = True
for prereq in prereqs:
if not prereq.giverCanGive(self.getUniqueId()):
passed = False
break
if not prereq.avIsReady(localAvatar):
passed = False
break
if questDNA.minLevel > localAvatar.level:
passed = False
break
if not base.cr.questDependency.checkDependency(questId, localAvatar.getQuestLadderHistory(), 1):
passed = False
break
                boolWeapLvlCheck = (questDNA.weapLvlType is not None) and (questDNA.minWeapLevel > 0)
                if boolWeapLvlCheck and (questDNA.minWeapLevel > getLevelFromTotalReputation(questDNA.weapLvlType, inventory.getReputation(questDNA.weapLvlType))[0]):
passed = False
break
if questDNA.getVelvetRoped() and not Freebooter.getPaidStatus(localAvatar.getDoId()):
passed = False
break
if questDNA.getAcquireOnce():
history = localAvatar.getQuestLadderHistory()
questLadderId = base.cr.questDynMap.findQuestLadderInt(questId)
containsLadderId = history.count(questLadderId)
if containsLadderId:
passed = False
break
if questDNA.getHoliday() is not None:
holidayId = questDNA.getHoliday()
if base.cr.newsManager and not base.cr.newsManager.getHoliday(holidayId):
passed = False
break
if prereqs and passed:
AvailableQuests.append(questDNA)
continue
if len(AvailableQuests):
inventory = localAvatar.getInventory()
if inventory:
toRemove = []
questList = inventory.getQuestList()
for questDNA in AvailableQuests:
questId = questDNA.getQuestId()
found = False
for quest in questList:
if questId == quest.getQuestId() or localAvatar.questStatus.hasLadderQuestId(questId):
found = True
continue
if found:
toRemove.append(questDNA)
continue
for questDNA in toRemove:
AvailableQuests.remove(questDNA)
for quest in localAvatar.getQuests():
if quest and quest.getTimeLimit() and quest.canBeReturnedTo(self.getQuestGiverId()):
return True
continue
return len(AvailableQuests) > 0
def receiveOffer(self, offerType):
self.newOffer = True
self.offerType = offerType
def clearOffer(self):
self.newOffer = False
self.offerType = self.NoOffer
def displayNewQuests(self):
self.cleanUpQuestDetails()
while len(localAvatar.currentStoryQuests) and localAvatar.currentStoryQuests[0].getGiverId() != self.uniqueId:
if localAvatar.currentStoryQuests[0].getGiverId() == '0':
break
localAvatar.currentStoryQuests.remove(localAvatar.currentStoryQuests[0])
if len(localAvatar.currentStoryQuests):
storyQuest = localAvatar.currentStoryQuests[0]
self.presentQuestGiven(storyQuest)
localAvatar.currentStoryQuests.remove(storyQuest)
def presentOffer(self):
        if not self.newOffer:
while len(localAvatar.currentStoryQuests) and localAvatar.currentStoryQuests[0].getGiverId() != self.uniqueId:
localAvatar.currentStoryQuests.remove(localAvatar.currentStoryQuests[0])
if len(localAvatar.currentStoryQuests):
storyQuest = localAvatar.currentStoryQuests[0]
self.presentQuestGiven(storyQuest)
localAvatar.currentStoryQuests.remove(storyQuest)
return None
if self.offerType == self.QuestOffer:
self.presentQuestOffer(self.offers)
elif self.offerType == self.LadderOffer:
self.presentQuestOffer(self.offers, ladder = True)
elif self.offerType == self.InteractOffer:
self.offerOptions(self.dialogFlag)
elif self.offerType == self.NoOffer:
self.notify.warning('offerType == No Offer')
self.clearOffer()
def setQuestOffer(self, offers):
self.receiveOffer(self.QuestOffer)
self.offers = offers
for quest in localAvatar.getQuests():
if quest.getTimeLimit() and quest.canBeReturnedTo(self.getQuestGiverId()):
questOffer = QuestOffer.QuestTimerResetOffer.create(quest.getQuestId(), localAvatar, timerReset = True)
offers.append(questOffer)
branchParent = quest.getBranchParent(localAvatar)
if branchParent and branchParent.getGiverId() == self.getQuestGiverId():
questOffer = QuestOffer.QuestBranchResetOffer.create(quest.getQuestId(), localAvatar, branchReset = True)
offers.append(questOffer)
continue
if not self.playingQuestString:
self.presentQuestOffer(self.offers)
def setQuestLadderOffer(self, offers, quitButton):
self.receiveOffer(self.LadderOffer)
self.offers = offers
self.quitButton = quitButton
if not self.playingQuestString:
self.presentQuestOffer(self.offers, ladder = True)
def presentQuestOffer(self, offers, ladder = False):
if self.questMenuGUI:
DistributedQuestGiver.notify.warning('setQuestOffer: old questMenu GUI still around')
self.cleanUpQuestMenu()
self.cleanUpQuestDetails()
def handleSelection(offer, self = self, offers = offers):
self.cleanUpQuestMenu()
if offer == QuestConstants.CANCEL_QUEST:
index = QuestConstants.CANCEL_QUEST
else:
index = offers.index(offer)
self.sendOfferResponse(index, ladder)
def handleOption(option, offer):
base.test = self
self.ignore('lastSubtitlePage')
self.adjustNPCCamera('back')
if option == PLocalizer.Accept:
handleSelection(offer)
elif self.questMenuGUI:
self.questMenuGUI.show()
self.cleanUpQuestDetails(hide = True)
def displayQuestDetails(offer):
self.questDetailGUI = QuestDetailGUI(offer, None)
self.questDetailGUI.showPanel()
base.questdet = self.questDetailGUI
def displayBranchDetails(offer):
self.selectedOffer = offer
self.cleanUpQuestDetails()
self.questDetailGUI = QuestDetailGUI(offer, None)
self.questDetailGUI.showPanel()
base.questdet = self.questDetailGUI
def displayBranchOptions(offer, callback, descCallback):
self.branchMenuGUI = BranchMenuGUI.BranchMenuGUI(offer, callback, descCallback)
def handleBranchOption(option):
if option == PLocalizer.Accept:
if self.selectedOffer:
self.sendOfferResponse(0, ladder, self.selectedOffer)
self.adjustNPCCamera('back')
self.cleanUpQuestDetails(hide = True)
self.cleanUpBranchMenu()
if self.questMenuGUI:
self.questMenuGUI.show()
def describeQuest(offer):
self.adjustNPCCamera('forward')
questDNA = offer.getQuestDNA()
if questDNA:
if isinstance(offer, QuestOffer.QuestTimerResetOffer):
self.requestQuestReset(offer.getQuestId())
return None
elif isinstance(offer, QuestOffer.QuestBranchResetOffer):
self.requestBranchReset(offer.getQuestId())
return None
questStr = questDNA.getStringBefore()
if questDNA.isBranch():
self.acceptOnce('lastSubtitlePage', displayBranchOptions, [
offer,
None,
displayBranchDetails])
localAvatar.guiMgr.subtitler.setPageChat(questStr, options = [
PLocalizer.Decline,
PLocalizer.Accept], callback = handleBranchOption)
else:
self.acceptOnce('lastSubtitlePage', displayQuestDetails, [
offer])
localAvatar.guiMgr.subtitler.setPageChat(questStr, options = [
PLocalizer.Decline,
PLocalizer.Accept], callback = handleOption, extraArgs = [
offer])
def questFull(arg):
self.cleanUpQuestMenu()
self.sendOfferResponse(QuestConstants.CANCEL_QUEST, ladder)
inv = base.localAvatar.getInventory()
numWorkQuests = 0
if inv:
questList = inv.getQuestList()
for questId in questList:
if not QuestLadderDB.getFamePath(questId):
numWorkQuests += 1
continue
hasStoryQuest = False
for offer in offers:
if QuestLadderDB.getFamePath(offer.getQuestId()):
hasStoryQuest = True
continue
if not hasStoryQuest and numWorkQuests > QuestConstants.MAXIMUM_MERC_WORK:
self.questMenuGUI = PDialog.PDialog(text = PLocalizer.QuestFull, style = OTPDialog.Acknowledge, command = questFull)
else:
self.questMenuGUI = QuestMenuGUI.QuestMenuGUI(offers, handleSelection, describeQuest)
localAvatar.currentStoryQuests = []
self.clearOffer()
def presentBranchReset(self, declineOption = True):
if not self.resetBranch:
return None
def displayBranchOptions(offer, callback, descCallback):
self.branchMenuGUI = BranchMenuGUI.BranchMenuGUI(offer, callback, descCallback)
def displayBranchDetails(offer):
self.selectedOffer = offer
self.cleanUpQuestDetails()
self.questDetailGUI = QuestDetailGUI(offer, None)
self.questDetailGUI.showPanel()
base.questdet = self.questDetailGUI
subtitleOptions = [
PLocalizer.Accept]
if declineOption:
subtitleOptions = [
PLocalizer.Decline,
PLocalizer.Accept]
localAvatar.guiMgr.subtitler.setPageChat('Is that your choice?', options = subtitleOptions, callback = handleBranchOption)
def handleBranchOption(option):
if option == PLocalizer.Accept:
if self.selectedOffer:
self.sendOfferResponse(0, offer = self.selectedOffer)
else:
self.offerOptions(False)
self.adjustNPCCamera('back')
self.cleanUpQuestDetails(hide = True)
self.cleanUpBranchMenu()
self.cleanUpQuestMenu()
offer = QuestOffer.QuestOffer.create(self.resetBranch.getName(), localAvatar)
if declineOption:
subtitleOptions = [
PLocalizer.Decline]
self.acceptOnce('lastSubtitlePage', displayBranchOptions, [
offer,
None,
displayBranchDetails])
localAvatar.guiMgr.subtitler.setPageChat('', options = subtitleOptions, callback = handleBranchOption)
else:
subtitleOptions = []
displayBranchOptions(offer, None, displayBranchDetails)
self.ignore('doneChatPage')
def presentQuestReset(self):
if not self.resetQuest:
return None
def handleOption(option):
if option == PLocalizer.Accept:
self.resetQuest.startTimer()
self.resetQuest = None
self._DistributedQuestGiver__handleDoneChatPage(0)
self.questDetailGUI = QuestDetailGUI(None, None, self.resetQuest.questDNA)
self.questDetailGUI.showPanel()
localAvatar.guiMgr.subtitler.setPageChat('', options = [
PLocalizer.Accept], callback = handleOption)
def presentQuestGiven(self, quest):
self.resetBranch = None
if self.resetBranch:
container = localAvatar.questStatus.getContainer(quest.getQuestId())
if container.parent and container.parent.getFirstQuestId() == quest.getQuestId():
self.presentBranchReset(False)
return None
else:
self.resetBranch = None
def handleOption(option):
if len(localAvatar.currentStoryQuests):
self.cleanUpQuestDetails(hide = True)
while len(localAvatar.currentStoryQuests) and localAvatar.currentStoryQuests[0].getGiverId() != self.uniqueId:
localAvatar.currentStoryQuests.remove(localAvatar.currentStoryQuests[0])
if len(localAvatar.currentStoryQuests):
storyQuest = localAvatar.currentStoryQuests[0]
self.presentQuestGiven(storyQuest)
localAvatar.currentStoryQuests.remove(storyQuest)
elif hasattr(quest, 'questDNA') and quest.questDNA.getTimeLimit():
quest.startTimer()
self._DistributedQuestGiver__handleDoneChatPage(0)
self.questDetailGUI = QuestDetailGUI(None, None, quest)
self.questDetailGUI.showPanel()
localAvatar.guiMgr.subtitler.setPageChat('', options = [
PLocalizer.Accept], callback = handleOption)
base.questdet = self.questDetailGUI
self.ignore('doneChatPage')
def adjustNPCCamera(self, direction):
dummy = NodePath('dummy')
dummy.reparentTo(camera)
if direction == 'forward':
dummy.setH(dummy, -15)
dummy.setY(dummy, 0.75)
            duration = 0.7
else:
dummy.setY(dummy, -0.75)
dummy.setH(dummy, 15)
duration = 0.5
dummy.wrtReparentTo(camera.getParent())
(camH, dummyH) = getShortestRotation(camera.getH(), dummy.getH())
        self.questDetailCamera = Parallel(
            LerpFunc(camera.setH, duration = duration, fromData = camH, toData = dummyH, blendType = 'easeInOut'),
            LerpFunc(camera.setY, duration = duration, fromData = camera.getY(), toData = dummy.getY(), blendType = 'easeInOut'))
dummy.removeNode()
self.questDetailCamera.start()
def getOfferedQuests(self):
return list(self.offers)
def sendOfferResponse(self, index, ladder = False, offer = None):
if index == QuestConstants.CANCEL_QUEST:
self.dialogOpen = False
if offer:
self.sendUpdate('assignBranchOffer', [
offer])
- **MaxConcurrency** *(string) --*
The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. The default value is 100%, which means all targets run the association at the same time.
If a new instance starts and attempts to run an association while Systems Manager is running MaxConcurrency associations, the association is allowed to run. During the next association interval, the new instance will process its association within the limit specified for MaxConcurrency.
- **ComplianceSeverity** *(string) --*
The severity level to assign to the association.
- **Message** *(string) --*
A description of the failure.
- **Fault** *(string) --*
The source of the failure.
:type Entries: list
:param Entries: **[REQUIRED]**
One or more associations.
- *(dict) --*
Describes the association of a Systems Manager SSM document and an instance.
- **Name** *(string) --* **[REQUIRED]**
The name of the SSM document that contains the configuration information for the instance. You can specify Command or Automation documents.
You can specify AWS-predefined documents, documents you created, or a document that is shared with you from another account.
For SSM documents that are shared with you from other AWS accounts, you must specify the complete SSM document ARN, in the following format:
``arn:aws:ssm:*region* :*account-id* :document/*document-name* ``
For example:
``arn:aws:ssm:us-east-2:12345678912:document/My-Shared-Document``
For AWS-predefined documents and SSM documents you created in your account, you only need to specify the document name. For example, ``AWS-ApplyPatchBaseline`` or ``My-Document`` .
- **InstanceId** *(string) --*
The ID of the instance.
- **Parameters** *(dict) --*
A description of the parameters for a document.
- *(string) --*
- *(list) --*
- *(string) --*
- **AutomationTargetParameterName** *(string) --*
Specify the target for the association. This target is required for associations that use an Automation document and target resources by using rate controls.
- **DocumentVersion** *(string) --*
The document version.
- **Targets** *(list) --*
The instances targeted by the request.
- *(dict) --*
An array of search criteria that targets instances using a Key,Value combination that you specify. ``Targets`` is required if you don\'t provide one or more instance IDs in the call.
- **Key** *(string) --*
User-defined criteria for sending commands that target instances that meet the criteria. ``Key`` can be ``tag:<Amazon EC2 tag>`` or ``InstanceIds`` . For more information about how to send commands that target instances using ``Key,Value`` parameters, see `Using Targets and Rate Controls to Send Commands to a Fleet <https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html#send-commands-targeting>`__ in the *AWS Systems Manager User Guide* .
- **Values** *(list) --*
User-defined criteria that maps to ``Key`` . For example, if you specified ``tag:ServerRole`` , you could specify ``value:WebServer`` to run a command on instances that include Amazon EC2 tags of ``ServerRole,WebServer`` . For more information about how to send commands that target instances using ``Key,Value`` parameters, see `Using Targets and Rate Controls to Send Commands to a Fleet <https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html>`__ in the *AWS Systems Manager User Guide* .
- *(string) --*
- **ScheduleExpression** *(string) --*
A cron expression that specifies a schedule when the association runs.
- **OutputLocation** *(dict) --*
An Amazon S3 bucket where you want to store the results of this request.
- **S3Location** *(dict) --*
An Amazon S3 bucket where you want to store the results of this request.
- **OutputS3Region** *(string) --*
(Deprecated) You can no longer specify this parameter. The system ignores it. Instead, Systems Manager automatically determines the Amazon S3 bucket region.
- **OutputS3BucketName** *(string) --*
The name of the Amazon S3 bucket.
- **OutputS3KeyPrefix** *(string) --*
The Amazon S3 bucket subfolder.
- **AssociationName** *(string) --*
Specify a descriptive name for the association.
- **MaxErrors** *(string) --*
The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50 instances and set MaxError to 10%, then the system stops sending the request when the sixth error is received.
Executions that are already running an association when MaxErrors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won\'t be more than max-errors failed executions, set MaxConcurrency to 1 so that executions proceed one at a time.
- **MaxConcurrency** *(string) --*
The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. The default value is 100%, which means all targets run the association at the same time.
If a new instance starts and attempts to run an association while Systems Manager is running MaxConcurrency associations, the association is allowed to run. During the next association interval, the new instance will process its association within the limit specified for MaxConcurrency.
- **ComplianceSeverity** *(string) --*
The severity level to assign to the association.
:rtype: dict
:returns:
"""
pass
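    # A minimal usage sketch for the Entries schema documented above; the
    # document name, tag key/values, and schedule are illustrative assumptions:
    #
    #   response = client.create_association_batch(
    #       Entries=[
    #           {
    #               'Name': 'AWS-ApplyPatchBaseline',
    #               'Targets': [{'Key': 'tag:ServerRole', 'Values': ['WebServer']}],
    #               'ScheduleExpression': 'cron(0 2 ? * SUN *)',
    #               'MaxConcurrency': '10%',
    #               'MaxErrors': '1',
    #           },
    #       ],
    #   )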
def create_document(self, Content: str, Name: str, Attachments: List = None, VersionName: str = None, DocumentType: str = None, DocumentFormat: str = None, TargetType: str = None, Tags: List = None) -> Dict:
"""
Creates a Systems Manager document.
After you create a document, you can use CreateAssociation to associate it with one or more running instances.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CreateDocument>`_
**Request Syntax**
::
response = client.create_document(
Content='string',
Attachments=[
{
'Key': 'SourceUrl',
'Values': [
'string',
]
},
],
Name='string',
VersionName='string',
DocumentType='Command'|'Policy'|'Automation'|'Session'|'Package',
DocumentFormat='YAML'|'JSON',
TargetType='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
**Response Syntax**
::
{
'DocumentDescription': {
'Sha1': 'string',
'Hash': 'string',
'HashType': 'Sha256'|'Sha1',
'Name': 'string',
'VersionName': 'string',
'Owner': 'string',
'CreatedDate': datetime(2015, 1, 1),
'Status': 'Creating'|'Active'|'Updating'|'Deleting'|'Failed',
'StatusInformation': 'string',
'DocumentVersion': 'string',
'Description': 'string',
'Parameters': [
{
'Name': 'string',
'Type': 'String'|'StringList',
'Description': 'string',
'DefaultValue': 'string'
},
],
'PlatformTypes': [
'Windows'|'Linux',
],
'DocumentType': 'Command'|'Policy'|'Automation'|'Session'|'Package',
'SchemaVersion': 'string',
'LatestVersion': 'string',
'DefaultVersion': 'string',
'DocumentFormat': 'YAML'|'JSON',
'TargetType': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'AttachmentsInformation': [
{
'Name': 'string'
},
]
}
}
**Response Structure**
- *(dict) --*
- **DocumentDescription** *(dict) --*
Information about the Systems Manager document.
- **Sha1** *(string) --*
The SHA1 hash of the document, which you can use for verification.
- **Hash** *(string) --*
The Sha256 or Sha1 hash created by the system when the document was created.
.. note::
Sha1 hashes have been deprecated.
- **HashType** *(string) --*
The hash type of the document. Valid values include ``Sha256`` or ``Sha1`` .
.. note::
Sha1 hashes have been deprecated.
- **Name** *(string) --*
The name of the Systems Manager document.
- **VersionName** *(string) --*
The version of the artifact associated with the document.
- **Owner** *(string) --*
The AWS user account that created the document.
- **CreatedDate** *(datetime) --*
The date when the document was created.
- **Status** *(string) --*
The status of the Systems Manager document.
- **StatusInformation** *(string) --*
A message returned by AWS Systems Manager that explains the ``Status`` value. For example, a ``Failed`` status might be explained by the ``StatusInformation`` message, "The specified S3 bucket does not exist. Verify that the URL of the S3 bucket is correct."
- **DocumentVersion** *(string) --*
The document version.
- **Description** *(string) --*
A description of the document.
- **Parameters** *(list) --*
A description of the parameters for a document.
- *(dict) --*
                Parameters specified in a Systems Manager document that run on the server when the command is sent.
import subprocess, ffmpy, pycountry, unidecode, shutil, re, requests, json, os, argparse, time, sys, base64, configparser, glob, pycaption, m3u8
from collections import OrderedDict
from natsort import natsorted
from titlecase import titlecase
from pydisney.disneyplus_api import DSNP
from pydisney.disneyplus_parser import Parser
from pydisney.m3u8_formater import M3U8
from pydisney.disneyplus_login import LOGIN
from pydisney.disneyplus_muxer import Muxer
import pydisney.namehelper as namer
from pywidevine.decrypt.wvdecrypt import WvDecrypt
parser = argparse.ArgumentParser(description='>>> DISNEY+ <<<')
parser.add_argument("--url", dest="disneyurl", help="The DSNP viewable URL.")
parser.add_argument('-q', action="store", dest='customquality', help="Configure the video quality.", default=0)
parser.add_argument("--atmos", dest="atmos", help="If set, return atmos audio manifest", action="store_true")
parser.add_argument("--only-2ch-audio", dest="only_2ch_audio", help="If set, force only eac3 2.0 Ch audio.", action="store_true")
parser.add_argument("--hevc", dest="hevc", help="If set, return hevc video manifest", action="store_true")
parser.add_argument("--hdr", dest="hdr", help="If set, return uhd_hdr video manifest", action="store_true")
parser.add_argument("--uhd", dest="uhd", help="If set, return uhd video manifest", action="store_true")
parser.add_argument('--default-audio-mux', action='store', dest='default_audio_mux', help='set default audio language mux, default value is eng.', default=0)
parser.add_argument('--default-sub-mux', action='store', dest='default_sub_mux', help='set default sub language mux, default value is eng.', default=0)
parser.add_argument("--all-season", dest="all_season", help="If set, season pack download.", action="store_true")
parser.add_argument("-e", "--episode", dest="episode", help="If set, it will start downloading the season from that episode.")
parser.add_argument("-s", dest="season", help="If set, it will start downloading from that season.")
parser.add_argument("-o", "--output", dest="outputfolder", help="If set, it will download all assets to directory provided.")
parser.add_argument("--alang", "--audio-language", dest="audiolang", nargs="*", help="If set, download only selected audio languages", default=[])
parser.add_argument("--slang", "--subtitle-language", dest="sublang", nargs="*", help="If set, download only selected subtitle languages", default=[])
parser.add_argument("--flang", "--forced-language", dest="forcedlang", nargs="*", help="If set, download only selected forced subtitle languages", default=[])
parser.add_argument("--license", dest="license", help="If set, print keys and exit.", action="store_true")
parser.add_argument("--nv", "--no-video", dest="novideo", help="If set, don't download video", action="store_true")
parser.add_argument("--na", "--no-audio", dest="noaudio", help="If set, don't download audio", action="store_true")
parser.add_argument("--ns", "--no-subs", dest="nosubs", help="If set, don't download subs", action="store_true")
parser.add_argument("--keep", dest="keep", help="If set, keep all files after mux; by default all are erased.", action="store_true")
parser.add_argument("--group", "--gr", dest="group", help="Tag.", action="store")
parser.add_argument("--txtkeys", dest="txtkeys", help="If set, read keys from txt.", action="store_true")
args = parser.parse_args()
Config = configparser.ConfigParser(interpolation=None)
currentFile = 'Disney+'
realPath = os.path.realpath(currentFile)
dirPath = os.path.dirname(realPath)
dirName = os.path.basename(dirPath)
mp4decryptexe = dirPath + "/bin/mp4decrypt.exe"
mkvmergeexe = dirPath + "/bin/mkvmerge.exe"
aria2cexe = dirPath + "/bin/aria2c.exe"
ffmpegpath = dirPath + '/bin/ffmpeg.exe'
SubtitleEditexe = dirPath + '/bin/SE363/SubtitleEdit.exe'
mp4dumptexe = dirPath + '/bin/mp4dump.exe'
KEYS_Folder = dirPath + '/KEYS'
KEYS_Text = dirPath + '/KEYS/KEYS.txt'
token_file = dirPath + "/token.ini"
DsnpCFG = dirPath + "/dsnp.cfg"
proxy_user = {
'proxy': '---',
'email': '---',
'passwd': '---'
}
proxies = {
"http": "http://{email}:{passwd}@{proxy}".format(
email=proxy_user['email'],
        passwd=proxy_user['passwd'],
proxy=proxy_user['proxy']
),
"https": "http://{email}:{passwd}@{proxy}".format(
email=proxy_user['email'],
        passwd=proxy_user['passwd'],
proxy=proxy_user['proxy']
)
}
if os.path.exists(DsnpCFG):
Config.read(DsnpCFG)
DSNP_EMAIL = Config.get("config", "email")
DSNP_PASS = Config.get("config", "pass")
else:
    print("\ndsnp.cfg file is missing.")
sys.exit()
global account_info
account_info = {
'email': DSNP_EMAIL,
'pass': DSNP_PASS
}
def load(m3u8):
is2ch=False
m3u8_main = m3u8[0].replace('/mickey/', '/')
atmos_m3u8 = m3u8[1]
load_manifest = Parser(m3u8_main, atmos_m3u8, is2ch=is2ch)
videoList, AudioList, subtitleList, forcedlist, AudioExtension = load_manifest.Parser()
return videoList, AudioList, subtitleList, forcedlist, AudioExtension
def get_pssh(url):
widevine_pssh = None
m3u8_obj = m3u8.load(url)
for key in m3u8_obj.keys:
if key is not None and key.keyformat == "urn:uuid:edef8ba9-79d6-4ace-a3c8-27dcd51d21ed":
widevine_pssh = key.uri
if widevine_pssh is not None:
widevine_pssh = widevine_pssh.partition('base64,')[2]
return widevine_pssh
return False
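# Usage sketch (the manifest URL is a placeholder): the base64 PSSH returned
# here is what do_decrypt() below passes to WvDecrypt to request content keys.
#
#   pssh = get_pssh('https://example.com/media/master_unenc.m3u8')
#   if pssh:
#       keys = do_decrypt(pssh)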
def find_str(s, char):
    # Return the index of the first full occurrence of substring `char` in `s`, or -1.
    index = 0
    if char in s:
        c = char[0]
        for ch in s:
            if ch == c:
                if s[index:index + len(char)] == char:
                    return index
            index += 1
    return -1
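# Example: find_str('disney+stream', 'ney') returns 3; a substring that never
# occurs returns -1.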
def getKeyId(mp4_file):
KID_dict = {}
KID_list = []
data = subprocess.check_output([mp4dumptexe, '--format', 'json', '--verbosity', '1', mp4_file])
mp4dump = json.loads(data)
for atom in mp4dump:
if atom['name'] == 'moov':
for children in atom['children']:
if children['name'] == 'trak':
for trak in children['children']:
if trak['name'] == 'mdia':
for mdia in trak['children']:
if mdia['name'] == 'minf':
for minf in mdia['children']:
if minf['name'] == 'stbl':
for stbl in minf['children']:
if stbl['name'] == 'stsd':
for stsd in stbl['children']:
if stsd['name'] == 'encv':
for encv in stsd['children']:
if encv['name'] == 'sinf':
for sinf in encv['children']:
if sinf['name'] == 'schi':
for schi in sinf['children']:
default_KID = schi['default_KID'].replace(' ', '').replace('[', '').replace(']', '').lower()
KID_upper = default_KID.upper()
KID_upper = KID_upper[0:8] + '-' + KID_upper[8:12] + '-' + KID_upper[12:16] + '-' + KID_upper[16:20] + '-' + KID_upper[20:32]
KID_dict = {'name':schi['name'],
'default_KID':default_KID,
'KID_alt':KID_upper}
KID_list.append(KID_dict)
if KID_list:
KID = KID_list[-1]['default_KID']
KID_alt = KID_list[-1]['KID_alt']
else:
KID = 'nothing'
KID_alt = 'nothing'
print(KID)
return (KID)
def generate_token():
print('\nGenerate token...')
    LOG = LOGIN(email=account_info['email'], password=account_info['pass'], proxies={})
TOKEN, EXPIRE = LOG.GetAuthToken()
print("Done!")
return TOKEN, EXPIRE
def save_token(token, expire_in):
print('\nSaving token...')
current_time = int(time.time())
expire_date = current_time + expire_in
token_dump = {'token': token, 'expire_date': str(expire_date)}
if os.path.exists(token_file):
os.remove(token_file)
with open(token_file, 'w') as tok:
tok.write(json.dumps(token_dump))
print("Done!")
return
def load_token_file():
print('\nLoading token...')
if not os.path.exists(token_file):
        print('Error: token file not found.')
return False
else:
current_time = int(time.time())
with open(token_file, 'r') as tok:
token = json.loads(tok.read())
token_time = int(token['expire_date'])
token_less_10min = token_time - 600
#~ check if token expired.
if current_time > token_time:
print('Error: token is expired.')
return False
#~ check if token will be expired within 10 minutes.
elif current_time > token_less_10min:
print('Warning: token will be expired within 10 min.')
return False
else:
try:
print('Done: expire in: ' + str(int((int(token['expire_date']) - int(time.time())) / 60)) + ' min')
except Exception:
pass
Token = token['token']
return Token
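# Typical token flow (sketch): reuse the cached token while it is still valid,
# otherwise regenerate and persist a fresh one.
#
#   token = load_token_file()
#   if not token:
#       token, expires_in = generate_token()
#       save_token(token, expires_in)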
def do_decrypt(pssh):
wvdecrypt = WvDecrypt(pssh)
challenge = wvdecrypt.get_challenge()
resp = requests.post(
url='https://global.edge.bamgrid.com/widevine/v1/obtain-license',
headers={
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.75 Safari/537.36',
'Authorization': f'Bearer {AuthorizationToken}'
},
data=challenge
)
license_b64 = base64.b64encode(resp.content)
wvdecrypt.update_license(license_b64)
keys = wvdecrypt.start_process()
return keys
def ReplaceDontLikeWord(X):
try:
X = X.replace(" : ", " - ").replace(": ", " - ").replace(":", " - ").replace("&", "and").replace("+", "").replace(";", "").replace("ó", "o").\
replace("[", "").replace("'", "").replace("]", "").replace("/", "").replace("//", "").\
replace("’", "'").replace("*", "x").replace("<", "").replace(">", "").replace("|", "").\
replace("~", "").replace("#", "").replace("%", "").replace("{", "").replace("}", "").replace(",","").\
replace("?","").encode('latin-1').decode('latin-1')
except Exception:
X = X.decode('utf-8').replace(" : ", " - ").replace(": ", " - ").replace(":", " - ").replace("&", "and").replace("+", "").replace(";", "").\
replace("ó", "o").replace("[", "").replace("'", "").replace("]", "").replace("/", "").\
replace("//", "").replace("’", "'").replace("*", "x").replace("<", "").replace(">", "").replace(",","").\
replace("|", "").replace("~", "").replace("#", "").replace("%", "").replace("{", "").replace("}", "").\
replace("?","").encode('latin-1').decode('latin-1')
return titlecase(X)
def FixShowName(name):
x = name
try:
try:
x = ReplaceDontLikeWord(unidecode.unidecode(name))
except Exception:
x = ReplaceDontLikeWord(name)
except Exception:
pass
return x
def FixSeq(seq):
    # Zero-pad single-digit season/episode numbers, e.g. FixSeq(3) -> '03'.
    return str(seq).zfill(2)
def StripInputInt(inputint):
x = inputint
if int(x[0]) == 0:
stripped_x = x[1:]
else:
stripped_x = x
return str(stripped_x)
def do_clean(CurrentName):
    # Remove leftover intermediate files for this title (Windows shell 'del').
    extensions = ('mp4', 'h265', 'h264', 'eac3', 'm4a', 'ac3', 'srt', 'vtt', 'txt', 'aac', 'm3u8')
    try:
        for ext in extensions:
            os.system('if exist "' + CurrentName + '*.' + ext + '" (del /q /f "' + CurrentName + '*.' + ext + '")')
    except Exception:
        pass
    return
def PRINT(videoList, AudioList, subtitleList):
try:
print('\nVIDEO')
for i in videoList:
print('VIDEO' + ' - Bitrate: ' + i['bitrate'] + 'kbps | Codec: ' + i['codec'] + ' | Resolution: ' + i['resolution'])
print('\nAUDIO')
for i in AudioList:
print('AUDIO' + ' - Bitrate: ' + i['bitrate'] + 'kbps | Codec: ' + i['codec'] + ' | Channels: ' + i['channels'] + ' | Language: ' + i['language'])
print('\nSUBS')
for s in subtitleList:
code = s['code']
lang = s['language']
print(f'SUBS - Language: {lang} | ISO 639-2: {code}')
except Exception:
pass
return
def demux(inputName, outputName, inpType):
    if (ishevc or ishdr or isuhd) and inpType == 'video':
os.rename(inputName, outputName)
return
ff = ffmpy.FFmpeg(
executable=ffmpegpath,
inputs={inputName: None},
outputs={outputName: '-c copy'},
global_options="-y -hide_banner -loglevel warning"
)
ff.run()
    time.sleep(50.0 / 1000.0)
return True
def build_commandline_list(KEYS):
keycommand = []
keycommand.append('--key')
keycommand.append(KEYS)
return keycommand
def decryptmedia(KEYS, inputName, outputName):
cmd_dec = [mp4decryptexe.replace('\\', '/')]
cmd_keys = build_commandline_list(KEYS)
cmd = cmd_dec + cmd_keys
cmd.append(inputName)
cmd.append(outputName)
wvdecrypt_process = subprocess.Popen(cmd)
stdoutdata, stderrdata = wvdecrypt_process.communicate()
wvdecrypt_process.wait()
return True
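# Usage sketch (hypothetical KID:KEY pair): mp4decrypt expects lowercase hex
# 'KID:KEY' pairs, where the KID matches the default_KID printed by getKeyId().
#
#   decryptmedia('00112233445566778899aabbccddeeff:000102030405060708090a0b0c0d0e0f',
#                'video_encrypted.mp4', 'video_decrypted.mp4')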
def vtt2srt(vtt, srt):
    with open(vtt, "r", encoding="utf8") as f:
        subs = f.read()
    text = pycaption.SRTWriter().write(pycaption.WebVTTReader().read(subs))
    with open(srt, "w", encoding="utf8") as f:
        f.write(text)
    return
def updt(total, progress, textname):
barLength, status = 20, ""
progress = float(progress) / float(total)
if progress >= 1.:
progress, status = 1, "\r\n"
block = int(round(barLength * progress))
text = "\rMerging: {} [{}] {:.0f}% {}".format(
textname, "#" * block + "-" * (barLength - block), round(progress * 100, 0),
status)
sys.stdout.write(text)
sys.stdout.flush()
def downloadsubs(url, output):
print("Downloading %s" % output)
baseurl = url.rsplit('/', 1)[0] + '/'
manifest = requests.get(url).text
segments = re.findall('^(?!#).*',manifest,re.MULTILINE)
segments = list(dict.fromkeys(segments))
segments = [baseurl+x for x in segments]
if 'MAIN' in manifest:
segments = [x for x in segments if 'MAIN' in x]
temp_vtt = output.replace('.srt', '.vtt')
open_vtt = open(temp_vtt , "wb")
for url in segments:
response = requests.get(url)
open_vtt.write(response.content)
open_vtt.close()
if 'sdh' in temp_vtt:
vtt2srt(temp_vtt, output)
if os.path.isfile(temp_vtt) and os.path.isfile(output):
os.remove(temp_vtt)
else:
vtt2srt(temp_vtt, temp_vtt)
return
def download(url, output):
txturls = output + '_links_.txt'
baseurl = url.rsplit('/', 1)[0] + '/'
manifest = requests.get(url).text
dict_m3u8 = M3U8(manifest)
media_segment = dict_m3u8.media_segment
segments = []
frags_path = []
if 'MAIN' in manifest:
for seg in media_segment:
if seg.get('EXT-X-MAP') is not None and 'MAIN' in seg.get('EXT-X-MAP').get('URI'):
segments.append(baseurl+seg.get('EXT-X-MAP').get('URI'))
segments.append(baseurl+seg.get('URI'))
if seg.get('EXT-X-MAP') is None and 'MAIN' in seg.get('URI'):
segments.append(baseurl+seg.get('URI'))
else:
for seg in media_segment:
if seg.get('EXT-X-MAP') is not None:
segments.append(baseurl+seg.get('EXT-X-MAP').get('URI'))
segments.append(baseurl+seg.get('URI'))
if seg.get('EXT-X-MAP') is None:
segments.append(baseurl+seg.get('URI'))
if segments == []:
        print('No segments found!')
return
segments = list(dict.fromkeys(segments))
txt = open(txturls,"w+")
for i, s in enumerate(segments):
name = "0" + str(i) + '.mp4'
frags_path.append(name)
txt.write(s + f"\n out={name}\n")
txt.close()
aria2c_command = [
aria2cexe,
f'--input-file={txturls}',
'-x16',
'-j16',
'-s16',
'--summary-interval=0',
'--retry-wait=3',
'--max-tries=10',
'--enable-color=false',
'--download-result=hide',
'--console-log-level=error'
]
subprocess.run(aria2c_command)
print('Done!\n')
runs = int(len(frags_path))
openfile = open(output ,"wb")
for run_num, fragment in enumerate(frags_path):
if os.path.isfile(fragment):
shutil.copyfileobj(open(fragment,"rb"),openfile)
os.remove(fragment)
updt(runs, run_num + 1, output)
openfile.close()
#os.remove(txturls)
print('Done!')
return
def subtitleformatter(name):
subs = glob.glob(name + "*.vtt")
if subs != []:
subprocess.call([SubtitleEditexe, "/convert", name + '*.vtt', "srt", "/removetextforhi", "/fixcommonerrors", "/overwrite"])
for s in subs:
if os.path.isfile(s):
os.remove(s)
return
def main(episodename, seasonfolder, m3u8Url, SHOW=True):
print("\nParsing M3U8...")
videoList, AudioList, subtitleList, forcedlist, AudioExtension = load(m3u8Url)
print("Done!")
print(f"\n{episodename}")
PRINT(videoList, AudioList, subtitleList+forcedlist)
if not args.license:
if args.customquality:
height = args.customquality
            quality_available = [int(x['height']) for x in videoList]
from kivy.config import Config
Config.set('input', 'mouse', 'mouse,disable_multitouch')
Config.set('kivy','exit_on_escape', 0)
Config.set('kivy', 'desktop', 1)
Config.set('graphics', 'window_state', 'maximized')
Config.write()
from kivymd.app import MDApp
from kivymd.uix.datatables import MDDataTable
from kivymd.uix.snackbar import BaseSnackbar
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.screenmanager import Screen
from kivy.properties import StringProperty
from kivy.core.window import Window
from kivy.metrics import dp
from kivymd.uix.button import MDFillRoundFlatButton, MDFlatButton
from kivymd.uix.dialog import MDDialog
from kivymd.uix.list import ThreeLineIconListItem, TwoLineIconListItem, OneLineIconListItem, ThreeLineAvatarIconListItem
import tiempo as Tiempo
import sqlite3
import os
import getpass
HOME_DIR = str(os.getcwd())
USER_NAME = getpass.getuser()
TIME_FORMAT = "%Y-%m-%d %I:%M:%S %p %Z"
DATABASE = f'{HOME_DIR}/{USER_NAME}.db'
class ContentNavigationDrawer(BoxLayout): user = f'User: {USER_NAME}'
class ListItemWithDelete(OneLineIconListItem): icon = "delete"
class TwoListItemWithDelete(TwoLineIconListItem): icon = "delete"
class ThreeListItemWithDelete(ThreeLineIconListItem): icon = "delete"
class InventoryListItem(TwoLineIconListItem): icon = StringProperty("check-bold")
class DialogContentTracking(BoxLayout): pass
class MySnackbar(BaseSnackbar):
text = StringProperty('')
icon = StringProperty("information-outline")
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.snackbar_x = "10dp"
self.snackbar_y = "10dp"
self.radius = [10,10,10,10]
self.size_hint_x = (Window.width - (self.snackbar_x * 2)) / Window.width
def display(self, message, icon="information-outline", duration: int=3):
self.text = message
self.icon = icon
self.duration = duration
self.open()
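    # Usage sketch (message and icon name are illustrative):
    #   MySnackbar().display("Saved.", icon="check-bold", duration=5)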
class MainLayout(Screen):
_receiving_serial_dub = set()
_shipping_serial_dub = set()
_inventory_count_dub = set()
total_devices = None
_stockroom_total = None
stockroom = None
inventory_results = {
'correct': 0,
'incorrect': [],
'no_inventory': [],
}
_inventory = {}
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._snack_bar = MySnackbar()
self.CONN = sqlite3.connect(DATABASE)
c = self.CONN.cursor()
with self.CONN:
c.execute(("CREATE TABLE IF NOT EXISTS wiping ("
"id INTEGER PRIMARY KEY, "
"serial_number TEXT NOT NULL, "
"date DATE, "
"method TEXT, "
"employee_id TEXT);"))
c.execute(("CREATE TABLE IF NOT EXISTS receiving_detail "
"(id INTEGER PRIMARY KEY, "
"tracking TEXT NOT NULL, "
"date DATE, "
"method TEXT, "
"computers INTEGER, "
"phone INTEGER, "
"tablet INTEGER, "
"hotspots INTEGER, "
"employee_id TEXT);"))
c.execute(("CREATE TABLE IF NOT EXISTS receiving_serials "
"(id INTEGER PRIMARY KEY, "
"serial_number TEXT, "
"receiving_detail_id INTEGER NOT NULL, "
"FOREIGN KEY (receiving_detail_id) "
"REFERENCES receiving_detail (id));"))
c.execute(("CREATE TABLE IF NOT EXISTS inventory "
"(id INTEGER PRIMARY KEY, "
"serial_number TEXT NOT NULL, "
"date DATE, "
"stockroom TEXT, "
"updated_by TEXT, "
"receiving_detail_id INTEGER, "
"FOREIGN KEY (receiving_detail_id) "
"REFERENCES receiving_detail (id));"))
c.execute(("CREATE TABLE IF NOT EXISTS shipping_details "
"(id INTEGER PRIMARY KEY, "
"tracking TEXT NOT NULL, "
"date DATE, "
"company TEXT, "
"employee TEXT);"))
c.execute(("CREATE TABLE IF NOT EXISTS shipping_serials "
"(id INTEGER PRIMARY KEY, "
"serial_numbers TEXT NOT NULL, "
"shipping_details_id INTEGER, "
"FOREIGN KEY (shipping_details_id) "
"REFERENCES shipping_details (id));"))
c.execute(("CREATE TABLE IF NOT EXISTS inventory_results "
"(id INTEGER PRIMARY KEY, "
"stockroom TEXT, "
"total_scanned INTEGER, "
"accuracy INTEGER, "
"correct INTEGER, "
"missing INTEGER, "
"incorrect INTEGER, "
"added INTEGER, "
"date DATE, "
"employee_id TEXT);"))
def search_autocomplete(self, instance: object) -> None:
self.search_table.update_row_data(self.search_table, [])
if not instance.text:
return None
sql = (
"SELECT receiving_serials.serial_number, receiving_detail.tracking, "
"receiving_detail.date, receiving_detail.method, receiving_detail.employee_id "
"FROM receiving_serials "
"INNER JOIN receiving_detail "
"ON receiving_detail.id = receiving_serials.receiving_detail_id "
"WHERE receiving_serials.serial_number LIKE ? "
"ORDER BY receiving_detail.date;"
)
try:
with self.CONN:
result = self.CONN.execute(sql, (f'%{instance.text}%',)).fetchone()
except sqlite3.Error:
pass
else:
if result:
table_data=(
(result[0],
result[1],
Tiempo.from_string(result[2]),
result[3],
result[4]),
('','','','',''),
)
self.search_table.update_row_data(self.search_table, table_data)
def search_change_screen(self, target: str, direction: str="right") -> None:
"""Changes the current screen to any specified screen.
Args:
target (str): Target screen.
direction (str): Transition direction.
"""
self.ids.screen_manager.transition.direction = direction
self.ids.screen_manager.current = target
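    # Usage sketch (screen name is illustrative):
    #   self.search_change_screen('Inventory Details', direction='left')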
def on_kv_post(self, base_widget):
"""Add tables once KV file has loaded."""
self.search_table = MDDataTable(
size_hint = (1, 1),
use_pagination = False,
pagination_menu_pos = 'auto',
elevation = 0,
check = False,
rows_num = 1000,
column_data = [
("Serial Number", dp(70)),
("Tracking", dp(70)),
("Date", dp(45)),
('Received In', dp(45)),
('Employee', dp(25))
],
row_data = []
)
self.ids.search_table_container.add_widget(self.search_table)
return super().on_kv_post(base_widget)
def inventory_count_details(self, date=None, instance=None, search=False):
self.ids.screen_manager.transition.direction = 'left'
self.ids.screen_manager.current = 'Inventory Count Details'
if not search:
self.inventory_date = date
def display_all(stockroom, accuracy, date):
self.ids.inventory_count_details.data.append(
{
"viewclass": "ThreeLineListItem",
"text": stockroom,
"secondary_text": f"Accuracy: {round(accuracy,2)}%",
"tertiary_text": f"Date: {Tiempo.from_string(date)}"
}
)
sql = (
"SELECT stockroom, accuracy, date "
"FROM inventory_results "
"WHERE date(date, 'localtime') LIKE ? "
"ORDER BY date;"
)
self.ids.inventory_count_details.data = []
c = self.CONN.cursor()
try:
with self.CONN:
c.execute(sql, (self.inventory_date,))
except sqlite3.Error:
            self._snack_bar.display(message="Something went wrong. Couldn't connect to database.")
else:
for stockroom, accuracy, date in c:
if search:
if instance.text in stockroom.upper():
display_all(stockroom, accuracy, date)
else:
display_all(stockroom, accuracy, date)
def inventory_count_result(self, instance=None, search=False):
def display_all(date, average, total):
self.ids.inventory_count_results.data.append(
{
"viewclass": "ThreeLineListItem",
"text": date,
"secondary_text": f"Accuracy: {round(average,2)}%",
"tertiary_text": f"Total rooms scanned: {total}",
"on_release": lambda: self.inventory_count_details(date)
}
)
sql = (
"SELECT date(date, 'localtime'), avg(accuracy), count(*) "
"FROM inventory_results "
"GROUP BY date(date, 'localtime') "
"ORDER BY date(date);"
)
self.ids.inventory_count_results.data = []
c = self.CONN.cursor()
try:
with self.CONN:
c.execute(sql)
except sqlite3.Error:
            self._snack_bar.display(message="Something went wrong. Couldn't connect to database.")
else:
for date, average, total in c:
if search:
if instance.text in date:
display_all(date, average, total)
else:
display_all(date, average, total)
def inventory_all(self, stockroom: str=None, instance: object=None, search: bool=False):
self.ids.screen_manager.transition.direction = 'left'
self.ids.screen_manager.current = 'Inventory Details'
if not search:
self.stockroom = stockroom
def display_all(serial, date, updated):
self.ids.inventory_table_all.data.append(
{
"viewclass": "ThreeLineListItem",
"text": serial,
"secondary_text": f"Updated: {Tiempo.from_string(date)}",
"tertiary_text": f"Updated by: {updated}"
}
)
sql = (
"SELECT serial_number, date, updated_by "
"FROM inventory "
"WHERE stockroom LIKE ? "
"ORDER BY serial_number;"
)
self.ids.inventory_table_all.data = []
c = self.CONN.cursor()
try:
with self.CONN:
c.execute(sql, (self.stockroom,))
except sqlite3.Error:
            self._snack_bar.display(message="Something went wrong. Couldn't connect to database.")
else:
for serial, date, updated in c:
if search:
if instance.text in serial:
display_all(serial, date, updated)
else:
display_all(serial, date, updated)
def inventory_screen(self, instance: object=None, search: bool=False) -> None:
"""Update inventory view with up-to-date data."""
c = self.CONN.cursor()
def display(stockroom, total, max):
self.ids.inventory_table.data.append(
{
"viewclass": "ThreeLineListItem",
"text": stockroom,
"secondary_text": "Available Inventory: " + str(total),
"tertiary_text": f"Last updated on: {Tiempo.from_string(max)}",
"on_release": lambda: self.inventory_all(stockroom)
}
)
self.ids.inventory_table.data = []
try:
with self.CONN:
c.execute("SELECT stockroom, count(*), max(date) FROM inventory GROUP BY stockroom;")
except sqlite3.Error:
            self._snack_bar.display(message="Something went wrong. Couldn't connect to database.")
else:
for stockroom, total, max in c:
if search and instance.text:
if instance.text in stockroom.upper():
display(stockroom, total, max)
else:
display(stockroom, total, max)
def wipe_get_previos(self) -> None:
"""Updated the wipe screen with serial numbers wiped today."""
self.ids.wiping_container.data = []
c = self.CONN.cursor()
try:
with self.CONN:
c.execute(
(
"SELECT wiping.serial_number, wiping.date, wiping.method, inventory.id "
"FROM wiping "
"LEFT JOIN inventory "
"ON wiping.serial_number = inventory.serial_number "
"WHERE datetime(wiping.date, 'localtime') >= "
"datetime('now', 'localtime', 'start of day') "
"ORDER BY wiping.date DESC;"
)
)
except sqlite3.Error:
            self._snack_bar.display(message='Something went wrong. Couldn\'t connect to database.')
else:
for serial, date, method, inventory in c:
if inventory:
self.ids.wiping_container.data.append(
{
"viewclass": "ThreeListItemWithDelete",
"text": serial,
"secondary_text": f"Date: {Tiempo.from_string(date)}",
"tertiary_text": f"Method: {method}"
}
)
else:
self.ids.wiping_container.data.append(
{
"viewclass": "ThreeLineIconListItem",
"text": serial,
"secondary_text": f"Date: {Tiempo.from_string(date)}",
"tertiary_text": f"Method: {method}"
}
)
def commit_receiving(self) -> None:
"""Commits the receiving data to the database."""
date = Tiempo.now()
computer = 0 if not self.ids.computer_count.text else int(self.ids.computer_count.text)
phone = 0 if not self.ids.phone_count.text else int(self.ids.phone_count.text)
tablet = 0 if not self.ids.tablet_count.text else int(self.ids.tablet_count.text)
hotspot = 0 if not self.ids.hotspot_count.text else int(self.ids.hotspot_count.text)
method = self.ids.receiving_method.text
tracking = self.ids.receiving_tracking.text
sql_detail = "INSERT INTO receiving_detail VALUES(?,?,?,?,?,?,?,?,?);"
sql_serial = "INSERT INTO receiving_serials VALUES(?,?,?);"
sql_inventory = "INSERT INTO inventory VALUES(?,?,?,?,?,?);"
serial_vals = []
inventory_vals = []
detail_vals = (None, tracking, date, method, computer, phone, tablet, hotspot, USER_NAME,)
try:
c = self.CONN.cursor()
with self.CONN:
detail_id = c.execute(sql_detail, detail_vals).lastrowid
                for item in self._receiving_serial_dub:
                    serial_vals.append((None, item, detail_id,))
                    inventory_vals.append((None, item, date, 'Ready-Stock', USER_NAME, detail_id))
                c.executemany(sql_serial, serial_vals)
                c.executemany(sql_inventory, inventory_vals)
except sqlite3.Error:
self._snack_bar.display(message='Something went wrong. Nothing was saved.')
else:
            self._snack_bar.display(message='Successfully saved.')
self.clear_receiving()
def wipe_commit(self, instance: object=None) -> None:
date = Tiempo.now()
victor_state = self.ids.victor_state.text
# Check if a wipe method was selected
# Display an error and end execution if nothing was selected
if victor_state == 'Select Method':
self._snack_bar.display(message="Please select a [b]Method[/b] first.")
return None
# Check if the scanned serial number is currently in inventory
sql_inventory = "SELECT id FROM inventory WHERE serial_number = ?;"
sql_wipe = (
"SELECT id "
"FROM wiping "
"WHERE serial_number = ? "
"AND datetime(date, 'localtime') >= "
"datetime('now', 'localtime', 'start of day');"
)
try:
with self.CONN:
inventory_id = self.CONN.execute(sql_inventory, (instance.text,)).fetchone()
wipe_id = self.CONN.execute(sql_wipe,(instance.text,)).fetchone()
except sqlite3.Error:
            self._snack_bar.display(message="Something went wrong. Couldn't connect to database.")
"""Tests for SequenceSessionizerSketchPlugin."""
from __future__ import unicode_literals
import mock
from timesketch.lib.analyzers.psexec_sessionizers \
import DestPsexecSessionizerSketchPlugin
from timesketch.lib.analyzers.sequence_sessionizer \
import SequenceSessionizerSketchPlugin
from timesketch.lib.testlib import BaseTest
from timesketch.lib.testlib import MockDataStore
from timesketch.lib.analyzers.base_sessionizer_test import _create_mock_event
class ManyEventsSequenceSessionizer(SequenceSessionizerSketchPlugin):
"""Mock sequence sessionizer class with many events in the event_seq."""
session_type = 'many_events_seq_sessionizer'
max_time_diff_micros = 100
return_fields = ['hostname', 'source_short', 'timestamp']
event_seq = [{
'hostname': 'host',
'source_short': 'FILE'
}, {
'hostname': 'host',
'source_short': 'WEBHIST'
}]
class OneEventSequenceSessionizer(SequenceSessionizerSketchPlugin):
"""Mock sequence sessionizer class with one event in the event_seq."""
session_type = 'one_event_seq_sessionizer'
max_time_diff_micros = 100
event_seq = [{'hostname': 'host', 'source_short': 'FILE'}]
return_fields = ['hostname', 'source_short', 'timestamp']
# Invalid sequence sessionizers.
class NoneSeqSequenceSessionizer(SequenceSessionizerSketchPlugin):
"""Invalid sequence sessionizer. event_seq should not be None, everything
else is valid."""
session_type = 'valid_name'
event_seq = None
return_fields = ['timestamp']
class EmptySeqSequenceSessionizer(SequenceSessionizerSketchPlugin):
"""Invalid sequence sessionizer. event_seq should not be [], everything else
is valid."""
session_type = 'valid_name'
event_seq = []
return_fields = ['timestamp']
class NoTimestampSequenceSessionizer(SequenceSessionizerSketchPlugin):
"""Invalid sequence sessionizer. return_fields should include 'timestamp',
everything else is valid."""
session_type = 'valid_name'
event_seq = [{'hostname': 'host', 'source_short': 'FILE'}]
return_fields = ['hostname', 'source_short']
class MissingAttrSequenceSessionizer(SequenceSessionizerSketchPlugin):
"""Invalid sequence sessionizer. return_fields doesn't includes all needed
attributes, everything else is valid."""
session_type = 'valid_name'
event_seq = [{'hostname': 'host', 'source_short': 'FILE'}]
return_fields = ['timestamp']
class NoneSessionTypeSequenceSessionizer(SequenceSessionizerSketchPlugin):
"""Invalid sequence sessionizer. session_type should not be None, everything
else is valid."""
session_type = None
event_seq = [{'hostname': 'host', 'source_short': 'FILE'}]
return_fields = ['timestamp', 'hostname', 'source_short']
class EmptyStrSessionTypeSequenceSessionizer(SequenceSessionizerSketchPlugin):
"""Invalid sequence sessionizer. session_type should not be empty string,
everything else is valid."""
session_type = ''
event_seq = [{'hostname': 'host', 'source_short': 'FILE'}]
return_fields = ['timestamp', 'hostname', 'source_short']
class TestValidSequenceSessionizerPlugin(BaseTest):
"""Tests the validation functionality of the sequence sessionizing sketch
analyzer."""
@mock.patch('timesketch.lib.analyzers.interface.OpenSearchDataStore',
MockDataStore)
def test_event_seq_none(self):
"""Test event_seq is not None."""
index = 'test_index'
sketch_id = 1
sessionizer = NoneSeqSequenceSessionizer(index, sketch_id)
with self.assertRaises(ValueError):
sessionizer.run()
@mock.patch('timesketch.lib.analyzers.interface.OpenSearchDataStore',
MockDataStore)
def test_event_seq_empty(self):
"""Test event_seq is not empty."""
index = 'test_index'
sketch_id = 1
sessionizer = EmptySeqSequenceSessionizer(index, sketch_id)
with self.assertRaises(ValueError):
sessionizer.run()
@mock.patch('timesketch.lib.analyzers.interface.OpenSearchDataStore',
MockDataStore)
def test_no_timestamp(self):
"""Test missing timestamp attribute is added in return_fields.
The sessionizer should be validated automatically when calling
sessionizer.run()."""
index = 'test_index'
sketch_id = 1
sessionizer = NoTimestampSequenceSessionizer(index, sketch_id)
sessionizer.datastore.client = mock.Mock()
datastore = sessionizer.datastore
_create_mock_event(datastore, 0, 0, [], [0])
self.assertNotIn('timestamp', sessionizer.return_fields)
sessionizer.run()
self.assertIn('timestamp', sessionizer.return_fields)
@mock.patch('timesketch.lib.analyzers.interface.OpenSearchDataStore',
MockDataStore)
def test_missing_attr(self):
"""Test missing attributes added in return_fields.
The sessionizer should be validated automatically when calling
sessionizer.run()."""
index = 'test_index'
sketch_id = 1
sessionizer = MissingAttrSequenceSessionizer(index, sketch_id)
sessionizer.datastore.client = mock.Mock()
datastore = sessionizer.datastore
_create_mock_event(datastore, 0, 0, [], [0])
for event in sessionizer.event_seq:
for attr in event:
self.assertNotIn(attr, sessionizer.return_fields)
sessionizer.run()
for event in sessionizer.event_seq:
for attr in event:
self.assertIn(attr, sessionizer.return_fields)
@mock.patch('timesketch.lib.analyzers.interface.OpenSearchDataStore',
MockDataStore)
def test_session_type_none(self):
"""Test session_type is not None."""
index = 'test_index'
sketch_id = 1
sessionizer = NoneSessionTypeSequenceSessionizer(index, sketch_id)
with self.assertRaises(ValueError):
sessionizer.run()
@mock.patch('timesketch.lib.analyzers.interface.OpenSearchDataStore',
MockDataStore)
def test_session_type_empty_str(self):
"""Test session_type is not empty string."""
index = 'test_index'
sketch_id = 1
sessionizer = EmptyStrSessionTypeSequenceSessionizer(index, sketch_id)
with self.assertRaises(ValueError):
sessionizer.run()
class TestManyEventsSequenceSessionizerPlugin(BaseTest):
"""Tests base functionality of sequence sessionizing sketch analyzers with
many events in the even_seq which are listed in seq_sessionizer_classes.
Attributes:
seq_sessionizer_classes: A list of sequence sessionizer classes to
test.
"""
seq_sessionizer_classes = [
ManyEventsSequenceSessionizer,
DestPsexecSessionizerSketchPlugin
]
@mock.patch('timesketch.lib.analyzers.interface.OpenSearchDataStore',
MockDataStore)
def test_sessionizer(self):
"""Test basic sequence sessionizer functionality."""
index = 'test_index'
sketch_id = 1
for seq_sessionizer_class in self.seq_sessionizer_classes:
sessionizer = seq_sessionizer_class(index, sketch_id)
self.assertIsInstance(sessionizer, seq_sessionizer_class)
self.assertEqual(index, sessionizer.index_name)
self.assertEqual(sketch_id, sessionizer.sketch.id)
@mock.patch('timesketch.lib.analyzers.interface.OpenSearchDataStore',
MockDataStore)
def test_one_session(self):
"""Test one sequence of events is finded and allocated as a session."""
index = 'test_index'
sketch_id = 1
for seq_sessionizer_class in self.seq_sessionizer_classes:
sessionizer = seq_sessionizer_class(index, sketch_id)
sessionizer.datastore.client = mock.Mock()
datastore = sessionizer.datastore
_create_mock_event(datastore,
0,
2,
seq_sessionizer_class.event_seq,
time_diffs=[1])
message = sessionizer.run()
self.assertEqual(
message,
'Sessionizing completed, number of {0:s} sessions created: 1'.
format(sessionizer.session_type))
# Events that are not part of the sequence but fall between the
# significant events that form a session are assigned to that
# session as well.
for i in range(0, 101):
event = datastore.event_store[str(i)]
self.assertEqual(
event['_source']['session_id'][sessionizer.session_type],
1)
@mock.patch('timesketch.lib.analyzers.interface.OpenSearchDataStore',
MockDataStore)
def test_multiple_sessions(self):
"""Test multiple sessions are found and allocated correctly."""
index = 'test_index'
sketch_id = 1
for seq_sessionizer_class in self.seq_sessionizer_classes:
sessionizer = seq_sessionizer_class(index, sketch_id)
sessionizer.datastore.client = mock.Mock()
datastore = sessionizer.datastore
_create_mock_event(datastore,
0,
4,
seq_sessionizer_class.event_seq +
seq_sessionizer_class.event_seq,
time_diffs=[1, 1, 1])
message = sessionizer.run()
self.assertEqual(
message,
'Sessionizing completed, number of {0:s} sessions created: 2'.
format(sessionizer.session_type))
for i in range(0, 100):
event = datastore.event_store[str(i)]
self.assertEqual(
event['_source']['session_id'][sessionizer.session_type],
1)
# Events with id in the range of 101 to 201 are not part of any
# session.
for i in range(202, 302):
event = datastore.event_store[str(i)]
self.assertEqual(
event['_source']['session_id'][sessionizer.session_type],
2)
@mock.patch('timesketch.lib.analyzers.interface.OpenSearchDataStore',
MockDataStore)
def test_after_session(self):
"""Test events after the last event of a sequence are not allocated with
a session number if they are not part from another session."""
index = 'test_index'
sketch_id = 1
for seq_sessionizer_class in self.seq_sessionizer_classes:
sessionizer = seq_sessionizer_class(index, sketch_id)
sessionizer.datastore.client = mock.Mock()
datastore = sessionizer.datastore
_create_mock_event(datastore,
0,
4,
seq_sessionizer_class.event_seq +
seq_sessionizer_class.event_seq,
time_diffs=[1, 1])
message = sessionizer.run()
self.assertEqual(
message,
'Sessionizing completed, number of {0:s} sessions created: 2'.
format(sessionizer.session_type))
# Session 1: events with id from 0 to 101,
# session 2: events with id from 202 to 303.
for i in range(102, 201):
event = datastore.event_store[str(i)]
self.assertNotIn('session_id', event['_source'])
@mock.patch('timesketch.lib.analyzers.interface.OpenSearchDataStore',
MockDataStore)
def test_edge_time_diff(self):
"""Test events with the edge time difference between them are
allocated correctly."""
index = 'test_index'
sketch_id = 1
for seq_sessionizer_class in self.seq_sessionizer_classes:
sessionizer = seq_sessionizer_class(index, sketch_id)
sessionizer.datastore.client = mock.Mock()
datastore = sessionizer.datastore
_create_mock_event(
datastore,
0,
2,
seq_sessionizer_class.event_seq,
time_diffs=[seq_sessionizer_class.max_time_diff_micros])
message = sessionizer.run()
self.assertEqual(
message,
'Sessionizing completed, number of {0:s} sessions created: 1'.
format(sessionizer.session_type))
for i in range(0, 101):
event = datastore.event_store[str(i)]
self.assertEqual(
event['_source']['session_id'][sessionizer.session_type],
1)
@mock.patch('timesketch.lib.analyzers.interface.OpenSearchDataStore',
MockDataStore)
def test_above_max_time_diff(self):
"""Test events with max time difference + 1 between them are allocated
correctly."""
index = 'test_index'
sketch_id = 1
for seq_sessionizer_class in self.seq_sessionizer_classes:
sessionizer = seq_sessionizer_class(index, sketch_id)
sessionizer.datastore.client = mock.Mock()
datastore = sessionizer.datastore
_create_mock_event(
datastore,
0,
2,
seq_sessionizer_class.event_seq,
time_diffs=[seq_sessionizer_class.max_time_diff_micros + 1])
message = sessionizer.run()
self.assertEqual(
message,
'Sessionizing completed, number of {0:s} sessions created: 0'.
format(sessionizer.session_type))
# Events with id 0 and id 101 form the requested sequence, but
# events with id 100 and 101 have max_time_diff_micros + 1 between
# them.
for i in range(0, 201):
event = datastore.event_store[str(i)]
self.assertNotIn('session_id', event['_source'])
@mock.patch('timesketch.lib.analyzers.interface.OpenSearchDataStore',
MockDataStore)
def test_zero_events(self):
"""Test the behaviour of the sequence sessionizer when given zero
events."""
index = 'test_index'
sketch_id = 1
for seq_sessionizer_class in self.seq_sessionizer_classes:
sessionizer = seq_sessionizer_class(index, sketch_id)
sessionizer.datastore.client = mock.Mock()
datastore = sessionizer.datastore
_create_mock_event(datastore, 0, 0, [], [0])
message = sessionizer.run()
self.assertEqual(
message,
'Sessionizing completed, number of {0:s} sessions created: 0'.
format(sessionizer.session_type))
class TestOneEventSequenceSessionizerPlugin(BaseTest):
"""Tests base functionality of sequence sessionizing sketch analyzers with
one event in the even_seq which are listed in seq_sessionizer_classes.
Attributes:
seq_sessionizer_classes: A list of sequence sessionizer classes to
test.
"""
seq_sessionizer_classes = [OneEventSequenceSessionizer]
@mock.patch('timesketch.lib.analyzers.interface.OpenSearchDataStore',
MockDataStore)
def test_sessionizer(self):
"""Test basic sequence sessionizer functionality."""
index = 'test_index'
sketch_id = 1
for seq_sessionizer_class in self.seq_sessionizer_classes:
sessionizer = seq_sessionizer_class(index, sketch_id)
self.assertIsInstance(sessionizer, seq_sessionizer_class)
self.assertEqual(index, sessionizer.index_name)
self.assertEqual(sketch_id, sessionizer.sketch.id)
@mock.patch('timesketch.lib.analyzers.interface.OpenSearchDataStore',
MockDataStore)
def test_one_session(self):
"""Test one sequence of events is finded and allocated as a session."""
index = 'test_index'
sketch_id = 1
for seq_sessionizer_class in self.seq_sessionizer_classes:
sessionizer = seq_sessionizer_class(index, sketch_id)
sessionizer.datastore.client = mock.Mock()
datastore = sessionizer.datastore
_create_mock_event(datastore, 0, 1,
seq_sessionizer_class.event_seq)
message = sessionizer.run()
self.assertEqual(
message,
'Sessionizing completed, number of {0:s} sessions created: 1'.
format(sessionizer.session_type))
# Event with id 0 is the significant event for the event_seq.
event = datastore.event_store['0']
self.assertEqual(
event['_source']['session_id'][sessionizer.session_type], 1)
@mock.patch('timesketch.lib.analyzers.interface.OpenSearchDataStore',
MockDataStore)
def test_multiple_sessions(self):
"""Test multiple sessions are finded and allocated correctly."""
index = 'test_index'
sketch_id = 1
for seq_sessionizer_class in self.seq_sessionizer_classes:
sessionizer = seq_sessionizer_class(index, sketch_id)
sessionizer.datastore.client = mock.Mock()
datastore = sessionizer.datastore
_create_mock_event(datastore,
0,
2,
seq_sessionizer_class.event_seq +
seq_sessionizer_class.event_seq,
time_diffs=[1, 1, 1])
message = sessionizer.run()
self.assertEqual(
message,
'Sessionizing completed, number of {0:s} sessions created: 2'.
format(sessionizer.session_type))
# Session 1: event with id 0.
event = datastore.event_store['0']
self.assertEqual(
event['_source']['session_id'][sessionizer.session_type], 1)
# Session 2: event with id 101.
event = datastore.event_store['101']
self.assertEqual(
event['_source']['session_id'][sessionizer.session_type], 2)
@mock.patch('timesketch.lib.analyzers.interface.OpenSearchDataStore',
MockDataStore)
def test_after_session(self):
"""Test events after the last event of a sequence are not allocated with
a session number if they are not part from another session."""
index = 'test_index'
sketch_id = 1
for seq_sessionizer_class in self.seq_sessionizer_classes:
sessionizer = | |
list(user_ids_by_email.values())
if framework_constants.DELETED_USER_ID in user_ids:
raise exceptions.InputException(
'Reserved deleted_user_id found in deletion request and '
'should not be deleted')
if not user_ids:
logging.info('Emails %r not found in DB. No users deleted', emails)
return
# The operations made in the methods below can be limited.
# We can adjust 'limit' as necessary to avoid timing out.
self.services.issue_star.ExpungeStarsByUsers(
self.mc.cnxn, user_ids, limit=limit)
self.services.project_star.ExpungeStarsByUsers(
self.mc.cnxn, user_ids, limit=limit)
self.services.hotlist_star.ExpungeStarsByUsers(
self.mc.cnxn, user_ids, limit=limit)
self.services.user_star.ExpungeStarsByUsers(
self.mc.cnxn, user_ids, limit=limit)
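# Presumably expunges stars where the user is the starred item itself;
# commit=False defers the write until the batch commit below.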
for user_id in user_ids:
self.services.user_star.ExpungeStars(
self.mc.cnxn, user_id, commit=False, limit=limit)
self.services.features.ExpungeQuickEditsByUsers(
self.mc.cnxn, user_ids, limit=limit)
self.services.features.ExpungeSavedQueriesByUsers(
self.mc.cnxn, user_ids, limit=limit)
self.services.template.ExpungeUsersInTemplates(
self.mc.cnxn, user_ids, limit=limit)
self.services.config.ExpungeUsersInConfigs(
self.mc.cnxn, user_ids, limit=limit)
self.services.project.ExpungeUsersInProjects(
self.mc.cnxn, user_ids, limit=limit)
# The upcoming operations cannot be limited with 'limit'.
# So it's possible that these operations below may lead to timing out
# and ExpungeUsers will have to run again to fully delete all users.
# We commit the above operations here, so if a failure does happen
# below, the second run of ExpungeUsers will have less work to do.
if commit:
self.mc.cnxn.Commit()
affected_issue_ids = self.services.issue.ExpungeUsersInIssues(
self.mc.cnxn, user_ids_by_email, limit=limit)
# Commit ExpungeUsersInIssues here, as it has many operations
# and at least one operation that cannot be limited.
if commit:
self.mc.cnxn.Commit()
self.services.issue.EnqueueIssuesForIndexing(
self.mc.cnxn, affected_issue_ids)
# Spam verdict and report tables have user_id columns that do not
# reference User. No limit will be applied.
self.services.spam.ExpungeUsersInSpam(self.mc.cnxn, user_ids)
if commit:
self.mc.cnxn.Commit()
# No limit will be applied for expunging in hotlists.
self.services.features.ExpungeUsersInHotlists(
self.mc.cnxn, user_ids, self.services.hotlist_star, self.services.user,
self.services.chart)
if commit:
self.mc.cnxn.Commit()
# No limit will be applied for expunging in UserGroups.
self.services.usergroup.ExpungeUsersInGroups(
self.mc.cnxn, user_ids)
if commit:
self.mc.cnxn.Commit()
# No limit will be applied for expunging in FilterRules.
deleted_rules_by_project = self.services.features.ExpungeFilterRulesByUser(
self.mc.cnxn, user_ids_by_email)
rule_strs_by_project = filterrules_helpers.BuildRedactedFilterRuleStrings(
self.mc.cnxn, deleted_rules_by_project, self.services.user, emails)
if commit:
self.mc.cnxn.Commit()
# We will attempt to expunge all given users here. Limiting the users we
# delete should be done before work_env.ExpungeUsers is called.
self.services.user.ExpungeUsers(self.mc.cnxn, user_ids)
if commit:
self.mc.cnxn.Commit()
self.services.usergroup.group_dag.MarkObsolete()
for project_id, filter_rule_strs in rule_strs_by_project.items():
project = self.services.project.GetProject(self.mc.cnxn, project_id)
hostport = framework_helpers.GetHostPort(
project_name=project.project_name)
send_notifications.PrepareAndSendDeletedFilterRulesNotification(
project_id, hostport, filter_rule_strs)
def TotalUsersCount(self):
"""Returns the total number of Users in Monorail."""
return self.services.user.TotalUsersCount(self.mc.cnxn)
def GetAllUserEmailsBatch(self, limit=1000, offset=0):
"""Returns a list emails that belong to Users in Monorail.
Returns:
A list of emails for Users within Monorail ordered by the user.user_ids.
The list will hold at most [limit] emails and will start at the given
[offset].
"""
return self.services.user.GetAllUserEmailsBatch(
self.mc.cnxn, limit=limit, offset=offset)
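# Example (hypothetical): fetch the second batch of 1000 user emails:
#   emails = work_env.GetAllUserEmailsBatch(limit=1000, offset=1000)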
### Group methods
# FUTURE: CreateGroup()
# FUTURE: ListGroups()
# FUTURE: UpdateGroup()
# FUTURE: DeleteGroup()
### Hotlist methods
def CreateHotlist(
self, name, summary, description, editor_ids, issue_ids, is_private,
default_col_spec):
# type: (string, string, string, Collection[int], Collection[int], Boolean,
#     string) -> Hotlist
"""Create a hotlist.
Args:
name: a valid hotlist name.
summary: one-line explanation of the hotlist.
description: one-page explanation of the hotlist.
editor_ids: a list of user IDs for the hotlist editors.
issue_ids: a list of issue IDs for the hotlist issues.
is_private: True if the hotlist can only be viewed by owners and editors.
default_col_spec: default columns for the hotlist's list view.
Returns:
The newly created hotlist.
Raises:
HotlistAlreadyExists: A hotlist with the given name already exists.
InputException: No user is signed in or the proposed name is invalid.
PermissionException: If the user cannot view all of the issues.
"""
if not self.mc.auth.user_id:
raise exceptions.InputException('Anon cannot create hotlists.')
# GetIssuesDict checks that the user can view all issues.
self.GetIssuesDict(issue_ids)
if not framework_bizobj.IsValidHotlistName(name):
raise exceptions.InputException(
'%s is not a valid name for a Hotlist' % name)
if self.services.features.LookupHotlistIDs(
self.mc.cnxn, [name], [self.mc.auth.user_id]):
raise features_svc.HotlistAlreadyExists()
with self.mc.profiler.Phase('creating hotlist %s' % name):
hotlist = self.services.features.CreateHotlist(
self.mc.cnxn, name, summary, description, [self.mc.auth.user_id],
editor_ids, issue_ids=issue_ids, is_private=is_private,
default_col_spec=default_col_spec, ts=int(time.time()))
return hotlist
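# Example call (hypothetical values):
#   hotlist = work_env.CreateHotlist(
#       'release-blockers', 'Blocking issues', 'Tracks release blockers',
#       editor_ids=[222], issue_ids=[78901], is_private=False,
#       default_col_spec='Rank Status')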
def UpdateHotlist(
self, hotlist_id, hotlist_name=None, summary=None, description=None,
is_private=None, default_col_spec=None, owner_id=None,
add_editor_ids=None):
# type: (int, str, str, str, bool, str, int, Collection[int]) -> None
"""Update the given hotlist.
If a new value is None, the value does not get updated.
Args:
hotlist_id: hotlist_id of the hotlist to update.
hotlist_name: proposed new name for the hotlist.
summary: new summary for the hotlist.
description: new description for the hotlist.
is_private: true if hotlist should be updated to private.
default_col_spec: new default columns for hotlist list view.
owner_id: User id of the new owner.
add_editor_ids: User ids to add as editors.
Raises:
InputException: The given hotlist_id is None or proposed new name is not
a valid hotlist name.
NoSuchHotlistException: There is no hotlist with the given ID.
PermissionException: The logged-in user is not allowed to update
this hotlist's settings.
NoSuchUserException: Some proposed editors or owner were not found.
HotlistAlreadyExists: The (proposed new) hotlist owner already owns a
hotlist with the same (proposed) name.
"""
hotlist = self.services.features.GetHotlist(
self.mc.cnxn, hotlist_id, use_cache=False)
if not permissions.CanAdministerHotlist(
self.mc.auth.effective_ids, self.mc.perms, hotlist):
raise permissions.PermissionException(
'User is not allowed to update hotlist settings.')
if hotlist.name == hotlist_name:
hotlist_name = None
if hotlist.owner_ids[0] == owner_id:
owner_id = None
if hotlist_name and not framework_bizobj.IsValidHotlistName(hotlist_name):
raise exceptions.InputException(
'"%s" is not a valid hotlist name' % hotlist_name)
# Check (new) owner does not already own a hotlist with the (new) name.
if hotlist_name or owner_id:
owner_ids = [owner_id] if owner_id else None
if self.services.features.LookupHotlistIDs(
self.mc.cnxn, [hotlist_name or hotlist.name],
owner_ids or hotlist.owner_ids):
raise features_svc.HotlistAlreadyExists(
'User already owns a hotlist with name %s' %
(hotlist_name or hotlist.name))
# Filter out existing editors and any user that will be added as owner
# or is the current owner.
next_owner_id = owner_id or hotlist.owner_ids[0]
if add_editor_ids:
new_editor_ids_set = {user_id for user_id in add_editor_ids if
user_id not in hotlist.editor_ids and
user_id != next_owner_id}
add_editor_ids = list(new_editor_ids_set)
# Validate user change requests.
user_ids = []
if add_editor_ids:
user_ids.extend(add_editor_ids)
else:
add_editor_ids = None
if owner_id:
user_ids.append(owner_id)
if user_ids:
self.services.user.LookupUserEmails(self.mc.cnxn, user_ids)
# Check for other no-op changes.
if summary == hotlist.summary:
summary = None
if description == hotlist.description:
description = None
if is_private == hotlist.is_private:
is_private = None
if default_col_spec == hotlist.default_col_spec:
default_col_spec = None
if ([hotlist_name, summary, description, is_private, default_col_spec,
owner_id, add_editor_ids] ==
[None, None, None, None, None, None, None]):
logging.info('No updates given')
return
if (summary is not None) and (not summary):
raise exceptions.InputException('Hotlist cannot have an empty summary.')
if (description is not None) and (not description):
raise exceptions.InputException(
'Hotlist cannot have an empty description.')
if default_col_spec is not None and not framework_bizobj.IsValidColumnSpec(
default_col_spec):
raise exceptions.InputException(
'"%s" is not a valid column spec' % default_col_spec)
self.services.features.UpdateHotlist(
self.mc.cnxn, hotlist_id, name=hotlist_name, summary=summary,
description=description, is_private=is_private,
default_col_spec=default_col_spec, owner_id=owner_id,
add_editor_ids=add_editor_ids)
# TODO(crbug/monorail/7104): delete UpdateHotlistRoles.
def GetHotlist(self, hotlist_id, use_cache=True):
# type: (int, Optional[bool]) -> Hotlist
"""Return the specified hotlist.
Args:
hotlist_id: int hotlist_id of the hotlist to retrieve.
use_cache: set to false when doing read-modify-write.
Returns:
The specified hotlist.
Raises:
NoSuchHotlistException: There is no hotlist with that ID.
PermissionException: The user is not allowed to view the hotlist.
"""
if hotlist_id is None:
raise exceptions.InputException('No hotlist specified')
with self.mc.profiler.Phase('getting hotlist %r' % hotlist_id):
hotlist = self.services.features.GetHotlist(
self.mc.cnxn, hotlist_id, use_cache=use_cache)
self._AssertUserCanViewHotlist(hotlist)
return hotlist
# TODO(crbug/monorail/7104): Remove group_by_spec argument and pre-pend
# values to sort_spec.
def ListHotlistItems(self, hotlist_id, max_items, start, can, sort_spec,
group_by_spec, use_cache=True):
# type: (int, int, int, int, str, str, bool) -> ListResult
"""Return a list of HotlistItems for the given hotlist that
are visible by the user.
Args:
hotlist_id: int hotlist_id of the hotlist.
max_items: int the maximum number of HotlistItems we want to return.
start: int start position in the total sorted items.
can: int "canned_query" number to scope the visible issues.
sort_spec: string that lists the sort order.
group_by_spec: string that lists the grouping order.
use_cache: set to false when doing read-modify-write.
Returns:
A work_env.ListResult namedtuple.
Raises:
NoSuchHotlistException: There is no hotlist with that ID.
InputException: `max_items` or `start` are negative values.
PermissionException: The user is not allowed to view the hotlist.
"""
hotlist = self.GetHotlist(hotlist_id, use_cache=use_cache)
if start < 0:
raise exceptions.InputException('Invalid `start`: %d' % start)
if max_items < 0:
raise exceptions.InputException('Invalid `max_items`: %d' % max_items)
hotlist_issues = self.services.issue.GetIssues(
self.mc.cnxn, [item.issue_id for item in hotlist.items])
project_ids = hotlist_helpers.GetAllProjectsOfIssues(hotlist_issues)
config_list = | |
"""
Access to the platform 2.0 API for PV monitoring
Works for m.ginlong.com. Should also work for the myevolvecloud.com portal (not tested)
For more information: https://github.com/hultenvp/solis-sensor/
"""
import binascii
import hashlib
from http import HTTPStatus
import logging
import struct
import sys
import asyncio
from datetime import datetime, timedelta
import aiohttp
import async_timeout
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util import dt as dt_util
# REFRESH CONSTANTS
"""Schedule next call after (minutes)."""
SCHEDULE_OK = 2
"""When an error occurred, new call after (minutes)."""
SCHEDULE_NOK = 1
_LOGGER = logging.getLogger(__name__)
# VERSION
VERSION = '0.2.1'
# Don't login every time
HRS_BETWEEN_LOGIN = timedelta(hours=2)
# Response constants
SUCCESS = 'Success'
CONTENT = 'Content'
STATUS_CODE = 'StatusCode'
MESSAGE = 'Message'
# Status constants
ONLINE = 'Online'
OFFLINE = 'Offline'
MAX_CONSECUTIVE_FAILURES = 10
"""
[unit of measurement, key, type, decimal precision]
"""
PORTAL_INVERTER_CONST = {
'INV_DEVICE_ID': [None, 'zf', str, None],
'INV_DATALOGGER_SERIAL': [None, 'za', str, None],
'INV_TEMPERATURE': ['°C', '1df', float, 1],
'LINE_1_VDC': ['V', '1a', float, 2],
'LINE_2_VDC': ['V', '1b', float, 2],
'LINE_3_VDC': ['V', '1c', float, 2],
'LINE_4_VDC': ['V', '1d', float, 2],
'LINE_1_ADC': ['A', '1j', float, 2],
'LINE_2_ADC': ['A', '1k', float, 2],
'LINE_3_ADC': ['A', '1l', float, 2],
'LINE_4_ADC': ['A', '1m', float, 2],
'LINE_1_PDC': ['W', '1s', float, 2],
'LINE_2_PDC': ['W', '1t', float, 2],
'LINE_3_PDC': ['W', '1u', float, 2],
'LINE_4_PDC': ['W', '1v', float, 2],
'PHASE_1_VAC': ['V', '1af', float, 2],
'PHASE_2_VAC': ['V', '1ag', float, 2],
'PHASE_3_VAC': ['V', '1ah', float, 2],
'PHASE_1_AAC': ['A', '1ai', float, 2],
'PHASE_2_AAC': ['A', '1aj', float, 2],
'PHASE_3_AAC': ['A', '1ak', float, 2],
'INV_POWER_AC': ['W', '1ao', float, 2],
'INV_FREQ_AC': ['Hz', '1ar', float, 2],
'INV_ENERGY_LAST_MONTH': ['kWh', '1ru', float, 2],
'INV_ENERGY_TODAY': ['kWh', '1bd', float, 2],
'INV_ENERGY_THIS_MONTH': ['kWh', '1be', float, 2],
'INV_ENERGY_THIS_YEAR': ['kWh', '1bf', float, 2],
'INV_ENERGY_TOTAL_LIFE': ['kWh', '1bc', float, 2],
'BAT_REMAINING_CAPACITY': ['%', '1cv', float, 2],
'BAT_TOTAL_ENERGY_CHARGED': ['kWh', '1cx', float, 2],
'BAT_TOTAL_ENERGY_DISCHARGED': ['kWh', '1cy', float, 2],
'BAT_DAILY_ENERGY_CHARGED': ['kWh', '1cz', float, 2],
'BAT_DAILY_ENERGY_DISCHARGED': ['kWh', '1da', float, 2],
'GRID_DAILY_ON_GRID_ENERGY': ['kWh', '1bw', float, 2],
'GRID_DAILY_ENERGY_PURCHASED': ['kWh', '1bx', float, 2],
'GRID_DAILY_ENERGY_USED': ['kWh', '1co', float, 2],
'GRID_MONTHLY_ENERGY_PURCHASED':['kWh', '1bz', float, 2],
'GRID_MONTHLY_ENERGY_USED': ['kWh', '1cp', float, 2],
'GRID_YEARLY_ENERGY_PURCHASED': ['kWh', '1cb', float, 2],
'GRID_YEARLY_ENERGY_USED': ['kWh', '1cq', float, 2],
'GRID_TOTAL_ON_GRID_ENERGY': ['kWh', '1bu', float, 2],
'GRID_TOTAL_CONSUMPTION_ENERGY':['kWh', '1cn', float, 2],
'GRID_TOTAL_POWER': ['W', '1bq', float, 2],
'GRID_TOTAL_CONSUMPTION_POWER':['W', '1cj', float, 2],
'GRID_TOTAL_ENERGY_USED': ['kWh', '1bv', float, 2],
}
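# Example entry: 'INV_TEMPERATURE' reads portal JSON key '1df' as a float,
# rounds it to 1 decimal and reports it in °C.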
class PortalConfig:
""" Portal configuration data """
def __init__(self, portal_domain, portal_username, portal_password, portal_plantid, inverter_sn):
self._domain = portal_domain
self._username = portal_username
self._password = portal_password
self._plantid = portal_plantid
self._inverter_serial = inverter_sn
@property
def domain(self):
return self._domain
@property
def username(self):
return self._username
@property
def password(self):
return self._password
@property
def plantid(self):
return self._plantid
@property
def inverter_serial(self):
return self._inverter_serial
class InverterData(object):
""" Representation of a Platform 2.0 data object used for retrieving data values. """
def __init__(self, portal_config, hass, devices):
""" Initialize Solis data component. """
self._last_updated = None
self._energy_yesterday = 0
self._status = OFFLINE
self._devices = devices
self.hass = hass
self.interface_portal = PortalAPI(hass, portal_config)
self._sensor_data = {key: None for key in PORTAL_INVERTER_CONST}
async def update_devices(self):
""" Update all registered sensors. """
if not self._devices:
return
# Update all devices
for dev in self._devices:
dev.data_updated(self)
def get_inverter_attributes(self):
""" Return an array with the sensors and their values. """
return self._sensor_data
def get_inverter_attribute(self, attribute_key):
""" Return an attribute's latest value """
return self._sensor_data[attribute_key][0]
def get_inverter_attribute_uom(self, attribute_key):
""" Return an attribute's unit of measurement"""
return PORTAL_INVERTER_CONST[attribute_key][0]
def _get_float(self, data, key, precision = 2):
""" Retrieve 'key' from 'data' as type float with precision 'precision' """
result = None
data_raw = data.get(key)
if (data_raw is not None):
data_float = float(data_raw)
# Round to specified precision
result = round(data_float, precision)
return result
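# e.g. _get_float({'1ao': '1500.456'}, '1ao') -> 1500.46, and None when the
# key is absent.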
def _get_string(self, data, key):
""" Retrieve 'key' from 'data as type string """
return data.get(key)
def _update_attributes(self):
""" Update the PV attributes with received portal data. """
status = OFFLINE
if (self.interface_portal.is_online()):
portaldata = self.interface_portal.get_portal_data()
if (portaldata is not None):
#_LOGGER.debug("Data received: %s", portaldata)
data = portaldata['result']['deviceWapper']['dataJSON']
# We're online and we have data, so update last_updated
# Energy_today is not reset at midnight, but in the morning at sunrise when the inverter switches back on
# Returning zero instead of received value until we start receiving fresh values at dawn
# Not sure if this works in polar regions ;-)
if (self._last_updated is not None):
if (self._last_updated.day != datetime.now().day):
# Take snapshot
self._energy_yesterday = self._sensor_data['INV_ENERGY_TODAY']
self._last_updated = datetime.now()
status = ONLINE
# Fetch all attributes from payload
for attribute in PORTAL_INVERTER_CONST:
key = PORTAL_INVERTER_CONST[attribute][1]
attr_type = PORTAL_INVERTER_CONST[attribute][2]
precision = PORTAL_INVERTER_CONST[attribute][3]
if key is None:
# No portal key defined for this attribute, nothing to fetch
pass
elif attr_type == str:
self._sensor_data[attribute] = self._get_string(data, key)
elif attr_type == float:
self._sensor_data[attribute] = self._get_float(data, key, precision)
self._status = status
async def async_update(self, *_):
"""Update the data from PV Portal."""
result = await self.interface_portal.async_update()
if (result == SCHEDULE_OK):
self._update_attributes()
await self.update_devices()
await self.schedule_update(result)
async def schedule_update(self, minute=1):
""" Schedule an update after minute minutes. """
_LOGGER.debug("Scheduling next update in %s minutes.", minute)
nxt = dt_util.utcnow() + timedelta(minutes=minute)
async_track_point_in_utc_time(self.hass, self.async_update, nxt)
@property
def status(self):
return self._status
@property
def last_updated(self):
return self._last_updated
@property
def serial(self):
return self.interface_portal.config.inverter_serial
@property
def temperature(self):
return self._sensor_data['INV_TEMPERATURE']
@property
def dcinputvoltagepv1(self):
return self._sensor_data['LINE_1_VDC']
@property
def dcinputvoltagepv2(self):
return self._sensor_data['LINE_2_VDC']
@property
def dcinputvoltagepv3(self):
return self._sensor_data['LINE_3_VDC']
@property
def dcinputvoltagepv4(self):
return self._sensor_data['LINE_4_VDC']
@property
def dcinputcurrentpv1(self):
return self._sensor_data['LINE_1_ADC']
@property
def dcinputcurrentpv2(self):
return self._sensor_data['LINE_2_ADC']
@property
def dcinputcurrentpv3(self):
return self._sensor_data['LINE_3_ADC']
@property
def dcinputcurrentpv4(self):
return self._sensor_data['LINE_4_ADC']
@property
def dcinputpowerpv1(self):
return self._sensor_data['LINE_1_PDC']
@property
def dcinputpowerpv2(self):
return self._sensor_data['LINE_2_PDC']
@property
def dcinputpowerpv3(self):
return self._sensor_data['LINE_3_PDC']
@property
def dcinputpowerpv4(self):
return self._sensor_data['LINE_4_PDC']
@property
def acoutputvoltage1(self):
return self._sensor_data['PHASE_1_VAC']
@property
def acoutputvoltage2(self):
return self._sensor_data['PHASE_2_VAC']
@property
def acoutputvoltage3(self):
return self._sensor_data['PHASE_3_VAC']
@property
def acoutputcurrent1(self):
return self._sensor_data['PHASE_1_AAC']
@property
def acoutputcurrent2(self):
return self._sensor_data['PHASE_2_AAC']
@property
def acoutputcurrent3(self):
return self._sensor_data['PHASE_3_AAC']
@property
def actualpower(self):
return self._sensor_data['INV_POWER_AC']
@property
def acfrequency(self):
return self._sensor_data['INV_FREQ_AC']
@property
def energylastmonth(self):
return self._sensor_data['INV_ENERGY_LAST_MONTH']
@property
def energytoday(self):
energy = self._sensor_data['INV_ENERGY_TODAY']
# if energy today is still the same as energy yesterday then the
# portal has not yet reset energy_today.
if (energy == self._energy_yesterday):
energy = 0
else:
# reset energy_yesterday and use today's value.
self._energy_yesterday = 0
return energy
@property
def energythismonth(self):
return self._sensor_data['INV_ENERGY_THIS_MONTH']
@property
def energythisyear(self):
return self._sensor_data['INV_ENERGY_THIS_YEAR']
@property
def energytotal(self):
return self._sensor_data['INV_ENERGY_TOTAL_LIFE']
@property
def device_id(self):
return self._sensor_data['INV_DEVICE_ID']
@property
def datalogger_serial(self):
return self._sensor_data['INV_DATALOGGER_SERIAL']
@property
def batcapacityremaining(self):
return self._sensor_data['BAT_REMAINING_CAPACITY']
@property
def battotalenergycharged(self):
return self._sensor_data['BAT_TOTAL_ENERGY_CHARGED']
@property
def battotalenergydischarged(self):
return self._sensor_data['BAT_TOTAL_ENERGY_DISCHARGED']
@property
def batdailyenergycharged(self):
return self._sensor_data['BAT_DAILY_ENERGY_CHARGED']
@property
def batdailyenergydischarged(self):
return self._sensor_data['BAT_DAILY_ENERGY_DISCHARGED']
@property
def griddailyongridenergy(self):
return self._sensor_data['GRID_DAILY_ON_GRID_ENERGY']
@property
def griddailyenergypurchased(self):
return self._sensor_data['GRID_DAILY_ENERGY_PURCHASED']
@property
def griddailyenergyused(self):
return self._sensor_data['GRID_DAILY_ENERGY_USED']
@property
def gridmonthlyenergypurchased(self):
return self._sensor_data['GRID_MONTHLY_ENERGY_PURCHASED']
@property
def gridmonthlyenergyused(self):
return self._sensor_data['GRID_MONTHLY_ENERGY_USED']
@property
def gridyearlyenergypurchased(self):
return self._sensor_data['GRID_YEARLY_ENERGY_PURCHASED']
@property
def gridyearlyenergyused(self):
return self._sensor_data['GRID_YEARLY_ENERGY_USED']
@property
def gridtotalongridenergy(self):
return self._sensor_data['GRID_TOTAL_ON_GRID_ENERGY']
@property
def gridtotalconsumptionenergy(self):
return self._sensor_data['GRID_TOTAL_CONSUMPTION_ENERGY']
@property
def gridpowergridtotalpower(self):
return self._sensor_data['GRID_TOTAL_POWER']
@property
def gridtotalconsumptionpower(self):
return self._sensor_data['GRID_TOTAL_CONSUMPTION_POWER']
@property
def gridtotalenergyused(self):
return self._sensor_data['GRID_TOTAL_ENERGY_USED']
class PortalAPI():
""" Class with functions for reading data from the Platform 2.0 portal. """
def __init__(self, hass, config):
""" Initialize the Solis inverter object. """
self._hass = hass
self.config = config
self._session = async_get_clientsession(self._hass)
self._jsondata = None
self._logintime = None
self._deviceid = None
self._consecutive_failed_calls = 0
# Default english
self._language = 2
def is_online(self):
""" Returns true if the portal is online and we're logged in """
online = False
if ((self._logintime is not None) and (self._deviceid is not None)):
online = True
return online
def get_portal_data(self):
""" Return the last received json data """
return self._jsondata
def get_device_id(self):
""" Return the device ID of the inverter """
return self._deviceid
async def login(self):
""" Login to the portal. """
# Building url & params
url = 'https://'+self.config.domain+'/cpro/login/validateLogin.json'
params = {
"userName": self.config.username,
"password": <PASSWORD>,
"lan": self._language,
"domain": self.config.domain,
"userType": "C"
}
# Login call
result = await self._post_data(url, params)
if (result[SUCCESS] == True):
resultJson = result[CONTENT]
if resultJson['result'].get('isAccept') == 1:
self._logintime = datetime.now()
_LOGGER.info('Login Successful!')
else:
_LOGGER.error('Could not login to %s, are username and password correct?', url)
self._logintime = None
else:
self._logintime = None
if (self._consecutive_failed_calls == MAX_CONSECUTIVE_FAILURES):
_LOGGER.error('Failed to communicate with server %s times, last error: %s', MAX_CONSECUTIVE_FAILURES, result[MESSAGE])
async def update_device_id(self):
"""
After login the inverter list needs to be retrieved in order to get deviceID
It contains all inverters for plant plant_id with their | |
"EEBF85"
},
"275" : {
"number" : "275",
"name" : "fishcake",
"title" : u"\u306a\u308b\u3068",
"sjis" : "F6C6",
"unicode" : "E4ED",
"jis-email" : "7648",
"sjis-email" : "EBC6",
"utf-8" : "EEBF86"
},
"276" : {
"number" : "276",
"name" : "footmark",
"title" : u"\u8db3\u8de1(\u72ac)",
"sjis" : "F6C7",
"unicode" : "E4EE",
"jis-email" : "7649",
"sjis-email" : "EBC7",
"utf-8" : "EEBF87"
},
"277" : {
"number" : "277",
"name" : "devil",
"title" : u"\u60aa\u9b54",
"sjis" : "F6C8",
"unicode" : "E4EF",
"jis-email" : "764A",
"sjis-email" : "EBC8",
"utf-8" : "EEBF88"
},
"278" : {
"number" : "278",
"name" : "flowercircle",
"title" : u"\u82b1\u4e38",
"sjis" : "F6C9",
"unicode" : "E4F0",
"jis-email" : "764B",
"sjis-email" : "EBC9",
"utf-8" : "EEBF89"
},
"279" : {
"number" : "279",
"name" : "secretsign",
"title" : u"\u4e38\u79d8",
"sjis" : "F6CA",
"unicode" : "E4F1",
"jis-email" : "764C",
"sjis-email" : "EBCA",
"utf-8" : "EEBF8A"
},
"280" : {
"number" : "280",
"name" : "fullmarks",
"title" : u"100\u70b9\u6e80\u70b9",
"sjis" : "F6CB",
"unicode" : "E4F2",
"jis-email" : "764D",
"sjis-email" : "EBCB",
"utf-8" : "EEBF8B"
},
"281" : {
"number" : "281",
"name" : "punch",
"title" : u"\u30d1\u30f3\u30c1",
"sjis" : "F6CC",
"unicode" : "E4F3",
"jis-email" : "764E",
"sjis-email" : "EBCC",
"utf-8" : "EEBF8C"
},
"282" : {
"number" : "282",
"name" : "dash",
"title" : u"\u30c0\u30c3\u30b7\u30e5",
"sjis" : "F6CD",
"unicode" : "E4F4",
"jis-email" : "764F",
"sjis-email" : "EBCD",
"utf-8" : "EEBF8D"
},
"283" : {
"number" : "283",
"name" : "bigjob",
"title" : u"\u30a6\u30f3\u30c1\u30de\u30fc\u30af",
"sjis" : "F6CE",
"unicode" : "E4F5",
"jis-email" : "7650",
"sjis-email" : "EBCE",
"utf-8" : "EEBF8E"
},
"284" : {
"number" : "284",
"name" : "firstfingersign",
"title" : u"\u4eba\u5dee\u3057\u6307\u30b5\u30a4\u30f3",
"sjis" : "F6CF",
"unicode" : "E4F6",
"jis-email" : "7651",
"sjis-email" : "EBCF",
"utf-8" : "EEBF8F"
},
"285" : {
"number" : "285",
"name" : "profitsign",
"title" : u"\u30de\u30eb\u5f97",
"sjis" : "F6D0",
"unicode" : "E4F7",
"jis-email" : "7652",
"sjis-email" : "EBD0",
"utf-8" : "EEBF90"
},
"286" : {
"number" : "286",
"name" : "skeelton",
"title" : u"\u3069\u304f\u308d",
"sjis" : "F6D1",
"unicode" : "E4F8",
"jis-email" : "7653",
"sjis-email" : "EBD1",
"utf-8" : "EEBF91"
},
"287" : {
"number" : "287",
"name" : "thumbsign",
"title" : u"\u304a\u3084\u3086\u3073\u30b5\u30a4\u30f3",
"sjis" : "F6D2",
"unicode" : "E4F9",
"jis-email" : "7654",
"sjis-email" : "EBD2",
"utf-8" : "EEBF92"
},
"288" : {
"number" : "288",
"name" : "tv",
"title" : u"\u30c6\u30ec\u30d3",
"sjis" : "F6DB",
"unicode" : "E502",
"jis-email" : "765D",
"sjis-email" : "EBDB",
"utf-8" : "EEBF9B"
},
"289" : {
"number" : "289",
"name" : "microphone",
"title" : u"\u30de\u30a4\u30af",
"sjis" : "F6DC",
"unicode" : "E503",
"jis-email" : "765E",
"sjis-email" : "EBDC",
"utf-8" : "EEBF9C"
},
"290" : {
"number" : "290",
"name" : "wallet",
"title" : u"\u8ca1\u5e03",
"sjis" : "F6DD",
"unicode" : "E504",
"jis-email" : "765F",
"sjis-email" : "EBDD",
"utf-8" : "EEBF9D"
},
"291" : {
"number" : "291",
"name" : "melody",
"title" : u"\u30e1\u30ed\u30c7\u30a3",
"sjis" : "F6DE",
"unicode" : "E505",
"jis-email" : "7660",
"sjis-email" : "EBDE",
"utf-8" : "EEBF9E"
},
"292" : {
"number" : "292",
"name" : "guitar",
"title" : u"\u30ae\u30bf\u30fc(\u30a8\u30ec\u30ad)",
"sjis" : "F6DF",
"unicode" : "E506",
"jis-email" : "7661",
"sjis-email" : "EBDF",
"utf-8" : "EEBF9F"
},
"293" : {
"number" : "293",
"name" : "violin",
"title" : u"\u30d0\u30a4\u30aa\u30ea\u30f3(\u30af\u30e9\u30b7\u30c3\u30af)",
"sjis" : "F6E0",
"unicode" : "E507",
"jis-email" : "7662",
"sjis-email" : "EBE0",
"utf-8" : "EEBFA0"
},
"294" : {
"number" : "294",
"name" : "headphone",
"title" : u"\u30d8\u30c3\u30c9\u30d5\u30a9\u30f3",
"sjis" : "F6E1",
"unicode" : "E508",
"jis-email" : "7663",
"sjis-email" : "EBE1",
"utf-8" : "EEBFA1"
},
"295" : {
"number" : "295",
"name" : "rouge",
"title" : u"\u53e3\u7d05",
"sjis" : "F6E2",
"unicode" : "E509",
"jis-email" : "7664",
"sjis-email" : "EBE2",
"utf-8" : "EEBFA2"
},
"296" : {
"number" : "296",
"name" : "action",
"title" : u"\u30a2\u30af\u30b7\u30e7\u30f3(\u30d4\u30b9\u30c8\u30eb)",
"sjis" : "F6E3",
"unicode" : "E50A",
"jis-email" : "7665",
"sjis-email" : "EBE3",
"utf-8" : "EEBFA3"
},
"297" : {
"number" : "297",
"name" : "estheticsalon",
"title" : u"\u30a8\u30b9\u30c6",
"sjis" : "F6E4",
"unicode" : "E50B",
"jis-email" : "7666",
"sjis-email" : "EBE4",
"utf-8" : "EEBFA4"
},
"298" : {
"number" : "298",
"name" : "ezmark",
"title" : u"EZ\u30de\u30fc\u30af",
"sjis" : "F794",
"unicode" : "E577",
"jis-email" : "7774",
"sjis-email" : "EC94",
"utf-8" : "EF8294"
},
"299" : {
"number" : "299",
"name" : "freesign",
"title" : u"\u7121\u6599(FREE)",
"sjis" : "F795",
"unicode" : "E578",
"jis-email" : "7775",
"sjis-email" : "EC95",
"utf-8" : "EF8295"
},
"300" : {
"number" : "300",
"name" : "cd",
"title" : u"CD\/DVD",
"sjis" : "F6E5",
"unicode" : "E50C",
"jis-email" : "7667",
"sjis-email" : "EBE5",
"utf-8" : "EEBFA5"
},
"301" : {
"number" : "301",
"name" : "ladiesfashion",
"title" : u"\u5a66\u4eba\u670d",
"sjis" : "F6E6",
"unicode" : "E50D",
"jis-email" : "7668",
"sjis-email" : "EBE6",
"utf-8" : "EEBFA6"
},
"302" : {
"number" : "302",
"name" : "ufo",
"title" : u"UFO",
"sjis" : "F6E7",
"unicode" : "E50E",
"jis-email" : "7669",
"sjis-email" : "EBE7",
"utf-8" : "EEBFA7"
},
"303" : {
"number" : "303",
"name" : "up",
"title" : u"\u66f4\u65b0(UP!)",
"sjis" : "F6E8",
"unicode" : "E50F",
"jis-email" : "766A",
"sjis-email" : "EBE8",
"utf-8" : "EEBFA8"
},
"304" : {
"number" : "304",
"name" : "syringe",
"title" : u"\u6ce8\u5c04\u5668(\u8840)",
"sjis" : "F6E9",
"unicode" : "E510",
"jis-email" : "766B",
"sjis-email" : "EBE9",
"utf-8" : "EEBFA9"
},
"305" : {
"number" : "305",
"name" : "mist",
"title" : u"\u9727",
"sjis" : "F7B5",
"unicode" : "E598",
"jis-email" : "7837",
"sjis-email" : "ECB5",
"utf-8" : "EF82B5"
},
"306" : {
"number" : "306",
"name" : "golf",
"title" : u"\u30b4\u30eb\u30d5",
"sjis" : "F7B6",
"unicode" : "E599",
"jis-email" : "7838",
"sjis-email" : "ECB6",
"utf-8" : "EF82B6"
},
"307" : {
"number" : "307",
"name" : "basketball",
"title" : u"\u30d0\u30b9\u30b1\u30c3\u30c8\u30dc\u30fc\u30eb",
"sjis" : "F7B7",
"unicode" : "E59A",
"jis-email" : "7839",
"sjis-email" : "ECB7",
"utf-8" : "EF82B7"
},
"308" : {
"number" : "308",
"name" : "pager",
"title" : u"\u30dd\u30b1\u30c3\u30c8\u30d9\u30eb",
"sjis" : "F7B8",
"unicode" : "E59B",
"jis-email" : "783A",
"sjis-email" : "ECB8",
"utf-8" : "EF82B8"
},
"309" : {
"number" : "309",
"name" : "art",
"title" : u"\u30a2\u30fc\u30c8",
"sjis" : "F7B9",
"unicode" : "E59C",
"jis-email" : "783B",
"sjis-email" : "ECB9",
"utf-8" : "EF82B9"
},
"310" : {
"number" : "310",
"name" : "drama",
"title" : u"\u6f14\u5287",
"sjis" : "F7BA",
"unicode" : "E59D",
"jis-email" : "783C",
"sjis-email" : "ECBA",
"utf-8" : "EF82BA"
},
"311" : {
"number" : "311",
"name" : "event",
"title" : u"\u30a4\u30d9\u30f3\u30c8",
"sjis" : "F7BB",
"unicode" : "E59E",
"jis-email" : "783D",
"sjis-email" : "ECBB",
"utf-8" : "EF82BB"
},
"312" : {
"number" : "312",
"name" : "ribbon",
"title" : u"\u30ea\u30dc\u30f3",
"sjis" : "F7BC",
"unicode" : "E59F",
"jis-email" : "783E",
"sjis-email" : "ECBC",
"utf-8" : "EF82BC"
},
"313" : {
"number" : "313",
"name" : "birthday",
"title" : u"\u30d0\u30fc\u30b9\u30c7\u30fc",
"sjis" : "F7BD",
"unicode" : "E5A0",
"jis-email" : "783F",
"sjis-email" : "ECBD",
"utf-8" : "EF82BD"
},
"314" : {
"number" : "314",
"name" : "spade",
"title" : u"\u30b9\u30da\u30fc\u30c9",
"sjis" : "F7BE",
"unicode" : "E5A1",
"jis-email" : "7840",
"sjis-email" : "ECBE",
"utf-8" : "EF82BE"
},
"315" : {
"number" : "315",
"name" : "diamond",
"title" : u"\u30c0\u30a4\u30e4",
"sjis" : "F7BF",
"unicode" : "E5A2",
"jis-email" : "7841",
"sjis-email" : "ECBF",
"utf-8" : "EF82BF"
},
"316" : {
"number" : "316",
"name" : "clover",
"title" : u"\u30af\u30e9\u30d6",
"sjis" : "F7C0",
"unicode" : "E5A3",
"jis-email" : "7842",
"sjis-email" : "ECC0",
"utf-8" : "EF8380"
},
"317" : {
"number" : "317",
"name" : "eye",
"title" : u"\u76ee",
"sjis" : "F7C1",
"unicode" : "E5A4",
"jis-email" : "7843",
"sjis-email" : "ECC1",
"utf-8" : "EF8381"
},
"318" : {
"number" : "318",
"name" : "ear",
"title" : u"\u8033",
"sjis" : "F7C2",
"unicode" : "E5A5",
"jis-email" : "7844",
"sjis-email" : "ECC2",
"utf-8" : "EF8382"
},
"319" : {
"number" : "319",
"name" : "peacesign",
"title" : u"\u624b(\u30c1\u30e7\u30ad)",
"sjis" : "F7C3",
"unicode" : "E5A6",
"jis-email" : "7845",
"sjis-email" : "ECC3",
"utf-8" : "EF8383"
},
"320" : {
"number" : "320",
"name" : "openfinger",
"title" : u"\u624b(\u30d1\u30fc)",
"sjis" : "F7C4",
"unicode" : "E5A7",
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: EPL-2.0
#
from __future__ import print_function
import os
import pytest
import tempfile
from mock import patch
from launch_benchmark import LaunchBenchmark
from test_utils import platform_config
# Example args and output strings for testing mocks
test_model_name = "resnet50"
test_framework = "tensorflow"
test_mode = "inference"
test_precision = "fp32"
test_docker_image = "foo"
test_batch_size = "100"
test_num_cores = "1"
example_req_args = ["--model-name", test_model_name,
"--framework", test_framework,
"--mode", test_mode,
"--precision", test_precision,
"--docker-image", test_docker_image,
"--batch-size", test_batch_size,
"--num-cores", test_num_cores]
@pytest.fixture
def mock_platform_util(patch):
return patch("base_benchmark_util.platform_util.PlatformUtil")
@pytest.fixture
def mock_os(patch):
return patch("base_benchmark_util.platform_util.os")
@pytest.fixture
def mock_subprocess(patch):
return patch("base_benchmark_util.platform_util.subprocess")
@pytest.fixture
def mock_system_platform(patch):
return patch("base_benchmark_util.platform_util.system_platform")
def setup_mock_values(platform_mock, os_mock, subprocess_mock):
platform_config.set_mock_system_type(platform_mock)
platform_config.set_mock_os_access(os_mock)
platform_config.set_mock_lscpu_subprocess_values(subprocess_mock)
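# The helpers above stub out the system type, os access checks and the lscpu
# subprocess output so that platform_util can initialize without reading the
# real host (behaviour inferred from the helper names in platform_config).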
def test_launch_benchmark_parse_args(mock_platform_util):
"""
Verifies that arg parsing gives us the expected results.
"""
launch_benchmark = LaunchBenchmark()
args, unknown_args = launch_benchmark.parse_args(example_req_args)
assert args.model_name == test_model_name
assert args.framework == test_framework
assert args.mode == test_mode
assert args.precision == test_precision
assert args.docker_image == test_docker_image
assert unknown_args == []
def test_launch_benchmark_parse_unknown_args(mock_platform_util):
"""
Checks parsing of unknown args
"""
launch_benchmark = LaunchBenchmark()
test_args = example_req_args + ["--test", "foo"]
args, unknown_args = launch_benchmark.parse_args(test_args)
assert unknown_args == ["--test"]
def test_launch_benchmark_parse_bad_args(mock_platform_util):
"""
Checks for a failure when no args are passed.
"""
launch_benchmark = LaunchBenchmark()
# arg parse should fail when no args are passed
with pytest.raises(SystemExit):
launch_benchmark.parse_args([])
def test_launch_benchmark_validate_args(
mock_system_platform, mock_os, mock_subprocess):
"""
Tests that valid args pass arg validation without any errors.
"""
setup_mock_values(mock_system_platform, mock_os, mock_subprocess)
launch_benchmark = LaunchBenchmark()
args, _ = launch_benchmark.parse_args(example_req_args)
launch_benchmark.validate_args(args)
def test_launch_benchmark_validate_bad_framework(
mock_system_platform, mock_os, mock_subprocess):
"""
Verifies that an unsupported framework name errors.
"""
setup_mock_values(mock_system_platform, mock_os, mock_subprocess)
launch_benchmark = LaunchBenchmark()
args, _ = launch_benchmark.parse_args(example_req_args)
args.framework = "foo"
with pytest.raises(ValueError) as e:
launch_benchmark.validate_args(args)
assert "The specified framework is not supported" in str(e)
def test_launch_benchmark_validate_bad_checkpoint_dir(
mock_system_platform, mock_os, mock_subprocess):
"""
Verifies that an invalid checkpoint path fails.
"""
setup_mock_values(mock_system_platform, mock_os, mock_subprocess)
launch_benchmark = LaunchBenchmark()
args, _ = launch_benchmark.parse_args(example_req_args)
bad_path = "/path/does/not_exist"
args.checkpoint = bad_path
with pytest.raises(IOError) as e:
launch_benchmark.validate_args(args)
assert "The checkpoint location {} does not exist".format(bad_path) \
in str(e)
# test with a file
with tempfile.NamedTemporaryFile() as temp_file:
args.checkpoint = temp_file.name
with pytest.raises(IOError) as e:
launch_benchmark.validate_args(args)
assert "The checkpoint location {} is not a directory".format(
temp_file.name) in str(e)
def test_launch_benchmark_validate_checkpoint_dir(
mock_system_platform, mock_os, mock_subprocess):
"""
Verifies that a valid checkpoint path passes.
"""
setup_mock_values(mock_system_platform, mock_os, mock_subprocess)
launch_benchmark = LaunchBenchmark()
args, _ = launch_benchmark.parse_args(example_req_args)
temp_dir = tempfile.mkdtemp()
args.checkpoint = temp_dir
try:
launch_benchmark.validate_args(args)
finally:
os.rmdir(temp_dir)
def test_launch_benchmark_validate_model_source_dir(
mock_system_platform, mock_os, mock_subprocess):
"""
Verifies that a valid model source path passes.
"""
setup_mock_values(mock_system_platform, mock_os, mock_subprocess)
launch_benchmark = LaunchBenchmark()
args, _ = launch_benchmark.parse_args(example_req_args)
temp_dir = tempfile.mkdtemp()
args.model_source_dir = temp_dir
try:
launch_benchmark.validate_args(args)
finally:
os.rmdir(temp_dir)
def test_launch_benchmark_validate_bad_in_graph(
mock_system_platform, mock_os, mock_subprocess):
"""
Verifies that an invalid input graph path fails.
"""
setup_mock_values(mock_system_platform, mock_os, mock_subprocess)
launch_benchmark = LaunchBenchmark()
args, _ = launch_benchmark.parse_args(example_req_args)
# test with path that does not exist
bad_path = "/path/does/not_exist"
args.input_graph = bad_path
with pytest.raises(IOError) as e:
launch_benchmark.validate_args(args)
assert "The input graph {} does not exist".format(bad_path) \
in str(e)
# test with path that is a directory
temp_dir = tempfile.mkdtemp()
args.input_graph = temp_dir
try:
with pytest.raises(IOError) as e:
launch_benchmark.validate_args(args)
assert "The input graph {} must be a file".format(temp_dir) \
in str(e)
finally:
os.rmdir(temp_dir)
def test_launch_benchmark_validate_in_graph(
mock_system_platform, mock_os, mock_subprocess):
"""
Verifies that a valid input graph path passes.
"""
setup_mock_values(mock_system_platform, mock_os, mock_subprocess)
launch_benchmark = LaunchBenchmark()
args, _ = launch_benchmark.parse_args(example_req_args)
with tempfile.NamedTemporaryFile() as temp_file:
args.input_graph = temp_file.name
launch_benchmark.validate_args(args)
def test_launch_benchmark_validate_bad_batch_size(mock_platform_util):
"""
Verifies that a bad batch size fails
"""
launch_benchmark = LaunchBenchmark()
args, _ = launch_benchmark.parse_args(example_req_args)
args.batch_size = 0
with pytest.raises(ValueError) as e:
launch_benchmark.validate_args(args)
assert "The batch size 0 is not valid." in str(e)
args.batch_size = -100
with pytest.raises(ValueError) as e:
launch_benchmark.validate_args(args)
assert "The batch size -100 is not valid." in str(e)
def test_launch_benchmark_validate_num_cores(
mock_system_platform, mock_os, mock_subprocess):
"""
Verifies that a bad num cores fails
"""
setup_mock_values(mock_system_platform, mock_os, mock_subprocess)
launch_benchmark = LaunchBenchmark()
args, _ = launch_benchmark.parse_args(example_req_args)
args.num_cores = 0
expected_error = ("Core number must be greater than 0 or -1. The default "
"value is -1 which means using all the cores in the "
"sockets")
with pytest.raises(ValueError) as e:
launch_benchmark.validate_args(args)
assert expected_error in str(e)
args.num_cores = -100
with pytest.raises(ValueError) as e:
launch_benchmark.validate_args(args)
assert expected_error in str(e)
@patch("subprocess.Popen")
def test_launch_benchmark_validate_model(
mock_popen, mock_platform_util):
"""
Verifies that a valid model name passes validation and starts a docker
container.
"""
launch_benchmark = LaunchBenchmark()
args, _ = launch_benchmark.parse_args(example_req_args)
args.model_name = "resnet50"
launch_benchmark.run_docker_container(args)
assert mock_popen.called
args, kwargs = mock_popen.call_args
assert "docker" == args[0][0]
assert "run" == args[0][1]
def test_launch_benchmark_validate_bad_model(mock_platform_util):
"""
Verifies that a bad model name fails
"""
launch_benchmark = LaunchBenchmark()
args, _ = launch_benchmark.parse_args(example_req_args)
args.model_name = "foo"
with pytest.raises(ValueError) as e:
launch_benchmark.run_docker_container(args)
assert "No model was found for" in str(e)
def test_launch_benchmark_validate_bad_docker_image(
mock_system_platform, mock_os, mock_subprocess):
"""
Verifies that an invalid docker image fails.
"""
setup_mock_values(mock_system_platform, mock_os, mock_subprocess)
launch_benchmark = LaunchBenchmark()
args, _ = launch_benchmark.parse_args(example_req_args)
args.docker_image = "test "
with pytest.raises(ValueError) as e:
launch_benchmark.validate_args(args)
assert "docker image string should " \
"not have whitespace(s)" in str(e)
def test_launch_benchmark_validate_bad_intra_threads(
mock_system_platform, mock_os, mock_subprocess):
"""
Verifies that an invalid num intra threads fails.
"""
setup_mock_values(mock_system_platform, mock_os, mock_subprocess)
launch_benchmark = LaunchBenchmark()
args, _ = launch_benchmark.parse_args(example_req_args)
args.num_intra_threads = -1
with pytest.raises(ValueError) as e:
launch_benchmark.validate_args(args)
assert "Number of intra threads " \
"value should be greater than 0" in str(e)
def test_launch_benchmark_validate_bad_inter_threads(
mock_system_platform, mock_os, mock_subprocess):
"""
Verifies that an invalid num inter threads fails.
"""
setup_mock_values(mock_system_platform, mock_os, mock_subprocess)
launch_benchmark = LaunchBenchmark()
args, _ = launch_benchmark.parse_args(example_req_args)
args.num_inter_threads = -1
with pytest.raises(ValueError) as e:
launch_benchmark.validate_args(args)
assert "Number of inter threads " \
"value should be greater than 0" in str(e)
def test_launch_benchmark_validate_empty_model(mock_platform_util):
"""
Verifies that giving no model name fails
"""
launch_benchmark = LaunchBenchmark()
args, _ = launch_benchmark.parse_args(example_req_args)
args.model_name = ""
with pytest.raises(ValueError) as e:
launch_benchmark.validate_args(args)
assert "The model name is not valid" in str(e)
@pytest.mark.parametrize("arg_name",
["input_graph"])
def test_link_file_input_validation(
mock_system_platform, mock_os, mock_subprocess,
arg_name):
"""
Tests args that take a file path to ensure that sym links and hard links
are not allowed. Creates a symlink and hard link of a temporary file and
verifies that the launch script fails with an appropriate error message.
"""
with tempfile.NamedTemporaryFile() as temp_file:
# directory where the temp file is located
parent_dir = os.path.dirname(temp_file.name)
# create sym link to the temp file
symlink_file = os.path.join(parent_dir, "temp_symlink_file")
if os.path.exists(symlink_file):
os.remove(symlink_file)
os.symlink(temp_file.name, symlink_file)
# create hard link to the temp file
hardlink_file = os.path.join(parent_dir, "temp_hardlink_file")
if os.path.exists(hardlink_file):
os.remove(hardlink_file)
os.link(temp_file.name, hardlink_file)
try:
setup_mock_values(mock_system_platform, mock_os, mock_subprocess)
launch_benchmark = LaunchBenchmark()
args, _ = launch_benchmark.parse_args(example_req_args)
args_dict = vars(args)
# Test that hard link errors
args_dict[arg_name] = hardlink_file
print(args_dict)
with pytest.raises(ValueError) as e:
launch_benchmark.validate_args(args)
assert "cannot be a link" in str(e)
# Test that sym link errors
args_dict[arg_name] = symlink_file
with pytest.raises(ValueError) as e:
launch_benchmark.validate_args(args)
assert "cannot be a link" in str(e)
finally:
if os.path.exists(symlink_file):
os.remove(symlink_file)
if os.path.exists(hardlink_file):
os.remove(hardlink_file)
@pytest.mark.parametrize("arg_name",
["model_source_dir", "checkpoint", "data_location"])
def test_symlink_directory_input_validation(mock_system_platform, mock_os,
mock_subprocess, arg_name):
"""
Tests args that take a directory path to ensure that symlinks are not
allowed. Creates a symlink of a temporary directory and verifies that the
launch script fails with an appropriate error message.
"""
# create temp directory
temp_dir = tempfile.mkdtemp()
parent_dir = os.path.dirname(temp_dir)
# create sym link to the temp directory
symlink_dir = os.path.join(parent_dir, "temp_symlink_dir")
if os.path.exists(symlink_dir):
os.remove(symlink_dir)
os.symlink(temp_dir, symlink_dir)
try:
setup_mock_values(mock_system_platform, mock_os, mock_subprocess)
launch_benchmark = LaunchBenchmark()
args, _ = launch_benchmark.parse_args(example_req_args)
args_dict = vars(args)
args_dict[arg_name] = symlink_dir
with pytest.raises(ValueError) as e:
launch_benchmark.validate_args(args)
assert "cannot be a link" in str(e)
finally:
if os.path.exists(symlink_dir):
os.remove(symlink_dir)
os.rmdir(temp_dir)
def test_output_results_with_benchmarking(mock_system_platform, mock_os, mock_subprocess):
"""
Tests that the launch script fails when trying to get inference results when benchmarking
"""
setup_mock_values(mock_system_platform, mock_os, mock_subprocess)
launch_benchmark = LaunchBenchmark()
test_args = ["--model-name", test_model_name,
"--framework", test_framework,
"--mode", "training",
"--precision", test_precision,
"--docker-image", test_docker_image,
"--benchmark-only",
"--output-results"]
args, _ = launch_benchmark.parse_args(test_args)
with pytest.raises(ValueError) as e:
launch_benchmark.validate_args(args)
assert "--output-results can only be used when | |
# see lines 4355-4363 in calendrica-3.0.cl
def chinese_location(tee):
"""Return location of Beijing; time zone varies with time, tee."""
year = gregorian_year_from_fixed(ifloor(tee))
if (year < 1929):
return location(angle(39, 55, 0), angle(116, 25, 0),
mt(43.5), hr(1397/180))
else:
return location(angle(39, 55, 0), angle(116, 25, 0),
mt(43.5), hr(8))
# see lines 4365-4377 in calendrica-3.0.cl
def chinese_solar_longitude_on_or_after(lam, date):
"""Return moment (Beijing time) of the first date on or after
fixed date, date, (Beijing time) when the solar longitude
will be 'lam' degrees."""
tee = solar_longitude_after(lam,
universal_from_standard(date,
chinese_location(date)))
return standard_from_universal(tee, chinese_location(tee))
# see lines 4379-4387 in calendrica-3.0.cl
def current_major_solar_term(date):
"""Return last Chinese major solar term (zhongqi) before
fixed date, date."""
s = solar_longitude(universal_from_standard(date,
chinese_location(date)))
return amod(2 + quotient(int(s), deg(30)), 12)
# see lines 4389-4397 in calendrica-3.0.cl
def major_solar_term_on_or_after(date):
"""Return moment (in Beijing) of the first Chinese major
solar term (zhongqi) on or after fixed date, date. The
major terms begin when the sun's longitude is a
multiple of 30 degrees."""
s = solar_longitude(midnight_in_china(date))
l = mod(30 * ceiling(s / 30), 360)
return chinese_solar_longitude_on_or_after(l, date)
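# Worked example (added for clarity): if the sun is at s = 125 degrees,
# then ceiling(125 / 30) = 5, so l = mod(30 * 5, 360) = 150 and the search
# targets the next multiple of 30 degrees, i.e. solar longitude 150.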
# see lines 4399-4407 in calendrica-3.0.cl
def current_minor_solar_term(date):
"""Return last Chinese minor solar term (jieqi) before date, date."""
s = solar_longitude(universal_from_standard(date,
chinese_location(date)))
return amod(3 + quotient(s - deg(15), deg(30)), 12)
# see lines 4409-4422 in calendrica-3.0.cl
def minor_solar_term_on_or_after(date):
"""Return moment (in Beijing) of the first Chinese minor solar
term (jieqi) on or after fixed date, date. The minor terms
begin when the sun's longitude is an odd multiple of 15 degrees."""
s = solar_longitude(midnight_in_china(date))
l = mod(30 * ceiling((s - deg(15)) / 30) + deg(15), 360)
return chinese_solar_longitude_on_or_after(l, date)
# see lines 4424-4433 in calendrica-3.0.cl
def chinese_new_moon_before(date):
"""Return fixed date (Beijing) of first new moon before fixed date, date."""
tee = new_moon_before(midnight_in_china(date))
return ifloor(standard_from_universal(tee, chinese_location(tee)))
# see lines 4435-4444 in calendrica-3.0.cl
def chinese_new_moon_on_or_after(date):
"""Return fixed date (Beijing) of first new moon on or after
fixed date, date."""
tee = new_moon_at_or_after(midnight_in_china(date))
return ifloor(standard_from_universal(tee, chinese_location(tee)))
# see lines 4446-4449 in calendrica-3.0.cl
CHINESE_EPOCH = fixed_from_gregorian(gregorian_date(-2636, FEBRUARY, 15))
# see lines 4451-4457 in calendrica-3.0.cl
def is_chinese_no_major_solar_term(date):
"""Return True if Chinese lunar month starting on date, date,
has no major solar term."""
return (current_major_solar_term(date) ==
current_major_solar_term(chinese_new_moon_on_or_after(date + 1)))
# see lines 4459-4463 in calendrica-3.0.cl
def midnight_in_china(date):
"""Return Universal time of (clock) midnight at start of fixed
date, date, in China."""
return universal_from_standard(date, chinese_location(date))
# see lines 4465-4474 in calendrica-3.0.cl
def chinese_winter_solstice_on_or_before(date):
"""Return fixed date, in the Chinese zone, of winter solstice
on or before fixed date, date."""
approx = estimate_prior_solar_longitude(WINTER,
midnight_in_china(date + 1))
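# Search day by day, starting just before the estimate, until the sun's
# longitude at the following Chinese midnight has passed WINTER (270 degrees).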
return next(ifloor(approx) - 1,
lambda day: WINTER < solar_longitude(
midnight_in_china(1 + day)))
# see lines 4476-4500 in calendrica-3.0.cl
def chinese_new_year_in_sui(date):
"""Return fixed date of Chinese New Year in sui (period from
solstice to solstice) containing date, date."""
s1 = chinese_winter_solstice_on_or_before(date)
s2 = chinese_winter_solstice_on_or_before(s1 + 370)
next_m11 = chinese_new_moon_before(1 + s2)
m12 = chinese_new_moon_on_or_after(1 + s1)
m13 = chinese_new_moon_on_or_after(1 + m12)
leap_year = iround((next_m11 - m12) / MEAN_SYNODIC_MONTH) == 12
if (leap_year and
(is_chinese_no_major_solar_term(m12) or is_chinese_no_major_solar_term(m13))):
return chinese_new_moon_on_or_after(1 + m13)
else:
return m13
# see lines 4502-4511 in calendrica-3.0.cl
def chinese_new_year_on_or_before(date):
"""Return fixed date of Chinese New Year on or before fixed date, date."""
new_year = chinese_new_year_in_sui(date)
if (date >= new_year):
return new_year
else:
return chinese_new_year_in_sui(date - 180)
# see lines 4513-4518 in calendrica-3.0.cl
def chinese_new_year(g_year):
"""Return fixed date of Chinese New Year in Gregorian year, g_year."""
return chinese_new_year_on_or_before(
fixed_from_gregorian(gregorian_date(g_year, JULY, 1)))
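# Usage sketch (not part of calendrica-3.0): chinese_new_year yields a fixed
# (R.D.) date, convertible back with gregorian_from_fixed from this library.
#   chinese_new_year(2000)                        # -> R.D. of 5 Feb 2000
#   gregorian_from_fixed(chinese_new_year(2000))  # -> gregorian_date(2000, FEBRUARY, 5)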
# see lines 4520-4565 in calendrica-3.0.cl
def chinese_from_fixed(date):
"""Return Chinese date (cycle year month leap day) of fixed date, date."""
s1 = chinese_winter_solstice_on_or_before(date)
s2 = chinese_winter_solstice_on_or_before(s1 + 370)
next_m11 = chinese_new_moon_before(1 + s2)
m12 = chinese_new_moon_on_or_after(1 + s1)
leap_year = iround((next_m11 - m12) / MEAN_SYNODIC_MONTH) == 12
m = chinese_new_moon_before(1 + date)
month = amod(iround((m - m12) / MEAN_SYNODIC_MONTH) -
(1 if (leap_year and
is_chinese_prior_leap_month(m12, m)) else 0),
12)
leap_month = (leap_year and
is_chinese_no_major_solar_term(m) and
(not is_chinese_prior_leap_month(m12,
chinese_new_moon_before(m))))
elapsed_years = (ifloor(mpf(1.5) -
(month / 12) +
((date - CHINESE_EPOCH) / MEAN_TROPICAL_YEAR)))
cycle = 1 + quotient(elapsed_years - 1, 60)
year = amod(elapsed_years, 60)
day = 1 + (date - m)
return chinese_date(cycle, year, month, leap_month, day)
# see lines 4567-4596 in calendrica-3.0.cl
def fixed_from_chinese(c_date):
"""Return fixed date of Chinese date, c_date."""
cycle = chinese_cycle(c_date)
year = chinese_year(c_date)
month = chinese_month(c_date)
leap = chinese_leap(c_date)
day = chinese_day(c_date)
mid_year = ifloor(CHINESE_EPOCH +
((((cycle - 1) * 60) + (year - 1) + 1/2) *
MEAN_TROPICAL_YEAR))
new_year = chinese_new_year_on_or_before(mid_year)
p = chinese_new_moon_on_or_after(new_year + ((month - 1) * 29))
d = chinese_from_fixed(p)
prior_new_moon = (p if ((month == chinese_month(d)) and
(leap == chinese_leap(d)))
else chinese_new_moon_on_or_after(1 + p))
return prior_new_moon + day - 1
# see lines 4598-4607 in calendrica-3.0.cl
def is_chinese_prior_leap_month(m_prime, m):
"""Return True if there is a Chinese leap month on or after lunar
month starting on fixed day, m_prime and at or before
lunar month starting at fixed date, m."""
return ((m >= m_prime) and
(is_chinese_no_major_solar_term(m) or
is_chinese_prior_leap_month(m_prime, chinese_new_moon_before(m))))
# see lines 4609-4615 in calendrica-3.0.cl
def chinese_name(stem, branch):
"""Return BOGUS if stem/branch combination is impossible."""
if (mod(stem, 2) == mod(branch, 2)):
return [stem, branch]
else:
return BOGUS
# see lines 4617-4619 in calendrica-3.0.cl
def chinese_stem(name):
return name[0]
# see lines 4621-4623 in calendrica-3.0.cl
def chinese_branch(name):
return name[1]
# see lines 4625-4629 in calendrica-3.0.cl
def chinese_sexagesimal_name(n):
"""Return the n_th name of the Chinese sexagesimal cycle."""
return chinese_name(amod(n, 10), amod(n, 12))
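# Worked example: the cycle pairs ten stems with twelve branches, so
#   chinese_sexagesimal_name(1)  -> [1, 1]
#   chinese_sexagesimal_name(60) -> [10, 12]
#   chinese_sexagesimal_name(61) -> [1, 1]   (the 60-name cycle repeats)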
# see lines 4631-4644 in calendrica-3.0.cl
def chinese_name_difference(c_name1, c_name2):
"""Return the number of names from Chinese name c_name1 to the
next occurrence of Chinese name c_name2."""
stem1 = chinese_stem(c_name1)
stem2 = chinese_stem(c_name2)
branch1 = chinese_branch(c_name1)
branch2 = chinese_branch(c_name2)
stem_difference = stem2 - stem1
branch_difference = branch2 - branch1
return 1 + mod(stem_difference - 1 +
25 * (branch_difference - stem_difference), 60)
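# Worked example: from name (1, 1) to (2, 2) is 1 + mod(1 - 1 + 25 * 0, 60) = 1
# step; from a name to its own next occurrence is 1 + mod(-1, 60) = 60 steps.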
# see lines 4646-4649 in calendrica-3.0.cl
# see lines 214-215 in calendrica-3.0.errata.cl
def chinese_year_name(year):
"""Return sexagesimal name for Chinese year, year, of any cycle."""
return chinese_sexagesimal_name(year)
# see lines 4651-4655 in calendrica-3.0.cl
CHINESE_MONTH_NAME_EPOCH = 57
# see lines 4657-4664 in calendrica-3.0.cl
# see lines 211-212 in calendrica-3.0.errata.cl
def chinese_month_name(month, year):
"""Return sexagesimal name for month, month, of Chinese year, year."""
elapsed_months = (12 * (year - 1)) + (month - 1)
return chinese_sexagesimal_name(elapsed_months - CHINESE_MONTH_NAME_EPOCH)
# see lines 4666-4669 in calendrica-3.0.cl
CHINESE_DAY_NAME_EPOCH = rd(45)
# see lines 4671-4675 in calendrica-3.0.cl
# see lines 208-209 in calendrica-3.0.errata.cl
def chinese_day_name(date):
"""Return Chinese sexagesimal name for date, date."""
return chinese_sexagesimal_name(date - CHINESE_DAY_NAME_EPOCH)
# see lines 4677-4687 in calendrica-3.0.cl
def chinese_day_name_on_or_before(name, date):
"""Return fixed date of latest date on or before fixed date, date, that
has Chinese name, name."""
return (date -
mod(date +
chinese_name_difference(name,
chinese_sexagesimal_name(CHINESE_DAY_NAME_EPOCH)),
60))
# see lines 4689-4699 in calendrica-3.0.cl
def dragon_festival(g_year):
"""Return fixed date of the Dragon Festival occurring in Gregorian
year g_year."""
elapsed_years = 1 + g_year - gregorian_year_from_fixed(CHINESE_EPOCH)
cycle = 1 + quotient(elapsed_years - 1, 60)
year = amod(elapsed_years, 60)
return fixed_from_chinese(chinese_date(cycle, year, 5, False, 5))
# see lines 4701-4708 in calendrica-3.0.cl
def qing_ming(g_year):
"""Return fixed date of Qingming occurring in Gregorian year, g_year."""
return ifloor(minor_solar_term_on_or_after(
fixed_from_gregorian(gregorian_date(g_year, MARCH, 30))))
# see lines 4710-4722 in calendrica-3.0.cl
def chinese_age(birthdate, date):
"""Return the age at fixed date, date, given Chinese birthdate, birthdate,
according to the Chinese custom.
Returns BOGUS if date is before birthdate."""
today = chinese_from_fixed(date)
if (date >= fixed_from_chinese(birthdate)):
return (60 * (chinese_cycle(today) - chinese_cycle(birthdate)) +
(chinese_year(today) - chinese_year(birthdate)) + 1)
else:
return BOGUS
# see lines 4724-4758 in calendrica-3.0.cl
def chinese_year_marriage_augury(cycle, year):
"""Return the marriage augury type of Chinese year, year in cycle, cycle.
0 means lichun does not occur (widow or double-blind years),
1 means it occurs once at the end (blind),
2 means it occurs once at the start (bright), and
3 means it occurs twice (double-bright or double-happiness)."""
new_year = fixed_from_chinese(chinese_date(cycle, year, 1, False, 1))
c = (cycle + 1) if (year == 60) else cycle
y = 1 if (year == 60) else (year + 1)
next_new_year = fixed_from_chinese(chinese_date(c, y, 1, False, 1))
first_minor_term = current_minor_solar_term(new_year)
next_first_minor_term = current_minor_solar_term(next_new_year)
if ((first_minor_term == 1) and (next_first_minor_term == 12)):
res = 0
elif ((first_minor_term == 1) and (next_first_minor_term != 12)):
res = 1
elif ((first_minor_term != 1) and (next_first_minor_term == 12)):
res = 2
else:
res = 3
return res
# see lines 4760-4769 in calendrica-3.0.cl
def japanese_location(tee):
"""Return the location for Japanese calendar; varies with moment, tee."""
year = gregorian_year_from_fixed(ifloor(tee))
# -*- coding:utf-8 -*-
"""
IG Markets REST API Library for Python
http://labs.ig.com/rest-trading-api-reference
Original version by <NAME> - 2014 - http://uk.linkedin.com/in/lewisbarber/
Modified by <NAME> - 2014-2015 - https://github.com/femtotrader/
"""
import json
import time
import urllib
from base64 import b64encode, b64decode
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5
from requests import Session
from .utils import conv_datetime, conv_to_ms
import logging
logger = logging.getLogger("siis.connector.ig.rest")
class IGException(Exception):
pass
class IGSessionCRUD(object):
"""
Session with CRUD operation
@todo add the encryptionKey to send a salted password : /gateway/deal/session/encryptionKey
"""
CLIENT_TOKEN = None
SECURITY_TOKEN = None
BASIC_HEADERS = None
LOGGED_IN_HEADERS = None
DELETE_HEADERS = None
BASE_URL = None
HEADERS = {}
def __init__(self, base_url, api_key, session):
self.BASE_URL = base_url
self.API_KEY = api_key
self.HEADERS['BASIC'] = {
'X-IG-API-KEY': self.API_KEY,
'Content-Type': 'application/json',
'Accept': 'application/json; charset=UTF-8'
}
self.session = session
self.create = self._create_first
self.lightstreamer_endpoint = None
def _get_session(self, session):
"""
Returns a Requests session if session is None or session if it's not None (cached session
with requests-cache for example)
:param session:
:return:
"""
if session is None:
session = self.session # requests Session
else:
session = session
return session
def _url(self, endpoint):
"""
Returns url from endpoint and base url
"""
return self.BASE_URL + endpoint
def _create_first(self, endpoint, params, session):
"""
Create first = POST with headers=BASIC_HEADERS
"""
url = self._url(endpoint)
session = self._get_session(session)
if isinstance(params['password'], bytes):
params['password'] = params['password'].decode()
response = session.post(url, data=json.dumps(params), headers=self.HEADERS['BASIC'])
if not response.ok:
raise(Exception("HTTP status code %s %s " % (response.status_code, response.text)))
self._set_headers(response.headers, True)
self.create = self._create_logged_in
data = json.loads(response.text)
self.lightstreamer_endpoint = data.get('lightstreamerEndpoint')
return response
def _create_logged_in(self, endpoint, params, session):
"""
Create when logged in = POST with headers=LOGGED_IN_HEADERS
"""
url = self._url(endpoint)
session = self._get_session(session)
response = session.post(url, data=json.dumps(params), headers=self.HEADERS['LOGGED_IN'])
return response
def read(self, endpoint, params, session):
"""
Read = GET with headers=LOGGED_IN_HEADERS
"""
url = self._url(endpoint)
session = self._get_session(session)
response = session.get(url, params=params, headers=self.HEADERS['LOGGED_IN'])
return response
def update(self, endpoint, params, session):
"""
Update = PUT with headers=LOGGED_IN_HEADERS
"""
url = self._url(endpoint)
session = self._get_session(session)
response = session.put(url, data=json.dumps(params), headers=self.HEADERS['LOGGED_IN'])
return response
def delete(self, endpoint, params, session):
"""
Delete = POST with DELETE_HEADERS
"""
url = self._url(endpoint)
session = self._get_session(session)
response = session.post(url, data=json.dumps(params), headers=self.HEADERS['DELETE'])
return response
def req(self, action, endpoint, params, session):
"""
Send a request (CREATE READ UPDATE or DELETE)
"""
d_actions = {
'create': self.create,
'read': self.read,
'update': self.update,
'delete': self.delete
}
return d_actions[action](endpoint, params, session)
def _set_headers(self, response_headers, update_cst):
"""
Sets headers
"""
if update_cst:
self.CLIENT_TOKEN = response_headers['CST']
if 'X-SECURITY-TOKEN' in response_headers:
self.SECURITY_TOKEN = response_headers['X-SECURITY-TOKEN']
else:
self.SECURITY_TOKEN = None
self.HEADERS['LOGGED_IN'] = {
'X-IG-API-KEY': self.API_KEY,
'X-SECURITY-TOKEN': self.SECURITY_TOKEN,
'CST': self.CLIENT_TOKEN,
'Content-Type': 'application/json',
'Accept': 'application/json; charset=UTF-8'
}
self.HEADERS['DELETE'] = {
'X-IG-API-KEY': self.API_KEY,
'X-SECURITY-TOKEN': self.SECURITY_TOKEN,
'CST': self.CLIENT_TOKEN,
'Content-Type': 'application/json',
'Accept': 'application/json; charset=UTF-8',
'_method': 'DELETE'
}
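# Usage sketch (illustrative credentials, not part of the original file):
# the first 'create' posts with BASIC headers, stores the CST and
# X-SECURITY-TOKEN from the response, then all later calls use the LOGGED_IN
# headers; 'delete' is sent as a POST carrying the '_method': 'DELETE' marker.
#
#   crud = IGSessionCRUD("https://demo-api.ig.com/gateway/deal", "api-key", Session())
#   crud.req("create", "/session", {"identifier": "user", "password": "pw"}, None)
#   accounts = crud.req("read", "/accounts", {}, None).json()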
class IGService:
D_BASE_URL = {
'live': 'https://api.ig.com/gateway/deal',
'demo': 'https://demo-api.ig.com/gateway/deal'
}
API_KEY = None
IG_USERNAME = None
IG_PASSWORD = <PASSWORD>
def __init__(self, username, password, api_key, acc_type="demo", session=None):
"""
Constructor, calls the method required to connect to the API (accepts acc_type = LIVE or DEMO)
"""
self.API_KEY = api_key
self.IG_USERNAME = username
self.IG_PASSWORD = password
self.ig_session = None
try:
self.BASE_URL = self.D_BASE_URL[acc_type.lower()]
except Exception:
raise(Exception("Invalid account type specified, please provide LIVE or DEMO."))
self.parse_response = self.parse_response_with_exception
if session is None:
self.session = Session() # Requests Session (global)
else:
self.session = session
self.crud_session = IGSessionCRUD(self.BASE_URL, self.API_KEY, self.session)
def _get_session(self, session):
"""
Returns a Requests session (from self.session) if session is None
or session if it's not None (cached session with requests-cache for example)
"""
if session is None:
session = self.session # requests Session
else:
assert(isinstance(session, Session)), "session must be <requests.session.Session object> not %s" % type(session)
session = session
return session
def _req(self, action, endpoint, params, session):
"""
Creates a CRUD request and returns response
"""
session = self._get_session(session)
response = self.crud_session.req(action, endpoint, params, session)
return response
# ---------- PARSE_RESPONSE ----------- #
def parse_response_without_exception(self, *args, **kwargs):
"""
Parses JSON response
returns dict
no exception raised when error occurs"""
response = json.loads(*args, **kwargs)
return response
def parse_response_with_exception(self, *args, **kwargs):
"""
Parses JSON response
returns dict
exception raised when error occurs"""
response = json.loads(*args, **kwargs)
if 'errorCode' in response:
raise(Exception(response['errorCode']))
return response
# -------- ACCOUNT ------- #
def fetch_account(self, accountId, session=None):
"""
Fetch all accounts and filter for the given accountId.
"""
params = {}
endpoint = '/accounts'
action = 'read'
response = self._req(action, endpoint, params, session)
data = self.parse_response(response.text)
if data is not None and data.get('accounts'):
for account in data.get('accounts'):
if account.get('accountId') == accountId:
return account
# nothing matched: return an empty account structure
return {'accountType': '', 'accountName': '', 'currency': '', 'balance': {'profitLoss': 0, 'balance': 0, 'available': 0}}
def fetch_account_activity_by_period(self, milliseconds, session=None):
"""
Returns the account activity history for the last specified period
"""
milliseconds = conv_to_ms(milliseconds)
params = {}
url_params = {
'milliseconds': milliseconds
}
endpoint = '/history/activity/{milliseconds}'.format(**url_params)
action = 'read'
response = self._req(action, endpoint, params, session)
data = self.parse_response(response.text)
return data
def fetch_transaction_history_by_type_and_period(self, milliseconds, trans_type, session=None):
"""
Returns the transaction history for the specified transaction type and period
"""
milliseconds = conv_to_ms(milliseconds)
params = {}
url_params = {
'milliseconds': milliseconds,
'trans_type': trans_type
}
endpoint = '/history/transactions/{trans_type}/{milliseconds}'.format(**url_params)
action = 'read'
response = self._req(action, endpoint, params, session)
data = self.parse_response(response.text)
return data
def fetch_transaction_history(self, trans_type=None, from_date=None,
to_date=None, max_span_seconds=None,
page_size=None, page_number=None,
session=None):
"""
Returns the transaction history for the specified transaction type and period
"""
params = {}
if trans_type:
params['type'] = trans_type
if from_date:
if hasattr(from_date, 'isoformat'):
from_date = from_date.isoformat()
params['from'] = from_date
if to_date:
if hasattr(to_date, 'isoformat'):
to_date = to_date.isoformat()
params['to'] = to_date
if max_span_seconds:
params['maxSpanSeconds'] = max_span_seconds
if page_size:
params['pageSize'] = page_size
if page_number:
params['pageNumber'] = page_number
endpoint = '/history/transactions'
action = 'read'
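# version 2 of the transactions endpoint needs an explicit Version header;
# it is removed again after the request so other endpoints keep the default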
self.crud_session.HEADERS['LOGGED_IN']['Version'] = "2"
response = self._req(action, endpoint, params, session)
del(self.crud_session.HEADERS['LOGGED_IN']['Version'])
data = self.parse_response(response.text)
return data
# -------- DEALING -------- #
def fetch_deal_by_deal_reference(self, deal_reference, session=None):
"""
Returns a deal confirmation for the given deal reference
"""
params = {}
url_params = {
'deal_reference': deal_reference
}
endpoint = '/confirms/{deal_reference}'.format(**url_params)
action = 'read'
for i in range(5):
response = self._req(action, endpoint, params, session)
if response.status_code == 404:
logger.info("Deal reference %s not found, retrying." % deal_reference)
time.sleep(1)
else:
break
data = self.parse_response(response.text)
return data
def fetch_open_positions(self, session=None):
"""
Returns all open positions for the active account
"""
params = {}
endpoint = '/positions'
action = 'read'
response = self._req(action, endpoint, params, session)
data = self.parse_response(response.text)
return data.get('positions', [])
def close_open_position(self, deal_id, direction, epic, expiry, level, order_type, quote_id, size, session=None):
"""
Closes one or more OTC positions
"""
params = {
'dealId': deal_id,
'direction': direction,
'epic': epic,
'expiry': expiry,
'level': level,
'orderType': order_type,
'quoteId': quote_id,
'size': size
}
endpoint = '/positions/otc'
action = 'delete'
response = self._req(action, endpoint, params, session)
if response.status_code == 200:
deal_reference = json.loads(response.text)['dealReference']
return self.fetch_deal_by_deal_reference(deal_reference)
else:
raise IGException(response.text)
def create_open_position(self, currency_code, direction, epic, expiry,
force_open, guaranteed_stop, level,
limit_distance, limit_level, order_type,
quote_id, size, stop_distance, stop_level, time_in_force,
deal_reference=None, session=None):
"""
Creates an OTC position
"""
params = {
'currencyCode': currency_code,
'direction': direction,
'epic': epic,
'expiry': expiry,
'forceOpen': force_open,
'guaranteedStop': guaranteed_stop,
'level': level,
'limitDistance': limit_distance,
'limitLevel': limit_level,
'orderType': order_type,
'quoteId': quote_id,
'size': size,
'stopDistance': stop_distance,
'stopLevel': stop_level
}
if deal_reference:
params['dealReference'] = deal_reference
endpoint = '/positions/otc'
action = 'create'
response = self._req(action, endpoint, params, session)
if response.status_code == 200:
res_deal_reference = json.loads(response.text)['dealReference']
return self.fetch_deal_by_deal_reference(res_deal_reference)
else:
raise IGException(response.text)
def update_open_position(self, limit_level, stop_level, deal_id, session=None):
"""
Updates an OTC position
"""
params = {
'limitLevel': limit_level,
'stopLevel': stop_level,
# 'trailingStop': False, # only in v2
# 'trailingStopDistance': None,
# 'trailingStopIncrement': None
}
url_params = {
'deal_id': deal_id
}
endpoint = '/positions/otc/{deal_id}'.format(**url_params)
action = 'update'
response = self._req(action, endpoint, params, session)
if response.status_code == 200:
deal_reference = json.loads(response.text)['dealReference']
return self.fetch_deal_by_deal_reference(deal_reference)
else:
raise IGException(response.text)
def fetch_working_orders(self, session=None):
"""
Returns all open working orders for the active account
"""
params = {}
endpoint = '/workingorders'
action = 'read'
response = self._req(action, endpoint, params, session)
data = self.parse_response(response.text)
return data
def create_working_order(self, currency_code, direction, epic, expiry,
guaranteed_stop, level, size,
time_in_force, order_type,
limit_distance=None, limit_level=None,
stop_distance=None, stop_level=None,
good_till_date=None, deal_reference=None,
force_open=False, session=None):
"""
Creates an OTC working order
"""
VERSION = 2
if good_till_date
0x89, 0xA7, 0xE6, 0x80, 0xA1, 0xE6, 0x83, 0xA0, 0xE5, 0x89, 0xBF, 0xE8, 0x94, 0xAC, 0xE8,
0x96, 0xAF, 0xE8, 0x94, 0xA5, 0xE7, 0x8A, 0xA7, 0xE7, 0x89, 0xB2, 0xE6, 0x89, 0xB9, 0xE6, 0x85,
0xB7, 0xE8, 0x8C, 0x85, 0xE5, 0xBB, 0xAC, 0xE7, 0xB2, 0xB9, 0xE8, 0x9C, 0x87, 0xE8, 0xB9, 0x8B,
0xE7, 0x80, 0x9D, 0xE6, 0xA0, 0xBD, 0xE9, 0xA5, 0x97, 0xE4, 0xBF, 0xBA, 0xE7, 0x87, 0xBB, 0xE5,
0x94, 0x8F, 0xE5, 0x99, 0x93, 0xE8, 0x85, 0xA5, 0xE8, 0xB8, 0x90, 0xE6, 0xBE, 0x88, 0xE8, 0x98,
0x8B, 0xE4, 0xB8, 0xB8, 0xE9, 0x80, 0xB8, 0xE7, 0xB3, 0xB0, 0xE9, 0xB9, 0xBD, 0xE7, 0x9E, 0xA5,
0xE5, 0x93, 0xA2, 0xE5, 0x9A, 0x93, 0xE7, 0xB9, 0x86, 0xE7, 0xA2, 0x91, 0xE7, 0xB8, 0x9B, 0xE9,
0x9C, 0x93, 0xE8, 0x99, 0x94, 0xE8, 0xB4, 0x96, 0xE5, 0xAF, 0xA5, 0xE5, 0xBE, 0xA1, 0xE7, 0xBE,
0x94, 0xE7, 0xA9, 0xB9, 0xE7, 0xB4, 0x97, 0xE7, 0xB6, 0xBE, 0xE9, 0x9A, 0x98, 0xE7, 0x99, 0xA2,
0xE8, 0x92, 0x90, 0xE7, 0xB2, 0x92, 0xE4, 0xBA, 0x91, 0xE6, 0xB9, 0xAE, 0xE5, 0xB3, 0x99, 0xE4,
0xBE, 0xAE, 0xE6, 0x8C, 0xAA, 0xE6, 0x8F, 0x84, 0xE8, 0xAA, 0xA8, 0xE6, 0x81, 0xAA, 0xE8, 0x82,
0x8B, 0xE5, 0x8D, 0x93, 0xE8, 0xB3, 0x85, 0xE6, 0x83, 0x98, 0xE6, 0x82, 0xBC, 0xE5, 0xB8, 0x86,
0xE8, 0x83, 0x8E, 0xE8, 0xAA, 0x93, 0xEF, 0xBD, 0x90, 0xE6, 0xA1, 0xB6, 0xE7, 0xA9, 0x8E, 0xE6,
0xA6, 0x9C, 0xE5, 0xB2, 0x8C, 0xE6, 0x8E, 0xAE, 0xE5, 0x9D, 0xB7, 0xE5, 0xA2, 0x9F, 0xE7, 0x8E,
0xBB, 0xE7, 0x92, 0x83, 0xE8, 0x94, 0x94, 0xE6, 0xB4, 0xB6, 0xE6, 0x87, 0xA3, 0xE7, 0xA4, 0xAA,
0xE6, 0x94, 0x80, 0xE5, 0x94, 0xB0, 0xE8, 0xA4, 0xB2, 0xE8, 0xA5, 0xA0, 0xE8, 0xB2, 0xB6, 0xE2,
0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84,
0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB,
0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2,
0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84,
0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB,
0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2,
0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84,
0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB,
0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2,
0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84,
0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB,
0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2,
0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE8, 0x89, 0x98, 0xE6, 0x8A, 0x84, 0xE8, 0x9A,
0xAF, 0xE8, 0x9A, 0x93, 0xE9, 0xAE, 0xAD, 0xE7, 0xA2, 0x9F, 0xE9, 0xBB, 0xB4, 0xE6, 0xA2, 0xA2,
0xE8, 0xBD, 0x84, 0xE6, 0x94, 0x94, 0xE4, 0xBF, 0x91, 0xE5, 0x88, 0x91, 0xE5, 0x8C, 0xB9, 0xE7,
0xB9, 0x9A, 0xE6, 0xAA, 0xB8, 0xE6, 0xAA, 0xAC, 0xE8, 0xB7, 0x8B, 0xE9, 0x83, 0x8E, 0xE8, 0xAD,
0xB4, 0xE6, 0x81, 0xAC, 0xE5, 0x86, 0xAC, 0xE5, 0xA3, 0x87, 0xE7, 0xB1, 0x83, 0xE6, 0x91, 0xA7,
0xE6, 0xB7, 0xA9, 0xE5, 0xA4, 0x99, 0xE6, 0x9E, 0x9D, 0xE8, 0xB3, 0xA4, 0xE2, 0x84, 0xAB, 0xE4,
0xBC, 0x8E, 0xE5, 0x80, 0x86, 0xE5, 0x9B, 0x94, 0xE6, 0x86, 0xAB, 0xE5, 0xAD, 0xBD, 0xE9, 0x81,
0x82, 0xE8, 0xAC, 0xAC, 0xE5, 0x98, 0x8E, 0xE2, 0x96, 0xA0, 0xE5, 0xBB, 0x88, 0xE6, 0x8B, 0x93,
0xE6, 0xAE, 0xBF, 0xE5, 0x88, 0xB7, 0xE6, 0x99, 0xB4, 0xE6, 0xAF, 0x8B, 0xE5, 0xBA, 0xB8, 0xE5,
0x98, 0x81, 0xE6, 0x9A, 0xAE, 0xE5, 0x9D, 0x8D, 0xE7, 0x9E, 0xA0, 0xE9, 0x8A, 0x85, 0xE8, 0x8F,
0x81, 0xE7, 0x8B, 0x90, 0xE7, 0xBA, 0x9C, 0xE5, 0xA8, 0x83, 0xE7, 0x99, 0xB1, 0xE7, 0x98, 0x93,
0xE6, 0x9B, 0xB0, 0xE6, 0x81, 0x86, 0xE7, 0xA5, 0x9F, 0xE5, 0xB8, 0xB7, 0xE5, 0x8D, 0x9C, 0xE6,
0x98, 0x8C, 0xE9, 0x9C, 0x9C, 0xE2, 0x84, 0xAB, 0xE6, 0x8B, 0x86, 0xE5, 0xB7, 0xAB, 0xE5, 0xB3,
0xB6, 0xE5, 0xB6, 0xBC, 0xE6, 0xA4, 0xB0, 0xE7, 0x9A, 0x87, 0xE7, 0xBF, 0xA9, 0xE6, 0x92, 0xA9,
0xE5, 0xBC, 0xA6, 0xE9, 0xB6, 0xB4, 0xE5, 0xBB, 0x82, 0xE5, 0x99, 0xA5, 0xE8, 0xB6, 0xA8, 0xE5,
0xA5, 0xB4, 0xE5, 0x8C, 0xAA, 0xE5, 0x92, 0xB1, 0xE5, 0x80, 0x94, 0xE3, 0x81, 0xAE, 0xE3, 0x82,
0xA2, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0xA2, 0xE3, 0x83, 0xAA, 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0xA6,
0xE3, 0x82, 0xB9, 0xE3, 0x83, 0xA9, 0xE3, 0x81, 0x84, 0xE8, 0xBE, 0xBA, 0xE3, 0x83, 0x9E, 0xE3,
0x82, 0xA4, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x84, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0x8D, 0xE3, 0x82,
0xAD, 0xE3, 0x83, 0xA3, 0xE3, 0x82, 0xBB, 0xE3, 0x83, 0x9C, 0xE3, 0x82, 0xA8, 0xE5, 0x9B, 0xB3,
0xE6, 0x88, 0xA6, 0xE5, 0x86, 0x85, 0xE7, 0xA0, 0xA6, 0xE7, 0xB8, 0xA6, 0xE6, 0x97, 0xA7, 0xE3,
0x82, 0xAC, 0xE3, 0x83, 0xAC, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xB8, 0xE3, 0x82, 0xB0, 0xE3, 0x83,
0x8B, 0xE3, 0x83, 0x90, 0xE3, 0x83, 0x81, 0xE3, 0x82, 0xA7, 0xE5, 0xBA, 0x83, 0xE5, 0xBB, 0x83,
0xE3, 0x83, 0x91, 0xE3, 0x82, 0xAA, 0xE3, 0x83, 0x95, 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0x80, 0xE3,
0x82, 0xAF, 0xE6, 0xB0, 0xB7, 0xE7, 0x94, 0xBA, 0xE9, 0x89, 0xB1, 0xE3, 0x81, 0x97, 0xE3, 0x81,
0x82, 0xE3, 0x82, 0x8A, 0xE5, 0x8C, 0xBB, 0xE5, 0xAD, 0xA6, 0xE8, 0xA6, 0xB3, 0xE6, 0xB3, 0x8A,
0xE3, 0x83, 0x9F, 0xE3, 0x82, 0xB7, 0xE3, 0x83, 0xA5, 0xE3, 0x83, 0xA0, 0xE3, 0x83, 0x9B, 0xE3,
0x83, 0x86, 0xE5, 0xAF, 0xBA, 0xE9, 0xAC, 0x83, 0xE6, 0x85, 0xA7, 0xE8, 0x80, 0xBF, 0xE6, 0xB0,
0x93, 0xE6, 0xA3, 0x89, 0xE5, 0xBF, 0xA4, 0xE5, 0x9E, 0xAE, 0xE8, 0x8C, 0xAC, 0xE8, 0x82, 0x98,
0xE8, 0x83, 0xA5, 0xE7, 0xA2, 0xB4, 0xE6, 0xA2, 0xB3, 0xE8, 0x87, 0x86, 0xE5, 0xAA, 0x9A, 0xE8,
0xA0, 0x91, 0xE8, 0x9E, 0x88, 0xE6, 0xB7, 0xB9, 0xE8, 0x95, 0xAD, 0xE5, 0x95, 0xA5, 0xE5, 0x8D,
0x92, 0xE5, 0xBB, 0x9D, 0xE8, 0x82, 0x96, 0xE5, 0x98, 0x88, 0xE6, 0xB8, 0xA3, 0xE6, 0xBB, 0x93,
0xE5, 0x94, 0xAC, 0xE5, 0xAC, 0x89, 0xE4, 0xBF, 0x8F, 0xE6, 0xBE, 0x84, 0xE5, 0x90, 0x9D, 0xE6,
0xB0, 0x8F, 0xE6, 0xB3, 0xB3, 0xE8, 0x89, 0xA6, 0xE9, 0xA2, 0xAF, 0xE7, 0x8F, 0x80, 0xE6, 0xB7,
0x8C, 0xE7, 0xAB, 0xA3, 0xE8, 0x9C, 0x83, 0xE7, 0xA8, 0xBF, 0xE9, 0xA3, 0xAA, 0xEF, 0xBD, 0x99,
0xEF, 0xBD, 0x82, 0xE5, 0x8A, 0x89, 0xE6, 0x95, 0x9D, 0xE9, 0x8B, 0x8C, 0xE9, 0xA1, 0x9B, 0xE2,
0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84,
0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB,
0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2,
0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84,
0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB,
0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2,
0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84,
0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB,
0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2,
0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84,
0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB,
0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2,
0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE2, 0x84, 0xAB, 0xE9, 0x85, 0xB5, 0xE5, 0xBF, 0xB1, 0xE8, 0x95,
0x8E, 0xE7, 0x9F, 0x97, 0xE9, 0x81, 0x8F, 0xE7, 0x98, 0x9F, 0xE4, 0xBD, 0x90, 0xE6, 0x88, 0x9A,
0xE5, 0x8B, 0x98, 0xE6, 0x9D, 0xBE, 0xE8, 0xBB, 0xBC, 0xE8, 0x85, 0xAE, 0xE5, 0x9D, 0xAA, 0xE7,
0xB5, 0xAE, 0xE6, 0x81, 0x8D, 0xE6, 0x83, 0x9A, 0xE7, 0xB0, 0xBF, 0xE8, 0xB3, 0x80, 0xE7, 0xAA,
0x92,
"""The test for the here_travel_time sensor platform."""
import logging
from unittest.mock import patch
import urllib
import herepy
import pytest
from homeassistant.components.here_travel_time.sensor import (
ATTR_ATTRIBUTION,
ATTR_DESTINATION,
ATTR_DESTINATION_NAME,
ATTR_DISTANCE,
ATTR_DURATION,
ATTR_DURATION_IN_TRAFFIC,
ATTR_ORIGIN,
ATTR_ORIGIN_NAME,
ATTR_ROUTE,
CONF_MODE,
CONF_TRAFFIC_MODE,
CONF_UNIT_SYSTEM,
ICON_BICYCLE,
ICON_CAR,
ICON_PEDESTRIAN,
ICON_PUBLIC,
ICON_TRUCK,
NO_ROUTE_ERROR_MESSAGE,
ROUTE_MODE_FASTEST,
ROUTE_MODE_SHORTEST,
SCAN_INTERVAL,
TRAFFIC_MODE_DISABLED,
TRAFFIC_MODE_ENABLED,
TRAVEL_MODE_BICYCLE,
TRAVEL_MODE_CAR,
TRAVEL_MODE_PEDESTRIAN,
TRAVEL_MODE_PUBLIC,
TRAVEL_MODE_PUBLIC_TIME_TABLE,
TRAVEL_MODE_TRUCK,
UNIT_OF_MEASUREMENT,
)
from homeassistant.const import ATTR_ICON, EVENT_HOMEASSISTANT_START
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed, load_fixture
DOMAIN = "sensor"
PLATFORM = "here_travel_time"
APP_ID = "test"
APP_CODE = "test"
TRUCK_ORIGIN_LATITUDE = "41.9798"
TRUCK_ORIGIN_LONGITUDE = "-87.8801"
TRUCK_DESTINATION_LATITUDE = "41.9043"
TRUCK_DESTINATION_LONGITUDE = "-87.9216"
BIKE_ORIGIN_LATITUDE = "41.9798"
BIKE_ORIGIN_LONGITUDE = "-87.8801"
BIKE_DESTINATION_LATITUDE = "41.9043"
BIKE_DESTINATION_LONGITUDE = "-87.9216"
CAR_ORIGIN_LATITUDE = "38.9"
CAR_ORIGIN_LONGITUDE = "-77.04833"
CAR_DESTINATION_LATITUDE = "39.0"
CAR_DESTINATION_LONGITUDE = "-77.1"
def _build_mock_url(origin, destination, modes, app_id, app_code, departure):
"""Construct a url for HERE."""
base_url = "https://route.cit.api.here.com/routing/7.2/calculateroute.json?"
parameters = {
"waypoint0": f"geo!{origin}",
"waypoint1": f"geo!{destination}",
"mode": ";".join(str(herepy.RouteMode[mode]) for mode in modes),
"app_id": app_id,
"app_code": app_code,
"departure": departure,
}
url = base_url + urllib.parse.urlencode(parameters)
return url
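# Example of the kind of URL this helper yields (illustrative; the exact mode
# strings come from herepy.RouteMode, and urlencode escapes '!' and ','):
#   _build_mock_url("38.9,-77.04833", "39.0,-77.1", modes, "test", "test", "now")
#   -> "https://route.cit.api.here.com/routing/7.2/calculateroute.json?"
#      "waypoint0=geo%2138.9%2C-77.04833&waypoint1=geo%2139.0%2C-77.1&mode=...&..."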
def _assert_truck_sensor(sensor):
"""Assert that states and attributes are correct for truck_response."""
assert sensor.state == "14"
assert sensor.attributes.get("unit_of_measurement") == UNIT_OF_MEASUREMENT
assert sensor.attributes.get(ATTR_ATTRIBUTION) is None
assert sensor.attributes.get(ATTR_DURATION) == 13.533333333333333
assert sensor.attributes.get(ATTR_DISTANCE) == 13.049
assert sensor.attributes.get(ATTR_ROUTE) == (
"I-190; I-294 S - Tri-State Tollway; I-290 W - Eisenhower Expy W; "
"IL-64 W - E North Ave; I-290 E - Eisenhower Expy E; I-290"
)
assert sensor.attributes.get(CONF_UNIT_SYSTEM) == "metric"
assert sensor.attributes.get(ATTR_DURATION_IN_TRAFFIC) == 13.533333333333333
assert sensor.attributes.get(ATTR_ORIGIN) == ",".join(
[TRUCK_ORIGIN_LATITUDE, TRUCK_ORIGIN_LONGITUDE]
)
assert sensor.attributes.get(ATTR_DESTINATION) == ",".join(
[TRUCK_DESTINATION_LATITUDE, TRUCK_DESTINATION_LONGITUDE]
)
assert sensor.attributes.get(ATTR_ORIGIN_NAME) == ""
assert sensor.attributes.get(ATTR_DESTINATION_NAME) == "Eisenhower Expy E"
assert sensor.attributes.get(CONF_MODE) == TRAVEL_MODE_TRUCK
assert sensor.attributes.get(CONF_TRAFFIC_MODE) is False
assert sensor.attributes.get(ATTR_ICON) == ICON_TRUCK
@pytest.fixture
def requests_mock_credentials_check(requests_mock):
"""Add the url used in the api validation to all requests mock."""
modes = [ROUTE_MODE_FASTEST, TRAVEL_MODE_CAR, TRAFFIC_MODE_DISABLED]
response_url = _build_mock_url(
",".join([CAR_ORIGIN_LATITUDE, CAR_ORIGIN_LONGITUDE]),
",".join([CAR_DESTINATION_LATITUDE, CAR_DESTINATION_LONGITUDE]),
modes,
APP_ID,
APP_CODE,
"now",
)
requests_mock.get(
response_url, text=load_fixture("here_travel_time/car_response.json")
)
return requests_mock
@pytest.fixture
def requests_mock_truck_response(requests_mock_credentials_check):
"""Return a requests_mock for truck respones."""
modes = [ROUTE_MODE_FASTEST, TRAVEL_MODE_TRUCK, TRAFFIC_MODE_DISABLED]
response_url = _build_mock_url(
",".join([TRUCK_ORIGIN_LATITUDE, TRUCK_ORIGIN_LONGITUDE]),
",".join([TRUCK_DESTINATION_LATITUDE, TRUCK_DESTINATION_LONGITUDE]),
modes,
APP_ID,
APP_CODE,
"now",
)
requests_mock_credentials_check.get(
response_url, text=load_fixture("here_travel_time/truck_response.json")
)
@pytest.fixture
def requests_mock_car_disabled_response(requests_mock_credentials_check):
"""Return a requests_mock for truck respones."""
modes = [ROUTE_MODE_FASTEST, TRAVEL_MODE_CAR, TRAFFIC_MODE_DISABLED]
response_url = _build_mock_url(
",".join([CAR_ORIGIN_LATITUDE, CAR_ORIGIN_LONGITUDE]),
",".join([CAR_DESTINATION_LATITUDE, CAR_DESTINATION_LONGITUDE]),
modes,
APP_ID,
APP_CODE,
"now",
)
requests_mock_credentials_check.get(
response_url, text=load_fixture("here_travel_time/car_response.json")
)
async def test_car(hass, requests_mock_car_disabled_response):
"""Test that car works."""
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": CAR_ORIGIN_LATITUDE,
"origin_longitude": CAR_ORIGIN_LONGITUDE,
"destination_latitude": CAR_DESTINATION_LATITUDE,
"destination_longitude": CAR_DESTINATION_LONGITUDE,
"app_id": APP_ID,
"app_code": APP_CODE,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
assert sensor.state == "30"
assert sensor.attributes.get("unit_of_measurement") == UNIT_OF_MEASUREMENT
assert sensor.attributes.get(ATTR_ATTRIBUTION) is None
assert sensor.attributes.get(ATTR_DURATION) == 30.05
assert sensor.attributes.get(ATTR_DISTANCE) == 23.903
assert sensor.attributes.get(ATTR_ROUTE) == (
"US-29 - K St NW; US-29 - Whitehurst Fwy; "
"I-495 N - Capital Beltway; MD-187 S - Old Georgetown Rd"
)
assert sensor.attributes.get(CONF_UNIT_SYSTEM) == "metric"
assert sensor.attributes.get(ATTR_DURATION_IN_TRAFFIC) == 31.016666666666666
assert sensor.attributes.get(ATTR_ORIGIN) == ",".join(
[CAR_ORIGIN_LATITUDE, CAR_ORIGIN_LONGITUDE]
)
assert sensor.attributes.get(ATTR_DESTINATION) == ",".join(
[CAR_DESTINATION_LATITUDE, CAR_DESTINATION_LONGITUDE]
)
assert sensor.attributes.get(ATTR_ORIGIN_NAME) == "22nd St NW"
assert sensor.attributes.get(ATTR_DESTINATION_NAME) == "Service Rd S"
assert sensor.attributes.get(CONF_MODE) == TRAVEL_MODE_CAR
assert sensor.attributes.get(CONF_TRAFFIC_MODE) is False
assert sensor.attributes.get(ATTR_ICON) == ICON_CAR
# Test traffic mode disabled
assert sensor.attributes.get(ATTR_DURATION) != sensor.attributes.get(
ATTR_DURATION_IN_TRAFFIC
)
async def test_traffic_mode_enabled(hass, requests_mock_credentials_check):
"""Test that traffic mode enabled works."""
modes = [ROUTE_MODE_FASTEST, TRAVEL_MODE_CAR, TRAFFIC_MODE_ENABLED]
response_url = _build_mock_url(
",".join([CAR_ORIGIN_LATITUDE, CAR_ORIGIN_LONGITUDE]),
",".join([CAR_DESTINATION_LATITUDE, CAR_DESTINATION_LONGITUDE]),
modes,
APP_ID,
APP_CODE,
"now",
)
requests_mock_credentials_check.get(
response_url, text=load_fixture("here_travel_time/car_enabled_response.json")
)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": CAR_ORIGIN_LATITUDE,
"origin_longitude": CAR_ORIGIN_LONGITUDE,
"destination_latitude": CAR_DESTINATION_LATITUDE,
"destination_longitude": CAR_DESTINATION_LONGITUDE,
"app_id": APP_ID,
"app_code": APP_CODE,
"traffic_mode": True,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
# Test traffic mode enabled
assert sensor.attributes.get(ATTR_DURATION) != sensor.attributes.get(
ATTR_DURATION_IN_TRAFFIC
)
async def test_imperial(hass, requests_mock_car_disabled_response):
"""Test that imperial units work."""
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": CAR_ORIGIN_LATITUDE,
"origin_longitude": CAR_ORIGIN_LONGITUDE,
"destination_latitude": CAR_DESTINATION_LATITUDE,
"destination_longitude": CAR_DESTINATION_LONGITUDE,
"app_id": APP_ID,
"app_code": APP_CODE,
"unit_system": "imperial",
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
assert sensor.attributes.get(ATTR_DISTANCE) == 14.852635608048994
async def test_route_mode_shortest(hass, requests_mock_credentials_check):
"""Test that route mode shortest works."""
origin = "38.902981,-77.048338"
destination = "39.042158,-77.119116"
modes = [ROUTE_MODE_SHORTEST, TRAVEL_MODE_CAR, TRAFFIC_MODE_DISABLED]
response_url = _build_mock_url(origin, destination, modes, APP_ID, APP_CODE, "now")
requests_mock_credentials_check.get(
response_url, text=load_fixture("here_travel_time/car_shortest_response.json")
)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": origin.split(",")[0],
"origin_longitude": origin.split(",")[1],
"destination_latitude": destination.split(",")[0],
"destination_longitude": destination.split(",")[1],
"app_id": APP_ID,
"app_code": APP_CODE,
"route_mode": ROUTE_MODE_SHORTEST,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
assert sensor.attributes.get(ATTR_DISTANCE) == 18.388
async def test_route_mode_fastest(hass, requests_mock_credentials_check):
"""Test that route mode fastest works."""
origin = "38.902981,-77.048338"
destination = "39.042158,-77.119116"
modes = [ROUTE_MODE_FASTEST, TRAVEL_MODE_CAR, TRAFFIC_MODE_ENABLED]
response_url = _build_mock_url(origin, destination, modes, APP_ID, APP_CODE, "now")
requests_mock_credentials_check.get(
response_url, text=load_fixture("here_travel_time/car_enabled_response.json")
)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": origin.split(",")[0],
"origin_longitude": origin.split(",")[1],
"destination_latitude": destination.split(",")[0],
"destination_longitude": destination.split(",")[1],
"app_id": APP_ID,
"app_code": APP_CODE,
"traffic_mode": True,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
assert sensor.attributes.get(ATTR_DISTANCE) == 23.381
async def test_truck(hass, requests_mock_truck_response):
"""Test that truck works."""
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": TRUCK_ORIGIN_LATITUDE,
"origin_longitude": TRUCK_ORIGIN_LONGITUDE,
"destination_latitude": TRUCK_DESTINATION_LATITUDE,
"destination_longitude": TRUCK_DESTINATION_LONGITUDE,
"app_id": APP_ID,
"app_code": APP_CODE,
"mode": TRAVEL_MODE_TRUCK,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
_assert_truck_sensor(sensor)
async def test_public_transport(hass, requests_mock_credentials_check):
"""Test that publicTransport works."""
origin = "41.9798,-87.8801"
destination = "41.9043,-87.9216"
modes = [ROUTE_MODE_FASTEST, TRAVEL_MODE_PUBLIC, TRAFFIC_MODE_DISABLED]
response_url = _build_mock_url(origin, destination, modes, APP_ID, APP_CODE, "now")
requests_mock_credentials_check.get(
response_url, text=load_fixture("here_travel_time/public_response.json")
)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": origin.split(",")[0],
"origin_longitude": origin.split(",")[1],
"destination_latitude": destination.split(",")[0],
"destination_longitude": destination.split(",")[1],
"app_id": APP_ID,
"app_code": APP_CODE,
"mode": TRAVEL_MODE_PUBLIC,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
assert sensor.state == "89"
assert sensor.attributes.get("unit_of_measurement") == UNIT_OF_MEASUREMENT
assert sensor.attributes.get(ATTR_ATTRIBUTION) is None
assert sensor.attributes.get(ATTR_DURATION) == 89.16666666666667
assert sensor.attributes.get(ATTR_DISTANCE) == 22.325
assert sensor.attributes.get(ATTR_ROUTE) == (
"332 - Palmer/Schiller; 332 - Cargo Rd./Delta Cargo; " "332 - Palmer/Schiller"
)
assert sensor.attributes.get(CONF_UNIT_SYSTEM) == "metric"
assert sensor.attributes.get(ATTR_DURATION_IN_TRAFFIC) == 89.16666666666667
assert sensor.attributes.get(ATTR_ORIGIN) == origin
assert sensor.attributes.get(ATTR_DESTINATION) == destination
assert sensor.attributes.get(ATTR_ORIGIN_NAME) == "Mannheim Rd"
assert sensor.attributes.get(ATTR_DESTINATION_NAME) == ""
assert sensor.attributes.get(CONF_MODE) == TRAVEL_MODE_PUBLIC
assert sensor.attributes.get(CONF_TRAFFIC_MODE) is False
assert sensor.attributes.get(ATTR_ICON) == ICON_PUBLIC
async def test_public_transport_time_table(hass, requests_mock_credentials_check):
"""Test that publicTransportTimeTable works."""
origin = "41.9798,-87.8801"
destination = "41.9043,-87.9216"
modes = [ROUTE_MODE_FASTEST, TRAVEL_MODE_PUBLIC_TIME_TABLE, TRAFFIC_MODE_DISABLED]
response_url = _build_mock_url(origin, destination, modes, APP_ID, APP_CODE, "now")
requests_mock_credentials_check.get(
response_url,
text=load_fixture("here_travel_time/public_time_table_response.json"),
)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": origin.split(",")[0],
"origin_longitude": origin.split(",")[1],
"destination_latitude": destination.split(",")[0],
"destination_longitude": destination.split(",")[1],
"app_id": APP_ID,
"app_code": APP_CODE,
"mode": TRAVEL_MODE_PUBLIC_TIME_TABLE,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
assert sensor.state == "80"
assert sensor.attributes.get("unit_of_measurement") == UNIT_OF_MEASUREMENT
assert sensor.attributes.get(ATTR_ATTRIBUTION) is None
assert sensor.attributes.get(ATTR_DURATION) == 79.73333333333333
assert sensor.attributes.get(ATTR_DISTANCE) == 14.775
assert sensor.attributes.get(ATTR_ROUTE) == (
"330 - Archer/Harlem (Terminal); 309 - Elmhurst Metra Station"
)
assert sensor.attributes.get(CONF_UNIT_SYSTEM) == "metric"
assert sensor.attributes.get(ATTR_DURATION_IN_TRAFFIC) == 79.73333333333333
assert sensor.attributes.get(ATTR_ORIGIN) == origin
assert sensor.attributes.get(ATTR_DESTINATION) == destination
assert sensor.attributes.get(ATTR_ORIGIN_NAME) == "<NAME>"
assert sensor.attributes.get(ATTR_DESTINATION_NAME) == ""
assert sensor.attributes.get(CONF_MODE) == TRAVEL_MODE_PUBLIC_TIME_TABLE
assert sensor.attributes.get(CONF_TRAFFIC_MODE) is False
assert sensor.attributes.get(ATTR_ICON) == ICON_PUBLIC
async def test_pedestrian(hass, requests_mock_credentials_check):
"""Test that pedestrian works."""
origin = "41.9798,-87.8801"
destination = "41.9043,-87.9216"
modes = [ROUTE_MODE_FASTEST, TRAVEL_MODE_PEDESTRIAN, TRAFFIC_MODE_DISABLED]
response_url = _build_mock_url(origin, destination, modes, APP_ID, APP_CODE, "now")
requests_mock_credentials_check.get(
response_url, text=load_fixture("here_travel_time/pedestrian_response.json")
)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": origin.split(",")[0],
"origin_longitude": origin.split(",")[1],
"destination_latitude": destination.split(",")[0],
"destination_longitude": destination.split(",")[1],
"app_id": APP_ID,
"app_code": APP_CODE,
"mode": TRAVEL_MODE_PEDESTRIAN,
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.test")
assert sensor.state == "211"
assert sensor.attributes.get("unit_of_measurement") == UNIT_OF_MEASUREMENT
assert sensor.attributes.get(ATTR_ATTRIBUTION) is None
assert sensor.attributes.get(ATTR_DURATION) == 210.51666666666668
assert sensor.attributes.get(ATTR_DISTANCE) == 12.533
assert sensor.attributes.get(ATTR_ROUTE) == (
"Mannheim Rd; W Belmont Ave; Cullerton St; E Fullerton Ave; "
"La Porte Ave; E Palmer Ave; N Railroad Ave; W North Ave; "
"E North Ave; E Third St"
)
assert sensor.attributes.get(CONF_UNIT_SYSTEM) == "metric"
assert sensor.attributes.get(ATTR_DURATION_IN_TRAFFIC) == 210.51666666666668
assert sensor.attributes.get(ATTR_ORIGIN) == origin
assert sensor.attributes.get(ATTR_DESTINATION) == destination
assert sensor.attributes.get(ATTR_ORIGIN_NAME) == "Mannheim Rd"
assert sensor.attributes.get(ATTR_DESTINATION_NAME) == ""
assert sensor.attributes.get(CONF_MODE) == TRAVEL_MODE_PEDESTRIAN
assert sensor.attributes.get(CONF_TRAFFIC_MODE) is False
assert sensor.attributes.get(ATTR_ICON) == ICON_PEDESTRIAN
async def test_bicycle(hass, requests_mock_credentials_check):
"""Test that bicycle works."""
origin = "41.9798,-87.8801"
destination = "41.9043,-87.9216"
modes = [ROUTE_MODE_FASTEST, TRAVEL_MODE_BICYCLE, TRAFFIC_MODE_DISABLED]
response_url = _build_mock_url(origin, destination, modes, APP_ID, APP_CODE, "now")
requests_mock_credentials_check.get(
response_url, text=load_fixture("here_travel_time/bike_response.json")
)
config = {
DOMAIN: {
"platform": PLATFORM,
"name": "test",
"origin_latitude": origin.split(",")[0],
"origin_longitude": origin.split(",")[1],
"destination_latitude": destination.split(",")[0],
"destination_longitude": destination.split(",")[1],
"app_id": APP_ID,
"app_code": APP_CODE,
"mode": TRAVEL_MODE_BICYCLE,
}
}
assert await async_setup_component(hass,
"""Module that handles shared information for all network objects."""
import xml.etree.ElementTree as et
import numbers
import numpy as np
import pandas as pd
import scipy.sparse as sps
from paminco.utils.readin import parse_number, xml_find_root
from paminco.utils.misc import Cache
from paminco.utils.typing import sparse_format, is_int, is_iterable, IntEnum2
import paminco._doc as _doc
ID_UNMAPPED = -9999
LBL_UNMAPPED = "Invalid"
class FlowDirection(IntEnum2):
"""Enum defining the type flow for the graph."""
DIRECTED = 0
"""All edges can only take flow >= 0."""
UNDIRECTED = 1
"""All edges can take any flow."""
MIXED = 2
"""Some edges may only take flow >= 0."""
class Edges:
"""
Class that contains the edges/links of a network.
An edges object can be instantiated in several ways:
Edges(e)
where ``e`` is an Edges object. Data in ``e`` will be
copied if specified by parameter ``copy``.
Edges(st)
where ``st`` is array_like. Parameter ``st`` is converted to
ndarray and is expected to be of shape (m, 2). Entries can be
node indices or node labels specifying an edge. If labels
are given, indices are derived via ``map_labels_to_indices``;
if indices are given, labels are derived via
``map_indices_to_labels``. Edge bounds are determined by the
parameter ``directed_flow``.
Edges((st, bounds))
where ``st`` is array_like and ``bounds`` is tuple (lower,
upper) specifying bounds used for all edges or array_like
of shape (m, 2) marking individual bounds for all edges.
Edges((labels, indices, bounds))
where ``labels``, ``indices`` are array_like of shape
(m, 2) and ``bounds`` is tuple (lower, upper) specifying
bounds used for all edges or array_like of shape (m, 2)
containing individual bounds for all edges.
Parameters
----------
data : ndarray, or tuple of ndarray
Edge data.
directed_flow : bool, default=True
Controls default values for ``None`` in bounds. If ``True``, lower
bounds are set to 0 and ``False`` to -inf. Missing upper bounds are
set to inf.
map_labels_to_indices : None, bool, dict, or callable, default=True
Determines mapping of labels to indices if no indices are
given. If ``None`` or ``False``, indices of edges will be set
to -9999, denoting invalid edge indices. If ``dict``,
labels will be mapped by this dict. If ``True``, node indices
are set to 0, 1, ..., n-1. If ``callable``, use callable with
signature ``indices = callable(labels)``.
map_indices_to_labels : None, bool, dict, or callable, default=True
Determines mapping of indices to labels if no labels are
given. If ``None`` or ``False``, labels of edges will be set
to 'Invalid', denoting invalid edge labels. If ``dict``,
indices will be mapped by this dict. If ``True``, node labels
are set to node indices as str. If ``callable``, use callable
with signature ``labels = callable(indices)``.
dtype_float : dtype, default=numpy.float_
Datatype for edge bounds.
dtype_int : dtype, default=int
Datatype for edge indices.
copy : bool, default=False
Whether to create a copy of the inputs in data.
Attributes
----------
flow_directions : ndarray
Ndarray of shape (m, ). A ``-1`` denotes an edge with lb < 0 and
ub <= 0. A ``0`` denotes an edge with lb < 0 and ub > 0.
A ``1`` denotes an edge with lb >=0 and ub > 0.
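Examples
--------
A minimal sketch with toy data (an illustration based on the
construction forms above, not taken from the package docs): labels
are mapped to indices automatically, and directed edges default to
bounds (0, inf).
>>> edges = Edges([("a", "b"), ("b", "c")])
>>> edges = Edges(([(0, 1), (1, 2)], (0, 10)))  # indices, shared bounds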
"""
def __init__(
self,
data,
directed_flow: bool = True,
map_labels_to_indices=True, # optional
map_indices_to_labels=True, # optional
dtype_float=None,
dtype_int=None,
copy: bool = False,
) -> None:
# Collect kwargs
kw = {
"directed_flow": directed_flow,
"map_labels_to_indices": map_labels_to_indices,
"map_indices_to_labels": map_indices_to_labels,
"dtype_float": dtype_float,
"dtype_int": dtype_int,
"copy": copy,
}
if isinstance(data, Edges):
d = (data.labels, data.indices, data.bounds)
return self.__init__(d,
dtype_float=data.dtype_float,
dtype_int=data.dtype_int)
elif isinstance(data, tuple):
if len(data) == 3:
pass
elif len(data) == 2:
# (labels or indices, bounds)
st, bounds = data
st = np.array(st)
if st.dtype.kind in {'U', 'S'}:
# (labels, bounds)
if isinstance(map_labels_to_indices, dict):
st_ids = np.vectorize(map_labels_to_indices.__getitem__)(st)
elif map_labels_to_indices is True:
# Automap labels
# Get unique labels and sort them if quasi-ints
unique_lbl = np.unique(st)
try:
unique_lbl = sorted(unique_lbl, key=int)
except ValueError:
pass
d = dict(zip(unique_lbl, np.arange(len(unique_lbl))))
st_ids = np.vectorize(d.__getitem__)(st)
elif map_labels_to_indices is None or map_labels_to_indices is False:
# Set to invalid indices
st_ids = np.full(st.shape, ID_UNMAPPED, dtype=int)
else:
# Map labels by callable
st_ids = map_labels_to_indices(st)
return self.__init__((st, st_ids, bounds), **kw)
elif issubclass(st.dtype.type, numbers.Integral):
# (indices, bounds)
unique_st = np.unique(st)
if np.array_equal(np.sort(unique_st), np.arange(len(unique_st))) is False:
raise ValueError(f"Indices must be all integers from 0 to {len(unique_st) - 1}.")
if isinstance(map_indices_to_labels, dict):
st_lbl = np.vectorize(map_indices_to_labels.__getitem__)(st)
elif map_indices_to_labels is True:
st_lbl = st.astype(str)
elif map_indices_to_labels is None or map_indices_to_labels is False:
# Set to invalid indices
st_lbl = np.full(st.shape, LBL_UNMAPPED)
else:
st_lbl = map_indices_to_labels(st)
return self.__init__((st_lbl, st, bounds), **kw)
else:
raise ValueError(f"Invalid edge data: {data}.")
else:
raise ValueError(f"Invalid edge data: {data}.")
else:
# Only labels or indices given -> build lower and upper bounds by directed
if directed_flow is True:
return self.__init__((data, (0, np.inf)), **kw)
return self.__init__((data, (-np.inf, np.inf)), **kw)
# Handle datatypes
if dtype_float is None:
dtype_float = np.float64
if dtype_int is None:
dtype_int = int
self._dtype_float = dtype_float
self._dtype_int = dtype_int
# Unpack data
labels, indices, bounds = data
self.labels = np.array(labels, dtype=str, copy=copy)
self.indices = np.array(indices, dtype=dtype_int, copy=copy)
# Broadcast bounds if lower, upper for all edges given
if not isinstance(bounds, np.ndarray):
bounds = np.array(bounds)
if len(bounds.shape) == 1:
bounds = bounds.reshape(1, -1)
bounds = np.repeat(bounds, len(labels), axis=0)
# Handle 'None' bounds
bkw = {"posinf": np.inf, "neginf": -np.inf}
self.bounds = np.array(bounds, dtype=dtype_float, copy=copy)
self.bounds[:, 1] = np.nan_to_num(self.bounds[:, 1],
copy=False,
nan=np.inf,
**bkw)
if directed_flow is True:
self.bounds[:, 0] = np.nan_to_num(self.bounds[:, 0],
copy=False,
nan=0.,
**bkw)
else:
self.bounds[:, 0] = np.nan_to_num(self.bounds[:, 0],
copy=False,
nan=-np.inf,
**bkw)
# Check consistency of labels, indices and bounds
if self.labels.shape[1] != 2:
raise ValueError(
f"Invalid edge data, labels are of shape {self.labels.shape}."
)
        if not (self.labels.shape == self.indices.shape == self.bounds.shape):
raise ValueError(
"Inconsistent shapes. "
f"Labels: {self.labels.shape}, "
f"indices: {self.indices.shape}, "
f"bounds: {self.bounds.shape}."
)
# Set edge directions and get type of graph
self.flow_directions = np.zeros(len(self))
self.flow_directions[self.lb < 0] -= 1
self.flow_directions[self.ub > 0] += 1
if len(self.flow_undirected) == len(self):
self.flow_dir = FlowDirection.UNDIRECTED
elif len(self.flow_undirected) == 0:
self.flow_dir = FlowDirection.DIRECTED
else:
self.flow_dir = FlowDirection.MIXED
self.cache = Cache()
def __eq__(self, other) -> bool:
for att in ["labels", "indices", "bounds"]:
            if not np.array_equal(getattr(self, att), getattr(other, att)):
return False
return True
def __len__(self) -> int:
return len(self.indices)
def __getitem__(self, idx):
if is_iterable(idx):
return [self[i] for i in idx]
return {att: getattr(self, att)[idx]
for att in ["source_lbl", "target_lbl", "s", "t", "lb", "ub"]}
def to_df(self, **kwargs) -> pd.DataFrame:
"""Get object as DataFrame.
Parameters
----------
**kwargs : keyword arguments, optional
Passed to DataFrame constructor.
Returns
-------
df : pandas.DataFrame
Edges with source/target labels, source/target ids, lower
and upper bounds.
"""
data = np.hstack([self.labels, self.indices, self.bounds])
df = pd.DataFrame(data, **kwargs)
df.columns = ["source_lbl", "target_lbl", "s", "t", "lb", "ub"]
df[["source_lbl", "target_lbl"]] = df[["source_lbl", "target_lbl"]].astype(str)
df[["s", "t"]] = df[["s", "t"]].astype(self._dtype_int)
df[["lb", "ub"]] = df[["lb", "ub"]].astype(self._dtype_float)
return df
def get_flow_df(
self,
x,
labels: bool = True,
colname_flow: str = "flow"
) -> pd.DataFrame:
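        """Get edge flows as a DataFrame.

        Parameters
        ----------
        x : scalar or array-like
            Flow value(s); a scalar is broadcast to all edges.
        labels : bool, default=True
            Use source/target labels instead of indices.
        colname_flow : str, default="flow"
            Name of the flow column.
        """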
if isinstance(x, (int, float)):
x = np.full(len(self), x)
if labels is True:
s, t = self.source_lbl, self.target_lbl
dtype = str
else:
s, t = self.s, self.t
dtype = int
df = pd.DataFrame({"source": s,
"target": t,
colname_flow: x})
df[["source", "target"]] = df[["source", "target"]].astype(dtype)
return df
def get_directed(
self,
w=None,
backward_positive: bool = True
) -> tuple:
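        """Expand the edges into purely directed arcs.

        Every edge with ub > 0 yields a forward arc (s, t); every edge
        with lb < 0 additionally yields a backward arc (t, s). If a
        per-edge weight vector ``w`` is given, it is stacked in the same
        arc order and returned as a third element; backward weights are
        negated when ``backward_positive`` is False. The decomposition
        is cached.
        """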
        if not self.cache.is_valid("directed_elements"):
forward = self.ub > 0
backward = self.lb < 0
s_fw, t_fw = self.indices[forward, :].T
t_bw, s_bw = self.indices[backward, :].T
s = np.hstack((s_fw, s_bw))
t = np.hstack((t_fw, t_bw))
self.cache["directed_elements"] = (forward, backward, s, t)
else:
(forward, backward, s, t) = self.cache["directed_elements"]
if w is not None:
w_fw = w[forward]
w_bw = w[backward]
if backward_positive is False:
w_bw = - w_bw
# Stack weight similar to source, target
w = np.hstack((w_fw, w_bw))
return s, t, w
return s, t
def get_duplicate_edges(self) -> np.ndarray:
        # Duplicates -> both s and t are the same
st = pd.Series([str(a) + "-" + str(b) for (a, b) in self.indices])
return np.where(st.duplicated())[0]
    def map_labels(self, d:
Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(network_id=options.get("network_id"),
update_network_sm_device_fields=options.get("update_network_sm_device_fields"))
# Prepare query URL
_url_path = '/networks/{networkId}/sm/device/fields'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'networkId': options.get('network_id', None)
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(options.get('update_network_sm_device_fields')))
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body)
def wipe_network_sm_device(self,
options=dict()):
"""Does a PUT request to /networks/{networkId}/sm/device/wipe.
Wipe a device
Args:
options (dict, optional): Key-value pairs for any of the
parameters to this API Endpoint. All parameters to the
endpoint are supplied through the dictionary with their names
being the key and their desired values being the value. A list
of parameters that can be used are::
network_id -- string -- TODO: type description here.
Example:
wipe_network_sm_device -- WipeNetworkSmDeviceModel --
TODO: type description here. Example:
Returns:
mixed: Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(network_id=options.get("network_id"))
# Prepare query URL
_url_path = '/networks/{networkId}/sm/device/wipe'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'networkId': options.get('network_id', None)
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(options.get('wipe_network_sm_device')))
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body)
def refresh_network_sm_device_details(self,
options=dict()):
"""Does a POST request to /networks/{networkId}/sm/device/{deviceId}/refreshDetails.
Refresh the details of a device
Args:
options (dict, optional): Key-value pairs for any of the
parameters to this API Endpoint. All parameters to the
endpoint are supplied through the dictionary with their names
being the key and their desired values being the value. A list
of parameters that can be used are::
network_id -- string -- TODO: type description here.
Example:
device_id -- string -- TODO: type description here.
Example:
Returns:
void: Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(network_id=options.get("network_id"),
device_id=options.get("device_id"))
# Prepare query URL
_url_path = '/networks/{networkId}/sm/device/{deviceId}/refreshDetails'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'networkId': options.get('network_id', None),
'deviceId': options.get('device_id', None)
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare and execute request
_request = self.http_client.post(_query_url)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
def get_network_sm_devices(self,
options=dict()):
"""Does a GET request to /networks/{networkId}/sm/devices.
List the devices enrolled in an SM network with various specified
fields and filters
Args:
options (dict, optional): Key-value pairs for any of the
parameters to this API Endpoint. All parameters to the
endpoint are supplied through the dictionary with their names
being the key and their desired values being the value. A list
of parameters that can be used are::
network_id -- string -- TODO: type description here.
Example:
fields -- string -- Additional fields that will be
displayed for each device. Multiple fields can be
passed in as comma separated values. The default
fields are: id, name, tags, ssid, wifiMac, osName,
systemModel, uuid, and serialNumber. The additional
fields are: ip, systemType,
availableDeviceCapacity, kioskAppName, biosVersion,
lastConnected, missingAppsCount, userSuppliedAddress,
location, lastUser, ownerEmail, ownerUsername,
publicIp, phoneNumber, diskInfoJson, deviceCapacity,
isManaged, hadMdm, isSupervised, meid, imei, iccid,
simCarrierNetwork, cellularDataUsed, isHotspotEnabled,
createdAt, batteryEstCharge, quarantined, avName,
avRunning, asName, fwName, isRooted,
loginRequired, screenLockEnabled, screenLockDelay,
autoLoginDisabled, autoTags, hasMdm, hasDesktopAgent,
diskEncryptionEnabled, hardwareEncryptionCaps,
passCodeLock, usesHardwareKeystore, and
androidSecurityPatchVersion.
wifi_macs -- string -- Filter devices by wifi mac(s).
Multiple wifi macs can be passed in as comma separated
values.
serials -- string -- Filter devices by serial(s). Multiple
serials can be passed in as comma separated values.
ids -- string -- Filter devices by id(s). Multiple ids can
be passed in as comma separated values.
scope -- string -- Specify a scope (one of all, none,
withAny, withAll, withoutAny, or withoutAll) and a set
of tags as comma separated values.
batch_size -- int -- Number of devices to return, 1000 is
the default as well as the max.
batch_token -- string -- If the network has more devices
than the batch size, a batch token will be returned
as a part of the device list. To see the remainder of
the devices, pass in the batchToken as a parameter in
the next request. Requests made with the
batchToken do not require additional parameters as the
batchToken includes the parameters passed in with
the original request. Additional parameters passed in
with the batchToken will be ignored.
Returns:
mixed: Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(network_id=options.get("network_id"))
# Prepare query URL
_url_path = '/networks/{networkId}/sm/devices'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'networkId': options.get('network_id', None)
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_parameters = {
'fields': options.get('fields', None),
'wifiMacs': options.get('wifi_macs', None),
'serials': options.get('serials', None),
'ids': options.get('ids', None),
'scope': options.get('scope', None),
'batchSize': options.get('batch_size', None),
'batchToken': options.get('batch_token', None)
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
_request = self.http_client.get(_query_url, headers=_headers)
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body)
def checkin_network_sm_devices(self,
options=dict()):
"""Does a PUT request to /networks/{networkId}/sm/devices/checkin.
Force check-in a set of devices
Args:
options (dict, optional): Key-value pairs for any of the
parameters to this API Endpoint. All parameters to the
endpoint are supplied through the dictionary with their names
being the key and their desired values being the value. A list
of parameters that can be used are::
network_id -- string -- TODO: type description here.
Example:
checkin_network_sm_devices -- CheckinNetworkSmDevicesModel
-- TODO: type description here. Example:
Returns:
mixed: Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(network_id=options.get("network_id"))
# Prepare query URL
_url_path = '/networks/{networkId}/sm/devices/checkin'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'networkId': options.get('network_id', None)
})
_query_builder = Configuration.base_uri
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
_request = self.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(options.get('checkin_network_sm_devices')))
CustomHeaderAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body)
def move_network_sm_devices(self,
options=dict()):
"""Does a PUT request to /networks/{networkId}/sm/devices/move.
Move a set of devices to a new network
Args:
options (dict, optional): Key-value pairs for any of the
parameters to this API Endpoint. All parameters to the
endpoint are supplied through the dictionary with their names
being the key and their desired values being the value. A list
of parameters that can be used are::
network_id -- string -- TODO: type description here.
Example:
move_network_sm_devices -- MoveNetworkSmDevicesModel --
TODO: type description here. Example:
Returns:
mixed: Response from the API. Successful operation
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Validate required parameters
self.validate_parameters(network_id=options.get("network_id"),
move_network_sm_devices=options.get("move_network_sm_devices"))
# Prepare query URL
_url_path = '/networks/{networkId}/sm/devices/move'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'networkId': options.get('network_id', None)
})
_query_builder = Configuration.base_uri
"""Provides the hardware abstraction layer"""
from time import sleep
import time
import numpy as np
import pystorm
from pystorm.PyDriver import bddriver as bd
from pystorm.hal.neuromorph.core import Core
from pystorm.hal.neuromorph.core_pars import CORE_PARAMETERS
from pystorm.hal.cal_db import CalibrationDB
import logging
logger = logging.getLogger(__name__)
DIFFUSOR_NORTH_LEFT = bd.bdpars.DiffusorCutLocationId.NORTH_LEFT
DIFFUSOR_NORTH_RIGHT = bd.bdpars.DiffusorCutLocationId.NORTH_RIGHT
DIFFUSOR_WEST_TOP = bd.bdpars.DiffusorCutLocationId.WEST_TOP
DIFFUSOR_WEST_BOTTOM = bd.bdpars.DiffusorCutLocationId.WEST_BOTTOM
# notes that affect nengo BE:
# - send_inputs->set_inputs/get_outputs are different (SpikeFilter/Generator now used)
# - arguments for remap_core/implement_core have changed (use the self.last_mapped objects now)
# - graph.Pool now takes an argument with its encoders.
# Its input dimensionality is now self.dimensions.
# therefore making a connection to a pool will have no weight: the only kind of graph
# connection that should have weights is a connection going into a Bucket.
# not sure when I changed this/if Terry has incorporated changes. I had forgotten my
# original intent.
CORE_ID = 0 # hardcoded for now
_bdp = bd.bdpars.BDPars()
DAC_DEFAULTS = dict(
DAC_SYN_EXC = _bdp.GetDACDefaultCount(bd.bdpars.BDHornEP.DAC_SYN_EXC),
DAC_SYN_DC = _bdp.GetDACDefaultCount(bd.bdpars.BDHornEP.DAC_SYN_DC),
DAC_SYN_INH = _bdp.GetDACDefaultCount(bd.bdpars.BDHornEP.DAC_SYN_INH),
DAC_SYN_LK = _bdp.GetDACDefaultCount(bd.bdpars.BDHornEP.DAC_SYN_LK),
DAC_SYN_PD = _bdp.GetDACDefaultCount(bd.bdpars.BDHornEP.DAC_SYN_PD),
DAC_SYN_PU = _bdp.GetDACDefaultCount(bd.bdpars.BDHornEP.DAC_SYN_PU),
DAC_DIFF_G = _bdp.GetDACDefaultCount(bd.bdpars.BDHornEP.DAC_DIFF_G),
DAC_DIFF_R = _bdp.GetDACDefaultCount(bd.bdpars.BDHornEP.DAC_DIFF_R),
DAC_SOMA_REF = _bdp.GetDACDefaultCount(bd.bdpars.BDHornEP.DAC_SOMA_REF),
DAC_SOMA_OFFSET = _bdp.GetDACDefaultCount(bd.bdpars.BDHornEP.DAC_SOMA_OFFSET),)
class Singleton:
"""Decorator class ensuring that at most one instance of a decorated class exists"""
def __init__(self,klass):
self.klass = klass
self.instance = None
def __call__(self,*args,**kwds):
        if self.instance is None:
self.instance = self.klass(*args,**kwds)
return self.instance
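# A minimal sketch of how the decorator behaves (hypothetical class, not
# part of this module): every call to the decorated class returns the same
# instance, and constructor arguments after the first call are ignored.
#
#   @Singleton
#   class Config:
#       def __init__(self, value):
#           self.value = value
#
#   a = Config(1)
#   b = Config(2)   # instance already exists; args ignored
#   assert a is b and b.value == 1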
@Singleton
class HAL:
"""Hardware Abstraction Layer
Abstracts away the details of the underlying hardware, and presents a
unified API for higher level clients.
Attributes
----------
driver: Instance of pystorm.PyDriver Driver
"""
def __init__(self, use_soft_driver=False):
if use_soft_driver:
self.driver = bd.BDModelDriver()
else:
self.driver = bd.Driver()
# init calibration DB
cal_db_fname_prefix = "/".join(
pystorm.__file__.split('/')[:-2])+"/pystorm/calibration/data/pystorm_cal_db/cal_db"
self.cdb = CalibrationDB(cal_db_fname_prefix)
self.chip_activation = None
self.chip_name = None
# init FPGA from bitfile
okfile = "/".join(
pystorm.__file__.split('/')[:-2])+"/FPGA/quartus/output_files/OKCoreBD.rbf"
self.driver.SetOKBitFile(okfile)
self.start_hardware()
# default time resolution
self.downstream_ns = 10000
self.upstream_ns = 1000000
self.last_mapped_network = None
self.last_mapped_core = None
self.init_hardware()
def init_hardware(self):
logger.info("HAL: clearing hardware state")
# stop spikes before resetting
#self.stop_all_inputs()
self.driver.InitBD()
# DAC settings (should be pretty close to driver defaults)
# magnitude of the three synapse inputs (can be used to balance exc/inh)
# there are scale factors on each of the outputs
# excitatory/8 - DC/16 is the height of the excitatory synapse pulse
# DC/16 - inhibitory/128 is the height of the inhibitory synapse pulse
self.driver.SetDACCount(CORE_ID , bd.bdpars.BDHornEP.DAC_SYN_EXC , DAC_DEFAULTS['DAC_SYN_EXC']) # excitatory level, scaled 1/8
self.driver.SetDACCount(CORE_ID , bd.bdpars.BDHornEP.DAC_SYN_DC , DAC_DEFAULTS['DAC_SYN_DC']) # DC baseline level, scaled 1/16
self.driver.SetDACCount(CORE_ID , bd.bdpars.BDHornEP.DAC_SYN_INH , DAC_DEFAULTS['DAC_SYN_INH']) # inhibitory level, scaled 1/128
# 1/DAC_SYN_LK ~ synaptic time constant, 10 is around .1 ms
self.driver.SetDACCount(CORE_ID , bd.bdpars.BDHornEP.DAC_SYN_LK , DAC_DEFAULTS['DAC_SYN_LK'])
# synapse pulse extender rise time/fall time
# 1/DAC_SYN_PD ~ synapse PE fall time
# 1/DAC_SYN_PU ~ synapse PE rise time
# the synapse is "on" during the fall, and "off" during the rise
# making the rise longer doesn't have much of a practical purpose
# when saturated, fall time/rise time is the peak on/off duty cycle (proportionate to synaptic strength)
# be careful setting these too small, you don't want to saturate the synapse
self.driver.SetDACCount(CORE_ID , bd.bdpars.BDHornEP.DAC_SYN_PD , DAC_DEFAULTS['DAC_SYN_PD'])
self.driver.SetDACCount(CORE_ID , bd.bdpars.BDHornEP.DAC_SYN_PU , DAC_DEFAULTS['DAC_SYN_PU'])
# DAC_DIFF_R / DAC_DIFF_G ratio sets diffusor spread,
# larger (smaller) ratio ~ more (less) spread out
# R ~ conductance of the "sideways" resistors, G ~ conductance of the "downwards" resistors
self.driver.SetDACCount(CORE_ID , bd.bdpars.BDHornEP.DAC_DIFF_G , DAC_DEFAULTS['DAC_DIFF_G'])
self.driver.SetDACCount(CORE_ID , bd.bdpars.BDHornEP.DAC_DIFF_R , DAC_DEFAULTS['DAC_DIFF_R'])
# 1/DAC_SOMA_REF ~ soma refractory period, 10 is around 1 ms
self.driver.SetDACCount(CORE_ID , bd.bdpars.BDHornEP.DAC_SOMA_REF , DAC_DEFAULTS['DAC_SOMA_REF'])
# DAC_SOMA_OFFSET scales the bias twiddle bits
# Ben says that increasing this beyond 10 could cause badness
self.driver.SetDACCount(CORE_ID , bd.bdpars.BDHornEP.DAC_SOMA_OFFSET , DAC_DEFAULTS['DAC_SOMA_OFFSET'])
self.driver.SetTimeUnitLen(self.downstream_ns) # 10 us downstream resolution
self.driver.SetTimePerUpHB(self.upstream_ns) # 1 ms upstream resolution/tag binning
def set_time_resolution(self, downstream_ns=10000, upstream_ns=1000000):
"""Controls Driver/FPGA time resolutions
Parameters
==========
downstream_ns: int (optional)
Controls the fineness of when the FPGA can inject inputs to BD.
Also controls the time resolution of the FPGA tag stream generators
(set_input_rates() periods will be a multiple of this).
upstream_ns: int (optional)
Controls the period of upstream heartbeats from the FPGA.
Every upstream_ns, the FPGA reports the current time.
The Driver uses the most recent HB to timestamp upstream traffic.
Also controls the period with which the FPGA emits filtered outputs.
get_outputs() will have a new entry every upstream_ns.
"""
        self.driver.SetTimeUnitLen(downstream_ns) # downstream resolution (default 10 us)
        self.driver.SetTimePerUpHB(upstream_ns) # upstream resolution/tag binning (default 1 ms)
self.downstream_ns = downstream_ns
self.upstream_ns = upstream_ns
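        # Illustrative call (hypothetical values): finer input timing with
        # coarser output binning.
        #   hal = HAL()
        #   hal.set_time_resolution(downstream_ns=1000,     # 1 us inputs
        #                           upstream_ns=10000000)   # 10 ms output bins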
def __del__(self):
self.stop_hardware()
def get_time(self):
"""Returns the time in nanoseconds"""
return self.driver.GetFPGATime()
def reset_time(self):
self.driver.ResetFPGATime()
def start_hardware(self):
"""Starts the driver"""
comm_state = self.driver.Start()
assert comm_state >= 0, "Comm failed to init"
def stop_hardware(self):
"""Stops the driver"""
self.driver.Stop()
##############################################################################
# Data flow functions #
##############################################################################
def flush(self):
"""Commits any queued up traffic
Inputs to HAL that have a time parameter (set_input_rate(s)()) are not committed
to the hardware until flushed. After being flushed, inputs are committed
to BD in the order of their times. This will block any subsequently flushed
inputs until the maximum previously flushed time has elapsed.
This call (along with the flush parameters of other calls) gives the HAL
user some freedom in ordering their calls.
"""
self.driver.Flush()
def start_traffic(self, flush=True):
"""Start hardware's internal traffic flow"""
self.driver.SetTagTrafficState(CORE_ID, True, flush=False)
self.driver.SetSpikeTrafficState(CORE_ID, True, flush=flush)
def stop_traffic(self, flush=True):
"""Stop hardware's internal traffic flow"""
self.driver.SetTagTrafficState(CORE_ID, False, flush=False)
self.driver.SetSpikeTrafficState(CORE_ID, False, flush=flush)
def enable_output_recording(self, flush=True):
"""Turns on recording from all outputs.
These output values are binned and go into a buffer
that can be drained by calling get_outputs().
"""
N_SF = self.last_mapped_core.FPGASpikeFilters.filters_used
self.driver.SetNumSpikeFilters(CORE_ID, N_SF, flush=flush)
def enable_spike_recording(self, flush=True):
"""Turns on spike recording from all neurons.
These spikes will go into a buffer that can be drained by calling
get_spikes().
"""
self.driver.SetSpikeDumpState(CORE_ID, en=True, flush=flush)
def disable_output_recording(self, flush=True):
"""Turns off recording from all outputs."""
# by setting the number of spike filters to 0, the FPGA SF array
# no longer reports any values
self.driver.SetNumSpikeFilters(CORE_ID, 0, flush=flush)
def disable_spike_recording(self, flush=True):
"""Turns off spike recording from all neurons."""
self.driver.SetSpikeDumpState(CORE_ID, en=False, flush=flush)
def get_overflow_counts(self):
"""prints the total number of FIFO overflows"""
o0, o1 = self.driver.GetFIFOOverflowCounts(CORE_ID)
return o0 + o1
def get_outputs(self, timeout=1000):
"""Returns all binned output tags gathered since this was last called.
Data format: a numpy array of : [(time, output, dim, counts), ...]
Timestamps are in nanoseconds
Counts are the number of tags received since the last report:
Every FPGA time unit, the Spike Filter array loops through N_SF
filters, reports the tallied tag counts since the last report,
and resets each count to 0
Whether or not you get return values is enabled/disabled by
enable/disable_output_recording()
"""
filt_idxs, filt_states, times = self.driver.RecvSpikeFilterStates(CORE_ID, timeout)
outputs, dims, counts = self.last_mapped_network.translate_tags(filt_idxs, filt_states)
return np.array([times, outputs, dims, counts]).T
def get_binned_spikes(self, bin_time_ns):
"""Returns all the pending spikes gathered since this was last called.
Returns one numpy array per pool. Highest performance if binning is ultimately desired.
Inputs:
======
        bin_time_ns: binning interval. All spikes currently queued in the driver will be placed in a bin.
             Be cautious calling this multiple times in succession: bins will not be aligned.
             Using an upstream time resolution equal to the binning interval is recommended, and avoids
             this issue (although bins may still overlap, they will align)
Output:
=======
Data format: tuple(dict of numpy arrays, array of bin times):
({pool0id:[[bin0 data], ..., [binN data]], ..., poolNid:[[bin0 data], ..., [binN data]]},
[bin0 time, ..., binN time])
Timestamps are in nanoseconds
"""
binned_spikes, bin_times = self.driver.RecvBinnedSpikes(CORE_ID, bin_time_ns)
trans_spikes = self.last_mapped_network.translate_binned_spikes(binned_spikes)
return trans_spikes, bin_times
def get_array_outputs(self):
"""Returns all binned output tags gathered since this was last called,
Each Output is associated with an array of values, indexed by time bin index and dimension
Whether or not you get return values is enabled/disabled by
enable/disable_output_recording()
Binning interval is controlled by set_time_resolution()'s upstream_ns parameter.
Outputs:
=======
Data format: tuple({output_id : (np.array of values indexed [time_bin_idx, dimension])},
(np.array of time bin values for all output_ids))
Timestamps are in nanoseconds
"""
N_SF = self.last_mapped_core.FPGASpikeFilters.filters_used
tag_arr, bin_times = self.driver.RecvSpikeFilterStatesArray(CORE_ID, N_SF)
return self.last_mapped_network.translate_tag_array(tag_arr), bin_times
def get_spikes(self):
"""Returns all the pending spikes gathered | |
"""Performing spectral clustering on neural networks and calculating its p-values."""
import numpy as np
import math
#from multiprocessing import Pool
from pathos.multiprocessing import ProcessPool
import scipy.sparse as sparse
from utils import splitter, load_weights, compute_pvalue
from sklearn.cluster import SpectralClustering
from sklearn.neighbors import KernelDensity
import itertools as it
import copy
import pickle
import networkx as nx
import time
import matplotlib.pyplot as plt
#from sacred import Experiment
#from sacred.observers import FileStorageObserver
from collections import Counter, deque
#from utils import splitter, load_weights, compute_pvalue
# import ipdb
"""
from cnn import CNN_MODEL_PARAMS
from cnn.extractor import extract_cnn_weights"""
SHUFFLE_METHODS = ['layer',
'layer_nonzero',
'layer_nonzero_distribution',
'layer_all_distribution']
# set up some sacred stuff
"""clustering_experiment = Experiment('cluster_model')
clustering_experiment.observers.append((FileStorageObserver.create('clustering_runs')))
"""
#@clustering_experiment.config
def my_config():
weights_path = "training_runs_dir/10/pruned.pckl"
num_clusters = 4
eigen_solver = 'amg'
assign_labels = 'kmeans'
epsilon = 1e-8
delete_isolated_ccs_bool = True
with_labels = False
with_shuffle = True
shuffle_method = 'layer'
    # with a different number of workers, the statistics might be slightly different,
    # maybe because of a different order of calls to the random generator?
    # but the statistics are the same for a given `seed` and `n_workers`
n_workers = 10
is_testing = False
with_shuffled_ncuts = False
#@clustering_experiment.named_config
def cnn_config():
network_type = 'cnn'
max_weight_convention = 'one_on_n' # or 'all_one'
input_shape = (28, 28, 1)
conv_layers = CNN_MODEL_PARAMS['conv']
fc_layer_widths = CNN_MODEL_PARAMS['dense']
shuffle_smaller_model = False
num_samples = 120
n_workers = 30
as_sparse = True
#@clustering_experiment.named_config
def mlp_config():
network_type = 'mlp'
shuffle_smaller_model = True
num_samples = 200
as_sparse = False
def mlp_tup_to_int(tup, layer_widths):
# tuple represents (layer, neuron_in_layer). int goes from first
# neuron of first layer to last neuron of last layer.
# both elements of tup are zero-indexed.
layer, node = tup
accum = 0
for (i, width) in enumerate(layer_widths):
if i == layer:
accum += node
break
else:
accum += width
return accum
def mlp_int_to_tup(num, layer_widths):
# inverse of mlp_tup_to_int
accum = num
# have counter that starts off at num, subtract stuff off at
# each layer
for (l, width) in enumerate(layer_widths):
accum -= width
if accum < 0:
return l, accum + width
# this should never happen
return None
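# Example round trip (illustrative): with layer_widths = [3, 4, 2], neuron
# (layer 1, index 2) sits after the 3 neurons of layer 0:
#   mlp_tup_to_int((1, 2), [3, 4, 2])  -> 5
#   mlp_int_to_tup(5, [3, 4, 2])       -> (1, 2)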
def cnn_tup_to_int(tup, layer_shapes):
# tuple represents (layer, num_down, num_across, channel). we treat the
# output of fully connected layers as having one channel and being purely
# horizontal.
layer, down, across, channel = tup
accum = 0
    for (l, shape) in enumerate(layer_shapes):
        if l == layer:
            accum += down * shape[1] * shape[2]
            accum += across * shape[2]
accum += channel
break
else:
accum += np.product(shape)
return accum
# TODO: check that these functions are inverses
def cnn_int_to_tup(num, layer_shapes):
'''
inverse of cnn_tup_to_int (in the first argument)
'''
accum = num
for (l, shape) in enumerate(layer_shapes):
if accum < np.product(shape):
height = shape[0]
width = shape[1]
num_channels = shape[2]
row = int(accum / (width * num_channels))
            accum -= row * width * num_channels
column = int(accum / num_channels)
accum -= column * num_channels
channel = accum
return row, column, channel
else:
accum -= np.product(shape)
# this should never happen
return None
#@clustering_experiment.capture
def layer_info_to_layer_shapes(output_shape, input_shape, conv_layers,
fc_layer_widths):
'''
take in the input shape, a list of dicts specifying info about the
convolutional layers, a list of dicts with the widths of fully connected
layers, and the output shape, and returns an array specifying the shape of
each layer of the network
'''
layer_shapes = [input_shape]
for conv_dict in conv_layers:
# after normal convolutional layer, change the number of channels, but
# keep the width and height (due to SAME padding)
filters = conv_dict['filters']
prev_shape = layer_shapes[-1]
layer_shapes.append((prev_shape[0], prev_shape[1], filters))
if conv_dict['max_pool_after']:
k_size = conv_dict['max_pool_size']
stride = conv_dict['max_pool_size'] # conv_dict['max_pool_stride']
padding = conv_dict['max_pool_padding']
# calculations here are from https://www.corvil.com/kb/what-is-the-difference-between-same-and-valid-padding-in-tf-nn-max-pool-of-tensorflow
if padding == 'same':
new_height = math.ceil(prev_shape[0] / stride[0])
new_width = math.ceil(prev_shape[1] / stride[1])
elif padding == 'valid':
new_height = math.ceil((prev_shape[0] - k_size[0] + 1)
/ stride[0])
new_width = math.ceil((prev_shape[1] - k_size[1] + 1)
/ stride[1])
else:
raise ValueError("max_pool_padding should be same or valid, but instead is " + padding)
            layer_shapes.append((new_height, new_width, filters))  # pooling keeps the channel count
for width in fc_layer_widths:
layer_shapes.append((1, width, 1))
layer_shapes.append(output_shape)
return layer_shapes
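# Illustrative shapes (hypothetical config): input (28, 28, 1), one conv
# layer with 32 filters followed by 2x2 same-padded max pooling, one dense
# layer of width 64, and output shape (1, 10, 1) gives
#   [(28, 28, 1), (28, 28, 32), (14, 14, 32), (1, 64, 1), (1, 10, 1)]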
def weights_to_layer_widths(weights_array):
'''
take in an array of weight matrices, and return how wide each layer of the
network is
'''
layer_widths = []
for weight_mat in weights_array:
layer_widths.append(weight_mat.shape[0])
final_width = weights_array[-1].shape[1]
layer_widths.append(final_width)
return layer_widths
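# e.g. weight matrices with shapes [(784, 256), (256, 10)] give
# layer widths [784, 256, 10].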
#@clustering_experiment.capture
def cnn_layers_to_weights_array(layer_shapes, weight_tensors, conv_layers):
'''
take in an array of layer shapes, information about the convolutional
layers, and an array of weight tensors, and return an array of weight
matrices for the 'unrolled' neural network
'''
# TODO: assert that I'm doing shapes right
weight_matrices = []
layer_shape_stack = deque(layer_shapes)
for i, conv_dict in enumerate(conv_layers):
next_shape = layer_shape_stack.popleft()
conv_tensor = weight_tensors[i]
if conv_dict['max_pool_after']:
_ = layer_shape_stack.popleft()
weight_matrices += conv_layer_to_weight_mats(next_shape, conv_dict,
conv_tensor)
for w_mat in weight_matrices:
assert len(w_mat.shape) == 2, f"conv_layer_to_weight_mats should have output rank 2 tensors, but actually output something with shape {w_mat.shape}"
for j in range(len(conv_layers), len(weight_tensors)):
assert len(weight_tensors[j].shape) == 2, f"in cnn_layers_to_weights_array, should be adding only rank 2 tensors to weight_matrices array, but instead added something with shape {weight_tensors[j].shape}"
weight_matrices.append(weight_tensors[j])
return weight_matrices
# TODO: use my code instead this function
#@clustering_experiment.capture
def conv_layer_to_weight_mats(in_layer_shape, conv_dict, conv_tensor,
                              max_weight_convention='one_on_n'):  # default mirrors cnn_config
'''
take in the shape of the incoming layer, a dict representing info about the
conv operation, and the weight tensor of the convolution, and return an
array of sparse weight matrices representing the operation. the array should
have a single element if layer_dict['max_pool_after']==False, but should have
two (one representing the action of the max pooling) otherwise.
for max pooling, we linearise by connecting the maxed neurons to everything
in their receptive field. if max_weight_convention=='all_one', all the
weights are one, otherwise if max_weight_convention=='one_on_n', the weights
are all one divided by the receptive field size
'''
# TODO: see if vectorisation will work
kernel_height, kernel_width, n_chan_in, n_chan_out = conv_tensor.shape
in_height = in_layer_shape[0]
in_width = in_layer_shape[1]
assert (kernel_height, kernel_width) == tuple(conv_dict['kernel_size']), f"weight tensor info doesn't match conv layer dict info - kernel size from conv_tensor.shape is {(kernel_height, kernel_width)}, but conv_dict says it's {conv_dict['kernel_size']}"
assert n_chan_out == conv_dict['filters'], f"weight tensor info doesn't match conv layer dict info: weight tensor says num channels out is {n_chan_out}, conv dict says it's {conv_dict['filters']}"
assert in_layer_shape[2] == n_chan_in, f"weight tensor info doesn't match previous layer shape: weight tensor says it's {n_chan_in}, prev layer says it's {in_layer_shape[2]}"
kernel_height_centre = int((kernel_height - 1) / 2)
kernel_width_centre = int((kernel_width - 1) / 2)
in_layer_size = np.product(in_layer_shape)
out_layer_shape = (in_height, in_width, n_chan_out)
out_layer_size = np.product(out_layer_shape)
conv_weight_matrix = np.zeros((in_layer_size, out_layer_size))
# THIS WORKS ONLY FOR SAME and not for VALID!!!
for i in range(in_height):
for j in range(in_width):
for c_out in range(n_chan_out):
out_int = cnn_layer_tup_to_int((i,j,c_out), out_layer_shape)
for n in range(kernel_height):
for m in range(kernel_width):
for c_in in range(n_chan_in):
weight = conv_tensor[n][m][c_in][c_out]
h_in = i + n - kernel_height_centre
w_in = j + m - kernel_width_centre
in_bounds_check = (h_in in range(in_height)
and w_in in range(in_width))
if in_bounds_check:
in_int = cnn_layer_tup_to_int((h_in, w_in,
c_in),
in_layer_shape)
conv_weight_matrix[in_int][out_int] = weight
weights_array = [conv_weight_matrix]
if conv_dict['max_pool_after']:
k_height, k_width = conv_dict['max_pool_size']
stride = conv_dict['max_pool_size'] # conv_dict['max_pool_stride']
padding = conv_dict['max_pool_padding']
if max_weight_convention == 'all_one':
max_weight = 1
elif max_weight_convention == 'one_on_n':
max_weight = 1 / (k_height * k_width)
else:
raise ValueError("max_weight_convention must be 'one_on_n' or 'all_one', is instead" + max_weight_convention)
# This code works on valid, I tested it
# But if input_side is divisible by stride, it is the same
if padding == 'valid':
maxed_height = math.ceil(in_height / stride[0])
maxed_width = math.ceil(in_width / stride[1])
maxed_shape = (maxed_height, maxed_width, n_chan_out)
maxed_size = np.product(maxed_shape)
max_matrix = np.zeros((out_layer_size, maxed_size))
k_height_centre = int((k_height - 1) / 2)
k_width_centre = int((k_width - 1) / 2)
for i in range(maxed_height):
for j in range(maxed_width):
for c in range(n_chan_out):
max_int = cnn_layer_tup_to_int((i,j,c), maxed_shape)
for n in range(k_height):
for m in range(k_width):
h_in = stride[0] * i + n - k_height_centre
w_in = stride[1] * j + m - k_width_centre
in_bounds_check = (h_in in range(in_height)
and
w_in in range(in_width))
if in_bounds_check:
out_int = cnn_layer_tup_to_int((h_in, w_in,
c),
out_layer_shape)
max_matrix[out_int][max_int] = max_weight
    # originally, this was 'valid', but I don't know what it is
    # this code raises an IndexError
    elif
#-------------------------------------------------------------------------
# Description: HSF (High Seas Forecast)
#-------------------------------------------------------------------------
# Copying:
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#-------------------------------------------------------------------------
# Version: 26 July 2016 - Received from <NAME>
##
#
# SOFTWARE HISTORY
# Date Ticket# Engineer Description
# ----------- ---------- ----------- --------------------------
# 07/29/2016 - tlefebvre Changed edit area retrieval and storage to work
# outside CAVE so edit areas could be shared.
# 12/20/2017 DCS17686 tlefebvre Initial baseline version.
#
##
#-------------------------------------------------------------------------
# Standard and Local file names and Locations:
# HSF.py, HSF _<site>_<MultiPil>_Definition, HSF_<site>_Override
#-------------------------------------------------------------------------
# Customization Points:
#
# DEFINITION SECTION
#
# Required Configuration Items:
#
# displayName If not None, defines how product appears in GFE GUI
# defaultEditAreas defines edit areas, default is Combinations
#
# productName defines name of product e.g. "COASTAL WATERS FORECAST"
# fullStationID Full station identifier, 4 letter, such as "KSLC".
# wmoID WMO ID code for product header, such as "FOUS45"
# pil Product pil, such as "OFFBOS"
# areaName (opt.) Area name for product header, such as "WESTERN NEW YORK"
# wfoCityState City,state that the WFO is located in, such as "BUFFALO, NY"
#
# synopsisUGC UGC code for Synopsis
# synopsisHeading Heading for Synopsis
#
# Optional Configuration Items
#
# editAreaSuffix default None. Allows for generating the body of the product for
# an edit area that is a subset (e.g. population areas) of the
# edit areas specified in the defaultEditAreas. So given the edit area,
# "COZ035" and the editAreaSuffix is "_pt", then the edit area that
# will be sampled and reported for the body of the product will be
# "COZ035_pt". If no such edit area exists, the system will simply
# use the original edit area.
# Note that Hazards will always be generated for the entire edit area.
# mapNameForCombinations Name of the map background that is used for
# creating/editing the combinations file. This must
# be defined or the GFE zone combiner
# database Source database for product. Can be "Official",
# "Fcst" or "ISC"
# outputFile Defines the output location of the finished product
# when saved from the Formatter Launcher.
# debug If on, debug_print statements will appear.
# textdbPil Defines the awips product identifier
# (e.g., DENCCFDEN) that is used to store the product
# in the AWIPS text database.
# This value is also used for the default GUI entry for
# storage.
# awipsWANPil Defines the awips product identifier
# (e.g., KBOUCCFDEN) that is used to transmit the
# product to the AWIPS WAN.
# This value is also used for the default GUI
# entry for storage.
# hazardSamplingThreshold Defines the percentage coverage or number of
# grid points in a zone that must contain the hazard
# in order for it to be considered. Tuple (percent, points)
#
# periodCombining If 1, an attempt will be made to combine components
# or time periods into one. Otherwise no period
#                        combining will be done.
# includeEveningPeriod Include a 6 hour Evening period on the 3rd day
# useAbbreviations
# If 1, use marine abbreviations e.g. TSTM instead of THUNDERSTORM,
# NW instead of NORTHWEST
# (See marine_abbreviateText in the TextRules module)
#
# Weather-related flags
# hoursSChcEnds - specifies hours past the beginning of the first
# first period of the product to stop including 'Slight
# Chance' or 'Isolated' weather types (ERH policy
# allows values of 1-5 * 12 hour periods)
#
# areaDictionary Modify the AreaDictionary utility with UGC
# information about zones
#
# useHolidays Set to 1 to use holidays in the time period labels
#
# Trouble-shooting items
# passLimit -- Limit on passes allowed through Narrative Tree
# trace -- Set to 1 to turn on trace through Narrative Tree
#
# OVERRIDES
#
# Required Overrides
#
# _Text1(), _Text2() Descriptive text for header
#
# NARRATIVE CUSTOMIZATION POINTS
# The phrases in this product can be customized in many ways by overriding
# infrastructure methods in the Local file.
# You will see common overrides in the Local file and you may change them
# there.
# For further customization, you can determine which phrases your product is
# using by examining the Component Product Definitions below.
# Then, you can look up the phrase in the Text Product User Guide, which will
# describe all the relevant override methods associated with the phrase.
# Refer to the Customization section of the Text Product User Guide
# for step-by-step information.
#
#-------------------------------------------------------------------------
# Weather Elements Needed:
# Wind (every 3 hours to 3 days, then every 6 hours to 7 days)
# WaveHeight and/or WindWaveHgt
# (every 6 hours to 3 days, then every 12 hours to 7 days)
# Wx (every 6 hours to 3 days, then every 12 hours to 7 days)
# Optional:
# WindGust (every 3 hours to 7 days)
# Swell, Swell2, Period, Period2 (every 6 hours to 7 days)
#-------------------------------------------------------------------------
# Edit Areas Needed: None
#-------------------------------------------------------------------------
# Associated Utilities Files e.g. Combinations file:
# Combinations
#-------------------------------------------------------------------------
# Component Products:
# OFFPeriod (component)
# OFFPeriodMid (component)
# OFFExtended (component)
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# Development tasks that are identified and in progress:
#
# To look up tasks and their status, see the Text Product User Guide
# Section on "Tkgnats: Task Reporting System".
#-------------------------------------------------------------------------
# Additional Information:
#
# COMMON OVERRIDES
# from OFF:
# _Text1
# _Text2
# _issuance_list
# riverBarForecast_dict
# from MarinePhrases
# inlandWatersAreas
# inlandWatersWave_element
# seasWaveHeight_element
# seasWindWave_element
# waveHeight_wind_threshold
# marine_wind_flag
# marine_wind_combining_flag
# marine_wind_verbose_flag
# from ConfigVariables
# phrase_descriptor_dict
# phrase_connector_dict
# null_nlValue_dict
# first_null_phrase_dict
# null_phrase_dict
# maximum_range_nlValue_dict
# combine_singleValues_flag_dict
# from WxPhrases:
# embedded_visibility_flag
# visibility_wx_threshold
# significant_wx_visibility_subkeys
# wxCoverageDescriptors
# wxTypeDescriptors
# wxAttributeDescriptors
# wxIntensityDescriptors
# wxCombinations
# combine_T_RW
# from SampleAnalysis
# moderated_dict
#-------------------------------------------------------------------------
# Example Output:
# Refer to the NWS Directives for Marine Services.
#-------------------------------------------------------------------------
import TextRules
import SampleAnalysis
import ForecastNarrative
import time, string, re, types, cPickle, os, textwrap, sys
import TimeRange
import AbsTime
from math import *
import numpy
import copy
import UserInfo
import subprocess
import xml.etree.ElementTree as ET
import EditAreaUtilities
from com.raytheon.uf.common.dataplugin.gfe.reference import ReferenceID
from com.raytheon.uf.common.dataplugin.gfe.reference import ReferenceData
#from com.raytheon.uf.common.dataplugin.gfe.reference import ReferenceData_CoordinateType as CoordinateType
class Node:
def __init__(self, childList, methodList):
self.childList = childList
self.methodList = methodList
self.parent = None
# Make tree bi-directional
for child in childList:
child.parent = self
# Keep track of changes made to this node
self.changeFlag = 0
# Keep track of methods that are done
self.doneList = []
def getIndex(self):
# If this node is a child,
# return it's index in the childList of the parent
try:
return self.parent.childList.index(self)
except:
return None
def getParent(self):
return self.parent
def getComponent(self):
# Return this node's ancestor at the second level in the tree
prevNode = None
node = self
i = 0
while node.getParent() is not None and i < 100:
prevNode = node
node = node.getParent()
i = i + 1
return prevNode
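    # e.g. for a tree root -> component -> phrase -> subphrase, calling
    # getComponent() on the subphrase node walks up and returns the
    # component node (the root's child on that branch).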
def getComponentName(self):
node = self
compName = node.get("componentName")
if compName is not None:
return compName
else:
comp = node.getComponent()
if comp is not None:
return comp.get("name")
else:
return None
def getNext(self):
if self.parent is not None:
index = self.getIndex()
childList = self.parent.childList
if len(childList) > index+1:
return childList[index+1]
def getPrev(self):
if self.parent is not None:
index = self.getIndex()
childList = self.parent.childList
if index > 0:
return childList[index-1]
def set(self, member, value):
#print " Setting", member,
if hasattr(self, member):
current = getattr(self, member)
#print "current/value", current, value
if current == value:
#print " No Change"
return
setattr(self, member, value)
self.changeFlag = 1
#print " Changed"
def get(self, member, default=None):
if hasattr(self, member):
return getattr(self, member)
else:
return default
def printNode(self, node, indentStr=""):
print "Node", node
print indentStr + " Methods"
for method in node.methodList:
if method in node.doneList:
done = "DONE"
else:
done = ""
print indentStr + " ", method.__name__, done
print indentStr + " Attributes"
dict = node.__dict__
for key in dict:
if key == "methodList" or key == "doneList":
continue
print indentStr + " ", key, dict[key]
print indentStr + " Children ", len(node.childList)
for child in node.childList:
self.printNode(child, indentStr + " ")
def insertChild(self, sibling, newChild, newFirst=0):
# Insert the newChild
# If newFirst, insert newChild before sibling,
# else afterward.
newChild.parent = self
new = []
for child in self.childList:
if child == sibling:
if newFirst:
new.append(newChild)
new.append(child)
else:
new.append(child)
new.append(newChild)
else:
new.append(child)
self.childList = new
def remove(self):
# Remove this node from it's parent child list
parent = self.parent
new = []
for child in parent.childList:
if child != self:
new.append(child)
parent.childList = new
        # Set the attribute for
# -*- coding: utf-8 -*-
"""
Created on Sat May 13 15:35:42 2016
@version:0.2.4./pyc
@author: Nackel
"""
import sys
import re
import time
import os
from itertools import combinations_with_replacement, permutations, product
import numpy as np
from data import index_list
from util_sc import is_rnasc_list
from util_sc import get_rnasc_data
from util_sc import get_corresp_sequence
from util import write_to_file
def get_kmer_lst(letterlst, k):
"""Generate a list of all possible k-mer pattern.
    :param letterlst: A list that contains all the possible letters in the sequence.
:param k: The length of k-mer.
:return: A kmer list.
"""
kmerlst = []
letter_set = set(letterlst)
letter = [''.join(i) for i in letter_set]
partkmers = list(combinations_with_replacement(letter, k))
for element in partkmers:
elelst = set(permutations(element, k))
strlst = [''.join(ele) for ele in elelst]
kmerlst += strlst
kmerlst = np.sort(kmerlst)
return list(kmerlst)
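# e.g. get_kmer_lst(['(', '.'], 3) yields the 8 patterns
# ['(((', '((.', '(.(', '(..', '.((', '.(.', '..(', '...']
# (np.sort orders '(' before '.' by ASCII code).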
def delete_free_base(seqss):
"""Delete free base based on secondary structure to produce a new sequence and secondary structure. New sequence and secondary structure is a substring of the original sequence and secondary structure.
:param seqss: a seqss object.
:return: A new sequence and sstructure,string.
"""
left_pos = seqss.sstructure.index('(')
right_pos = seqss.sstructure.rindex(')')
return seqss.sequence[left_pos:right_pos+1], seqss.sstructure[left_pos:right_pos+1]
def delete_loop(seqss):
"""Delete loop(hairpin) based on secondary structure to produce a new sequence and secondary structure. New sequence and secondary structure is a substring of the original sequence and secondary structure.
:param seqss: a seqss object.
:return: A new sequence and sstructure,string.
"""
loop_re = r'(\(\.+\))'
    loop_list = re.findall(loop_re, seqss.sstructure)
    # Work on running copies so that every loop is removed, not just the
    # last one, and so sequences without loops are returned unchanged.
    sequence_dl, sstructure_dl = seqss.sequence, seqss.sstructure
    for loop in loop_list:
        pos = sstructure_dl.index(loop)
        length = len(loop)
        sstructure_dl = sstructure_dl[:pos+1] + sstructure_dl[pos+length-1:]
        sequence_dl = sequence_dl[:pos+1] + sequence_dl[pos+length-1:]
    return sequence_dl, sstructure_dl
#======================Complete process in Triplet=============================
def get_triplet_matrix(filename):
    '''This is the complete Triplet pipeline, aiming to generate feature vectors.
The FASTA format of the input file is as follows:
>Sequence name
An RNA sequence should be consist of AGCU
Secondary structure
:param filename: Name of inputfile.
:return: Feature matrix through Triplet.
'''
letter = ["(","."]
    alphabet = 'AGCU' # Don't change the alphabet order, or the order of features will change.
with open(filename) as f:
seqsslst= get_rnasc_data(f)
tripletdict = get_triplet_dict(letter, 3, alphabet)
features = []
for seqss in seqsslst:
vector = get_triplet_vector(seqss, tripletdict)
features.append(vector)
return features
def get_triplet_vector(seqss,patterndict):
    '''This is a step in Triplet, aiming to generate a feature vector.
:param seqss: a seqss object.
:param patterndict: All the features, dictionary.
:return: Feature vector through Triplet.
'''
vector=np.zeros((1,len(patterndict)))
    # NOTE: these preprocessed strings are currently unused; the loop below
    # reads the raw seqss fields directly.
    sequence, sstructure = delete_free_base(seqss)
    sequence, sstructure = delete_loop(seqss)
for i in range(len(seqss.sequence)):
letter =seqss.sequence[i]
middle = seqss.sstructure[i]
if i == 0:
near_left = "."
near_right = seqss.sstructure[i+1]
elif i == len(seqss.sequence)-1:
near_left = seqss.sstructure[i-1]
near_right = "."
else:
near_left = seqss.sstructure[i-1]
near_right = seqss.sstructure[i+1]
#rectify the empty loop structure
if middle == '(' and near_right == ')':
near_right = '.'
if middle == ')' and near_left == '(':
near_left = '.'
letter_sstruc_comb = letter+near_left+middle+near_right
letter_sstruc_comb_r = letter_sstruc_comb.replace(')', '(')
position = patterndict.get(letter_sstruc_comb_r)
vector[0, position] += 1
#print letter_sstruc_comb ,position
#return list (vector[0])
return [round(f,3) for f in list(vector[0]/sum(vector[0]))]
def get_triplet_dict(letter, k, alphabet=index_list.RNA):
"""Generate a dictionary of all possible triplet pattern.
:param letter: A list that contains all the possible characters in the secondary structure. eg:['.','(']
:param k: The length of k-mer.
:param alphabet: A string that contains all the possible characters in the sequence.
:return: A triplet dictionary.
"""
kmerlst = get_kmer_lst(letter, k)
kmerlst.reverse()
tripletlst = [''.join(ele) for ele in product(list(alphabet), kmerlst)]
#tripletlst = np.sort(tripletlst)
tripletdict = {tripletlst[i]: i for i in range(len(tripletlst))}
return tripletdict
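# e.g. for k=3 over {'(', '.'} and alphabet 'AGCU' this yields 32 features,
# indexed 'A...' -> 0, 'A..(' -> 1, ..., 'U(((' -> 31.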
#=========================PseKNC===============================================
def get_pseknc_matrix(filename, k):
    '''This is the complete PseKNC pipeline, aiming to generate a feature matrix.
The FASTA format of the input file is as follows:
>Sequence name
An RNA sequence should be consist of AGCU
Secondary structure
:param filename: Name of input file.
:return: Feature matrix through PseKNC.
'''
alphabet = 'ACGU'
letter = list(alphabet)
with open(filename) as f:
seqsslst = get_rnasc_data(f)
    features = []
    for seqss in seqsslst:
        # get_pseknc_vector builds its pattern dictionary internally from
        # the structure-status alphabet, so only k is passed here.
        vector = get_pseknc_vector(seqss, k)
features.append(vector)
return features
def get_pseknc_dict(letter_list, k):
"""Generate a dictionary of all possible PseKNC pattern.
    :param letter_list: A list that contains all the possible characters in an RNA sequence. eg:['A','C','G','U']
:param k: The length of K-tuple nucleotide composition.
:return: A PseKNC pattern dictionary.
"""
pseknclst = []
part_psessc = list(combinations_with_replacement(letter_list, k))
for element in part_psessc:
elelst = set(permutations(element, k))
pseknclst += elelst
pseknclst.sort()
psekncdict = {pseknclst[i]:i for i in range(len(pseknclst))}
return psekncdict
def get_pseknc_vector(seqss, k, letter_list = ['A', 'C', 'G', 'U', 'A-U', 'U-A', 'G-C', 'C-G', 'G-U', 'U-G']):
    '''This is a step in PseKNC, aiming to generate a feature vector.
    :param seqss: a seqss object.
    :param k: The length of the K-tuple nucleotide composition.
    :param letter_list: Structure statuses, default ['A', 'C', 'G', 'U', 'A-U', 'U-A', 'G-C', 'C-G', 'G-U', 'U-G'].
:return: Feature vector through PseKNC.
'''
psekncdict = get_pseknc_dict(letter_list, k)
vector = np.zeros((1, len(psekncdict)))
correspseq = get_corresp_sequence(seqss)
pattern = zip(list(seqss.sequence),list(correspseq))
for i in xrange(len(pattern)-k+1):
stem = []
for x, y in pattern[i:i+k]:
if x == '.' or y == '.':
if x == '.':
stem.append(y)
else:
stem.append(x)
else:
stem.append(x + '-' + y)
stem_tuple= tuple(stem)
position = psekncdict.get(stem_tuple)
vector[0, position] += 1
#print stem_tuple,position
#return vector[0]
return list(vector[0]/sum(vector[0]))
#return [round(f,4) for f in list(vector[0]/sum(vector[0]))]
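# e.g. with k=2, a paired position followed by an unpaired 'A' contributes
# the status tuple ('G-C', 'A'), whose count is then normalised to a
# frequency in the returned vector.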
#=========================PseSSC===============================================
def get_psessc_matrix(filename, n, r, w, pattern_list = ['A', 'C', 'G', 'U', 'A-U', 'U-A', 'G-C', 'C-G', 'G-U', 'U-G']):
    '''This is the complete PseSSC pipeline, aiming to generate a feature matrix.
The FASTA format of the input file is as follows:
>Sequence name
An RNA sequence should be consist of AGCU
Secondary structure
:param filename: Name of input file.
:param n: The number of n adjacent structure statuses.
    :param r: The highest counted rank (or tier) of the structural correlation along an RNA chain.
    :param w: The weight of theta, from 0.1 to 1.
:param pattern_list: Structure statuses, default:['A', 'C', 'G', 'U', 'A-U', 'U-A', 'G-C', 'C-G', 'G-U', 'U-G'].
:return: Feature matrix through PseSSC.
'''
with open(filename) as f:
seqsslst= get_rnasc_data(f)
features = []
for seqss in seqsslst:
vector = get_psessc_vector(seqss, n, r, w, pattern_list)
features.append(vector)
return features
def get_psessc_vector(seqss, n, r, w, pattern_list = ['A', 'C', 'G', 'U', 'A-U', 'U-A', 'G-C', 'C-G', 'G-U', 'U-G']):
    '''This is a step in PseSSC, aiming to generate a feature vector.
:param seqss: a seqss object.
:param n: The number of n adjacent structure statuses.
:param r: The highest counted rank (or tier) of the structural correlation along an RNA chain.
    :param w: The weight of theta, from 0.1 to 1.
:param pattern_list: Structure statuses, default:['A', 'C', 'G', 'U', 'A-U', 'U-A', 'G-C', 'C-G', 'G-U', 'U-G'].
:return: Feature vector through PseSSC.
'''
#psekncdict = get_pseknc_dict(pattern_list, k)
if n > seqss.length or n <= 0:
        error_info = 'Error occurred in ' + seqss.id + ', n should be less than the length of the sequence and larger than 0.\n'
sys.stderr.write(error_info)
elif r >= seqss.length:
error_info = 'Error occured in ' + seqss.id + ', r should be less than the length of the sequence.\n'
sys.stderr.write(error_info)
else:
psekncvec = get_pseknc_vector(seqss, n, pattern_list)
psesscvec_tmp = np.array(psekncvec)
for i in range(1, r+1):
psesscvec_tmp = np.hstack((psesscvec_tmp, w * calculate_theta(seqss, i)))
psesscvec = psesscvec_tmp / sum(psesscvec_tmp)
#return psesscvec
return [round(f,4) for f in psesscvec]
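# Dimensionality note (a sketch of the layout, inferred from the code above):
# the PseSSC vector concatenates the len(pattern_list) ** n k-tuple
# frequencies from get_pseknc_vector with the r weighted theta terms, i.e.
# 10 ** n + r components for the default pattern_list, normalised to sum to 1.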
def calculate_theta(seqss, j):
'''calculate theta
:param seqss: a seqss object.
:param j: the counted rank (or tier) of the structural correlation along a RNA chain.
:return: theta.
'''
if j >= len(seqss.sequence):
        error_info = 'Error occurred in ' + seqss.id + ', r should be less than the length of the sequence.'
sys.stderr.write(error_info)
else:
correspseq = get_corresp_sequence(seqss)
pattern = zip(list(seqss.sequence), list(correspseq))
stem=[]
for x,y in pattern:
if x == '.' or y == '.':
if x == '.':
stem.append(y)
else:
stem.append(x)
else:
stem.append(x + '-' + y)
freevalue_vector = []
for i in stem:
if i == 'A-U' or i == 'U-A':
freevalue_vector.append(-2)
elif i == 'C-G' or i == 'G-C':
freevalue_vector.append(-3)
elif i == 'U-G' or i == 'G-U':
freevalue_vector.append(-1)
else:
freevalue_vector.append(0)
s=0.0
for i in range(len(freevalue_vector)-j):
s += (freevalue_vector[i] - freevalue_vector[i+j]) ** 2
#print i,i+j
#print (freevalue_vector[i] - freevalue_vector[i+r])
#print s,len(freevalue_vector)-r
return s / (len(freevalue_vector)-j)
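# Toy example (illustrative): for the structure statuses
# ['A-U', 'C-G', 'A', 'G-U'] the free-energy vector is [-2, -3, 0, -1], so
# calculate_theta with j = 1 averages the squared neighbour differences:
# ((-2 - -3) ** 2 + (-3 - 0) ** 2 + (0 - -1) ** 2) / 3 = (1 + 9 + 1) / 3.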
#=========================PseDPC===============================================
def get_psedpc_matrix(filename, n, r, w, pattern_list = ['A', 'C', 'G', 'U', 'A-U', 'U-A', 'G-C', 'C-G', 'G-U', 'U-G']):
    '''This is a complete process in PseDPC, aim to generate feature matrix.
    Sketch by analogy with get_psessc_matrix above; a get_psedpc_vector helper
    with the same parameters is assumed.
    '''
    with open(filename) as f:
        seqsslst = get_rnasc_data(f)
    features = []
    for seqss in seqsslst:
        features.append(get_psedpc_vector(seqss, n, r, w, pattern_list))
    return features
import os
import sys
import time
import pickle
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import keras
from scipy.stats import mode
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from Levenshtein import distance as levenshtein_distance
from geomstats.geometry.poincare_ball import PoincareBall
from edit_distance.train import load_edit_distance_dataset
from edit_distance.task.dataset_generator_genomic import EditDistanceGenomicDatasetGenerator
from util.data_handling.data_loader import get_dataloaders
from util.ml_and_math.loss_functions import AverageMeter
# from hypersmorf.myfunctions import create_parser, generate_datasets, run_model
numpy_type_map = {
'float64': torch.DoubleTensor,
'float32': torch.FloatTensor,
'float16': torch.HalfTensor,
'int64': torch.LongTensor,
'int32': torch.IntTensor,
'int16': torch.ShortTensor,
'int8': torch.CharTensor,
'uint8': torch.ByteTensor,
}
def square_distance(t1_emb, t2_emb,scale=1):
D = t1_emb - t2_emb
d = torch.sum(D * D, dim=-1)
return d
def euclidean_distance(t1_emb, t2_emb,scale=1):
D = t1_emb - t2_emb
d = torch.norm(D, dim=-1)
return d
def cosine_distance(t1_emb, t2_emb,scale=1):
return 1 - nn.functional.cosine_similarity(t1_emb, t2_emb, dim=-1, eps=1e-6)
def manhattan_distance(t1_emb, t2_emb,scale=1):
D = t1_emb - t2_emb
d = torch.sum(torch.abs(D), dim=-1)
return d
def hyperbolic_geomstats_distance(u,v,scale=1):
return PoincareBall(u.size()[1]).metric.dist(u,v)
def hyperbolic_distance(u, v, epsilon=1e-7):  # small epsilon added to avoid numerical error at the boundary
sqdist = torch.sum((u - v) ** 2, dim=-1)
squnorm = torch.sum(u ** 2, dim=-1)
sqvnorm = torch.sum(v ** 2, dim=-1)
x = 1 + 2 * sqdist / ((1 - squnorm) * (1 - sqvnorm)) + epsilon
z = torch.sqrt(x ** 2 - 1)
return torch.log(x + z)
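# Note: this is the closed form of the Poincare-ball distance
#     d(u, v) = arcosh(1 + 2 * ||u - v||^2 / ((1 - ||u||^2) * (1 - ||v||^2)))
# written out via arcosh(x) = log(x + sqrt(x^2 - 1)); epsilon keeps x from
# dipping below 1 through floating-point error.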
def hyperbolic_distance_numpy(u, v, epsilon=1e-9):
sqdist = np.sum((u - v) ** 2, axis=-1)
squnorm = np.sum(u ** 2, axis=-1)
sqvnorm = np.sum(v ** 2, axis=-1)
x = 1 + 2 * sqdist / ((1 - squnorm) * (1 - sqvnorm)) + epsilon
z = np.sqrt(x ** 2 - 1)
return np.log(x + z)
DISTANCE_TORCH = {
'square': square_distance,
'euclidean': euclidean_distance,
'cosine': cosine_distance,
'manhattan': manhattan_distance,
'hyperbolic': hyperbolic_distance
}
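# Minimal usage sketch (hypothetical tensors; any key of DISTANCE_TORCH works,
# though 'hyperbolic' expects points inside the unit ball):
#     t1 = torch.randn(8, 5)
#     t2 = torch.randn(8, 5)
#     d = DISTANCE_TORCH['euclidean'](t1, t2)  # shape (8,), one distance per row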
import argparse
import os
import pickle
import sys
import time
from types import SimpleNamespace
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
from edit_distance.task.dataset import EditDistanceDatasetSampled, EditDistanceDatasetComplete,EditDistanceDatasetSampledCalculated
from edit_distance.task.dataset import EditDistanceDatasetCompleteCalculated
from edit_distance.models.hyperbolics import RAdam
from edit_distance.models.pair_encoder import PairEmbeddingDistance
from util.data_handling.data_loader import get_dataloaders
from util.ml_and_math.loss_functions import MAPE
from util.ml_and_math.loss_functions import AverageMeter
def general_arg_parser():
""" Parsing of parameters common to all the different models """
parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, default='../../data/edit_qiita_small.pkl', help='Dataset path')
parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables CUDA training (GPU)')
parser.add_argument('--seed', type=int, default=42, help='Random seed')
parser.add_argument('--epochs', type=int, default=2, help='Number of epochs to train')
parser.add_argument('--lr', type=float, default=0.001, help='Initial learning rate')
parser.add_argument('--weight_decay', type=float, default=0.0, help='Weight decay')
parser.add_argument('--dropout', type=float, default=0.0, help='Dropout rate (1 - keep probability)')
parser.add_argument('--patience', type=int, default=50, help='Patience')
parser.add_argument('--print_every', type=int, default=1, help='Print training results every')
parser.add_argument('--batch_size', type=int, default=128, help='Batch size')
parser.add_argument('--embedding_size', type=int, default=5, help='Size of embedding')
parser.add_argument('--distance', type=str, default='hyperbolic', help='Type of distance to use')
parser.add_argument('--workers', type=int, default=0, help='Number of workers')
parser.add_argument('--loss', type=str, default="mse", help='Loss function to use (mse, mape or mae)')
parser.add_argument('--plot', action='store_true', default=False, help='Plot real vs predicted distances')
parser.add_argument('--closest_data_path', type=str, default='', help='Dataset for closest string retrieval tests')
parser.add_argument('--hierarchical_data_path', type=str, default='', help='Dataset for hierarchical clustering')
parser.add_argument('--construct_msa_tree', type=str, default='False', help='Whether to construct NJ tree testset')
parser.add_argument('--extr_data_path', type=str, default='', help='Dataset for further edit distance tests')
parser.add_argument('--scaling', type=str, default='False', help='Project to hypersphere (for hyperbolic)')
parser.add_argument('--hyp_optimizer', type=str, default='Adam', help='Optimizer for hyperbolic (Adam or RAdam)')
return parser
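# Typical usage sketch (MyEncoder and its hyperparameters are hypothetical):
#     parser = general_arg_parser()
#     args = parser.parse_args(['--data', 'edit_qiita_small.pkl', '--distance', 'hyperbolic'])
#     execute_train(model_class=MyEncoder, model_args={'hidden_size': 128}, args=args)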
def execute_train(model_class, model_args, args):
# set device
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    # force CPU here: the tensors below are never moved to the GPU in train()/test()
    device = 'cpu'
    print('Using device:', device)
# set the random seed
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# load data
datasets = load_edit_distance_dataset(args.data)
loaders = get_dataloaders(datasets, batch_size=args.batch_size, workers=args.workers)
# fix hyperparameters
model_args = SimpleNamespace(**model_args)
model_args.device = device
model_args.len_sequence = datasets['train'].len_sequence
model_args.embedding_size = args.embedding_size
model_args.dropout = args.dropout
print("Length of sequence", datasets['train'].len_sequence)
args.scaling = True if args.scaling == 'True' else False
# generate model
embedding_model = model_class(**vars(model_args))
model = PairEmbeddingDistance(embedding_model=embedding_model, distance=args.distance, scaling=args.scaling)
model.to(device)
# select optimizer
if args.distance == 'hyperbolic' and args.hyp_optimizer == 'RAdam':
optimizer = RAdam(model.parameters(), lr=args.lr)
else:
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
# select loss
loss = None
if args.loss == "mse":
loss = nn.MSELoss()
elif args.loss == "mae":
loss = nn.L1Loss()
elif args.loss == "mape":
loss = MAPE
# print total number of parameters
total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('Total params', total_params)
# Train model
t_total = time.time()
bad_counter = 0
best = 1e10
best_epoch = -1
start_epoch = 0
for epoch in range(start_epoch, args.epochs):
t = time.time()
loss_train = train(model, loaders['train'], optimizer, loss, device)
loss_val = test(model, loaders['val'], loss, device)
# print progress
if epoch % args.print_every == 0:
print('Epoch: {:04d}'.format(epoch + 1),
'loss_train: {:.6f}'.format(loss_train),
'loss_val: {:.6f} MAPE {:.4f}'.format(*loss_val),
'time: {:.4f}s'.format(time.time() - t))
sys.stdout.flush()
if loss_val[0] < best:
# save current model
torch.save(model.state_dict(), '{}.pkl'.format(epoch))
# remove previous model
if best_epoch >= 0:
os.remove('{}.pkl'.format(best_epoch))
# update training variables
best = loss_val[0]
best_epoch = epoch
bad_counter = 0
else:
bad_counter += 1
if bad_counter == args.patience:
print('Early stop at epoch {} (no improvement in last {} epochs)'.format(epoch + 1, bad_counter))
break
print('Optimization Finished!')
print('Total time elapsed: {:.4f}s'.format(time.time() - t_total))
# Restore best model
print('Loading {}th epoch'.format(best_epoch + 1))
model.load_state_dict(torch.load('{}.pkl'.format(best_epoch)))
# Testing
for dset in loaders.keys():
if args.plot:
avg_loss = test_and_plot(model, loaders[dset], loss, device, dset)
else:
avg_loss = test(model, loaders[dset], loss, device)
print('Final results {}: loss = {:.6f} MAPE {:.4f}'.format(dset, *avg_loss))
# Nearest neighbour retrieval
if args.closest_data_path != '':
print("Closest string retrieval")
closest_string_testing(encoder_model=model, data_path=args.closest_data_path,
batch_size=args.batch_size, device=device, distance=args.distance)
# Hierarchical clustering
if args.hierarchical_data_path != '':
print("Hierarchical clustering")
hierarchical_clustering_testing(encoder_model=model, data_path=args.hierarchical_data_path,
batch_size=args.batch_size, device=device, distance=args.distance)
# MSA tree construction on test set
if args.construct_msa_tree == 'True':
print("MSA tree construction")
approximate_guide_trees(encoder_model=model, dataset=datasets['test'],
batch_size=args.batch_size, device=device, distance=args.distance)
# Extra datasets testing (e.g. extrapolation)
if args.extr_data_path != '':
print("Extra datasets testing")
datasets = load_edit_distance_dataset(args.extr_data_path)
loaders = get_dataloaders(datasets, batch_size=max(1, args.batch_size // 8), workers=args.workers)
for dset in loaders.keys():
if args.plot:
avg_loss = test_and_plot(model, loaders[dset], loss, device, dset)
else:
avg_loss = test(model, loaders[dset], loss, device)
print('Final results {}: loss = {:.6f} MAPE {:.4f}'.format(dset, *avg_loss))
torch.save((model_class, model_args, model.embedding_model.state_dict(), args.distance),
'{}.pkl'.format(model_class.__name__))
def load_edit_distance_dataset(path):
with open(path, 'rb') as f:
sequences, distances = pickle.load(f)
datasets = {}
for key in sequences.keys():
if len(sequences[key].shape) == 2: # datasets without batches
if key == 'train':
datasets[key] = EditDistanceDatasetSampled(sequences[key].unsqueeze(0), distances[key].unsqueeze(0),
multiplicity=10)
else:
datasets[key] = EditDistanceDatasetComplete(sequences[key], distances[key])
else: # datasets with batches
datasets[key] = EditDistanceDatasetSampled(sequences[key], distances[key])
return datasets
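# Expected pickle layout (an assumption inferred from the loading code above):
#     sequences: dict mapping 'train'/'val'/'test' to tensors of shape (N, L)
#                for unbatched splits or (B, N, L) for batched ones
#     distances: dict with matching keys holding (N, N) or (B, N, N) pairwise
#                edit-distance matrices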
def load_edit_distance_dataset_calculate(path):
with open(path, 'rb') as f:
sequences, distances = pickle.load(f)
datasets = {}
for key in sequences.keys():
if len(sequences[key].shape) == 2: # datasets without batches
if key == 'train':
datasets[key] = EditDistanceDatasetSampledCalculated(sequences[key].unsqueeze(0), distances[key].unsqueeze(0),
multiplicity=10)
else:
datasets[key] = EditDistanceDatasetCompleteCalculated(sequences[key], distances[key])
else: # datasets with batches
datasets[key] = EditDistanceDatasetSampledCalculated(sequences[key], distances[key])
return datasets
def train(model, loader, optimizer, loss, device):
device = 'cpu'
avg_loss = AverageMeter()
model.train()
for sequences, labels in loader:
# move examples to right device
# sequences, labels = sequences.to(device), labels.to(device)
with torch.autograd.set_detect_anomaly(True):
# forward propagation
optimizer.zero_grad()
output = model(sequences)
# loss and backpropagation
loss_train = loss(output, labels)
loss_train.backward()
optimizer.step()
# keep track of average loss
avg_loss.update(loss_train.data.item(), sequences.shape[0])
return avg_loss.avg
def test(model, loader, loss, device):
    # callers unpack a (loss, MAPE) pair, so track both metrics here
    avg_loss = AverageMeter(len_tuple=2)
    model.eval()
    with torch.no_grad():
        for sequences, labels in loader:
            # move examples to right device
            # sequences, labels = sequences.to(device), labels.to(device)
            # forward propagation and loss computation
            output = model(sequences)
            loss_val = loss(output, labels).data.item()
            mape = MAPE(output, labels).data.item()
            avg_loss.update((loss_val, mape), sequences.shape[0])
    return avg_loss.avg
def test_and_plot(model, loader, loss, device, dataset):
avg_loss = AverageMeter(len_tuple=2)
model.eval()
output_list = []
labels_list = []
for sequences, labels in loader:
# move examples to right device
sequences, labels = sequences.to(device), labels.to(device)
# forward propagation and loss computation
output = model(sequences)
        loss_val = loss(output, labels).data.item()
mape = MAPE(output, labels).data.item()
avg_loss.update((loss_val, mape), sequences.shape[0])
# append real and predicted distances to lists
output_list.append(output.cpu().detach().numpy())
labels_list.append(labels.cpu().detach().numpy())
# save real and predicted distances for offline plotting
outputs = np.concatenate(output_list, axis=0)
labels = np.concatenate(labels_list, axis=0)
pickle.dump((outputs, labels), open(dataset + ".pkl", "wb"))
# plt.plot(outputs, labels, 'o', color='black')
# plt.show()
return avg_loss.avg
#%%
# Train my models
import os
os.environ['GEOMSTATS_BACKEND'] = 'pytorch'
import torch
from torch import nn
import torch.optim as optim
import time
import argparse
from edit_distance.task.dataset_generator_genomic import EditDistanceGenomicDatasetGenerator
from util.data_handling.data_loader import get_dataloaders
from edit_distance.train import load_edit_distance_dataset,train,test
from edit_distance.models.pair_encoder import PairEmbeddingDistance
class LinearEncoder(nn.Module):
""" Linear model which simply flattens the sequence and applies a linear transformation. """
def __init__(self, len_sequence, embedding_size, alphabet_size=4):
super(LinearEncoder, self).__init__()
self.encoder = nn.Linear(in_features=alphabet_size * len_sequence,
out_features=embedding_size)
def forward(self, sequence):
# flatten sequence and apply layer
B = sequence.shape[0]
sequence = sequence.reshape(B, -1)
emb = self.encoder(sequence)
return emb
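# Shape sketch for LinearEncoder (assuming one-hot encoded input): a batch of
# sequences (B, len_sequence, alphabet_size) is flattened to
# (B, len_sequence * alphabet_size) and mapped by the single linear layer
# to (B, embedding_size).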
def run_model(dataset_name, embedding_size, dist_type, string_size, n_epoch):
device = 'cpu'
torch.manual_seed(2021)
if device == 'cuda':
torch.cuda.manual_seed(2021)
"""
return LRange ( vmin , vmax , n )
# =============================================================================
## loop over values between xmin and xmax in log-scale
# @code
# for v in lrange ( vmin , vmax , 200 ) : ## ditto
# print (v)
# @endcode
def lrange ( vmin , vmax , n = 100 ) :
""":oop over values between vmin and vmax in log-scale
>>> for v in lrange ( vmin , vmax , 200 ) : ## ditto
>>> print (v)
"""
return LRange ( vmin , vmax , n )
# =============================================================================
## split range into smaller chunks:
# @code
# for i in split_range ( 0 , 10000 , 200 ) :
# for j in range (*i) :
# ...
# @endcode
def split_range ( low , high , num ) :
"""Split range into smaller chunks:
>>> for i in split_range ( 0 , 10000 , 200 ) :
>>> for j in range (*i) :
>>> ...
"""
if high <= low or num < 1 :
yield low , low
else :
next = low + num
while next < high :
yield low , next
low = next
next += num
yield low , high
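# Example (illustrative):
# >>> list ( split_range ( 0 , 10 , 4 ) )
# [ (0, 4) , (4, 8) , (8, 10) ]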
# =============================================================================
if (3,6) <= sys.version_info :
choices = random.choices
else :
def choices ( population , weights = None , cum_weights = None , k = 1 ) :
""" Simple variant of `random.choice`
"""
assert weights is None and cum_weights is None,\
"choices: Neither ``weigths'' nor ``cum_weights'' are supported!"
return [ random.choice ( population ) for i in range ( k ) ]
# ========================================================================================
## Generate some random name of the given size
# @code
# name = random_name ( 5 )
# @endcode
def random_name ( size ) :
"""Generate some random name of given name
>>> name = random_name ( 5 )
"""
assert 1 <= size , 'random_name: invalid size!'
first = random.choice ( ascii_letters )
if 1 == size : return first
    return first + ''.join ( choices ( all_symbols , k = size - 1 ) )
# ========================================================================================
## generate some pseudo-random 6-symbol name from provided hash sources
def short_hash_name ( size , name , *names ) :
"""generate some pseudo-random 6-symbol name from provided hash sources
"""
size = max ( min ( size , 8 ) , 4 )
h = size , hash ( tuple ( ord ( i ) for i in name ) )
h = hash ( h )
for n in names :
h = h , hash ( tuple ( ord ( i ) for i in n ) )
h = hash ( h )
h = abs ( h ) % ( 2 ** ( 4 * size ) )
return ( '%%0%dx' % size ) % h
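# Example (illustrative): short_hash_name ( 6 , 'histo' , 'fit' ) returns a
# fixed 6-hexdigit string for these inputs; the size argument is clamped to
# the range [4, 8] before formatting.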
# =============================================================================
## Generate the random string, that can be used as password or secret word
# @code
# password = gen_password ()
# @endcode
def gen_password ( size = 12 ) :
"""Generate the random string, that can be used as password or secret word
    >>> password = gen_password ()
"""
import random
## save random state
state = random.getstate ()
## reset the random seed
random.seed ()
## generate the password
result = ''.join ( choices ( all_symbols , k = size ) )
## restore the random state
random.setstate ( state )
##
return result
# =============================================================================
try :
from more_itertools import chunked, divide
except ImportError :
from itertools import islice
from functools import partial
# =========================================================================
## Return first *n* items of the iterable as a list
# @code
# take(3, range(10)) ## [0, 1, 2]
# take(5, range(3)) ## [0, 1, 2]
# @endcode
#
# The function is copied from <code>more_itertools</code>
def take(n, iterable):
"""Return first *n* items of the iterable as a list.
>>> take(3, range(10))
[0, 1, 2]
>>> take(5, range(3))
[0, 1, 2]
Effectively a short replacement for ``next`` based iterator consumption
when you want more than one item, but less than the whole iterator.
- the function is copied from `more_itertools`
"""
return list(islice(iterable, n))
# =========================================================================
## Break *iterable* into lists of length *n*:
# @code
# list(chunked([1, 2, 3, 4, 5, 6], 3)) ## [[1, 2, 3], [4, 5, 6]]
# @endcode
# If the length of *iterable* is not evenly divisible by *n*, the last
# returned list will be shorter:
# @code
# list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3)) ## [[1, 2, 3], [4, 5, 6], [7, 8]]
# @endcode
# <code>chunked</code> is useful for splitting up a computation on a large number
# of keys into batches, to be pickled and sent off to worker processes. One
# example is operations on rows in MySQL, which does not implement
# server-side cursors properly and would otherwise load the entire dataset
# into RAM on the client.
#
# The function is copied from <code>more_itertools</code>
def chunked(iterable, n):
"""Break *iterable* into lists of length *n*:
>>> list(chunked([1, 2, 3, 4, 5, 6], 3))
[[1, 2, 3], [4, 5, 6]]
If the length of *iterable* is not evenly divisible by *n*, the last
returned list will be shorter:
>>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3))
[[1, 2, 3], [4, 5, 6], [7, 8]]
To use a fill-in value instead, see the :func:`grouper` recipe.
:func:`chunked` is useful for splitting up a computation on a large number
of keys into batches, to be pickled and sent off to worker processes. One
example is operations on rows in MySQL, which does not implement
server-side cursors properly and would otherwise load the entire dataset
into RAM on the client.
- the function is copied from `more_itertools`
"""
return iter(partial(take, n, iter(iterable)), [])
# =========================================================================
## Divide the elements from *iterable* into *n* parts, maintaining order.
# @code
# >>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6])
# >>> list(group_1)
# ... [1, 2, 3]
# >>> list(group_2)
# ... [4, 5, 6]
# @endcode
# If the length of *iterable* is not evenly divisible by *n*, then the
# length of the returned iterables will not be identical:
# @code
# >>> children = divide(3, [1, 2, 3, 4, 5, 6, 7])
# >>> [list(c) for c in children]
# ... [[1, 2, 3], [4, 5], [6, 7]]
# @endcode
#
# If the length of the iterable is smaller than n, then the last returned
# iterables will be empty:
# @code
# >>> children = divide(5, [1, 2, 3])
# >>> [list(c) for c in children]
# ... [[1], [2], [3], [], []]
# @endcode
#
# This function will exhaust the iterable before returning and may require
# significant storage. If order is not important, see :func:`distribute`,
# which does not first pull the iterable into memory.
#
# The function is copied from <code>more_itertools</code>
def divide ( n , iterable):
"""Divide the elements from *iterable* into *n* parts, maintaining
order.
>>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6])
>>> list(group_1)
[1, 2, 3]
>>> list(group_2)
[4, 5, 6]
If the length of *iterable* is not evenly divisible by *n*, then the
length of the returned iterables will not be identical:
>>> children = divide(3, [1, 2, 3, 4, 5, 6, 7])
>>> [list(c) for c in children]
[[1, 2, 3], [4, 5], [6, 7]]
        If the length of the iterable is smaller than n, then the last
        returned iterables will be empty:
        >>> children = divide(5, [1, 2, 3])
        >>> [list(c) for c in children]
        [[1], [2], [3], [], []]
        - the function is copied from `more_itertools`
        """
        if n < 1:
            raise ValueError('n must be at least 1')
        # simplified w.r.t. more_itertools: always materialise the iterable
        seq = tuple(iterable)
        q, r = divmod(len(seq), n)
        ret, stop = [], 0
        for i in range(1, n + 1):
            start, stop = stop, stop + (q + 1 if i <= r else q)
            ret.append(iter(seq[start:stop]))
        return ret
<filename>pyke/keppca.py
from .utils import PyKEArgumentHelpFormatter
from . import kepmsg, kepio, kepkey, kepplot
import re
import numpy as np
from astropy.io import fits as pyfits
from scipy import optimize as opt
from matplotlib import pyplot as plt
from tqdm import tqdm
import random
__all__ = ['keppca']
def keppca(infile, outfile=None, maskfile='ALL', components='1-3', plotpca=False,
nmaps=10, overwrite=False, verbose=False, logfile='keppca.log'):
"""
keppca -- Perform principal component analysis upon a target pixel file
    keppca provides a method to mitigate motion-derived systematic
    artifacts via Principal Component Analysis (PCA). This method was
    demonstrated on Kepler light curves by Harrison et al. (2012). It provides
    an alternative to cotrending data using basis vectors (kepcotrend) and
    correlating aperture photometry structure with time-series centroid
    measurements (kepsff). PCA will perhaps become a more widespread tool in
    the K2 era, where the magnitude of target motion across the detector over a
    Kepler quarter is experienced by a K2 target over just 6 hours during its
    regular sequence of thruster firings that counteract boresight roll motion.
Pixel-level PCA employs only those pixels collected around a specific
target and separates photometric trends common to all pixels from trends
localized to individual targets or pixels in a series of principal
component curves.
The user has the option to choose the specific set of pixels to sample in
this analysis. Principal components are plotted by the tool and written out
to an output FITS file in an output extension called PRINCIPAL_COMPONENTS.
The extension contains a 2D table with one row per timestamp recorded in
the input file and one column for every principal component. Summing all
principal components together will reconstruct a normalized version of the
summed pixel within the chosen aperture. The user also has the choice of
which principal components to optimally-subtract from the aperture-derived
light curve in order to remove motion systematics from the time-series
data. The aperture light curve and the corrected light curve are written to
the LIGHTCURVE extension of the output file. The first populates the
SAP_FLUX data column and the second is written to a column called PCA_FLUX.
This output file can be used as input for other PyKE tasks and can be e.g.
inspected using kepdraw.
Parameters
----------
infile : str
The name of a standard format FITS file containing Kepler or K2 target
pixels within the first data extension.
outfile : str
Filename for the output light curves and principal components. This
product will be written to the same FITS format as archived light
curves. Aperture photometry will be stored in the SAP_FLUX column of
the first FITS extension called LIGHTCURVE. A version of this light
curve with principal components subtracted is stored in column PCA_FLUX
and a normalized version is stored in PCA_FLUX_NRM. The individual
principal components are stored within a new FITS extension called
PRINCIPAL_COMPONENTS.
maskfile : str
This string can be one of three options:
* 'ALL' tells the task to calculate principal components from all
pixels within the pixel mask stored in the input file.
* 'APER' tells the task to calculate principal components from only the
pixels within the photometric aperture stored in the input file (e.g.
only those pixels summed by the Kepler pipeline to produce the light
curve archived at MAST (note that no such light curves are currently
being created for the K2 mission)
* A filename describing the desired photometric aperture. Such a file
can be constructed using the kepmask or kepffi tools, or can be created
manually using the format described in the documentation for those
        tools. Note that if an aperture provided is not strictly rectangular,
keppca will increase the size of the aperture so that it defines the
smallest possible rectangle that contains all of the specified pixels.
components : str
        A list of the principal components to subtract from the aperture light
        curve. The strings '1 2 3 4 5', '1,2,3,4,5' and '1,2,3-5' all yield the
        same result.
plotpca : bool
If True, keppca will produce plots containing individual principal
components, correlation maps and light curves, both aperture and
        PCA-corrected versions. These will be stored as hardcopies in PNG format.
nmaps : int
The number of correlation maps and principal components to plot as
output. This can be any positive integer up to the number of pixels
within the mask, although note that many hundreds of plots will likely
become prohibitive and is unlikely to be informative.
overwrite : bool
Overwrite the output file?
verbose : bool
Print informative messages and warnings to the shell and logfile?
logfile : str
        Name of the logfile containing error and warning messages.
Examples
--------
.. code-block:: bash
$ keppca ktwo202073445-c00_lpd-targ.fits.gz --plotpca
.. image:: ../_static/images/api/keppca.png
:align: center
"""
import mdp
if outfile is None:
outfile = infile.split('.')[0] + "-{}.fits".format(__all__[0])
# log the call
hashline = '--------------------------------------------------------------'
kepmsg.log(logfile, hashline, verbose)
call = ('KEPPCA -- '
+ ' infile={}'.format(infile)
+ ' maskfile={}'.format(maskfile)
+ ' outfile={}'.format(outfile)
+ ' components={}'.format(components)
+ ' plotpca={}'.format(plotpca)
+ ' nmaps={}'.format(nmaps)
+ ' overwrite={}'.format(overwrite)
+ ' verbose={}'.format(verbose)
+ ' logfile={}'.format(logfile))
kepmsg.log(logfile, call + '\n', verbose)
kepmsg.clock('KEPPCA started at', logfile, verbose)
# overwrite output file
if overwrite:
kepio.overwrite(outfile, logfile, verbose)
if kepio.fileexists(outfile):
errmsg = ('ERROR -- KEPPCA: {} exists. Use overwrite=True'
.format(outfile))
kepmsg.err(logfile, errmsg, verbose)
# Set output file names - text file with data and plot
dataout = np.copy(outfile)
repname = re.sub('.fits', '.png', outfile)
# open input file
instr = pyfits.open(infile, mode='readonly', memmap=True)
tstart, tstop, bjdref, cadence = kepio.timekeys(instr, infile, logfile,
verbose)
# open TPF FITS file
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, barytime = \
kepio.readTPF(infile, 'TIME', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, tcorr = \
kepio.readTPF(infile, 'TIMECORR', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, cadno = \
kepio.readTPF(infile, 'CADENCENO', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, fluxpixels = \
kepio.readTPF(infile, 'FLUX', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, errpixels = \
kepio.readTPF(infile, 'FLUX_ERR', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, flux_bkg = \
kepio.readTPF(infile, 'FLUX_BKG', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, flux_bkg_err = \
kepio.readTPF(infile, 'FLUX_BKG_ERR', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, qual = \
kepio.readTPF(infile, 'QUALITY', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, pcorr1 = \
kepio.readTPF(infile, 'POS_CORR1', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, pcorr2 = \
kepio.readTPF(infile, 'POS_CORR2', logfile ,verbose)
# Save original data dimensions, in case of using maskfile
xdimorig = xdim
ydimorig = ydim
# read mask definition file if it has been supplied
if 'aper' not in maskfile.lower() and maskfile.lower() != 'all':
maskx = np.array([], 'int')
masky = np.array([], 'int')
lines = kepio.openascii(maskfile, 'r', logfile, verbose)
for line in lines:
line = line.strip().split('|')
if len(line) == 6:
y0 = int(line[3])
x0 = int(line[4])
line = line[5].split(';')
for items in line:
try:
masky = np.append(masky, y0 + int(items.split(',')[0]))
maskx = np.append(maskx, x0 + int(items.split(',')[1]))
except:
continue
kepio.closeascii(lines, logfile, verbose)
if len(maskx) == 0 or len(masky) == 0:
errmsg = 'ERROR -- KEPPCA: {} contains no pixels.'.format(maskfile)
kepmsg.err(logfile, errmsg, verbose)
xdim = max(maskx) - min(maskx) + 1 # Find largest x dimension of mask
ydim = max(masky) - min(masky) + 1 # Find largest y dimension of mask
# pad mask to ensure it is rectangular
workx = np.array([], 'int')
worky = np.array([], 'int')
for ip in np.arange(min(maskx), max(maskx) + 1):
for jp in np.arange(min(masky), max(masky) + 1):
                workx = np.append(workx, ip)
                worky = np.append(worky, jp)
<filename>main_esvit.py
# coding=utf-8
# Modified by <NAME> (<EMAIL>)
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import datetime
import time
import math
import json
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torchvision import datasets, transforms
from torchvision import models as torchvision_models
import utils
import models.vision_transformer as vits
from models.vision_transformer import DINOHead
from models import build_model
from timm.data import create_transform
from timm.data.auto_augment import rand_augment_transform, augment_and_mix_transform, auto_augment_transform
from timm.data.transforms import _pil_interp, RandomResizedCropAndInterpolation, ToNumpy, ToTensor
from timm.data.random_erasing import RandomErasing
from timm.data import Mixup
from config import config
from config import update_config
from config import save_config
from datasets import build_dataloader
torchvision_archs = sorted(name for name in torchvision_models.__dict__
if name.islower() and not name.startswith("__")
and callable(torchvision_models.__dict__[name]))
def get_args_parser():
parser = argparse.ArgumentParser('EsViT', add_help=False)
parser.add_argument('--cfg',
help='experiment configure file name',
type=str)
# Model parameters
parser.add_argument('--arch', default='deit_small', type=str,
choices=['cvt_tiny', 'cvt_small', 'swin_tiny','swin_small', 'swin_base', 'swin_large', 'swin', 'vil', 'vil_1281', 'vil_2262', 'vil_14121', 'deit_tiny', 'deit_small', 'vit_base'] + torchvision_archs,
help="""Name of architecture to train. For quick experiments with ViTs,
we recommend using deit_tiny or deit_small.""")
parser.add_argument('--patch_size', default=16, type=int, help="""Size in pixels
of input square patches - default 16 (for 16x16 patches). Using smaller
values leads to better performance but requires more memory. Applies only
for ViTs (deit_tiny, deit_small and vit_base). If <16, we recommend disabling
mixed precision training (--use_fp16 false) to avoid unstabilities.""")
parser.add_argument('--out_dim', default=65536, type=int, help="""Dimensionality of
the DINO head output. For complex and large datasets large values (like 65k) work well.""")
parser.add_argument('--norm_last_layer', default=True, type=utils.bool_flag,
help="""Whether or not to weight normalize the last layer of the DINO head.
Not normalizing leads to better performance but can make the training unstable.
        In our experiments, we typically set this parameter to False with deit_small and True with vit_base.""")
parser.add_argument('--momentum_teacher', default=0.996, type=float, help="""Base EMA
parameter for teacher update. The value is increased to 1 during training with cosine schedule.
We recommend setting a higher value with small batches: for example use 0.9995 with batch size of 256.""")
parser.add_argument('--use_bn_in_head', default=False, type=utils.bool_flag,
help="Whether to use batch normalizations in projection head (Default: False)")
parser.add_argument('--use_dense_prediction', default=False, type=utils.bool_flag,
help="Whether to use dense prediction in projection head (Default: False)")
# Temperature teacher parameters
parser.add_argument('--warmup_teacher_temp', default=0.04, type=float,
help="""Initial value for the teacher temperature: 0.04 works well in most cases.
Try decreasing it if the training loss does not decrease.""")
parser.add_argument('--teacher_temp', default=0.04, type=float, help="""Final value (after linear warmup)
of the teacher temperature. For most experiments, anything above 0.07 is unstable. We recommend
starting with the default value of 0.04 and increase this slightly if needed.""")
parser.add_argument('--warmup_teacher_temp_epochs', default=0, type=int,
help='Number of warmup epochs for the teacher temperature (Default: 30).')
# Training/Optimization parameters
parser.add_argument('--use_fp16', type=utils.bool_flag, default=True, help="""Whether or not
to use half precision for training. Improves training time and memory requirements,
but can provoke instability and slight decay of performance. We recommend disabling
mixed precision if the loss is unstable, if reducing the patch size or if training with bigger ViTs.""")
parser.add_argument('--weight_decay', type=float, default=0.04, help="""Initial value of the
weight decay. With ViT, a smaller value at the beginning of training works well.""")
parser.add_argument('--weight_decay_end', type=float, default=0.4, help="""Final value of the
weight decay. We use a cosine schedule for WD and using a larger decay by
the end of training improves performance for ViTs.""")
parser.add_argument('--clip_grad', type=float, default=3.0, help="""Maximal parameter
gradient norm if using gradient clipping. Clipping with norm .3 ~ 1.0 can
help optimization for larger ViT architectures. 0 for disabling.""")
parser.add_argument('--batch_size_per_gpu', default=64, type=int,
help='Per-GPU batch-size : number of distinct images loaded on one GPU.')
parser.add_argument('--epochs', default=100, type=int, help='Number of epochs of training.')
parser.add_argument('--freeze_last_layer', default=1, type=int, help="""Number of epochs
during which we keep the output layer fixed. Typically doing so during
the first epoch helps training. Try increasing this value if the loss does not decrease.""")
parser.add_argument("--lr", default=0.0005, type=float, help="""Learning rate at the end of
linear warmup (highest LR used during training). The learning rate is linearly scaled
with the batch size, and specified here for a reference batch size of 256.""")
parser.add_argument("--warmup_epochs", default=10, type=int,
help="Number of epochs for the linear learning-rate warm up.")
parser.add_argument('--min_lr', type=float, default=1e-6, help="""Target LR at the
end of optimization. We use a cosine LR schedule with linear warmup.""")
parser.add_argument('--optimizer', default='adamw', type=str,
choices=['adamw', 'sgd', 'lars'], help="""Type of optimizer. We recommend using adamw with ViTs.""")
# Multi-crop parameters
parser.add_argument('--global_crops_scale', type=float, nargs='+', default=(0.4, 1.),
help="""Scale range of the cropped image before resizing, relatively to the origin image.
Used for large global view cropping. When disabling multi-crop (--local_crops_number 0), we
recommand using a wider range of scale ("--global_crops_scale 0.14 1." for example)""")
parser.add_argument('--local_crops_number', type=int, nargs='+', default=(8,), help="""Number of small
local views to generate. Set this parameter to 0 to disable multi-crop training.
When disabling multi-crop we recommend to use "--global_crops_scale 0.14 1." """)
parser.add_argument('--local_crops_scale', type=float, nargs='+', default=(0.05, 0.4),
help="""Scale range of the cropped image before resizing, relatively to the origin image.
Used for small local view cropping of multi-crop.""")
parser.add_argument('--local_crops_size', type=int, nargs='+', default=(96,), help="""Crop region size of local views to generate.
When disabling multi-crop we recommend to use "--local_crops_size 96." """)
# Augmentation parameters
    parser.add_argument('--aug-opt', type=str, default='dino_aug', metavar='NAME',
                        help='Use different data augmentation policy [deit_aug, dino_aug, mocov2_aug, basic_aug] (default: dino_aug)')
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
    parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy, e.g. "v0" or "original" (default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--train-interpolation', type=str, default='bicubic',
help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--use_mixup', type=utils.bool_flag, default=False, help="""Whether or not to use mixup/mixcut for self-supervised learning.""")
parser.add_argument('--num_mixup_views', type=int, default=10, help="""Number of views to apply mixup/mixcut """)
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
    parser.add_argument('--smoothing', type=float, default=0.0, help='Label smoothing (default: 0.0)')
# Dataset
parser.add_argument('--dataset', default="imagenet1k", type=str, help='Pre-training dataset.')
parser.add_argument('--zip_mode', type=utils.bool_flag, default=False, help="""Whether or not to use zip file.""")
parser.add_argument('--tsv_mode', type=utils.bool_flag, default=False, help="""Whether or not to use tsv file.""")
parser.add_argument('--sampler', default="distributed", type=str, help='Sampler for dataloader.')
# Misc
parser.add_argument('--data_path', default='/path/to/imagenet/train/', type=str,
help='Please specify path to the ImageNet training data.')
parser.add_argument('--pretrained_weights_ckpt', default='', type=str, help="Path to pretrained weights to evaluate.")
parser.add_argument('--output_dir', default=".", type=str, help='Path to save logs and checkpoints.')
parser.add_argument('--saveckp_freq', default=5, type=int, help='Save checkpoint every x epochs.')
parser.add_argument('--seed', default=0, type=int, help='Random seed.')
parser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.')
parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up
distributed training; see https://pytorch.org/docs/stable/distributed.html""")
parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.")
parser.add_argument('opts',
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER)
return parser
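# Typical entry point sketch (an assumption, mirroring DINO-style mains):
#     parser = argparse.ArgumentParser('EsViT', parents=[get_args_parser()])
#     args = parser.parse_args()
#     Path(args.output_dir).mkdir(parents=True, exist_ok=True)
#     train_esvit(args)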
def train_esvit(args):
utils.init_distributed_mode(args)
utils.fix_random_seeds(args.seed)
print("git:\n {}\n".format(utils.get_sha()))
print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = False
# ============ preparing data ... ============
data_loader = build_dataloader(args)
# setup mixup / cutmix
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active and args.use_mixup:
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.batch_size_per_gpu)
    # ============ building student and teacher networks ... ============
import tensorflow as tf
from .activations import swish, mish
from tensorflow.keras.layers import Dense
from Nn.layers import Noisy, mlp
from GCN.layers import GraphConvolution
from Attention.attention import MultiHeadAttention
import numpy as np
activation_fn = 'tanh'
initKernelAndBias = {
'kernel_initializer': tf.random_normal_initializer(0.0, .1),
    'bias_initializer': tf.constant_initializer(0.1)  # in TF 2.x there is no need to specify dtype
}
class Model(tf.keras.Model):
def __init__(self, visual_net, *args, **kwargs):
super(Model, self).__init__(*args, **kwargs)
self.visual_net = visual_net
self.tv = []
self.tv += self.visual_net.trainable_variables
def call(self, vector_input, visual_input, *args, **kwargs):
'''
args: action, reward, done. etc ...
'''
features = self.visual_net(visual_input)
ret = self.init_or_run(
tf.concat((vector_input, features), axis=-1),
*args,
**kwargs)
return ret
def update_vars(self):
self.tv += self.trainable_variables
def init_or_run(self, x):
raise NotImplementedError
class actor_dpg(Model):
'''
use for DDPG and/or TD3 algorithms' actor network.
input: vector of state
output: deterministic action(mu) and disturbed action(action) given a state
'''
def __init__(self, vector_dim, output_shape, name, hidden_units, *, visual_net):
super().__init__(visual_net, name=name)
self.net = mlp(hidden_units, output_shape=output_shape, out_activation='tanh')
self.init_or_run(tf.keras.Input(shape=vector_dim + self.visual_net.hdim))
self.update_vars()
def init_or_run(self, x):
mu = self.net(x)
return mu
class actor_dpg_gcn(tf.keras.Model):
def __init__(self, vector_dim, output_shape, name, hidden_units, visual_net, **kwargs):
super(actor_dpg_gcn, self).__init__(name=name, **kwargs)
self.visual_net = visual_net
self.tv = []
self.tv += self.visual_net.trainable_variables
self.gcn_layers = []
self.gcn_layers.append(GraphConvolution(input_dim=64,
output_dim=64,
num_features_nonzero=0,
activation=tf.nn.relu,
dropout=0.5,
is_sparse_inputs=False,
bias = False,
featureless = False))
# self.attention_layer = MultiHeadAttention(d_model=64, num_heads=8)
self.layer_x_embeding = Dense(64, activation='tanh')
self.layer_a1 = Dense(64, activation='tanh')
# self.layer_a2 = Dense(64, activation='tanh')
self.net = Dense(output_shape, activation='tanh')
self.init_or_run(tf.keras.Input(shape=(vector_dim[0], vector_dim[0])), tf.keras.Input(shape=vector_dim))
self.update_vars()
def call(self, adj, x, visual_input):
features = self.visual_net(visual_input)
ret = self.init_or_run(adj, x)
return ret
def init_or_run(self, adj, x):
x = self.layer_x_embeding(x)
outputs = [x]
for layer in self.gcn_layers:
hidden = layer((outputs[-1], adj))
outputs.append(hidden)
output = outputs[-1]
# out, attn = self.attention_layer(x, k=x, q=x, mask=None)
# indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
# params = [[['a0', 'b0'], ['c0', 'd0']],
# [['a1', 'b1'], ['c1', 'd1']]]
# output = [['b0', 'b1'], ['d0', 'c1']]
# indices = []
# if out.shape[0] == None:
# for j in range(out.shape[2]):
# indices.append([0, j])
# else:
# for i in range(out.shape[0]):
# indice = []
# for j in range(out.shape[2]):
# indice.append([i, 0, j])
# indices.append(indice)
#
# out = tf.gather_nd(params=out, indices=tf.convert_to_tensor(np.asarray(indices), dtype='int32'), name=None)
layer_a1 = self.layer_a1(output[:, 0, :])
# layer_a2 = self.layer_a2(layer_a1)
ret = self.net(layer_a1)
return ret
def update_vars(self):
self.tv += self.trainable_variables
class actor_mu(Model):
'''
use for PPO/PG algorithms' actor network.
input: vector of state
output: stochastic action(mu), normally is the mean of a Normal distribution
'''
def __init__(self, vector_dim, output_shape, name, hidden_units, *, visual_net):
super().__init__(visual_net, name=name)
self.net = mlp(hidden_units, output_shape=output_shape, out_activation='tanh')
        self.init_or_run(tf.keras.Input(shape=vector_dim + self.visual_net.hdim))
self.update_vars()
def init_or_run(self, x):
mu = self.net(x)
return mu
class actor_continuous(Model):
'''
use for continuous action space.
input: vector of state
    output: mean(mu) and log standard deviation(log_std) of the Gaussian distribution of actions given a state
'''
def __init__(self, vector_dim, output_shape, name, hidden_units, *, visual_net):
super().__init__(visual_net, name=name)
self.share = mlp(hidden_units['share'], out_layer=False)
self.mu = mlp(hidden_units['mu'], output_shape=output_shape, out_activation=None)
self.log_std = mlp(hidden_units['log_std'], output_shape=output_shape, out_activation='tanh')
self.init_or_run(tf.keras.Input(shape=vector_dim + self.visual_net.hdim))
self.update_vars()
def init_or_run(self, x):
x = self.share(x)
mu = self.mu(x)
log_std = self.log_std(x)
return (mu, log_std)
class actor_continuous_gcn(tf.keras.Model):
def __init__(self, vector_dim, output_shape, name, hidden_units, visual_net, **kwargs):
super(actor_continuous_gcn, self).__init__(name=name, **kwargs)
self.visual_net = visual_net
self.tv = []
self.tv += self.visual_net.trainable_variables
self.gcn_layers1 = GraphConvolution(input_dim=64,
output_dim=64,
num_features_nonzero=0,
activation=tf.nn.relu,
dropout=0.5,
is_sparse_inputs=False,
bias=False,
featureless=False)
# self.gcn_layers2 = GraphConvolution(input_dim=64,
# output_dim=64,
# num_features_nonzero=0,
# activation=tf.nn.relu,
# dropout=0.5,
# is_sparse_inputs=False,
# bias=False,
# featureless=False)
#
# self.attention_layer1 = MultiHeadAttention(d_model=64, num_heads=8)
# self.attention_layer2 = MultiHeadAttention(d_model=64, num_heads=8)
self.layer_x_embeding = Dense(64, activation='tanh')
# self.share = mlp(hidden_units['share'], out_layer=False)
self.layer_1 = Dense(64, activation='tanh')
self.mu = mlp(hidden_units['mu'], output_shape=output_shape, out_activation=None)
self.log_std = mlp(hidden_units['log_std'], output_shape=output_shape, out_activation='tanh')
self.init_or_run(tf.keras.Input(shape=(vector_dim[0], vector_dim[0])), tf.keras.Input(shape=vector_dim))
self.update_vars()
def call(self, adj, x, visual_input):
features = self.visual_net(visual_input)
mu, log_std = self.init_or_run(adj, x)
return (mu, log_std)
def init_or_run(self, adj, x):
x = self.layer_x_embeding(x)
hidden1 = self.gcn_layers1((x, adj))
# out1, attn1 = self.attention_layer1(hidden1, k=hidden1, q=hidden1, mask=None)
#
# hidden2 = self.gcn_layers1((out1, adj))
# out2, attn2 = self.attention_layer2(hidden2, k=hidden2, q=hidden2, mask=None)
layer_1 = self.layer_1(tf.concat((x[:, 0, :], hidden1[:, 0, :]), axis=-1))
# layer_1 = self.layer_1(x[:, 0, :])
mu = self.mu(layer_1)
log_std = self.log_std(layer_1)
return (mu, log_std)
def update_vars(self):
self.tv += self.trainable_variables
class actor_discrete(Model):
'''
use for discrete action space.
input: vector of state
output: probability distribution of actions given a state
'''
def __init__(self, vector_dim, output_shape, name, hidden_units, *, visual_net):
super().__init__(visual_net, name=name)
self.logits = mlp(hidden_units, output_shape=output_shape, out_activation=None)
self.init_or_run(tf.keras.Input(shape=vector_dim + self.visual_net.hdim))
self.update_vars()
def init_or_run(self, x):
logits = self.logits(x)
return logits
class critic_q_one(Model):
'''
use for evaluate the value given a state-action pair.
input: tf.concat((state, action),axis = 1)
output: q(s,a)
'''
def __init__(self, vector_dim, action_dim, name, hidden_units, *, visual_net):
super().__init__(visual_net, name=name)
self.net = mlp(hidden_units, output_shape=1, out_activation=None)
self.init_or_run(tf.keras.Input(shape=vector_dim + self.visual_net.hdim), tf.keras.Input(shape=action_dim))
self.update_vars()
def init_or_run(self, x, a):
q = self.net(tf.concat((x, a), axis=-1))
return q
class critic_q_one_gcn(tf.keras.Model):
def __init__(self, vector_dim, output_shape, name, hidden_units, visual_net, **kwargs):
super(critic_q_one_gcn, self).__init__(name=name, **kwargs)
self.visual_net = visual_net
self.tv = []
self.tv += self.visual_net.trainable_variables
# self.gcn_layers = []
# self.gcn_layers.append(GraphConvolution(input_dim=128,
# output_dim=128,
# num_features_nonzero=0,
# activation=tf.nn.relu,
# dropout=0.5,
# is_sparse_inputs=False))
# self.attention_layer = MultiHeadAttention(d_model=64, num_heads=8)
self.layer_x_embeding = Dense(64, activation='tanh')
self.layer_c1 = Dense(32, activation='tanh')
self.layer_c2 = Dense(64, activation='tanh')
self.value = Dense(1)
self.init_or_run(tf.keras.Input(shape=(vector_dim[0], vector_dim[0])), tf.keras.Input(shape=vector_dim), tf.keras.Input(shape=output_shape))
self.update_vars()
def call(self, adj, x, visual_input, a):
features = self.visual_net(visual_input)
ret = self.init_or_run(adj, x, a)
return ret
def init_or_run(self, adj, x, a):
x = self.layer_x_embeding(x)
# outputs = [x]
# for layer in self.gcn_layers:
# hidden = layer((outputs[-1], adj))
# outputs.append(hidden)
# output = outputs[-1]
# out, attn = self.attention_layer(x, k=x, q=x, mask=None)
action_emb = self.layer_c1(a)
state_action = tf.concat([x[:, 0, :], action_emb], -1)
layer_c2 = self.layer_c2(state_action)
value = self.value(layer_c2)
return value
def update_vars(self):
self.tv += self.trainable_variables
class critic_q_one2(Model):
'''
Original architecture in DDPG paper.
s-> layer -> feature, then tf.concat(feature, a) -> layer -> output
'''
def __init__(self, vector_dim, action_dim, name, hidden_units, *, visual_net):
        assert len(hidden_units) > 1, "if you want to use this architecture of critic network, the number of layers must be greater than 1"
super().__init__(visual_net, name=name)
self.state_feature_net = mlp(hidden_units[0:1])
self.net = mlp(hidden_units[1:], output_shape=1, out_activation=None)
self.init_or_run(tf.keras.Input(shape=vector_dim + self.visual_net.hdim), tf.keras.Input(shape=action_dim))
self.update_vars()
def init_or_run(self, x, a):
features = self.state_feature_net(x)
q = self.net(tf.concat((x, a), axis=-1))
return q
class critic_q_one3(Model):
'''
Original architecture in TD3 paper.
tf.concat(s,a) -> layer -> feature, then tf.concat(feature, a) -> layer -> output
'''
def __init__(self, vector_dim, action_dim, name, hidden_units, *, visual_net):
        assert len(hidden_units) > 1, "if you want to use this architecture of critic network, the number of layers must be greater than 1"
super().__init__(visual_net, name=name)
self.feature_net = mlp(hidden_units[0:1])
self.net = mlp(hidden_units[1:], output_shape=1, out_activation=None)
self.init_or_run(tf.keras.Input(shape=vector_dim + self.visual_net.hdim), tf.keras.Input(shape=action_dim))
self.update_vars()
def init_or_run(self, x, a):
features = self.feature_net(tf.concat((x, a), axis=-1))
q = self.net(tf.concat((features, a), axis=-1))
return q
class critic_v(Model):
'''
use for evaluate the value given a state.
input: vector of state
output: v(s)
'''
def __init__(self, vector_dim, name, hidden_units, *, visual_net):
super().__init__(visual_net, name=name)
self.net = mlp(hidden_units, output_shape=1, out_activation=None)
self.init_or_run(tf.keras.Input(shape=vector_dim + self.visual_net.hdim))
self.update_vars()
def init_or_run(self, x):
v = self.net(x)
return v
class critic_q_all(Model):
'''
use for evaluate all values of Q(S,A) given a state. must be discrete action space.
input: vector of state
output: q(s, *)
'''
def __init__(self, vector_dim, output_shape, name, hidden_units, *, visual_net):
super().__init__(visual_net, name=name)
self.net = mlp(hidden_units, output_shape=output_shape, out_activation=None)
self.init_or_run(tf.keras.Input(shape=vector_dim + self.visual_net.hdim))
self.update_vars()
def init_or_run(self, x):
q = self.net(x)
return q
class critic_q_all_gcn(tf.keras.Model):
def __init__(self, vector_dim, output_shape, name, hidden_units, visual_net, **kwargs):
super(critic_q_all_gcn, self).__init__(name=name, **kwargs)
self.visual_net = visual_net
self.tv = []
self.tv += self.visual_net.trainable_variables
# self.gcn_layers = []
# self.gcn_layers.append(GraphConvolution(input_dim=64,
# output_dim=64,
# num_features_nonzero=0,
# activation=tf.nn.relu,
# dropout=0.5,
# is_sparse_inputs=False))
# self.attention_layer1 = MultiHeadAttention(d_model=64, num_heads=8)
# self.attention_layer2 = MultiHeadAttention(d_model=64, num_heads=8)
self.layer_x_embeding = Dense(64, activation='tanh')
self.layer_c1 = Dense(64, activation='tanh')
# self.layer_c2 = Dense(64, activation='tanh')
self.value = Dense(output_shape)
self.init_or_run(tf.keras.Input(shape=(vector_dim[0], vector_dim[0])), tf.keras.Input(shape=vector_dim))
self.update_vars()
def call(self, adj, x, visual_input):
features = self.visual_net(visual_input)
value = self.init_or_run(adj, x)
return value
def init_or_run(self, adj, x):
x = self.layer_x_embeding(x)
# outputs = [x]
# for layer in self.gcn_layers:
# hidden = layer((outputs[-1], adj))
# outputs.append(hidden)
# output = outputs[-1]
# out1, attn1 = self.attention_layer1(x, k=x, q=x, mask=None)
# out2, attn2 = self.attention_layer2(out1, k=out1, q=out1, mask=None)
layer_c1 = self.layer_c1(x[:, 0, :])
value = self.value(layer_c1)
return value
def update_vars(self):
self.tv += self.trainable_variables
class drqn_critic_q_all(Model):
'''
    used to evaluate all Q(s, a) values for a given state; requires a discrete action space.
input: vector of state
    output: q(s, *)
    '''
# Repository: hamzy/OpenBMC
#!/usr/bin/env python
"""
Object library to interact with an OpenBMC controller.
"""
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Documentation:
#
# OpenBMC cheatsheet
# https://github.com/openbmc/docs/blob/master/cheatsheet.md
#
# OpenBMC REST API
# https://github.com/openbmc/docs/blob/master/rest-api.md
#
# OpenBMC DBUS API
# https://github.com/openbmc/docs/blob/master/dbus-interfaces.md
#
# https://github.com/openbmc/openbmc-test-automation/blob/master/lib/utils.robot
#
# https://github.com/causten/tools/tree/master/obmc
#
# pylint: disable=too-many-arguments
# pylint: disable=too-few-public-methods
# pylint: disable=global-statement
# What is with [invalid-name] Invalid variable name "fp"
# pylint: disable=invalid-name
from __future__ import print_function
import json
import os
import sys
import requests
# Defined as a constant mainly so request lines fit within 78 characters
JSON_HEADERS = {"Content-Type": "application/json"}
DEBUG = False
def set_debug(value):
"""Set the debugging level"""
global DEBUG
DEBUG = value
class HTTPError(Exception):
"""Custom HTTP error exception"""
def __init__(self, url, status_code, data=None):
super(HTTPError, self).__init__("%s - %s - %s" % (url,
status_code,
data, ))
self.url = url
self.status_code = status_code
self.data = data
def __str__(self):
if self.data is not None:
return "HTTP Error %s: %s %s" % (self.status_code,
self.url,
self.data, )
else:
return "HTTP Error %s: %s" % (self.status_code,
self.url, )
def __repr__(self):
return self.__str__()
def get_status_code(self):
"""Return the status code"""
return self.status_code
def safe_filename(filename):
"""Turn a URL into a safe filename"""
filename = filename.replace(':', "%"+hex(ord(':')))
filename = filename.replace('/', "%"+hex(ord('/')))
return filename
def load_response_from_file(filename,
url,
verify,
headers,
data=None):
"""Loads a response from a saved file"""
if DEBUG:
print("load_response_from_file (%s)" % (filename, ))
# import pdb
# pdb.set_trace()
if os.path.isfile(filename):
saved = None
with open(filename, "r") as fp:
json_data = fp.read()
saved = json.loads(json_data)
if ((data is not None) and
(saved["data"] != data)):
return None
if (saved["url"] == url and
saved["verify"] == verify and
saved["headers"] == headers):
response = CachedResponse(saved["status_code"],
saved["json_struct"])
return response
return None
def write_response_to_file(filename,
url,
verify,
headers,
status_code,
json_struct,
data=None):
"""Saves a file containing the response"""
to_save = {}
to_save["url"] = url
if data is not None:
to_save["data"] = data
to_save["verify"] = verify
to_save["headers"] = headers
to_save["status_code"] = status_code
to_save["json_struct"] = json_struct
json_data = json.dumps(to_save)
msg = "write_response_to_file: json_data = %s" % (json_data, )
if DEBUG:
print(msg)
# What is with [invalid-name] Invalid variable name "fp"
with open(filename, "w") as fp:
fp.write(json_data)
class CachedSession(object):
"""online or offline support for a requests.Session()"""
def __init__(self, online):
self.session = requests.Session()
self.online = online
def post(self, url, data, verify, headers):
"""Replaces session.post()"""
msg = ("CachedSession:post:IN: url = %s, data = %s, verify = %s,"
" headers = %s") % (url, data, verify, headers, )
if DEBUG:
print(msg)
filename = safe_filename(url)
ret = None
if self.online:
response = self.session.post(url,
data=data,
verify=verify,
headers=headers)
ret = CachedResponse(response)
else:
ret = load_response_from_file(filename,
url,
verify,
headers,
data)
if ret is None:
raise Exception("Danger <NAME>!")
msg = "CachedSession:post:OUT: ret = %s" % (ret, )
if DEBUG:
print(msg)
if self.online:
write_response_to_file(filename,
url,
verify,
headers,
response.status_code,
response.json(),
data)
return ret
def get(self, url, verify, headers):
"""Replaces session.get()"""
msg = ("CachedSession:get:IN: url = %s, verify = %s,"
" headers = %s") % (url, verify, headers, )
if DEBUG:
print(msg)
filename = safe_filename(url)
ret = None
if self.online:
response = self.session.get(url,
verify=verify,
headers=headers)
ret = CachedResponse(response)
else:
ret = load_response_from_file(filename,
url,
verify,
headers)
if ret is None:
raise Exception("D<NAME>!")
msg = "CachedSession:get:OUT: ret = %s" % (ret, )
if DEBUG:
print(msg)
if self.online:
write_response_to_file(filename,
url,
verify,
headers,
response.status_code,
response.json())
return ret
class CachedResponse(object):
"""online or offline support for a session response object"""
def __init__(self, *args, **_):
# args -- tuple of anonymous arguments
# _/kwargs -- dictionary of named arguments
self.status_code = None
self.json_struct = None
if len(args) == 1:
if isinstance(args[0], requests.models.Response):
response = args[0]
self.status_code = response.status_code
self.json_struct = response.json()
elif len(args) == 2:
if (isinstance(args[0],
int) and
isinstance(args[1],
dict)):
self.status_code = args[0]
self.json_struct = args[1]
if self.status_code is None or self.json_struct is None:
raise Exception("ruhroh")
def __getattribute__(self, name):
# We may need to call the base's getattribute to return
# stuff we support because we are limiting what you can
# ask for here.
if name in ['status_code', 'json', 'json_struct']:
return object.__getattribute__(self, name)
else:
raise AttributeError(name)
def json(self):
"""Return the JSON data"""
return self.json_struct
class OpenBMC(object):
"""Operations against a controller running OpenBMC"""
def __init__(self,
hostname,
user,
password,
online):
self.session = None
self.hostname = hostname
self.verbose = False
session = CachedSession(online)
# Log in with a special URL and JSON data structure
url = "https://%s/login" % (hostname, )
login_data = json.dumps({"data": [user, password]})
response = session.post(url,
data=login_data,
verify=False,
headers=JSON_HEADERS)
if response.status_code != 200:
err_str = ("Error: Response code to login is not 200!"
" (%d)" % (response.status_code, ))
print(err_str, file=sys.stderr)
raise HTTPError(url,
response.status_code,
data=login_data)
self.session = session
def set_verbose(self, value):
"""Set the verbosity to value"""
self.verbose = value
set_debug(value)
def enumerate(self, key):
"""Enumerate the provided key"""
if key.startswith("/"):
path = key[1:]
else:
path = key
if path.endswith("/"):
path = path + "enumerate"
else:
path = path + "/enumerate"
return self.get(path)
def get(self, key):
"""Get the value for the provided key"""
if key.startswith("/"):
path = key[1:]
else:
path = key
url = "https://%s/%s" % (self.hostname, path, )
if self.verbose:
print("GET %s" % (url, ))
response = self.session.get(url,
verify=False,
headers=JSON_HEADERS)
if response.status_code != 200:
err_str = ("Error: Response code to get %s enumerate is not 200!"
" (%d)" % (key, response.status_code, ))
print(err_str, file=sys.stderr)
raise HTTPError(url, response.status_code)
return response.json()["data"]
def _filter_org_openbmc_control(self, filter_list):
"""Filter /org/openbmc/control against the provided filter list"""
# Enumerate the inventory of the system's control hardware
mappings = {}
try:
items = self.enumerate("/org/openbmc/control/").items()
except HTTPError as ex:
if ex.get_status_code() == 404:
# @BUG
# There is no /org/openbmc/control entry?!
entries = self.get("/org/openbmc/")
msg = "Error: no /org/openbmc/control in %s" % (entries, )
raise Exception(msg)
else:
raise
# Loop through the returned map items
for (item_key, item_value) in items:
# We only care about filter entries
if not any(x in item_key for x in filter_list):
continue
if self.verbose:
print("Found:")
print(item_key)
print(item_value)
# Add the entry into our mappings
for fltr in filter_list:
idx = item_key.find(fltr)
if idx > -1:
# Get the identity (the rest of the string)
ident = item_key[idx+len(fltr):]
# Create a new map for the first time
if ident not in mappings:
mappings[ident] = {}
# Save both the full filename and map contents
mappings[ident][fltr] = (item_key, item_value)
return mappings
def _power_common(self, with_state_do):
# Query /org/openbmc/control for power and chassis entries
filter_list = ["control/power", "control/chassis"]
mappings = self._filter_org_openbmc_control(filter_list)
if mappings is None:
return False
# Loop through the found power & chassis entries
for (_, ident_mappings) in mappings.items():
# { '/power':
# ( u'/org/openbmc/control/power0',
# {u'pgood': 1,
# u'poll_interval': 3000,
# u'pgood_timeout': 10,
# u'heatbeat': 0,
# u'state': 1
# }
# ),
# '/chassis':
# ( u'/org/openbmc/control/chassis0',
# {u'reboot': 0,
# u'uuid': u'24340d83aa784d858468993286b390a5'
# }
# )
# }
# Grab our information back out of the mappings
(power_url, power_mapping) = ident_mappings["control/power"]
(chassis_url, _) = ident_mappings["control/chassis"]
if self.verbose:
msg = "Current state of %s is %s" % (power_url,
power_mapping["state"], )
print(msg)
(url, jdata) = with_state_do(power_mapping["state"],
self.hostname,
chassis_url)
if url is None:
return False
if self.verbose:
print("POST %s with %s" % (url, jdata, ))
response = self.session.post(url,
data=jdata,
verify=False,
headers=JSON_HEADERS)
if response.status_code != 200:
err_str = ("Error: Response code to PUT is not 200!"
" (%d)" % (response.status_code, ))
print(err_str, file=sys.stderr)
raise HTTPError(url, response.status_code, data=jdata)
return True
def power_on(self):
"""Turn the power on"""
def with_state_off_do(state,
hostname,
chassis_url):
"""Do something with the state"""
url = None
jdata = None
if state == 0:
# power_on called and machine is off
url = "https://%s%s/action/powerOn" % (hostname,
chassis_url, )
jdata = json.dumps({"data": []})
elif state == 1:
# power_on called and machine is on
pass
return (url, jdata)
return self._power_common(with_state_off_do)
def power_off(self):
"""Turn the power off"""
def with_state_on_do(state,
hostname,
chassis_url):
"""Do something with the state"""
url = None
            jdata = None
            if state == 1:
                # power_off called and machine is on
                url = "https://%s%s/action/powerOff" % (hostname,
                                                        chassis_url, )
                jdata = json.dumps({"data": []})
            elif state == 0:
                # power_off called and machine is off
                pass
            return (url, jdata)
        return self._power_common(with_state_on_do)
783143, 783149, 783151, 783191, 783193,
783197, 783227, 783247, 783257, 783259, 783269, 783283, 783317,
783323, 783329, 783337, 783359, 783361, 783373, 783379, 783407,
783413, 783421, 783473, 783487, 783527, 783529, 783533, 783553,
783557, 783569, 783571, 783599, 783613, 783619, 783641, 783647,
783661, 783677, 783689, 783691, 783701, 783703, 783707, 783719,
783721, 783733, 783737, 783743, 783749, 783763, 783767, 783779,
783781, 783787, 783791, 783793, 783799, 783803, 783829, 783869,
783877, 783931, 783953, 784009, 784039, 784061, 784081, 784087,
784097, 784103, 784109, 784117, 784129, 784153, 784171, 784181,
784183, 784211, 784213, 784219, 784229, 784243, 784249, 784283,
784307, 784309, 784313, 784321, 784327, 784349, 784351, 784367,
784373, 784379, 784387, 784409, 784411, 784423, 784447, 784451,
784457, 784463, 784471, 784481, 784489, 784501, 784513, 784541,
784543, 784547, 784561, 784573, 784577, 784583, 784603, 784627,
784649, 784661, 784687, 784697, 784717, 784723, 784727, 784753,
784789, 784799, 784831, 784837, 784841, 784859, 784867, 784897,
784913, 784919, 784939, 784957, 784961, 784981, 785003, 785017,
785033, 785053, 785093, 785101, 785107, 785119, 785123, 785129,
785143, 785153, 785159, 785167, 785203, 785207, 785219, 785221,
785227, 785249, 785269, 785287, 785293, 785299, 785303, 785311,
785321, 785329, 785333, 785341, 785347, 785353, 785357, 785363,
785377, 785413, 785423, 785431, 785459, 785461, 785483, 785501,
785503, 785527, 785537, 785549, 785569, 785573, 785579, 785591,
785597, 785623, 785627, 785641, 785651, 785671, 785693, 785717,
785731, 785737, 785753, 785773, 785777, 785779, 785801, 785803,
785809, 785839, 785857, 785861, 785879, 785903, 785921, 785923,
785947, 785951, 785963, 786001, 786013, 786017, 786031, 786047,
786053, 786059, 786061, 786077, 786109, 786127, 786151, 786167,
786173, 786179, 786197, 786211, 786223, 786241, 786251, 786271,
786307, 786311, 786319, 786329, 786337, 786349, 786371, 786407,
786419, 786431, 786433, 786449, 786469, 786491, 786547, 786551,
786553, 786587, 786589, 786613, 786629, 786659, 786661, 786673,
786691, 786697, 786701, 786703, 786707, 786719, 786739, 786763,
786803, 786823, 786829, 786833, 786859, 786881, 786887, 786889,
786901, 786931, 786937, 786941, 786949, 786959, 786971, 786979,
786983, 787021, 787043, 787051, 787057, 787067, 787069, 787079,
787091, 787099, 787123, 787139, 787153, 787181, 787187, 787207,
787217, 787243, 787261, 787277, 787289, 787309, 787331, 787333,
787337, 787357, 787361, 787427, 787429, 787433, 787439, 787447,
787469, 787477, 787483, 787489, 787513, 787517, 787519, 787529,
787537, 787541, 787547, 787573, 787601, 787609, 787621, 787639,
787649, 787667, 787697, 787711, 787747, 787751, 787757, 787769,
787771, 787777, 787783, 787793, 787807, 787811, 787817, 787823,
787837, 787879, 787883, 787903, 787907, 787939, 787973, 787981,
787993, 787999, 788009, 788023, 788027, 788033, 788041, 788071,
788077, 788087, 788089, 788093, 788107, 788129, 788153, 788159,
788167, 788173, 788189, 788209, 788213, 788231, 788261, 788267,
788287, 788309, 788317, 788321, 788351, 788353, 788357, 788363,
788369, 788377, 788383, 788387, 788393, 788399, 788413, 788419,
788429, 788449, 788467, 788479, 788497, 788521, 788527, 788531,
788537, 788549, 788561, 788563, 788569, 788603, 788621, 788651,
788659, 788677, 788687, 788701, 788719, 788761, 788779, 788789,
788813, 788819, 788849, 788863, 788867, 788869, 788873, 788891,
788897, 788903, 788927, 788933, 788941, 788947, 788959, 788971,
788993, 788999, 789001, 789017, 789029, 789031, 789067, 789077,
789091, 789097, 789101, 789109, 789121, 789133, 789137, 789149,
789169, 789181, 789221, 789227, 789251, 789311, 789323, 789331,
789343, 789367, 789377, 789389, 789391, 789407, 789419, 789443,
789473, 789491, 789493, 789511, 789527, 789533, 789557, 789571,
789577, 789587, 789589, 789611, 789623, 789631, 789653, 789671,
789673, 789683, 789689, 789709, 789713, 789721, 789731, 789739,
789749, 789793, 789823, 789829, 789847, 789851, 789857, 789883,
789941, 789959, 789961, 789967, 789977, 789979, 790003, 790021,
790033, 790043, 790051, 790057, 790063, 790087, 790093, 790099,
790121, 790169, 790171, 790189, 790199, 790201, 790219, 790241,
790261, 790271, 790277, 790289, 790291, 790327, 790331, 790333,
790351, 790369, 790379, 790397, 790403, 790417, 790421, 790429,
790451, 790459, 790481, 790501, 790513, 790519, 790523, 790529,
790547, 790567, 790583, 790589, 790607, 790613, 790633, 790637,
790649, 790651, 790693, 790697, 790703, 790709, 790733, 790739,
790747, 790753, 790781, 790793, 790817, 790819, 790831, 790843,
790861, 790871, 790879, 790883, 790897, 790927, 790957, 790961,
790967, 790969, 790991, 790997, 791003, 791009, 791017, 791029,
791047, 791053, 791081, 791093, 791099, 791111, 791117, 791137,
791159, 791191, 791201, 791209, 791227, 791233, 791251, 791257,
791261, 791291, 791309, 791311, 791317, 791321, 791347, 791363,
791377, 791387, 791411, 791419, 791431, 791443, 791447, 791473,
791489, 791519, 791543, 791561, 791563, 791569, 791573, 791599,
791627, 791629, 791657, 791663, 791677, 791699, 791773, 791783,
791789, 791797, 791801, 791803, 791827, 791849, 791851, 791887,
791891, 791897, 791899, 791909, 791927, 791929, 791933, 791951,
791969, 791971, 791993, 792023, 792031, 792037, 792041, 792049,
792061, 792067, 792073, 792101, 792107, 792109, 792119, 792131,
792151, 792163, 792179, 792223, 792227, 792229, 792241, 792247,
792257, 792263, 792277, 792283, 792293, 792299, 792301, 792307,
792317, 792359, 792371, 792377, 792383, 792397, 792413, 792443,
792461, 792479, 792481, 792487, 792521, 792529, 792551, 792553,
792559, 792563, 792581, 792593, 792601, 792613, 792629, 792637,
792641, 792643, 792647, 792667, 792679, 792689, 792691, 792697,
792703, 792709, 792713, 792731, 792751, 792769, 792793, 792797,
792821, 792871, 792881, 792893, 792907, 792919, 792929, 792941,
792959, 792973, 792983, 792989, 792991, 793043, 793069, 793099,
793103, 793123, 793129, 793139, 793159, 793181, 793187, 793189,
793207, 793229, 793253, 793279, 793297, 793301, 793327, 793333,
793337, 793343, 793379, 793399, 793439, 793447, 793453, 793487,
793489, 793493, 793511, 793517, 793519, 793537, 793547, 793553,
793561, 793591, 793601, 793607, 793621, 793627, 793633, 793669,
793673, 793691, 793699, 793711, 793717, 793721, 793733, 793739,
793757, 793769, 793777, 793787, 793789, 793813, 793841, 793843,
793853, 793867, 793889, 793901, 793927, 793931, 793939, 793957,
793967, 793979, 793981, 793999, 794009, 794011, 794023, 794033,
794039, 794041, 794063, 794071, 794077, 794089, 794111, 794113,
794119, 794137, 794141, 794149, 794153, 794161, 794173, 794179,
794191, 794201, 794203, 794207, 794221, 794231, 794239, 794249,
794327, 794341, 794363, 794383, 794389, 794399, 794407, 794413,
794449, 794471, 794473, 794477, 794483, 794491, 794509, 794531,
794537, 794543, 794551, 794557, 794569, 794579, 794587, 794593,
794641, 794653, 794657, 794659, 794669, 794693, 794711, 794741,
794743, 794749, 794779, 794831, 794879, 794881, 794887, 794921,
794923, 794953, 794957, 794993, 794999, 795001, 795007, 795023,
795071, 795077, 795079, 795083, 795097, 795101, 795103, 795121,
795127, 795139, 795149, 795161, 795187, 795203, 795211, 795217,
795233, 795239, 795251, 795253, 795299, 795307, 795323, 795329,
795337, 795343, 795349, 795427, 795449, 795461, 795467, 795479,
795493, 795503, 795517, 795527, 795533, 795539, 795551, 795581,
795589, 795601, 795643, 795647, 795649, 795653, 795659, 795661,
795667, 795679, 795703, 795709, 795713, 795727, 795737, 795761,
795763, 795791, 795793, 795797, 795799, 795803, 795827, 795829,
795871, 795877, 795913, 795917, 795931, 795937, 795941, 795943,
795947, 795979, 795983, 795997, 796001, 796009, 796063, 796067,
796091, 796121, 796139, 796141, 796151, 796171, 796177, 796181,
796189, 796193, 796217, 796247, 796259, 796267, 796291, 796303,
796307, 796337, 796339, 796361, 796363, 796373, 796379, 796387,
796391, 796409, 796447, 796451, 796459, 796487, 796493, 796517,
796531, 796541, 796553, 796561, 796567, 796571, 796583, 796591,
796619, 796633, 796657, 796673, 796687, 796693, 796699, 796709,
796711, 796751, 796759, 796769, 796777, 796781, 796799, 796801,
796813, 796819, 796847, 796849, 796853, 796867, 796871, 796877,
796889, 796921, 796931, 796933, 796937, 796951, 796967, 796969,
796981, 797003, 797009, 797021, 797029, 797033, 797039, 797051,
797053, 797057, 797063, 797077, 797119, 797131, 797143, 797161,
797171, 797201, 797207, 797273, 797281, 797287, 797309, 797311,
797333, 797353, 797359, 797383, 797389, 797399, 797417, 797429,
797473, 797497, 797507, 797509, 797539, 797549, 797551, 797557,
797561, 797567, 797569, 797579, 797581, 797591, 797593, 797611,
797627, 797633, 797647, 797681, 797689, 797701, 797711, 797729,
797743, 797747, 797767, 797773, 797813, 797833, 797851, 797869,
797887, 797897, 797911, 797917, 797933, 797947, 797957, 797977,
797987, 798023, 798043, 798059, 798067, 798071, 798079, 798089,
798097, 798101, 798121, 798131, 798139, 798143, 798151, 798173,
798179, 798191, 798197, 798199, 798221, 798223, 798227, 798251,
798257, 798263, 798271, 798293, 798319, 798331, 798373, 798383,
798397, 798403, 798409, 798443, 798451, 798461, 798481, 798487,
798503, 798517, 798521, 798527, 798533, 798569, 798599, 798613,
798641, 798647, 798649, 798667, 798691, 798697, 798701, 798713,
798727, 798737, 798751, 798757, 798773, 798781, 798799, 798823,
798871, 798887, 798911, 798923, 798929, 798937, 798943, 798961,
799003, 799021, 799031, 799061, 799063, 799091, 799093, 799103,
799147, 799151, 799171, 799217, 799219, 799223, 799259, 799291,
799301, 799303, 799307, 799313, 799333, 799343, 799361, 799363,
799369, 799417, 799427, 799441, 799453, 799471, 799481, 799483,
799489, 799507, 799523, 799529, 799543, 799553, 799573, 799609,
799613, 799619, 799621, 799633, 799637, 799651, 799657, 799661,
799679, 799723, 799727, 799739, 799741, 799753, 799759, 799789,
799801, 799807, 799817, 799837, 799853, 799859, 799873, 799891,
799921, 799949, 799961, 799979, 799991, 799993, 799999, 800011,
800029, 800053, 800057, 800077, 800083, 800089, 800113, 800117,
800119, 800123, 800131, 800143, 800159, 800161, 800171, 800209,
800213, 800221, 800231, 800237, 800243, 800281, 800287, 800291,
    800311, 800329, 800333,
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.obj.diskfile"""
import cPickle as pickle
import os
import errno
import mock
import unittest
import email
import tempfile
import xattr
from shutil import rmtree
from time import time
from tempfile import mkdtemp
from hashlib import md5
from contextlib import closing, nested
from gzip import GzipFile
from eventlet import tpool
from test.unit import FakeLogger, mock as unit_mock, temptree
from swift.obj import diskfile
from swift.common import utils
from swift.common.utils import hash_path, mkdirs, normalize_timestamp
from swift.common import ring
from swift.common.exceptions import DiskFileNotExist, DiskFileQuarantined, \
DiskFileDeviceUnavailable, DiskFileDeleted, DiskFileNotOpen, \
DiskFileError, ReplicationLockTimeout, PathNotDir, DiskFileCollision, \
DiskFileExpired, SwiftException, DiskFileNoSpace
def _create_test_ring(path):
testgz = os.path.join(path, 'object.ring.gz')
intended_replica2part2dev_id = [
[0, 1, 2, 3, 4, 5, 6],
[1, 2, 3, 0, 5, 6, 4],
[2, 3, 0, 1, 6, 4, 5]]
intended_devs = [
{'id': 0, 'device': 'sda', 'zone': 0, 'ip': '127.0.0.0', 'port': 6000},
{'id': 1, 'device': 'sda', 'zone': 1, 'ip': '127.0.0.1', 'port': 6000},
{'id': 2, 'device': 'sda', 'zone': 2, 'ip': '127.0.0.2', 'port': 6000},
{'id': 3, 'device': 'sda', 'zone': 4, 'ip': '127.0.0.3', 'port': 6000},
{'id': 4, 'device': 'sda', 'zone': 5, 'ip': '127.0.0.4', 'port': 6000},
{'id': 5, 'device': 'sda', 'zone': 6,
'ip': 'fe80::202:b3ff:fe1e:8329', 'port': 6000},
{'id': 6, 'device': 'sda', 'zone': 7,
'ip': '2001:0db8:85a3:0000:0000:8a2e:0370:7334', 'port': 6000}]
intended_part_shift = 30
intended_reload_time = 15
with closing(GzipFile(testgz, 'wb')) as f:
pickle.dump(
ring.RingData(intended_replica2part2dev_id, intended_devs,
intended_part_shift),
f)
return ring.Ring(path, ring_name='object',
reload_time=intended_reload_time)
class TestDiskFileModuleMethods(unittest.TestCase):
def setUp(self):
utils.HASH_PATH_SUFFIX = 'endcap'
utils.HASH_PATH_PREFIX = ''
# Setup a test ring (stolen from common/test_ring.py)
self.testdir = tempfile.mkdtemp()
self.devices = os.path.join(self.testdir, 'node')
rmtree(self.testdir, ignore_errors=1)
os.mkdir(self.testdir)
os.mkdir(self.devices)
os.mkdir(os.path.join(self.devices, 'sda'))
self.objects = os.path.join(self.devices, 'sda', 'objects')
os.mkdir(self.objects)
self.parts = {}
for part in ['0', '1', '2', '3']:
self.parts[part] = os.path.join(self.objects, part)
os.mkdir(os.path.join(self.objects, part))
self.ring = _create_test_ring(self.testdir)
self.conf = dict(
swift_dir=self.testdir, devices=self.devices, mount_check='false',
timeout='300', stats_interval='1')
self.df_mgr = diskfile.DiskFileManager(self.conf, FakeLogger())
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
def test_quarantine_renamer(self):
# we use this for convenience, not really about a diskfile layout
df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
mkdirs(df._datadir)
exp_dir = os.path.join(self.devices, 'quarantined', 'objects',
os.path.basename(df._datadir))
qbit = os.path.join(df._datadir, 'qbit')
with open(qbit, 'w') as f:
f.write('abc')
to_dir = diskfile.quarantine_renamer(self.devices, qbit)
self.assertEqual(to_dir, exp_dir)
self.assertRaises(OSError, diskfile.quarantine_renamer, self.devices,
qbit)
def test_hash_suffix_enoent(self):
self.assertRaises(PathNotDir, diskfile.hash_suffix,
os.path.join(self.testdir, "doesnotexist"), 101)
def test_hash_suffix_oserror(self):
mocked_os_listdir = mock.Mock(
side_effect=OSError(errno.EACCES, os.strerror(errno.EACCES)))
with mock.patch("os.listdir", mocked_os_listdir):
self.assertRaises(OSError, diskfile.hash_suffix,
os.path.join(self.testdir, "doesnotexist"), 101)
def test_hash_suffix_hash_dir_is_file_quarantine(self):
df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
mkdirs(os.path.dirname(df._datadir))
open(df._datadir, 'wb').close()
ohash = hash_path('a', 'c', 'o')
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects, '0', data_dir)
orig_quarantine_renamer = diskfile.quarantine_renamer
called = [False]
def wrapped(*args, **kwargs):
called[0] = True
return orig_quarantine_renamer(*args, **kwargs)
try:
diskfile.quarantine_renamer = wrapped
diskfile.hash_suffix(whole_path_from, 101)
finally:
diskfile.quarantine_renamer = orig_quarantine_renamer
self.assertTrue(called[0])
def test_hash_suffix_one_file(self):
df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
mkdirs(df._datadir)
f = open(
os.path.join(df._datadir,
normalize_timestamp(time() - 100) + '.ts'),
'wb')
f.write('1234567890')
f.close()
ohash = hash_path('a', 'c', 'o')
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects, '0', data_dir)
diskfile.hash_suffix(whole_path_from, 101)
self.assertEquals(len(os.listdir(self.parts['0'])), 1)
diskfile.hash_suffix(whole_path_from, 99)
self.assertEquals(len(os.listdir(self.parts['0'])), 0)
def test_hash_suffix_oserror_on_hcl(self):
df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
mkdirs(df._datadir)
f = open(
os.path.join(df._datadir,
normalize_timestamp(time() - 100) + '.ts'),
'wb')
f.write('1234567890')
f.close()
ohash = hash_path('a', 'c', 'o')
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects, '0', data_dir)
state = [0]
orig_os_listdir = os.listdir
def mock_os_listdir(*args, **kwargs):
# We want the first call to os.listdir() to succeed, which is the
# one directly from hash_suffix() itself, but then we want to fail
# the next call to os.listdir() which is from
# hash_cleanup_listdir()
if state[0] == 1:
raise OSError(errno.EACCES, os.strerror(errno.EACCES))
state[0] = 1
return orig_os_listdir(*args, **kwargs)
with mock.patch('os.listdir', mock_os_listdir):
self.assertRaises(OSError, diskfile.hash_suffix, whole_path_from,
101)
def test_hash_suffix_multi_file_one(self):
df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
mkdirs(df._datadir)
for tdiff in [1, 50, 100, 500]:
for suff in ['.meta', '.data', '.ts']:
f = open(
os.path.join(
df._datadir,
normalize_timestamp(int(time()) - tdiff) + suff),
'wb')
f.write('1234567890')
f.close()
ohash = hash_path('a', 'c', 'o')
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects, '0', data_dir)
hsh_path = os.listdir(whole_path_from)[0]
whole_hsh_path = os.path.join(whole_path_from, hsh_path)
diskfile.hash_suffix(whole_path_from, 99)
# only the tombstone should be left
self.assertEquals(len(os.listdir(whole_hsh_path)), 1)
def test_hash_suffix_multi_file_two(self):
df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
mkdirs(df._datadir)
for tdiff in [1, 50, 100, 500]:
suffs = ['.meta', '.data']
if tdiff > 50:
suffs.append('.ts')
for suff in suffs:
f = open(
os.path.join(
df._datadir,
normalize_timestamp(int(time()) - tdiff) + suff),
'wb')
f.write('1234567890')
f.close()
ohash = hash_path('a', 'c', 'o')
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects, '0', data_dir)
hsh_path = os.listdir(whole_path_from)[0]
whole_hsh_path = os.path.join(whole_path_from, hsh_path)
diskfile.hash_suffix(whole_path_from, 99)
# only the meta and data should be left
self.assertEquals(len(os.listdir(whole_hsh_path)), 2)
def test_invalidate_hash(self):
def assertFileData(file_path, data):
with open(file_path, 'r') as fp:
fdata = fp.read()
self.assertEquals(pickle.loads(fdata), pickle.loads(data))
df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
mkdirs(df._datadir)
ohash = hash_path('a', 'c', 'o')
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects, '0', data_dir)
hashes_file = os.path.join(self.objects, '0',
diskfile.HASH_FILE)
        # test that the exception from a nonexistent file is caught
self.assertEquals(diskfile.invalidate_hash(whole_path_from),
None)
# test that hashes get cleared
check_pickle_data = pickle.dumps({data_dir: None},
diskfile.PICKLE_PROTOCOL)
for data_hash in [{data_dir: None}, {data_dir: 'abcdefg'}]:
with open(hashes_file, 'wb') as fp:
pickle.dump(data_hash, fp, diskfile.PICKLE_PROTOCOL)
diskfile.invalidate_hash(whole_path_from)
assertFileData(hashes_file, check_pickle_data)
def test_invalidate_hash_bad_pickle(self):
df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
mkdirs(df._datadir)
ohash = hash_path('a', 'c', 'o')
data_dir = ohash[-3:]
whole_path_from = os.path.join(self.objects, '0', data_dir)
hashes_file = os.path.join(self.objects, '0',
diskfile.HASH_FILE)
for data_hash in [{data_dir: None}, {data_dir: 'abcdefg'}]:
with open(hashes_file, 'wb') as fp:
fp.write('bad hash data')
try:
diskfile.invalidate_hash(whole_path_from)
except Exception as err:
self.fail("Unexpected exception raised: %s" % err)
else:
pass
def test_get_hashes(self):
df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
mkdirs(df._datadir)
with open(
os.path.join(df._datadir,
normalize_timestamp(time()) + '.ts'),
'wb') as f:
f.write('1234567890')
part = os.path.join(self.objects, '0')
hashed, hashes = diskfile.get_hashes(part)
self.assertEquals(hashed, 1)
self.assert_('a83' in hashes)
hashed, hashes = diskfile.get_hashes(part, do_listdir=True)
self.assertEquals(hashed, 0)
self.assert_('a83' in hashes)
hashed, hashes = diskfile.get_hashes(part, recalculate=['a83'])
self.assertEquals(hashed, 1)
self.assert_('a83' in hashes)
def test_get_hashes_bad_dir(self):
df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
mkdirs(df._datadir)
with open(os.path.join(self.objects, '0', 'bad'), 'wb') as f:
f.write('1234567890')
part = os.path.join(self.objects, '0')
hashed, hashes = diskfile.get_hashes(part)
self.assertEquals(hashed, 1)
self.assert_('a83' in hashes)
self.assert_('bad' not in hashes)
def test_get_hashes_unmodified(self):
df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
mkdirs(df._datadir)
with open(
os.path.join(df._datadir,
normalize_timestamp(time()) + '.ts'),
'wb') as f:
f.write('1234567890')
part = os.path.join(self.objects, '0')
hashed, hashes = diskfile.get_hashes(part)
i = [0]
def _getmtime(filename):
i[0] += 1
return 1
with unit_mock({'swift.obj.diskfile.getmtime': _getmtime}):
hashed, hashes = diskfile.get_hashes(
part, recalculate=['a83'])
self.assertEquals(i[0], 2)
def test_get_hashes_unmodified_norecalc(self):
df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
mkdirs(df._datadir)
with open(
os.path.join(df._datadir,
normalize_timestamp(time()) + '.ts'),
'wb') as f:
f.write('1234567890')
part = os.path.join(self.objects, '0')
hashed, hashes_0 = diskfile.get_hashes(part)
self.assertEqual(hashed, 1)
self.assertTrue('a83' in hashes_0)
hashed, hashes_1 = diskfile.get_hashes(part)
self.assertEqual(hashed, 0)
self.assertTrue('a83' in hashes_0)
self.assertEqual(hashes_1, hashes_0)
def test_get_hashes_hash_suffix_error(self):
df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
mkdirs(df._datadir)
with open(
os.path.join(df._datadir,
normalize_timestamp(time()) + '.ts'),
'wb') as f:
f.write('1234567890')
part = os.path.join(self.objects, '0')
mocked_hash_suffix = mock.MagicMock(
side_effect=OSError(errno.EACCES, os.strerror(errno.EACCES)))
with mock.patch('swift.obj.diskfile.hash_suffix', mocked_hash_suffix):
hashed, hashes = diskfile.get_hashes(part)
self.assertEqual(hashed, 0)
self.assertEqual(hashes, {'a83': None})
def test_get_hashes_unmodified_and_zero_bytes(self):
df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
mkdirs(df._datadir)
part = os.path.join(self.objects, '0')
open(os.path.join(part, diskfile.HASH_FILE), 'w')
# Now the hash file is zero bytes.
i = [0]
def _getmtime(filename):
i[0] += 1
return 1
with unit_mock({'swift.obj.diskfile.getmtime': _getmtime}):
hashed, hashes = diskfile.get_hashes(
part, recalculate=[])
# getmtime will actually not get called. Initially, the pickle.load
# will raise an exception first and later, force_rewrite will
# short-circuit the if clause to determine whether to write out a
# fresh hashes_file.
self.assertEquals(i[0], 0)
self.assertTrue('a83' in hashes)
def test_get_hashes_modified(self):
df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
mkdirs(df._datadir)
with open(
os.path.join(df._datadir,
normalize_timestamp(time()) + '.ts'),
'wb') as f:
f.write('1234567890')
part = os.path.join(self.objects, '0')
hashed, hashes = diskfile.get_hashes(part)
i = [0]
def _getmtime(filename):
if i[0] < 3:
i[0] += 1
return i[0]
with unit_mock({'swift.obj.diskfile.getmtime': _getmtime}):
hashed, hashes = diskfile.get_hashes(
part, recalculate=['a83'])
self.assertEquals(i[0], 3)
def check_hash_cleanup_listdir(self, input_files, output_files):
file_list = list(input_files)
def mock_listdir(path):
return list(file_list)
def mock_unlink(path):
file_list.remove(os.path.basename(path))
with unit_mock({'os.listdir': mock_listdir, 'os.unlink': mock_unlink}):
self.assertEquals(diskfile.hash_cleanup_listdir('/whatever'),
output_files)
def test_hash_cleanup_listdir_purge_data_newer_ts(self):
        # purge .data if there is a newer .ts
        file1 = normalize_timestamp(time()) + '.data'
        file2 = normalize_timestamp(time() + 1) + '.ts'
        file_list = [file1, file2]
        self.check_hash_cleanup_listdir(file_list, [file2])
# File: smart-site-web/db.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @file: db.py
# @date: 2020/12/2
import json
import os
import time
from typing import List
import redis
import config
from utils import id_to_key, key_to_id, random_id, random_employee_id
# decode_responses=True makes Redis return strings instead of bytes
pool = redis.ConnectionPool(
host=config.redis_host,
port=config.redis_port,
decode_responses=True,
password=config.redis_password,
)
print(os.getcwd())
checkfiles_path = os.path.join(os.getcwd(), "static/checkfiles")
if not os.path.exists(checkfiles_path):
os.makedirs(checkfiles_path)
class UserInfo:
"""用户信息表:用户名,密码,用户身份"""
def __init__(self):
"""初始化表信息"""
self.__table_name = "UserInfo" # 表名
self.__r = redis.Redis(connection_pool=pool)
def __del__(self):
self.__r.close()
def is_exist(self, account: str) -> bool:
if self.__r.exists(id_to_key(self.__table_name, account)) == 1:
return True
return False
def insert(self, account: str, password: str, identity: str) -> bool:
"""
        Insert user info.
        :param account: account name
        :param password: password
        :param identity: user identity
        :return: True on success, False otherwise
"""
data = [account, password, identity]
return self.__r.setnx(id_to_key(self.__table_name, account), json.dumps(data))
def update(self, account: str, password: str, identity: str) -> bool:
"""
        Update user info.
        :param account: account name
        :param password: password
        :param identity: user identity
        :return: True on success, False otherwise
"""
data = [account, password, identity]
if not self.is_exist(account):
return False
self.__r.set(id_to_key(self.__table_name, account), json.dumps(data))
return True
def delete(self, account: str) -> bool:
"""
        Delete user info.
        :param account: account name
        :return: True on success, False otherwise
"""
if self.__r.delete(id_to_key(self.__table_name, account)) == 1:
return True
return False
def get(self, account: str) -> List:
"""
        Get user info.
        :param account: account name
        :return: [account, password, identity]
"""
if not self.is_exist(account):
return []
return json.loads(self.__r.get(id_to_key(self.__table_name, account)))
class ClientInfo:
"""委托方信息表:委托方ID、委托方名称、委托方描述等"""
def __init__(self):
"""初始化表信息"""
self.__table_name = "ClientInfo" # 表名
self.__r = redis.Redis(connection_pool=pool)
def __del__(self):
self.__r.close()
def is_exist(self, client_id: str) -> bool:
if self.__r.exists(id_to_key(self.__table_name, client_id)) == 1:
return True
return False
def insert(self, client_name: str, client_description: str) -> bool:
"""
        Insert client info.
        :param client_name: client name
        :param client_description: client description
        :return: True on success, False otherwise
"""
        client_id = random_id()  # generate a client ID
while self.is_exist(client_id):
client_id = random_id()
data = [client_id, client_name, client_description]
return self.__r.setnx(id_to_key(self.__table_name, client_id), json.dumps(data))
def update(self, client_id: str, client_name: str, client_description: str) -> bool:
"""
        Update client info.
        :param client_id: client ID
        :param client_name: client name
        :param client_description: client description
        :return: True on success, False otherwise
"""
data = [client_id, client_name, client_description]
if not self.is_exist(client_id):
return False
self.__r.set(id_to_key(self.__table_name, client_id), json.dumps(data))
return True
def delete(self, client_id: str) -> bool:
"""
        Delete client info.
        :param client_id: client ID
        :return: True on success, False otherwise
"""
if self.__r.delete(id_to_key(self.__table_name, client_id)) == 1:
return True
return False
def get(self, client_id: str) -> List:
"""
        Get client info.
        :param client_id: client ID
        :return: [client ID, client name, client description]
"""
if not self.is_exist(client_id):
return []
return json.loads(self.__r.get(id_to_key(self.__table_name, client_id)))
def get_all(self):
clients = self.__r.keys(pattern="ClientInfo:*")
res = []
for client in clients:
data = self.get(key_to_id(client))
res.append(
{
"clientId": data[0],
"clientName": data[1],
"clientDescription": data[2],
}
)
return res
class ProjectInfo:
"""项目信息表:项目ID、委托方ID、检查体系ID、项目状态、项目风险值、项目创建时间、项目描述、项目负责人等"""
def __init__(self):
"""初始化表信息"""
self.__table_name = "ProjectInfo" # 表名
self.__r = redis.Redis(connection_pool=pool)
def __del__(self):
self.__r.close()
def is_exist(self, project_id: str) -> bool:
if self.__r.exists(id_to_key(self.__table_name, project_id)) == 1:
return True
return False
def insert(
self,
client_id: str,
check_system_id: str,
project_status: str,
project_risk_value: int,
project_description: str,
project_manager: str,
project_check_group_id: str,
) -> bool:
"""
        Insert project info.
        :param client_id: client ID
        :param check_system_id: check-system ID
        :param project_status: project status
        :param project_risk_value: project risk value
        :param project_description: project description
        :param project_manager: project manager
        :param project_check_group_id: check-group ID
        :return: True on success, False otherwise
"""
        project_id = random_id()  # generate a project ID
while self.is_exist(project_id):
project_id = random_id()
data = [
project_id,
client_id,
check_system_id,
project_status,
project_risk_value,
            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),  # project creation time
project_description,
project_manager,
project_check_group_id,
]
return self.__r.setnx(
id_to_key(self.__table_name, project_id), json.dumps(data)
)
def update(
self,
project_id,
client_id,
check_system_id,
project_status,
project_risk_value,
creation_time,
project_description,
project_manager,
project_check_group_id,
):
"""
        Update project info.
        :param project_id: project ID
        :param client_id: client ID
        :param check_system_id: check-system ID
        :param project_status: project status
        :param project_risk_value: project risk value
        :param creation_time: project creation time
        :param project_description: project description
        :param project_manager: project manager
        :param project_check_group_id: check-group ID
        :return: True on success, False otherwise
"""
data = [
project_id,
client_id,
check_system_id,
project_status,
project_risk_value,
creation_time,
project_description,
project_manager,
project_check_group_id,
]
if not self.is_exist(project_id):
return False
self.__r.set(id_to_key(self.__table_name, project_id), json.dumps(data))
return True
def delete(self, project_id: str) -> bool:
"""
        Delete project info.
        :param project_id: project ID
        :return: True on success, False otherwise
"""
if self.__r.delete(id_to_key(self.__table_name, project_id)) == 1:
return True
return False
def get(self, project_id: str) -> List:
"""
        Get project info.
        :param project_id: project ID
        :return: [project ID, client ID, check-system ID, project status, risk value, creation time, description, project manager]
"""
if not self.is_exist(project_id):
return []
return json.loads(self.__r.get(id_to_key(self.__table_name, project_id)))
def get_all(self):
projects = self.__r.keys(pattern="ProjectInfo:*")
res = []
for project in projects:
data = self.get(key_to_id(project))
res.append(
{
"projectId": data[0],
"clientId": data[1],
"projectCheckSystemId": data[2],
"projectStatus": self.get_proj_status(data),
"projectRiskValue": self.get_proj_risk_value(data),
"projectCreationTime": data[5],
"projectDescription": data[6],
"projectManager": data[7],
"projectCheckGroupId": data[8],
}
)
return res
def get_proj_status(self, data):
check_keys = self.__r.keys(pattern=f"CheckInfo:{data[0]}*")
table = CheckSystemInfo()
check_systems = table.get_children(data[2])
if len(check_keys) < len(check_systems):
return "未完成"
else:
return "已完成"
def get_proj_risk_value(self, data):
check_keys = self.__r.keys(pattern=f"CheckInfo:{data[0]}*")
if len(check_keys) == 0:
return 0
table = CheckInfo()
res = 0
for check_key in check_keys:
d = table.get(key_to_id(check_key))
res += int(d[4])
return res / len(check_keys)
class ContractInfo:
"""合同信息表:合同ID、合同内容、合同创建日期、委托方ID"""
def __init__(self):
"""初始化表信息"""
self.__table_name = "ContractInfo" # 表名
self.__r = redis.Redis(connection_pool=pool)
def __del__(self):
self.__r.close()
def is_exist(self, contract_id: str) -> bool:
if self.__r.exists(id_to_key(self.__table_name, contract_id)) == 1:
return True
return False
def insert(
self,
contract_content: str,
client_id: str,
) -> bool:
"""
        Insert contract info.
        :param contract_content: contract content
        :param client_id: client ID
        :return: True on success, False otherwise
"""
        contract_id = random_id()  # generate a contract ID
while self.is_exist(contract_id):
contract_id = random_id()
data = [
contract_id,
contract_content,
            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),  # contract creation date
client_id,
]
return self.__r.setnx(
id_to_key(self.__table_name, contract_id), json.dumps(data)
)
def update(
self,
contract_id: str,
contract_content: str,
contract_creation_date: str,
client_id: str,
) -> bool:
"""
        Update contract info.
        :param contract_id: contract ID
        :param contract_content: contract content
        :param contract_creation_date: contract creation time
        :param client_id: client ID
        :return: True on success, False otherwise
"""
data = [contract_id, contract_content, contract_creation_date, client_id]
if not self.is_exist(contract_id):
return False
self.__r.set(id_to_key(self.__table_name, contract_id), json.dumps(data))
return True
def delete(self, contract_id: str) -> bool:
"""
        Delete contract info.
        :param contract_id: contract ID
        :return: True on success, False otherwise
"""
if self.__r.delete(id_to_key(self.__table_name, contract_id)) == 1:
return True
return False
def get(self, contract_id: str) -> List:
"""
        Get contract info.
        :param contract_id: contract ID
        :return: [contract ID, contract content, creation time, client ID]
"""
if not self.is_exist(contract_id):
return []
return json.loads(self.__r.get(id_to_key(self.__table_name, contract_id)))
def get_all(self) -> List:
contracts = self.__r.keys(pattern="ContractInfo:*")
res = []
for contract in contracts:
data = self.get(key_to_id(contract))
res.append(
{
"contractId": data[0],
"contractDescription": data[1],
"createTime": data[2],
"clientId": data[3],
}
)
return res
class CheckInfo:
"""检查信息表:检查ID、项目ID、检查体系第一级ID、第二级ID、检查员员工ID、问题描述等"""
def __init__(self):
"""初始化信息表"""
self.__table_name = "CheckInfo" # 表名
self.__r = redis.Redis(connection_pool=pool)
def __del__(self):
self.__r.close()
def is_exist(self, check_id: str) -> bool:
if self.__r.exists(id_to_key(self.__table_name, check_id)) == 1:
return True
return False
def insert(
self,
check_id: str,
project_id: str,
check_system_route: str,
employee_id: str,
risk_value: str,
problem_description: str,
picture,
) -> bool:
"""
        Insert a check record.
        :param check_id: check ID
        :param project_id: project ID
        :param check_system_route: check-system route (e.g. safety check -> personnel safety check)
        :param employee_id: inspector employee ID
        :param risk_value: risk value
        :param problem_description: problem description
        :param picture: serialized picture
        :return: True on success, False otherwise
"""
data = [
check_id,
project_id,
check_system_route,
employee_id,
risk_value,
problem_description,
]
picture.save(f"{checkfiles_path}/{check_id}.jpg")
return self.__r.set(id_to_key(self.__table_name, check_id), json.dumps(data))
def delete(self, check_id: str) -> bool:
"""
        Delete a check record.
        :param check_id: check ID
        :return: True on success, False otherwise
"""
if self.__r.delete(id_to_key(self.__table_name, check_id)) == 1:
return True
return False
def get(self, check_id: str) -> List:
"""
        Get a check record.
        :param check_id: check ID
        :return: [check ID, project ID, check-system route, employee ID, risk value, problem description]
"""
if not self.is_exist(check_id):
return []
return json.loads(self.__r.get(id_to_key(self.__table_name, check_id)))
class CheckSystemInfo:
"""检查体系表:当前结点ID、前置结点ID(第一级改字段为0)等"""
def __init__(self):
"""初始化信息表"""
self.__table_name = "CheckSystemInfo" # 表名
self.__r = redis.Redis(connection_pool=pool)
def __del__(self):
self.__r.close()
def is_exist(self, check_system_id: str) -> bool:
if self.__r.exists(id_to_key(self.__table_name, check_system_id)) == 1:
return True
return False
def insert(
self, system_id: str, system_name: str, pre_id: str, system_description: str
):
"""
        Insert a node into the check-system tree.
        :param system_id: current check-system ID
        :param system_name: current check-system name
        :param pre_id: predecessor check-system ID
        :param system_description: check-system description
        :return: True on success, False otherwise
"""
data = [system_id, system_name, pre_id, system_description]
return self.__r.setnx(id_to_key(self.__table_name, system_id), json.dumps(data))
def update(
self,
system_id: str,
system_name: str,
pre_id: str,
system_description: str,
) -> bool:
"""
        Update a check-system node.
        :param system_id: current check-system ID
        :param system_name: current check-system name
        :param pre_id: predecessor check-system ID
        :param system_description: check-system description
        :return: True on success, False otherwise
"""
data = [system_id, system_name, pre_id, system_description]
if not self.is_exist(system_id):
return False
self.__r.set(id_to_key(self.__table_name, system_id), json.dumps(data))
return True
def delete(self, system_id: str) -> bool:
"""
        Delete a check-system node.
        :param system_id: check-system ID
        :return: True on success, False otherwise
"""
if self.__r.delete(id_to_key(self.__table_name, system_id)) == 1:
return True
return False
def get(self, system_id: str) -> List:
"""
        Get one check-system record.
        :param system_id: check-system ID
        :return: [current check-system ID, current name, predecessor ID, description]
"""
if not self.is_exist(system_id):
return []
return json.loads(self.__r.get(id_to_key(self.__table_name, system_id)))
def get_all(self):
check_system_keys = self.__r.keys(pattern="CheckSystemInfo:*")
res = []
table = CheckSystemInfo()
for check_system_key in check_system_keys:
tmp_data = table.get(key_to_id(check_system_key))
res.append(
{
"system_id": tmp_data[0], # 当前检查体系ID
"system_name": tmp_data[1], # 当前检查体系名称
"pre_id": tmp_data[2], # 前置检查体系ID
"system_description": tmp_data[3], # 检查体系描述
}
)
        return res
'action': {'key': 'action', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
value: Optional[str] = None,
action: Optional[Union[str, "TagAction"]] = None,
**kwargs
):
super(FilteringTag, self).__init__(**kwargs)
self.name = name
self.value = value
self.action = action
class IdentityProperties(msrest.serialization.Model):
"""Identity properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: The identity ID.
:vartype principal_id: str
:ivar tenant_id: The tenant ID of resource.
:vartype tenant_id: str
:param type: Managed identity type. Possible values include: "SystemAssigned".
:type type: str or ~azure.mgmt.elastic.models.ManagedIdentityTypes
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
type: Optional[Union[str, "ManagedIdentityTypes"]] = None,
**kwargs
):
super(IdentityProperties, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = type
class LogRules(msrest.serialization.Model):
"""Set of rules for sending logs for the Monitor resource.
:param send_aad_logs: Flag specifying if AAD logs should be sent for the Monitor resource.
:type send_aad_logs: bool
:param send_subscription_logs: Flag specifying if subscription logs should be sent for the
Monitor resource.
:type send_subscription_logs: bool
:param send_activity_logs: Flag specifying if activity logs from Azure resources should be sent
for the Monitor resource.
:type send_activity_logs: bool
:param filtering_tags: List of filtering tags to be used for capturing logs. This only takes
effect if SendActivityLogs flag is enabled. If empty, all resources will be captured. If only
Exclude action is specified, the rules will apply to the list of all available resources. If
Include actions are specified, the rules will only include resources with the associated tags.
:type filtering_tags: list[~azure.mgmt.elastic.models.FilteringTag]
"""
_attribute_map = {
'send_aad_logs': {'key': 'sendAadLogs', 'type': 'bool'},
'send_subscription_logs': {'key': 'sendSubscriptionLogs', 'type': 'bool'},
'send_activity_logs': {'key': 'sendActivityLogs', 'type': 'bool'},
'filtering_tags': {'key': 'filteringTags', 'type': '[FilteringTag]'},
}
def __init__(
self,
*,
send_aad_logs: Optional[bool] = None,
send_subscription_logs: Optional[bool] = None,
send_activity_logs: Optional[bool] = None,
filtering_tags: Optional[List["FilteringTag"]] = None,
**kwargs
):
super(LogRules, self).__init__(**kwargs)
self.send_aad_logs = send_aad_logs
self.send_subscription_logs = send_subscription_logs
self.send_activity_logs = send_activity_logs
self.filtering_tags = filtering_tags
class MonitoredResource(msrest.serialization.Model):
"""The properties of a resource currently being monitored by the Elastic monitor resource.
:param id: The ARM id of the resource.
:type id: str
:param sending_logs: Flag indicating the status of the resource for sending logs operation to
Elastic. Possible values include: "True", "False".
:type sending_logs: str or ~azure.mgmt.elastic.models.SendingLogs
:param reason_for_logs_status: Reason for why the resource is sending logs (or why it is not
sending).
:type reason_for_logs_status: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'sending_logs': {'key': 'sendingLogs', 'type': 'str'},
'reason_for_logs_status': {'key': 'reasonForLogsStatus', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
sending_logs: Optional[Union[str, "SendingLogs"]] = None,
reason_for_logs_status: Optional[str] = None,
**kwargs
):
super(MonitoredResource, self).__init__(**kwargs)
self.id = id
self.sending_logs = sending_logs
self.reason_for_logs_status = reason_for_logs_status
class MonitoredResourceListResponse(msrest.serialization.Model):
"""Response of a list operation.
:param value: Results of a list operation.
:type value: list[~azure.mgmt.elastic.models.MonitoredResource]
:param next_link: Link to the next set of results, if any.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[MonitoredResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["MonitoredResource"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(MonitoredResourceListResponse, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class MonitoringTagRules(msrest.serialization.Model):
"""Capture logs and metrics of Azure resources based on ARM tags.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Name of the rule set.
:vartype name: str
:ivar id: The id of the rule set.
:vartype id: str
:ivar type: The type of the rule set.
:vartype type: str
:param properties: Properties of the monitoring tag rules.
:type properties: ~azure.mgmt.elastic.models.MonitoringTagRulesProperties
:ivar system_data: The system metadata relating to this resource.
:vartype system_data: ~azure.mgmt.elastic.models.SystemData
"""
_validation = {
'name': {'readonly': True},
'id': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'MonitoringTagRulesProperties'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
def __init__(
self,
*,
properties: Optional["MonitoringTagRulesProperties"] = None,
**kwargs
):
super(MonitoringTagRules, self).__init__(**kwargs)
self.name = None
self.id = None
self.type = None
self.properties = properties
self.system_data = None
class MonitoringTagRulesListResponse(msrest.serialization.Model):
"""Response of a list operation.
:param value: Results of a list operation.
:type value: list[~azure.mgmt.elastic.models.MonitoringTagRules]
:param next_link: Link to the next set of results, if any.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[MonitoringTagRules]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["MonitoringTagRules"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(MonitoringTagRulesListResponse, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class MonitoringTagRulesProperties(msrest.serialization.Model):
"""Definition of the properties for a TagRules resource.
:param provisioning_state: Provisioning state of the monitoring tag rules. Possible values
include: "Accepted", "Creating", "Updating", "Deleting", "Succeeded", "Failed", "Canceled",
"Deleted", "NotSpecified".
:type provisioning_state: str or ~azure.mgmt.elastic.models.ProvisioningState
:param log_rules: Rules for sending logs.
:type log_rules: ~azure.mgmt.elastic.models.LogRules
"""
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'log_rules': {'key': 'logRules', 'type': 'LogRules'},
}
def __init__(
self,
*,
provisioning_state: Optional[Union[str, "ProvisioningState"]] = None,
log_rules: Optional["LogRules"] = None,
**kwargs
):
super(MonitoringTagRulesProperties, self).__init__(**kwargs)
self.provisioning_state = provisioning_state
self.log_rules = log_rules
class MonitorProperties(msrest.serialization.Model):
"""Properties specific to the monitor resource.
Variables are only populated by the server, and will be ignored when sending a request.
:param provisioning_state: Provisioning state of the monitor resource. Possible values include:
"Accepted", "Creating", "Updating", "Deleting", "Succeeded", "Failed", "Canceled", "Deleted",
"NotSpecified".
:type provisioning_state: str or ~azure.mgmt.elastic.models.ProvisioningState
:param monitoring_status: Flag specifying if the resource monitoring is enabled or disabled.
Possible values include: "Enabled", "Disabled".
:type monitoring_status: str or ~azure.mgmt.elastic.models.MonitoringStatus
:param elastic_properties: Elastic cloud properties.
:type elastic_properties: ~azure.mgmt.elastic.models.ElasticProperties
:param user_info: User information.
:type user_info: ~azure.mgmt.elastic.models.UserInfo
:ivar liftr_resource_category: Possible values include: "Unknown", "MonitorLogs".
:vartype liftr_resource_category: str or ~azure.mgmt.elastic.models.LiftrResourceCategories
:ivar liftr_resource_preference: The priority of the resource.
:vartype liftr_resource_preference: int
"""
_validation = {
'liftr_resource_category': {'readonly': True},
'liftr_resource_preference': {'readonly': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'monitoring_status': {'key': 'monitoringStatus', 'type': 'str'},
'elastic_properties': {'key': 'elasticProperties', 'type': 'ElasticProperties'},
'user_info': {'key': 'userInfo', 'type': 'UserInfo'},
'liftr_resource_category': {'key': 'liftrResourceCategory', 'type': 'str'},
'liftr_resource_preference': {'key': 'liftrResourcePreference', 'type': 'int'},
}
def __init__(
self,
*,
provisioning_state: Optional[Union[str, "ProvisioningState"]] = None,
monitoring_status: Optional[Union[str, "MonitoringStatus"]] = None,
elastic_properties: Optional["ElasticProperties"] = None,
user_info: Optional["UserInfo"] = None,
**kwargs
):
super(MonitorProperties, self).__init__(**kwargs)
self.provisioning_state = provisioning_state
self.monitoring_status = monitoring_status
self.elastic_properties = elastic_properties
self.user_info = user_info
self.liftr_resource_category = None
self.liftr_resource_preference = None
class OperationDisplay(msrest.serialization.Model):
"""The object that represents the operation.
:param provider: Service provider, i.e., Microsoft.Elastic.
:type provider: str
:param resource: Type on which the operation is performed, e.g., 'monitors'.
:type resource: str
:param operation: Operation type, e.g., read, write, delete, etc.
:type operation: str
:param description: Description of the operation, e.g., 'Write monitors'.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
provider: Optional[str] = None,
resource: Optional[str] = None,
operation: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = provider
self.resource = resource
self.operation = operation
self.description = description
class OperationListResult(msrest.serialization.Model):
"""Result of GET request to list the Microsoft.Elastic operations.
:param value: List of operations supported by the Microsoft.Elastic provider.
:type value: list[~azure.mgmt.elastic.models.OperationResult]
:param next_link: URL to get the next set of operation list results if there are any.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[OperationResult]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["OperationResult"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(OperationListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class OperationResult(msrest.serialization.Model):
"""A Microsoft.Elastic REST API operation.
:param name: Operation name, i.e., {provider}/{resource}/{operation}.
:type name: str
:param is_data_action: Indicates whether the operation is a data action.
:type is_data_action: bool
:param display: The object that represents the operation.
:type display: ~azure.mgmt.elastic.models.OperationDisplay
:param origin: Origin of the operation.
:type origin: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'origin': {'key': 'origin', 'type': 'str'},
}
    def __init__(
        self,
        *,
        name: Optional[str] = None,
        is_data_action: Optional[bool] = None,
        display: Optional["OperationDisplay"] = None,
        origin: Optional[str] = None,
        **kwargs
    ):
        super(OperationResult, self).__init__(**kwargs)
        self.name = name
        self.is_data_action = is_data_action
        self.display = display
        self.origin = origin
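# Hedged usage sketch (not part of the generated SDK): msrest-based models
# expose serialize(), which maps attributes to their wire-format keys and
# skips server-populated readonly fields. The values below are illustrative.
if __name__ == "__main__":
    _props = MonitorProperties(
        provisioning_state="Succeeded",
        monitoring_status="Enabled",
    )
    # Expected shape: {'provisioningState': 'Succeeded', 'monitoringStatus': 'Enabled'}
    print(_props.serialize())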
# dq/redis.py
import json
import logging
import redis
from dq.config import Config
from dq.logging import error
logger = logging.getLogger(__name__)
def init_redis(key):
"""Initialize a Redis connection.
:param string key: The config key. The entry should at least contain the
host, port and db number of the instance.
:returns redis: The redis instance if the config exists and is valid, and
None otherwise.
"""
cfg = Config.get(key)
if not cfg:
return None
try:
i = redis.Redis(**cfg)
# This will attempt to connect to Redis and throw an error if the
# connection is invalid.
i.info()
return i
except Exception:
error(logger, 'Unable to connect to Redis', None)
return None
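# Hedged example of the config entry init_redis looks up; the dict is passed
# straight to redis.Redis, so any of its keyword arguments are valid:
#
#   Config.get('redis') -> {'host': 'localhost', 'port': 6379, 'db': 0}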
def strval(value):
"""JSON serialize value as appropriate.
This function should only be used internally.
:param dict|list|string|number value: An input value.
:returns string: The output value, suitable for saving by Redis. If
``value`` is a ``dict`` or ``list``, it will be JSON-serialized.
        Otherwise it will be left as-is. Note that while Redis only stores
        string values, numbers need no conversion here: the Redis client
        turns them into their string representation automatically.
"""
return json.dumps(value) if isinstance(value, (list, dict)) else value
def strvals(*values):
"""JSON serialize values as appropriate.
This function should only be used internally.
:param ...dict|list|string|number values: Input values.
:returns list<string>: The output values. See docs for ``strval`` for
more explanations.
"""
return [strval(v) for v in values]
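# Illustrative behaviour of the helpers above (outputs are what json.dumps
# produces; plain strings and numbers pass through unchanged):
#
#   strval({"a": 1})      -> '{"a": 1}'
#   strval([1, 2])        -> '[1, 2]'
#   strval("plain")       -> 'plain'
#   strvals("a", [1], 2)  -> ['a', '[1]', 2]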
class Redis(object):
_instance = init_redis('redis')
@classmethod
def exists(cls, key):
"""Whether the key exists in Redis.
:param string key: The Redis key.
:returns boolean: ``True`` if the key exists, and ``False`` otherwise.
"""
return cls._instance.exists(key)
@classmethod
def get(cls, key):
"""Get the value stored at the key.
:param string key: The Redis key.
:returns string: The value of the key. If the key does not exist,
``None`` will be returned.
"""
return cls._instance.get(key)
@classmethod
def get_json(cls, key):
"""Get the value stored at the key as JSON.
:param string key: The Redis key.
:returns object: The value of the key as an unserialized JSON object.
If the key does not exist, ``None`` will be returned.
"""
resp = cls.get(key)
return json.loads(resp) if resp else None
@classmethod
def set(cls, key, value):
"""Set the key to the specified value.
:param string key: The Redis key.
:param string value: The value to set. If this is not a string, it will
        be cast to a string.
:returns boolean: ``True`` if the operation is successful.
"""
return cls._instance.set(key, strval(value))
@classmethod
def setex(cls, key, value, second):
"""Set the key to the specified value, with an expiration time.
:param string key: The Redis key.
:param string value: The value to set.
        :param int second: The TTL in seconds.
:returns boolean: ``True`` if the operation is successful.
"""
return cls._instance.setex(key, second, strval(value))
@classmethod
def expire(cls, key, second):
"""Set the key to expire in specified second.
:param string key: The key to set expire.
:param int second: The number of seconds for the key to live.
:returns boolean: True if the operation is successful.
"""
return cls._instance.expire(key, second)
@classmethod
def rpush(cls, key, *values):
"""Add values to a Redis list from the end.
The ``values`` argument is a variable-length array and can be
specified as follows:
.. code-block:: python
redis.rpush('danqing', 'val1', 'val2')
redis.rpush('danqing', 'val1')
redis.rpush('danqing', 'val1', 'val2', 'val3')
:param string key: The key of the list. If the list does not exist yet,
it will be created.
:param string... values: A list of values to insert. If any is not a
        string, it will be cast to a string.
:returns int: The total number of elements in the list after the push.
"""
return cls._instance.rpush(key, *strvals(*values))
@classmethod
def delete(cls, key):
"""Delete the key from Redis.
:param string key: The key to delete.
:returns int: The number of items deleted. If 1, the key is found and
deleted. If 0, the key is not found and nothing is done.
"""
return cls._instance.delete(key)
@classmethod
def hgetall(cls, key):
"""Get the hash table at the specified key.
:param string key: The key to fetch.
:returns dict: The hash table at the specified key. If no hash table
found (i.e. key not found), an empty dictionary is returned.
:raises redis.ResponseError: If ``key`` holds something other than a
hash table.
"""
return cls._instance.hgetall(key)
@classmethod
def hget(cls, key, hash_key):
"""Get the value for a hash key in the hash table at the specified key.
:param string key: The key to fetch.
:param string hash_key: The hash key to fetch value for in the hash
table.
:returns string: The value corresponding to the hash key in the hash
table. If either ``key`` or ``hash_key`` is not found, ``None`` is
returned.
:raises redis.ResponseError: If ``key`` holds something other than a
hash table.
"""
return cls._instance.hget(key, hash_key)
@classmethod
def hset(cls, key, hash_key, hash_value):
"""Set the value for a hash key in the hash table at the specified key.
:param string key: The key of the hash table. If the hash table does
not exist yet, it will be created.
:param string hash_key: The hash key to set value for in the hash
table.
:param string hash_value: The value to set to the key. If this is not
            a string, it will be cast to a string.
:returns int: The number of new fields. If 1, a new field is added
(``hash_key`` is new). If 0, ``hash_key`` already exists and its
value is updated.
:raises redis.ResponseError: If ``key`` holds something other than a
hash table.
"""
return cls._instance.hset(key, hash_key, strval(hash_value))
@classmethod
def hdelete(cls, key, *hash_keys):
"""Delete keys from a hash table.
The ``hash_keys`` argument is a variable-length array and can be
specified as follows:
.. code-block:: python
redis.hdelete('danqing', 'key1', 'key2')
redis.hdelete('danqing', 'key2')
redis.hdelete('danqing', 'key1', 'key2', 'key3')
:param string key: The key of the hash table.
:param string... hash_keys: A list of hash keys to delete from the hash
table.
:returns int: The number of keys actually deleted. If 3 hash keys are
specified but only 1 is found (and deleted), 1 is returned.
:raises redis.ResponseError: If ``key`` holds something other than a
hash table.
"""
return cls._instance.hdel(key, *hash_keys)
@classmethod
def lpeek(cls, key, count):
"""Peek the first count elements in the list without popping.
:param string key: The key of the array.
:param int count: The number of elements to peek.
:returns list: The list of peeked elements.
"""
return cls._instance.lrange(key, 0, count - 1)
@classmethod
def lpop(cls, key, count):
"""Pop the first count elements in the list.
:param string key: The key of the array.
:param int count: The number of elements to pop. If there are fewer
than ``count`` elements, everything will be popped.
:returns list: The list of popped elements.
"""
pipe = cls._instance.pipeline()
pipe.lrange(key, 0, count - 1)
        pipe.ltrim(key, count, -1)
result = pipe.execute()
return result[0] if result[1] else []
@classmethod
def atomic_rw(cls, key, evaluator=lambda x: x):
"""Atomically read-write a Redis key.
:param string key: The key to read/write.
:param function evaluator: The evaluator function. It takes the
existing value of the key, and should return the new value. If it
returns None, no update is done and the operation is considered
aborted. If not provided, the identity function is used.
:returns *: The value returned by evaluator. If there's an
atomicity violation, None is returned.
:returns boolean: True if there's no atomicity error. That is, this
value is True if write succeeded or the user aborted, and False if
            there's an atomicity violation (that prevented the write).
"""
with cls._instance.pipeline() as pipe:
try:
pipe.watch(key)
value = evaluator(pipe.get(key))
                if value is None:
return None, True
pipe.multi()
pipe.set(key, strval(value))
pipe.execute()
return value, True
except redis.WatchError:
return None, False
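    # Hedged usage sketch for atomic_rw: an evaluator that increments a
    # counter stored as a string (all names here are illustrative only):
    #
    #   def _incr(old):
    #       return str(int(old or 0) + 1)
    #
    #   new_value, ok = Redis.atomic_rw('my_counter', _incr)
    #   if not ok:
    #       ...  # another writer changed the key first; retry or give up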
@classmethod
def atomic_rw_hash(cls, key, hash_key, evaluator=lambda x: x):
"""Atomically read-write a Redis hash key.
:param string key: The key of the hash to read/write.
:param string hash_key: The hash key within the hash to read/write.
:param function evaluator: The evaluator function. It takes the
            existing value of the hash key, and should return the new
            value. If it returns None, no update is done and the
            operation is considered aborted. If not provided, the
            identity function is used.
        :returns *: The value returned by evaluator. If there's an
            atomicity violation, None is returned.
        :returns boolean: True if there's no atomicity error, and False
            if an atomicity violation prevented the write.
        """
        with cls._instance.pipeline() as pipe:
            try:
                pipe.watch(key)
                value = evaluator(pipe.hget(key, hash_key))
                if value is None:
                    return None, True
                pipe.multi()
                pipe.hset(key, hash_key, strval(value))
                pipe.execute()
                return value, True
            except redis.WatchError:
                return None, False
#!/usr/bin/env python3
# -*- coding: cp1252 -*-
'''
Created on 13.03.2018
GUI_forms.py
components for forms and dialogs
@author: <NAME>
'''
# import modules:
import sys, os, shutil, time
from shutil import copyfile
from collections import namedtuple
from PyQt5.QtWidgets import (QApplication, QFileDialog, QGridLayout,
QPushButton, QMessageBox, QTextEdit,
QWidget, QHBoxLayout, QScrollArea,
QDialog, QLabel, QVBoxLayout, QGroupBox, QRadioButton,
QTableWidget, QTableWidgetItem)
from PyQt5.Qt import pyqtSlot, pyqtSignal
from PyQt5.QtGui import QIcon
import general, db_internal
from typeloader_core import make_imgt_files as MIF
from GUI_forms import (CollapsibleDialog, ChoiceSection, FileChoiceTable,
FileButton, ProceedButton, QueryButton, check_project_open)
from GUI_forms_submission_ENA import ProjectInfoTable
from GUI_misc import settings_ok
from GUI_functions_local import check_local, check_nonproductive, make_fake_ENA_file, get_pretypings_from_oracledb
# ===========================================================
# parameters:
# ===========================================================
# classes:
TargetAllele = namedtuple("TargetAllele", "gene target_allele partner_allele")
class AlleleChoiceBox(QWidget):
"""displays one target allele with multiple novel alleles,
allows selecting the one pretyping representing the target allele
"""
choice = pyqtSignal(tuple)
def __init__(self, allele_info, log):
super().__init__()
[self.sample_id_int, self.local_name, self.allele, self.alleles] = allele_info
self.log = log
self.init_UI()
def init_UI(self):
layout = QHBoxLayout(self)
self.setLayout(layout)
name_lbl = QLabel(self.local_name + ":")
layout.addWidget(name_lbl)
box = QGroupBox("Which pretyping belongs to this allele?", self)
box_layout = QHBoxLayout(box)
box.setLayout(box_layout)
layout.addWidget(box)
self.options = []
for a in self.alleles:
btn = QRadioButton(a)
box_layout.addWidget(btn)
btn.clicked.connect(self.emit_choice)
self.options.append(btn)
@pyqtSlot()
def emit_choice(self):
"""emits chosen pretyping for this target allele
"""
pretyping = self.sender().text()
self.log.info(" - {} is {}".format(self.local_name, pretyping))
self.choice.emit((self.local_name, pretyping))
class BothAllelesNovelDialog(QDialog):
"""Popup created if target locus has more than 1 allele listed as novel
"""
updated = pyqtSignal()
def __init__(self, allele_dic, settings, log):
log.info("BothAllelesNovelDialog created...")
self.log = log
self.settings = settings
self.allele_dic = allele_dic
super().__init__()
self.setWindowTitle("Multiple novel alleles")
self.setWindowIcon(QIcon(general.favicon))
self.init_UI()
self.show()
def init_UI(self):
"""establish and fill the UI
"""
self.log.info("Starting BothAllelesNovelDialog: Which pretyping is right for these alleles?")
layout = QVBoxLayout(self)
self.setLayout(layout)
lbl1 = QLabel("Attention!")
lbl1.setStyleSheet(general.label_style_2nd)
layout.addWidget(lbl1)
n = len(self.allele_dic)
msg = "{} of the alleles to be submitted contain{} ".format(n, "s" if n == 1 else "")
msg += "multiple novel alleles in the target locus.\n"
msg += "Please indicate for each, which of the pretypings belongs to the allele you want to submit here!"
lbl = QLabel(msg)
lbl.setStyleSheet(general.label_style_normal)
layout.addWidget(lbl)
self.scrollArea = QScrollArea(self)
self.scrollArea.setWidgetResizable(True)
self.scrollAreaWidgetContents = QWidget(self.scrollArea)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
scrollArea_layout = QVBoxLayout(self.scrollAreaWidgetContents)
self.scrollAreaWidgetContents.setLayout(scrollArea_layout)
layout.addWidget(self.scrollArea)
self.choices_dic = {}
self.choice_boxes = {}
for allele in self.allele_dic:
allele_info = self.allele_dic[allele]
mybox = AlleleChoiceBox(allele_info, self.log)
self.choice_boxes[allele] = mybox
scrollArea_layout.addWidget(mybox)
self.choices_dic[allele_info[1]] = False
mybox.choice.connect(self.catch_choice)
layout.addWidget(self.scrollArea)
self.submit_btn = QPushButton("Save choices")
self.submit_btn.setEnabled(False)
self.submit_btn.clicked.connect(self.save_results)
layout.addWidget(self.submit_btn)
@pyqtSlot(tuple)
def catch_choice(self, mysignal):
"""whenever a choice is made through a radiobutton,
these are caught and stored in self.choices_dic[local_name] = pretyping
"""
(local_name, pretyping) = mysignal
alleles = self.allele_dic[local_name][-1]
partner_allele = " and ".join([allele for allele in alleles if allele != pretyping])
self.choices_dic[local_name] = (pretyping, partner_allele)
self.check_ready()
def check_ready(self):
"""checks if choices were made for all alleles;
if yes, enables submit_btn
"""
self.log.debug("Checking readiness...")
ready = True
for local_name in self.choices_dic:
if not self.choices_dic[local_name]:
ready = False
if ready:
self.log.debug("\t=> ready")
self.submit_btn.setEnabled(True)
self.submit_btn.setStyleSheet(general.btn_style_ready)
else:
self.log.debug("\t=> not ready")
self.submit_btn.setEnabled(False)
self.submit_btn.setStyleSheet(general.btn_style_normal)
def save_results(self):
"""saves the user's choices in the db and emits signal
"""
self.log.info("Saving choices to database...")
for allele in self.allele_dic:
[sample_id_int, local_name, allele_obj, _] = self.allele_dic[allele]
choice = self.choices_dic[local_name]
if "*" in choice[0]:
(target, partner) = choice
else:
target = "{}*{}".format(allele_obj.gene, choice[0])
partner = "{}*{}".format(allele_obj.gene, choice[1])
query = """update ALLELES
set target_allele = '{}', partner_allele = '{}'
where local_name = '{}' and sample_id_int = '{}'""".format(target, partner, local_name, sample_id_int)
success, _ = db_internal.execute_query(query, 0, self.log, "updating database", "Database error", self)
if success:
self.updated.emit()
self.close()
class InvalidPretypingsDialog(QDialog):
"""Popup created if pretypings are not consistent with TypeLoader's assigned allele
"""
ok = pyqtSignal()
def __init__(self, allele_dic, settings, log):
self.log = log
self.settings = settings
self.allele_dic = allele_dic
super().__init__()
self.setWindowTitle("Invalid pretypings")
self.setWindowIcon(QIcon(general.favicon))
self.resize(800, 400)
self.init_UI()
self.show()
def init_UI(self):
"""establish and fill the UI
"""
self.log.info("Starting InvalidPretypingsDialog: please adjust pretypings file for these alleles...")
layout = QVBoxLayout(self)
self.setLayout(layout)
lbl1 = QLabel("Attention!")
lbl1.setStyleSheet(general.label_style_2nd)
layout.addWidget(lbl1)
n = len(self.allele_dic)
msg = "{} of the alleles to be submitted ha{} ".format(n, "s" if n == 1 else "ve")
msg += "an invalid pretyping for the specfied locus.\n"
msg += "Please adjust the pretypings file for each indicated sample, then try again!"
lbl = QLabel(msg)
lbl.setStyleSheet(general.label_style_normal)
layout.addWidget(lbl)
self.add_table(layout)
self.ok_btn = QPushButton("Ok")
self.ok_btn.clicked.connect(self.ok_clicked)
self.ok_btn.setStyleSheet(general.btn_style_ready)
layout.addWidget(self.ok_btn)
def add_table(self, layout):
"""add and fill the table with problematic alleles
"""
self.table = QTableWidget()
self.table.setColumnCount(6)
self.table.setRowCount(len(self.allele_dic))
layout.addWidget(self.table)
self.table.setHorizontalHeaderLabels(
["Sample", "Allele Name", "Locus", "Assigned Allele", "Pretyping", "Problem"])
for n, allele in enumerate(self.allele_dic):
i = 0
for item in self.allele_dic[allele]:
self.table.setItem(n, i, QTableWidgetItem(item))
i += 1
self.table.resizeColumnsToContents()
def ok_clicked(self):
self.log.debug("User clicked 'ok' on InvalidPretypingsDialog")
self.ok.emit()
self.close()
class IPDCounterLockedDialog(QMessageBox):
"""Popup created if IPD-counter is locked, allows removal of lock
"""
remove_lock = pyqtSignal(bool)
def __init__(self, parent, title, text, settings, log):
self.log = log
self.settings = settings
super().__init__(parent)
self.setIcon(QMessageBox.Warning)
self.setText(text)
self.setWindowTitle(title)
self.init_UI()
self.show()
def init_UI(self):
"""establish and fill the UI
"""
self.log.info("Starting IPDCounterLockedDialog...")
self.setStandardButtons(QMessageBox.Cancel | QMessageBox.Ok)
self.setDefaultButton(QMessageBox.Cancel)
self.abort_txt = "Ok, I'll try again later."
self.proceed_txt = "Proceed anyway."
self.button(QMessageBox.Cancel).setText(self.abort_txt)
self.button(QMessageBox.Ok).setText(self.proceed_txt)
self.buttonClicked.connect(self.handle_click)
def handle_click(self, button):
"""handles clicks on either of the buttons
"""
txt = button.text()
if txt == self.proceed_txt:
self.proceed()
elif txt == self.abort_txt:
self.abort()
def proceed(self):
"""asks for confirmation, then emits signal to remove lock file
"""
self.log.info("User decision: remove lock on IPD_counter!")
self.log.info("Are you sure?")
reply = QMessageBox.question(self, "Please confirm",
"Are you REALLY sure no other user is currently creating IPD files and you can continue safely?",
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
self.log.info("\t=> Yes, remove lock!")
self.remove_lock.emit(True)
else:
self.log.info("\t=> No, I'd rather check again.")
self.remove_lock.emit(False)
self.close()
def abort(self):
"""aborts the attempt
"""
self.log.info("User decision: abort attempt for now and try again later.")
self.remove_lock.emit(False)
self.close()
class IPDFileChoiceTable(FileChoiceTable):
"""displays all alleles of a project
so user can choose which to submit to IPD
"""
old_cell_lines = pyqtSignal(dict)
additional_info = pyqtSignal(dict)
def __init__(self, project, log, parent=None):
query = """select project_nr, alleles.sample_id_int, alleles.local_name, allele_status,
ena_submission_id,
case
when instr(IPD_SUBMISSION_NR, '_') > 0
then substr(IPD_SUBMISSION_NR, 1, instr(IPD_SUBMISSION_NR, '_')-1)
else
IPD_SUBMISSION_NR
end as IPD_SUBMISSION_NR,
cell_line_old, gene, target_allele, partner_allele
from alleles
join files on alleles.sample_id_int = files.sample_id_int and alleles.allele_nr = files.allele_nr
""".format(project) #TODO: is this format() still necessary or leftover code?
num_columns = 10
header = ["Submit?", "Nr", "Sample", "Allele", "Allele Status", "ENA submission ID", "IPD submission ID"]
if parent:
self.settings = parent.settings
else:
import GUI_login
self.settings = GUI_login.get_settings("admin", log)
super().__init__(project, log, header, query, num_columns,
myfilter=" order by project_nr ", allele_status_column=3,
instant_accept_status="ENA submitted", parent=self)
def get_data(self):
"""get alleles from database
"""
myquery = self.query + self.myfilter
success, data = db_internal.execute_query(myquery, self.num_columns,
self.log, "retrieving data for FileChoiceTable from database",
"Database error", self)
if success:
self.data1 = data
# add data based on cell_line_old:
success, data2 = db_internal.execute_query(self.query + self.myfilter2, self.num_columns,
self.log, "retrieving data for FileChoiceTable from database",
"Database error", self)
if success:
self.data2 = data2
# assemble data from both queries into one dict:
self.cell_line_dic = {}
self.allele_dic = {} # contains additional info per local_name not displayed in the table
self.data = []
if self.data1:
self.log.debug("\t{} matching alleles found based on local_name".format(len(self.data1)))
for row in self.data1:
self.data.append(row[:-2])
local_name = row[2]
gene = row[7]
target_allele = row[8]
partner_allele = row[9]
allele = TargetAllele(gene=gene, target_allele=target_allele, partner_allele=partner_allele)
self.allele_dic[local_name] = allele
if self.data2:
self.log.debug("\t{} matching alleles found based on cell_line_old".format(len(self.data2)))
for row in self.data2:
self.data.append(row[:-2])
local_name = row[2]
cell_line_old = row[6]
self.cell_line_dic[local_name] = cell_line_old
gene = row[7]
target_allele = row[8]
partner_allele = row[9]
allele = TargetAllele(gene=gene, target_allele=target_allele, partner_allele=partner_allele)
self.allele_dic[local_name] = allele
self.log.debug("Emitting 'files = {}'".format(len(self.data)))
self.files.emit(len(self.data))
self.old_cell_lines.emit(self.cell_line_dic)
self.additional_info.emit(self.allele_dic)
def refresh(self, project, addfilter, addfilter2, keep_choices=False):
self.log.debug("refreshing IPDFileChoiceTable...")
self.keep_choices = keep_choices
self.myfilter = " where alleles.project_name = '{}' {} order by project_nr".format(project, addfilter)
self.myfilter2 = " where alleles.project_name = '{}' {} order by project_nr".format(project, addfilter2)
self.fill_UI()
class IPDSubmissionForm(CollapsibleDialog):
"""a popup widget to upload alleles of a project to IPD
"""
IPD_submitted = pyqtSignal()
def __init__(self, log, mydb, project, settings, parent=None):
"""initiates the IPDSubmissionForm
"""
self.log = log
self.log.info("Opening 'IPD Submission' Dialog...")
self.mydb = mydb
if check_project_open(project, log, parent=parent):
self.project = project
else:
self.project = ""
self.settings = settings
self.label_width = 150
super().__init__(parent)
self.resize(1250, 500)
self.setWindowTitle("Submit alleles to IPD")
self.setWindowIcon(QIcon(general.favicon))
self.samples = []
self.file_dic = {}
self.add_filter = ""
self.title = ""
self.description = ""
self.imgt_files = {}
self.submission_successful = False
self.accepted = False
self.multis_handled | |
r is not s2
True
>>>
>>> single2 = class_()
>>> single2.bind(SingleLinked, {"next": (single2, None)},
... recreate_object = False)
>>> r = check_type(s1, single2)
>>> r is s1
True
>>> r = check_type(s2, single2)
>>> r is s2
True
>>>
>>> single_to_double = class_()
>>>
>>> def _modify_node(o):
... if o.next:
... o.next.prev = o
... if not hasattr(o, 'prev'):
... o.prev = None
...
>>> def _check(x):
... if hasattr(x, 'prev'):
... if x.prev.name == "C" and x.name == "A":
... return False
... if x.name == "C" and x.next is not None and \\
... hasattr(x.next, 'name') and x.next.name == 'A':
... return False
... return True
...
>>> single_to_double.bind(SingleLinked,
... {"next": (single_to_double, None)},
... check_before = lambda x: x.name != "",
... check = _check,
... recreate_object =
... lambda: DoubleLinked.__new__(
... DoubleLinked),
... modify = _modify_node)
>>>
>>> check_type(SingleLinked(""), single_to_double) \
# doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeMismatchException: ... cannot match type class_(...): \
check_before returns False
>>> r = check_type(s1, single_to_double)
>>> (r.prev, r.name, r.next.name, r.next.next.name, r.next.next.next) \\
... == (None, "A", "B", "C", None)
True
>>> r.next.prev is r
True
>>>
>>> r = check_type(s2, single_to_double)
>>> (r.prev.name, r.name, r.next.name, r.next.next.name,
... r.next.next.next.name) == \\
... ("A", "C", "B", "A", "C")
True
>>> r.next.next.next is r
True
>>> r.prev.prev.prev is r
True
>>>
>>> def _check2(x):
... if hasattr(x, 'prev'):
... if x.prev.name == "A" and x.name == "C":
... return False
... if x.name == "A" and x.next is not None and \\
... hasattr(x.next, 'name') and x.next.name == 'C':
... return False
... return True
...
>>> single_to_double2 = class_()
>>> single_to_double2.bind(SingleLinked,
... {"next": (single_to_double2, None)},
... check_before = lambda x: x.name != "",
... check = _check2,
... recreate_object =
... lambda: DoubleLinked.__new__(
... DoubleLinked),
... modify = _modify_node)
>>> check_type(s2, single_to_double2) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeMismatchException: ... cannot match type ...: \
check returns False
"""
def bind(self, object_type, property_check = {},
recreate_object = True,
check = None,
check_before = None,
modify = None,
merge = default_object_merger):
"""
:param object_type: a user-defined class
:param property_check: type check for object __dict__.
                               The checked result will be merged back
                               into the object's __dict__.
:param recreate_object: if a callable is passed in, use it to
create a new object; use
`object_type.__new__(object_type)`
to create a new object if True;
                                and use the original object otherwise
(**WARNING: this may modify the
original object**)
:param check: run an additional check for created object
:param check_before: run a check before property checking
:param modify: modify the object after type check
:param merge: customize property merge process
Sequence: check object_type -> check_before -> recreate_object ->
check property -> merge -> check -> modify
"""
self.object_type = object_type
self.property_check = property_check
if callable(recreate_object):
self._recreate_object = recreate_object
elif recreate_object:
self._recreate_object = lambda: object_type.__new__(object_type)
else:
self._recreate_object = None
self._check, self._check_msg = _parse_checker(check, 'check returns False')
self._check_before, self._check_before_msg = \
_parse_checker(check_before, 'check_before returns False')
self._modify = modify
self._merge = merge
def pre_check_type(self, value):
if not isinstance(value, self.object_type):
raise TypeMismatchException(value, self, "class type mismatch")
if self._check_before is not None:
if not _guard_checker(value, self, self._check_before, value):
raise TypeMismatchException(value, self,
self._check_before_msg)
if self._recreate_object is not None:
return _guard_checker(value, self, self._recreate_object)
else:
return value
def final_check_type(self, value, current_result, recursive_check_type):
d = recursive_check_type(value.__dict__, self.property_check)
if self._merge is not None:
_guard_checker(value, self, self._merge, current_result, d)
if self._check is not None:
if not _guard_checker(value, self, self._check, current_result):
raise TypeMismatchException(value, self,
self._check_msg)
if self._modify is not None:
_guard_checker(value, self, self._modify, current_result)
return current_result
@recursive_repr()
def __repr__(self):
return 'class_(' + repr(self.object_type) + ', ' + \
repr(self.property_check) + ')'
class_ = ObjectChecker
class TypeChecker(ExtraChecker):
"""
Check an input variable is a class, and (optionally)
a subclass of `baseclass`, and (optionally) has a metaclass
of `metaclass`.
Examples::
>>> t = type_(int)
>>> t # doctest: +ELLIPSIS
type_(<... 'int'>)
>>> check_type(bool, t) # doctest: +ELLIPSIS
<... 'bool'>
>>> check_type(str, t) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeMismatchException: <... 'str'> cannot match type type_(<... 'int'>): must be a subclass of <... 'int'>
"""
def _check(self, value):
if not issubclass(value, self._baseclass):
raise TypeMismatchException(value, self, "must be a subclass of " + repr(self._baseclass))
return True
def bind(self, baseclass=None, metaclass=type):
"""
:param baseclass: if not None, check the input is a subclass of `baseclass`
:param metaclass: if not None, check the input is an instance of `metaclass`
"""
self._metaclass = metaclass
self._baseclass = baseclass
if not isinstance(metaclass, type):
raise InvalidTypeException(self, repr(metaclass) + " is not a metaclass")
if baseclass is None:
ExtraChecker.bind(self, metaclass)
else:
if not isinstance(baseclass, type):
                raise InvalidTypeException(self, repr(baseclass) + " is not a baseclass")
ExtraChecker.bind(self, metaclass, check=self._check)
def __repr__(self):
return "type_(" + ("" if self._baseclass is None else repr(self._baseclass)) + \
("" if self._metaclass is type else "metaclass=" + repr(self._metaclass)) + ")"
def type_(baseclass=None, metaclass=type):
"""
Create a TypeChecker
"""
return TypeChecker(baseclass, metaclass)
class _StackedDict(object):
__slots__ = ('_check', '_check_stack')
def __init__(self, *args, **kwargs):
self._check = [dict(*args, **kwargs)]
self._check_stack = []
def __contains__(self, key):
return any(key in d for d in self._check)
def get(self, k, d = None):
for di in reversed(self._check):
if k in di:
return di[k]
return d
def __getitem__(self, key):
for di in reversed(self._check):
            if key in di:
                return di[key]
raise KeyError(key)
def __setitem__(self, key, value):
self._check[-1][key] = value
def snapshot(self):
if len(self._check) >= 20:
self._check_stack.append(self._check)
merge_snapshots = {}
for d in self._check:
merge_snapshots.update(d)
self._check = [merge_snapshots]
new_dict = {}
self._check.append(new_dict)
def _pop(self):
last_dict = self._check.pop()
if self._check_stack and len(self._check) <= 1:
self._check = self._check_stack.pop()
return last_dict
def discard_snapshot(self):
self._pop()
def merge_snapshot(self):
last_dict = self._pop()
to_merge = self._check[-1]
if len(to_merge) < len(last_dict):
last_dict.update(to_merge)
self._check[-1] = last_dict
else:
self._check[-1].update(last_dict)
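# Hedged illustration of _StackedDict's snapshot semantics (internal helper;
# the values are illustrative):
#
#   d = _StackedDict(a=1)
#   d.snapshot()            # push a layer for tentative entries
#   d['b'] = 2              # recorded in the top layer only
#   assert 'b' in d         # lookups scan every layer
#   d.discard_snapshot()    # a failed match drops the tentative layer
#   assert 'b' not in d
#   d.snapshot(); d['c'] = 3
#   d.merge_snapshot()      # a successful match folds it into the layer below
#   assert 'c' in d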
def _customized_check(value, type_, checker, _recursive_check, _type_cache):
current_check, succeeded_check, failed_check, list_loop = \
_recursive_check
check_id = (id(value), id(type_))
if check_id in list_loop:
raise TypeMismatchException(value, type_)
current_result = checker.pre_check_type(value)
if current_result is None:
# Prevent an infinite loop
list_loop[check_id] = (value, type_)
try:
current_result = checker.final_check_type(
value,
None,
lambda value, type, path=None:
_append_path(_check_type_inner, path,
value, type, _recursive_check, _type_cache)
)
finally:
del list_loop[check_id]
else:
current_check[check_id] = (current_result, value, type_)
        # back up the succeeded checks: they may depend on the current result.
        # If the type match fails, revert all succeeded checks.
succeeded_check.snapshot()
_new_recursive_check = (current_check, succeeded_check,
failed_check, {})
try:
checker.final_check_type(
value,
current_result,
lambda value, type, path=None:
_append_path(_check_type_inner, path,
value, type, _new_recursive_check, _type_cache)
)
except:
succeeded_check.discard_snapshot()
raise
else:
succeeded_check.merge_snapshot()
return current_result
def _indent(text, prepend = ' '):
return ''.join(prepend + l for l in text.splitlines(True))
def _check_type_inner(value, type_, _recursive_check = None, _type_cache = None):
# print('Check type:', value, id(value), type_, id(type_))
if _recursive_check is None:
# current, succeeded, failed, listloop
# each has id-tuple as their key, and (result, value, type_) as the value.
        # we must store the used values and types to prevent them from being collected,
# or the ids may be reused
_recursive_check = ({}, _StackedDict(), {}, {})
if _type_cache is None:
_type_cache = {}
current_check, succeeded_check, failed_check, list_loop = \
_recursive_check
# Use (id(value), id(type)) to store matches that are done before
check_id = (id(value), id(type_))
_succ = succeeded_check.get(check_id)
if _succ is not None:
# This match is already done, return the result
        # print('Hit succeeded cache:', succeeded_check[check_id],
        #       id(succeeded_check[check_id]))
return _succ[0]
elif check_id in failed_check:
# This match is already failed, raise the exception
exc = failed_check[check_id][0]
if isinstance(exc, TypeMismatchException):
exc = exc.clone()
else:
exc = _copy(exc)
raise exc
elif check_id in current_check:
        # print('Hit in-progress check:', current_check[check_id],
        #       id(current_check[check_id]))
# This match is in-operation. The final result is depended by
# itself. Return the object itself to form a recursive structure.
return current_check[check_id][0]
return_value = None
try:
if type_ is None:
# Match None only
if value is not None:
raise TypeMismatchException(value, type_)
else:
return_value = value
elif type_ == ():
if value | |
"""
Gauss-Patterson quadrature rule.
Adapted from <NAME>'s implementation in Fortran
Licensing
---------
This code is distributed under the GNU LGPL license.
"""
import numpy
import chaospy.quad
def quad_gauss_patterson(order, dist):
"""
Generate sets abscissas and weights for Gauss-Patterson quadrature.
Args:
        order (int) : The quadrature order. Must be an integer in the range [0, 7].
dist (Dist) : The domain to create quadrature over.
Returns:
        (numpy.ndarray, numpy.ndarray) : Abscissas and weights.
Example:
>>> absci, weights = chaospy.quad_gauss_patterson(3, chaospy.Uniform(0, 1))
>>> print(numpy.around(absci, 4))
[[0.0031 0.0198 0.0558 0.1127 0.1894 0.2829 0.3883 0.5 0.6117 0.7171
0.8106 0.8873 0.9442 0.9802 0.9969]]
>>> print(numpy.around(weights, 4))
[0.0085 0.0258 0.0465 0.0672 0.0858 0.1003 0.1096 0.1128 0.1096 0.1003
0.0858 0.0672 0.0465 0.0258 0.0085]
Reference:
<NAME>, <NAME>,
Handbook of Computational Methods for Integration,
Chapman and Hall, 2004,
ISBN: 1-58488-428-2,
LC: QA299.3.K98.
<NAME>,
The Optimal Addition of Points to Quadrature Formulae,
Mathematics of Computation,
Volume 22, Number 104, October 1968, pages 847-856.
"""
if len(dist) > 1:
if isinstance(order, int):
values = [quad_gauss_patterson(order, d) for d in dist]
else:
values = [quad_gauss_patterson(order[i], dist[i])
for i in range(len(dist))]
abscissas = [_[0][0] for _ in values]
weights = [_[1] for _ in values]
abscissas = chaospy.quad.combine(abscissas).T
weights = numpy.prod(chaospy.quad.combine(weights), -1)
return abscissas, weights
order = sorted(PATTERSON_VALUES.keys())[order]
abscissas, weights = PATTERSON_VALUES[order]
lower, upper = dist.range()
abscissas = .5*(abscissas*(upper-lower)+upper+lower)
weights *= dist.pdf(abscissas)
weights /= numpy.sum(weights)
abscissas = abscissas.reshape(1, abscissas.size)
return abscissas, weights
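# Hedged multivariate usage sketch: for a joint distribution the rule is
# applied per dimension and combined into a tensor grid (chaospy.J assumed
# from the chaospy API):
#
#   joint = chaospy.J(chaospy.Uniform(0, 1), chaospy.Uniform(-1, 1))
#   absci, weights = quad_gauss_patterson(2, joint)
#   # order 2 selects the 7-point rule per axis:
#   # absci.shape == (2, 49), weights.shape == (49,)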
PATTERSON_VALUES = {
0 : ((0e+00,), (2.0e+00,)),
3 : ((
-0.77459666924148337704e+00, 0.0e+00, 0.77459666924148337704e+00,
), (
0.555555555555555555556e+00, 0.888888888888888888889e+00,
0.555555555555555555556e+00,
)),
7 : ((
-0.96049126870802028342e+00, -0.77459666924148337704e+00,
-0.43424374934680255800e+00, 0.0e+00, 0.43424374934680255800e+00,
0.77459666924148337704e+00, 0.96049126870802028342e+00,
), (
0.104656226026467265194e+00, 0.268488089868333440729e+00,
0.401397414775962222905e+00, 0.450916538658474142345e+00,
0.401397414775962222905e+00, 0.268488089868333440729e+00,
0.104656226026467265194e+00,
)),
15 : ((
-0.99383196321275502221e+00, -0.96049126870802028342e+00,
-0.88845923287225699889e+00, -0.77459666924148337704e+00,
-0.62110294673722640294e+00, -0.43424374934680255800e+00,
-0.22338668642896688163e+00, 0.0e+00, 0.22338668642896688163e+00,
0.43424374934680255800e+00, 0.62110294673722640294e+00,
0.77459666924148337704e+00, 0.88845923287225699889e+00,
0.96049126870802028342e+00, 0.99383196321275502221e+00,
), (
0.0170017196299402603390e+00, 0.0516032829970797396969e+00,
0.0929271953151245376859e+00, 0.134415255243784220360e+00,
0.171511909136391380787e+00, 0.200628529376989021034e+00,
0.219156858401587496404e+00, 0.225510499798206687386e+00,
0.219156858401587496404e+00, 0.200628529376989021034e+00,
0.171511909136391380787e+00, 0.134415255243784220360e+00,
0.0929271953151245376859e+00, 0.0516032829970797396969e+00,
0.0170017196299402603390e+00,
)),
31 : ((
-0.99909812496766759766e+00, -0.99383196321275502221e+00,
-0.98153114955374010687e+00, -0.96049126870802028342e+00,
-0.92965485742974005667e+00, -0.88845923287225699889e+00,
-0.83672593816886873550e+00, -0.77459666924148337704e+00,
-0.70249620649152707861e+00, -0.62110294673722640294e+00,
-0.53131974364437562397e+00, -0.43424374934680255800e+00,
-0.33113539325797683309e+00, -0.22338668642896688163e+00,
-0.11248894313318662575e+00, 0.0e+00, 0.11248894313318662575e+00,
0.22338668642896688163e+00, 0.33113539325797683309e+00,
0.43424374934680255800e+00, 0.53131974364437562397e+00,
0.62110294673722640294e+00, 0.70249620649152707861e+00,
0.77459666924148337704e+00, 0.83672593816886873550e+00,
0.88845923287225699889e+00, 0.92965485742974005667e+00,
0.96049126870802028342e+00, 0.98153114955374010687e+00,
0.99383196321275502221e+00, 0.99909812496766759766e+00,
), (
0.00254478079156187441540e+00, 0.00843456573932110624631e+00,
0.0164460498543878109338e+00, 0.0258075980961766535646e+00,
0.0359571033071293220968e+00, 0.0464628932617579865414e+00,
0.0569795094941233574122e+00, 0.0672077542959907035404e+00,
0.0768796204990035310427e+00, 0.0857559200499903511542e+00,
0.0936271099812644736167e+00, 0.100314278611795578771e+00,
0.105669893580234809744e+00, 0.109578421055924638237e+00,
0.111956873020953456880e+00, 0.112755256720768691607e+00,
0.111956873020953456880e+00, 0.109578421055924638237e+00,
0.105669893580234809744e+00, 0.100314278611795578771e+00,
0.0936271099812644736167e+00, 0.0857559200499903511542e+00,
0.0768796204990035310427e+00, 0.0672077542959907035404e+00,
0.0569795094941233574122e+00, 0.0464628932617579865414e+00,
0.0359571033071293220968e+00, 0.0258075980961766535646e+00,
0.0164460498543878109338e+00, 0.00843456573932110624631e+00,
0.00254478079156187441540e+00,
)),
63 : ((
-0.99987288812035761194e+00, -0.99909812496766759766e+00,
-0.99720625937222195908e+00, -0.99383196321275502221e+00,
-0.98868475754742947994e+00, -0.98153114955374010687e+00,
-0.97218287474858179658e+00, -0.96049126870802028342e+00,
-0.94634285837340290515e+00, -0.92965485742974005667e+00,
-0.91037115695700429250e+00, -0.88845923287225699889e+00,
-0.86390793819369047715e+00, -0.83672593816886873550e+00,
-0.80694053195021761186e+00, -0.77459666924148337704e+00,
-0.73975604435269475868e+00, -0.70249620649152707861e+00,
-0.66290966002478059546e+00, -0.62110294673722640294e+00,
-0.57719571005204581484e+00, -0.53131974364437562397e+00,
-0.48361802694584102756e+00, -0.43424374934680255800e+00,
-0.38335932419873034692e+00, -0.33113539325797683309e+00,
-0.27774982202182431507e+00, -0.22338668642896688163e+00,
-0.16823525155220746498e+00, -0.11248894313318662575e+00,
-0.056344313046592789972e+00, 0.0e+00, 0.056344313046592789972e+00,
0.11248894313318662575e+00, 0.16823525155220746498e+00,
0.22338668642896688163e+00, 0.27774982202182431507e+00,
0.33113539325797683309e+00, 0.38335932419873034692e+00,
0.43424374934680255800e+00, 0.48361802694584102756e+00,
0.53131974364437562397e+00, 0.57719571005204581484e+00,
0.62110294673722640294e+00, 0.66290966002478059546e+00,
0.70249620649152707861e+00, 0.73975604435269475868e+00,
0.77459666924148337704e+00, 0.80694053195021761186e+00,
0.83672593816886873550e+00, 0.86390793819369047715e+00,
0.88845923287225699889e+00, 0.91037115695700429250e+00,
0.92965485742974005667e+00, 0.94634285837340290515e+00,
0.96049126870802028342e+00, 0.97218287474858179658e+00,
0.98153114955374010687e+00, 0.98868475754742947994e+00,
0.99383196321275502221e+00, 0.99720625937222195908e+00,
0.99909812496766759766e+00, 0.99987288812035761194e+00,
), (
0.000363221481845530659694e+00, 0.00126515655623006801137e+00,
0.00257904979468568827243e+00, 0.00421763044155885483908e+00,
0.00611550682211724633968e+00, 0.00822300795723592966926e+00,
0.0104982469096213218983e+00, 0.0129038001003512656260e+00,
0.0154067504665594978021e+00, 0.0179785515681282703329e+00,
0.0205942339159127111492e+00, 0.0232314466399102694433e+00,
0.0258696793272147469108e+00, 0.0284897547458335486125e+00,
0.0310735511116879648799e+00, 0.0336038771482077305417e+00,
0.0360644327807825726401e+00, 0.0384398102494555320386e+00,
0.0407155101169443189339e+00, 0.0428779600250077344929e+00,
0.0449145316536321974143e+00, 0.0468135549906280124026e+00,
0.0485643304066731987159e+00, 0.0501571393058995374137e+00,
0.0515832539520484587768e+00, 0.0528349467901165198621e+00,
0.0539054993352660639269e+00, 0.0547892105279628650322e+00,
0.0554814043565593639878e+00, 0.0559784365104763194076e+00,
0.0562776998312543012726e+00, 0.0563776283603847173877e+00,
0.0562776998312543012726e+00, 0.0559784365104763194076e+00,
0.0554814043565593639878e+00, 0.0547892105279628650322e+00,
0.0539054993352660639269e+00, 0.0528349467901165198621e+00,
0.0515832539520484587768e+00, 0.0501571393058995374137e+00,
0.0485643304066731987159e+00, 0.0468135549906280124026e+00,
0.0449145316536321974143e+00, 0.0428779600250077344929e+00,
0.0407155101169443189339e+00, 0.0384398102494555320386e+00,
0.0360644327807825726401e+00, 0.0336038771482077305417e+00,
0.0310735511116879648799e+00, 0.0284897547458335486125e+00,
0.0258696793272147469108e+00, 0.0232314466399102694433e+00,
0.0205942339159127111492e+00, 0.0179785515681282703329e+00,
0.0154067504665594978021e+00, 0.0129038001003512656260e+00,
0.0104982469096213218983e+00, 0.00822300795723592966926e+00,
0.00611550682211724633968e+00, 0.00421763044155885483908e+00,
0.00257904979468568827243e+00, 0.00126515655623006801137e+00,
0.000363221481845530659694e+00,
)),
127 : ((
-0.99998243035489159858e+00, -0.99987288812035761194e+00,
-0.99959879967191068325e+00, -0.99909812496766759766e+00,
-0.99831663531840739253e+00, -0.99720625937222195908e+00,
-0.99572410469840718851e+00, -0.99383196321275502221e+00,
-0.99149572117810613240e+00, -0.98868475754742947994e+00,
-0.98537149959852037111e+00, -0.98153114955374010687e+00,
-0.97714151463970571416e+00, -0.97218287474858179658e+00,
-0.96663785155841656709e+00, -0.96049126870802028342e+00,
-0.95373000642576113641e+00, -0.94634285837340290515e+00,
-0.93832039777959288365e+00, -0.92965485742974005667e+00,
-0.92034002547001242073e+00, -0.91037115695700429250e+00,
-0.89974489977694003664e+00, -0.88845923287225699889e+00,
-0.87651341448470526974e+00, -0.86390793819369047715e+00,
-0.85064449476835027976e+00, -0.83672593816886873550e+00,
-0.82215625436498040737e+00, -0.80694053195021761186e+00,
-0.79108493379984836143e+00, -0.77459666924148337704e+00,
-0.75748396638051363793e+00, -0.73975604435269475868e+00,
-0.72142308537009891548e+00, -0.70249620649152707861e+00,
-0.68298743109107922809e+00, -0.66290966002478059546e+00,
-0.64227664250975951377e+00, -0.62110294673722640294e+00,
-0.59940393024224289297e+00, -0.57719571005204581484e+00,
-0.55449513263193254887e+00, -0.53131974364437562397e+00,
-0.50768775753371660215e+00, -0.48361802694584102756e+00,
-0.45913001198983233287e+00, -0.43424374934680255800e+00,
-0.40897982122988867241e+00, -0.38335932419873034692e+00,
-0.35740383783153215238e+00, -0.33113539325797683309e+00,
-0.30457644155671404334e+00, -0.27774982202182431507e+00,
-0.25067873030348317661e+00, -0.22338668642896688163e+00,
-0.19589750271110015392e+00, -0.16823525155220746498e+00,
-0.14042423315256017459e+00, -0.11248894313318662575e+00,
-0.084454040083710883710e+00, -0.056344313046592789972e+00,
-0.028184648949745694339e+00, 0.0e+00, 0.028184648949745694339e+00,
0.056344313046592789972e+00, 0.084454040083710883710e+00,
0.11248894313318662575e+00, 0.14042423315256017459e+00,
0.16823525155220746498e+00, 0.19589750271110015392e+00,
0.22338668642896688163e+00, 0.25067873030348317661e+00,
0.27774982202182431507e+00, 0.30457644155671404334e+00,
0.33113539325797683309e+00, 0.35740383783153215238e+00,
0.38335932419873034692e+00, 0.40897982122988867241e+00,
0.43424374934680255800e+00, 0.45913001198983233287e+00,
0.48361802694584102756e+00, 0.50768775753371660215e+00,
0.53131974364437562397e+00, 0.55449513263193254887e+00,
0.57719571005204581484e+00, 0.59940393024224289297e+00,
0.62110294673722640294e+00, 0.64227664250975951377e+00,
0.66290966002478059546e+00, 0.68298743109107922809e+00,
0.70249620649152707861e+00, 0.72142308537009891548e+00,
0.73975604435269475868e+00, 0.75748396638051363793e+00,
0.77459666924148337704e+00, 0.79108493379984836143e+00,
0.80694053195021761186e+00, 0.82215625436498040737e+00,
0.83672593816886873550e+00, 0.85064449476835027976e+00,
0.86390793819369047715e+00, 0.87651341448470526974e+00,
0.88845923287225699889e+00, 0.89974489977694003664e+00,
0.91037115695700429250e+00, 0.92034002547001242073e+00,
0.92965485742974005667e+00, 0.93832039777959288365e+00,
0.94634285837340290515e+00, 0.95373000642576113641e+00,
0.96049126870802028342e+00, 0.96663785155841656709e+00,
0.97218287474858179658e+00, 0.97714151463970571416e+00,
0.98153114955374010687e+00, 0.98537149959852037111e+00,
0.98868475754742947994e+00, 0.99149572117810613240e+00,
0.99383196321275502221e+00, 0.99572410469840718851e+00,
0.99720625937222195908e+00, 0.99831663531840739253e+00,
0.99909812496766759766e+00, 0.99959879967191068325e+00,
0.99987288812035761194e+00, 0.99998243035489159858e+00,
), (
0.0000505360952078625176247e+00, 0.000180739564445388357820e+00,
0.000377746646326984660274e+00, 0.000632607319362633544219e+00,
0.000938369848542381500794e+00, 0.00128952408261041739210e+00,
0.00168114286542146990631e+00, 0.00210881524572663287933e+00,
0.00256876494379402037313e+00, 0.00305775341017553113613e+00,
0.00357289278351729964938e+00, 0.00411150397865469304717e+00,
0.00467105037211432174741e+00, 0.00524912345480885912513e+00,
0.00584344987583563950756e+00, 0.00645190005017573692280e+00,
0.00707248999543355546805e+00, 0.00770337523327974184817e+00,
0.00834283875396815770558e+00, 0.00898927578406413572328e+00,
0.00964117772970253669530e+00, 0.0102971169579563555237e+00,
0.0109557333878379016480e+00, 0.0116157233199551347270e+00,
0.0122758305600827700870e+00, 0.0129348396636073734547e+00,
0.0135915710097655467896e+00, 0.0142448773729167743063e+00,
0.0148936416648151820348e+00, 0.0155367755558439824399e+00,
0.0161732187295777199419e+00, 0.0168019385741038652709e+00,
0.0174219301594641737472e+00, 0.0180322163903912863201e+00,
0.0186318482561387901863e+00, 0.0192199051247277660193e+00,
0.0197954950480974994880e+00, 0.0203577550584721594669e+00,
0.0209058514458120238522e+00, 0.0214389800125038672465e+00,
0.0219563663053178249393e+00, 0.0224572658268160987071e+00,
0.0229409642293877487608e+00, 0.0234067774953140062013e+00,
0.0238540521060385400804e+00, 0.0242821652033365993580e+00,
0.0246905247444876769091e+00, 0.0250785696529497687068e+00,
0.0254457699654647658126e+00, 0.0257916269760242293884e+00,
0.0261156733767060976805e+00, 0.0264174733950582599310e+00,
0.0266966229274503599062e+00, 0.0269527496676330319634e+00,
0.0271855132296247918192e+00, 0.0273946052639814325161e+00,
0.0275797495664818730349e+00, 0.0277407021782796819939e+00,
0.0278772514766137016085e+00, 0.0279892182552381597038e+00,
0.0280764557938172466068e+00, 0.0281388499156271506363e+00,
0.0281763190330166021307e+00, 0.0281888141801923586938e+00,
0.0281763190330166021307e+00, 0.0281388499156271506363e+00,
0.0280764557938172466068e+00, 0.0279892182552381597038e+00,
0.0278772514766137016085e+00, 0.0277407021782796819939e+00,
0.0275797495664818730349e+00, 0.0273946052639814325161e+00,
0.0271855132296247918192e+00, 0.0269527496676330319634e+00,
0.0266966229274503599062e+00, 0.0264174733950582599310e+00,
0.0261156733767060976805e+00, 0.0257916269760242293884e+00,
0.0254457699654647658126e+00, 0.0250785696529497687068e+00,
0.0246905247444876769091e+00, 0.0242821652033365993580e+00,
0.0238540521060385400804e+00, 0.0234067774953140062013e+00,
0.0229409642293877487608e+00, 0.0224572658268160987071e+00,
0.0219563663053178249393e+00, 0.0214389800125038672465e+00,
0.0209058514458120238522e+00, 0.0203577550584721594669e+00,
0.0197954950480974994880e+00, 0.0192199051247277660193e+00,
0.0186318482561387901863e+00, 0.0180322163903912863201e+00,
0.0174219301594641737472e+00, 0.0168019385741038652709e+00,
0.0161732187295777199419e+00, 0.0155367755558439824399e+00,
0.0148936416648151820348e+00, 0.0142448773729167743063e+00,
0.0135915710097655467896e+00, 0.0129348396636073734547e+00,
0.0122758305600827700870e+00, 0.0116157233199551347270e+00,
0.0109557333878379016480e+00, 0.0102971169579563555237e+00,
0.00964117772970253669530e+00, 0.00898927578406413572328e+00,
0.00834283875396815770558e+00, 0.00770337523327974184817e+00,
0.00707248999543355546805e+00, 0.00645190005017573692280e+00,
0.00584344987583563950756e+00, 0.00524912345480885912513e+00,
0.00467105037211432174741e+00, 0.00411150397865469304717e+00,
0.00357289278351729964938e+00, 0.00305775341017553113613e+00,
0.00256876494379402037313e+00, 0.00210881524572663287933e+00,
0.00168114286542146990631e+00, 0.00128952408261041739210e+00,
0.000938369848542381500794e+00, 0.000632607319362633544219e+00,
0.000377746646326984660274e+00, 0.000180739564445388357820e+00,
0.0000505360952078625176247e+00,
)),
255 : ((
-0.99999759637974846462e+00, -0.99998243035489159858e+00,
-0.99994399620705437576e+00, -0.99987288812035761194e+00,
-0.99976049092443204733e+00, -0.99959879967191068325e+00,
-0.99938033802502358193e+00, -0.99909812496766759766e+00,
-0.99874561446809511470e+00, -0.99831663531840739253e+00,
-0.99780535449595727456e+00, -0.99720625937222195908e+00,
-0.99651414591489027385e+00, -0.99572410469840718851e+00,
-0.99483150280062100052e+00, -0.99383196321275502221e+00,
-0.99272134428278861533e+00, -0.99149572117810613240e+00,
-0.99015137040077015918e+00, -0.98868475754742947994e+00,
-0.98709252795403406719e+00, -0.98537149959852037111e+00,
-0.98351865757863272876e+00, -0.98153114955374010687e+00,
-0.97940628167086268381e+00, -0.97714151463970571416e+00,
-0.97473445975240266776e+00, -0.97218287474858179658e+00,
-0.96948465950245923177e+00, -0.96663785155841656709e+00,
-0.96364062156981213252e+00, -0.96049126870802028342e+00,
-0.95718821610986096274e+00, -0.95373000642576113641e+00,
-0.95011529752129487656e+00, -0.94634285837340290515e+00,
-0.94241156519108305981e+00, -0.93832039777959288365e+00,
-0.93406843615772578800e+00, -0.92965485742974005667e+00,
-0.92507893290707565236e+00, -0.92034002547001242073e+00,
-0.91543758715576504064e+00, -0.91037115695700429250e+00,
-0.90514035881326159519e+00, -0.89974489977694003664e+00,
-0.89418456833555902286e+00, -0.88845923287225699889e+00,
-0.88256884024734190684e+00, -0.87651341448470526974e+00,
-0.87029305554811390585e+00, -0.86390793819369047715e+00,
-0.85735831088623215653e+00, -0.85064449476835027976e+00,
-0.84376688267270860104e+00, -0.83672593816886873550e+00,
-0.82952219463740140018e+00, -0.82215625436498040737e+00,
-0.81462878765513741344e+00, -0.80694053195021761186e+00,
-0.79909229096084140180e+00, -0.79108493379984836143e+00,
-0.78291939411828301639e+00, -0.77459666924148337704e+00,
-0.76611781930376009072e+00, -0.75748396638051363793e+00,
-0.74869629361693660282e+00, -0.73975604435269475868e+00,
-0.73066452124218126133e+00, -0.72142308537009891548e+00,
-0.71203315536225203459e+00, -0.70249620649152707861e+00,
-0.69281376977911470289e+00, -0.68298743109107922809e+00,
-0.67301883023041847920e+00, -0.66290966002478059546e+00,
-0.65266166541001749610e+00, -0.64227664250975951377e+00,
-0.63175643771119423041e+00, -0.62110294673722640294e+00,
-0.61031811371518640016e+00, -0.59940393024224289297e+00,
-0.58836243444766254143e+00, -0.57719571005204581484e+00,
-0.56590588542365442262e+00, -0.55449513263193254887e+00,
-0.54296566649831149049e+00, -0.53131974364437562397e+00,
-0.51955966153745702199e+00, -0.50768775753371660215e+00,
-0.49570640791876146017e+00, -0.48361802694584102756e+00,
-0.47142506587165887693e+00, -0.45913001198983233287e+00,
-0.44673538766202847374e+00, -0.43424374934680255800e+00,
-0.42165768662616330006e+00, -0.40897982122988867241e+00,
-0.39621280605761593918e+00, -0.38335932419873034692e+00,
-0.37042208795007823014e+00, -0.35740383783153215238e+00,
-0.34430734159943802278e+00, -0.33113539325797683309e+00,
-0.31789081206847668318e+00, -0.30457644155671404334e+00,
-0.29119514851824668196e+00, -0.27774982202182431507e+00,
-0.26424337241092676194e+00, -0.25067873030348317661e+00,
-0.23705884558982972721e+00, -0.22338668642896688163e+00,
-0.20966523824318119477e+00, -0.19589750271110015392e+00,
-0.18208649675925219825e+00, -0.16823525155220746498e+00,
-0.15434681148137810869e+00, -0.14042423315256017459e+00,
-0.12647058437230196685e+00, -0.11248894313318662575e+00,
-0.098482396598119202090e+00, -0.084454040083710883710e+00,
-0.070406976042855179063e+00, -0.056344313046592789972e+00,
-0.042269164765363603212e+00, -0.028184648949745694339e+00,
-0.014093886410782462614e+00, 0.0e+00, 0.014093886410782462614e+00,
0.028184648949745694339e+00, 0.042269164765363603212e+00,
0.056344313046592789972e+00, 0.070406976042855179063e+00,
0.084454040083710883710e+00, 0.098482396598119202090e+00,
0.11248894313318662575e+00, 0.12647058437230196685e+00,
0.14042423315256017459e+00, 0.15434681148137810869e+00,
0.16823525155220746498e+00, 0.18208649675925219825e+00,
0.19589750271110015392e+00, 0.20966523824318119477e+00,
0.22338668642896688163e+00, 0.23705884558982972721e+00,
0.25067873030348317661e+00, 0.26424337241092676194e+00,
0.27774982202182431507e+00, 0.29119514851824668196e+00,
0.30457644155671404334e+00, 0.31789081206847668318e+00,
0.33113539325797683309e+00, 0.34430734159943802278e+00,
0.35740383783153215238e+00, 0.37042208795007823014e+00,
0.38335932419873034692e+00, 0.39621280605761593918e+00,
0.40897982122988867241e+00, 0.42165768662616330006e+00,
0.43424374934680255800e+00, 0.44673538766202847374e+00,
0.45913001198983233287e+00, 0.47142506587165887693e+00,
0.48361802694584102756e+00, 0.49570640791876146017e+00,
0.50768775753371660215e+00, 0.51955966153745702199e+00,
0.53131974364437562397e+00, 0.54296566649831149049e+00,
0.55449513263193254887e+00, 0.56590588542365442262e+00,
0.57719571005204581484e+00, 0.58836243444766254143e+00,
0.59940393024224289297e+00, 0.61031811371518640016e+00,
0.62110294673722640294e+00, 0.63175643771119423041e+00,
0.64227664250975951377e+00, 0.65266166541001749610e+00,
0.66290966002478059546e+00, 0.67301883023041847920e+00,
0.68298743109107922809e+00, 0.69281376977911470289e+00,
0.70249620649152707861e+00, 0.71203315536225203459e+00,
0.72142308537009891548e+00, 0.73066452124218126133e+00,
0.73975604435269475868e+00, 0.74869629361693660282e+00,
0.75748396638051363793e+00, 0.76611781930376009072e+00,
0.77459666924148337704e+00, 0.78291939411828301639e+00,
0.79108493379984836143e+00, 0.79909229096084140180e+00,
0.80694053195021761186e+00, 0.81462878765513741344e+00,
0.82215625436498040737e+00, 0.82952219463740140018e+00,
0.83672593816886873550e+00, 0.84376688267270860104e+00,
0.85064449476835027976e+00, 0.85735831088623215653e+00,
0.86390793819369047715e+00, 0.87029305554811390585e+00,
0.87651341448470526974e+00, 0.88256884024734190684e+00,
0.88845923287225699889e+00, 0.89418456833555902286e+00,
0.89974489977694003664e+00, 0.90514035881326159519e+00,
0.91037115695700429250e+00, 0.91543758715576504064e+00,
0.92034002547001242073e+00, 0.92507893290707565236e+00,
0.92965485742974005667e+00, 0.93406843615772578800e+00,
0.93832039777959288365e+00, 0.94241156519108305981e+00,
0.94634285837340290515e+00, 0.95011529752129487656e+00,
0.95373000642576113641e+00, 0.95718821610986096274e+00,
0.96049126870802028342e+00, 0.96364062156981213252e+00,
0.96663785155841656709e+00, 0.96948465950245923177e+00,
0.97218287474858179658e+00, 0.97473445975240266776e+00,
0.97714151463970571416e+00, 0.97940628167086268381e+00,
0.98153114955374010687e+00, 0.98351865757863272876e+00,
0.98537149959852037111e+00, 0.98709252795403406719e+00,
0.98868475754742947994e+00, 0.99015137040077015918e+00,
0.99149572117810613240e+00, 0.99272134428278861533e+00,
0.99383196321275502221e+00, 0.99483150280062100052e+00,
0.99572410469840718851e+00, 0.99651414591489027385e+00,
0.99720625937222195908e+00, 0.99780535449595727456e+00,
0.99831663531840739253e+00, 0.99874561446809511470e+00,
0.99909812496766759766e+00, 0.99938033802502358193e+00,
0.99959879967191068325e+00, 0.99976049092443204733e+00,
0.99987288812035761194e+00, 0.99994399620705437576e+00,
0.99998243035489159858e+00, 0.99999759637974846462e+00,
), (
0.69379364324108267170e-05, 0.25157870384280661489e-04,
0.53275293669780613125e-04, 0.90372734658751149261e-04,
0.13575491094922871973e-03, 0.18887326450650491366e-03,
0.24921240048299729402e-03, 0.31630366082226447689e-03,
0.38974528447328229322e-03, 0.46918492424785040975e-03,
0.55429531493037471492e-03, 0.64476204130572477933e-03,
0.74028280424450333046e-03, 0.84057143271072246365e-03,
0.94536151685852538246e-03, 0.10544076228633167722e-02,
0.11674841174299594077e-02, 0.12843824718970101768e-02,
0.14049079956551446427e-02, 0.15288767050877655684e-02,
0.16561127281544526052e-02, 0.17864463917586498247e-02,
0.19197129710138724125e-02, 0.20557519893273465236e-02,
0.21944069253638388388e-02, 0.23355251860571608737e-02,
0.24789582266575679307e-02, 0.26245617274044295626e-02,
0.27721957645934509940e-02, 0.29217249379178197538e-02,
0.30730184347025783234e-02, 0.32259500250878684614e-02,
0.33803979910869203823e-02, 0.35362449977167777340e-02,
0.36933779170256508183e-02, 0.38516876166398709241e-02,
0.40110687240750233989e-02, 0.41714193769840788528e-02,
0.43326409680929828545e-02, 0.44946378920320678616e-02,
0.46573172997568547773e-02, 0.48205888648512683476e-02,
0.49843645647655386012e-02, 0.51485584789781777618e-02,
0.53130866051870565663e-02, 0.54778666939189508240e-02,
0.56428181013844441585e-02, 0.58078616599775673635e-02,
0.59729195655081658049e-02, 0.61379152800413850435e-02,
0.63027734490857587172e-02, 0.64674198318036867274e-02,
0.66317812429018878941e-02, 0.67957855048827733948e-02,
0.69593614093904229394e-02, 0.71224386864583871532e-02,
0.72849479805538070639e-02, 0.74468208324075910174e-02,
0.76079896657190565832e-02, 0.77683877779219912200e-02,
0.79279493342948491103e-02, 0.80866093647888599710e-02,
0.82443037630328680306e-02, 0.84009692870519326354e-02,
0.85565435613076896192e-02, 0.87109650797320868736e-02,
0.88641732094824942641e-02, 0.90161081951956431600e-02,
0.91667111635607884067e-02, 0.93159241280693950932e-02,
0.94636899938300652943e-02, 0.96099525623638830097e-02,
0.97546565363174114611e-02, 0.98977475240487497440e-02,
0.10039172044056840798e-01, 0.10178877529236079733e-01,
0.10316812330947621682e-01, 0.10452925722906011926e-01,
0.10587167904885197931e-01, 0.10719490006251933623e-01,
0.10849844089337314099e-01, 0.10978183152658912470e-01,
0.11104461134006926537e-01, 0.11228632913408049354e-01,
0.11350654315980596602e-01, 0.11470482114693874380e-01,
0.11588074033043952568e-01, 0.11703388747657003101e-01,
0.11816385890830235763e-01, 0.11927026053019270040e-01,
0.12035270785279562630e-01, 0.12141082601668299679e-01,
0.12244424981611985899e-01, 0.12345262372243838455e-01,
0.12443560190714035263e-01, 0.12539284826474884353e-01,
0.12632403643542078765e-01, 0.12722884982732382906e-01,
0.12810698163877361967e-01, 0.12895813488012114694e-01,
0.12978202239537399286e-01, 0.13057836688353048840e-01,
0.13134690091960152836e-01, 0.13208736697529129966e-01,
0.13279951743930530650e-01, 0.13348311463725179953e-01,
0.13413793085110098513e-01, 0.13476374833816515982e-01,
0.13536035934956213614e-01, 0.13592756614812395910e-01,
0.13646518102571291428e-01, 0.13697302631990716258e-01,
0.13745093443001896632e-01, 0.13789874783240936517e-01,
0.13831631909506428676e-01, 0.13870351089139840997e-01,
0.13906019601325461264e-01, 0.13938625738306850804e-01,
0.13968158806516938516e-01, 0.13994609127619079852e-01,
0.14017968039456608810e-01, 0.14038227896908623303e-01,
0.14055382072649964277e-01, 0.14069424957813575318e-01,
0.14080351962553661325e-01, 0.14088159516508301065e-01,
0.14092845069160408355e-01, 0.14094407090096179347e-01,
0.14092845069160408355e-01, 0.14088159516508301065e-01,
0.14080351962553661325e-01, 0.14069424957813575318e-01,
0.14055382072649964277e-01, 0.14038227896908623303e-01,
0.14017968039456608810e-01, 0.13994609127619079852e-01,
0.13968158806516938516e-01, 0.13938625738306850804e-01,
0.13906019601325461264e-01, 0.13870351089139840997e-01,
0.13831631909506428676e-01, 0.13789874783240936517e-01,
0.13745093443001896632e-01, 0.13697302631990716258e-01,
0.13646518102571291428e-01, 0.13592756614812395910e-01,
0.13536035934956213614e-01, 0.13476374833816515982e-01,
0.13413793085110098513e-01, 0.13348311463725179953e-01,
0.13279951743930530650e-01, 0.13208736697529129966e-01,
0.13134690091960152836e-01, 0.13057836688353048840e-01,
0.12978202239537399286e-01, 0.12895813488012114694e-01,
0.12810698163877361967e-01, 0.12722884982732382906e-01,
0.12632403643542078765e-01, 0.12539284826474884353e-01,
0.12443560190714035263e-01, 0.12345262372243838455e-01,
0.12244424981611985899e-01, 0.12141082601668299679e-01,
0.12035270785279562630e-01, 0.11927026053019270040e-01,
0.11816385890830235763e-01, 0.11703388747657003101e-01,
0.11588074033043952568e-01, 0.11470482114693874380e-01,
0.11350654315980596602e-01, 0.11228632913408049354e-01,
0.11104461134006926537e-01, 0.10978183152658912470e-01,
0.10849844089337314099e-01, 0.10719490006251933623e-01,
0.10587167904885197931e-01, 0.10452925722906011926e-01,
0.10316812330947621682e-01, 0.10178877529236079733e-01,
0.10039172044056840798e-01, 0.98977475240487497440e-02,
0.97546565363174114611e-02, 0.96099525623638830097e-02,
0.94636899938300652943e-02, 0.93159241280693950932e-02,
0.91667111635607884067e-02, 0.90161081951956431600e-02,
0.88641732094824942641e-02, 0.87109650797320868736e-02,
0.85565435613076896192e-02, 0.84009692870519326354e-02,
0.82443037630328680306e-02, 0.80866093647888599710e-02,
0.79279493342948491103e-02, 0.77683877779219912200e-02,
0.76079896657190565832e-02, 0.74468208324075910174e-02,
0.72849479805538070639e-02, 0.71224386864583871532e-02,
0.69593614093904229394e-02, 0.67957855048827733948e-02,
0.66317812429018878941e-02, 0.64674198318036867274e-02,
0.63027734490857587172e-02, 0.61379152800413850435e-02,
0.59729195655081658049e-02, 0.58078616599775673635e-02,
0.56428181013844441585e-02, 0.54778666939189508240e-02,
0.53130866051870565663e-02, 0.51485584789781777618e-02,
0.49843645647655386012e-02, 0.48205888648512683476e-02,
0.46573172997568547773e-02, 0.44946378920320678616e-02,
0.43326409680929828545e-02, 0.41714193769840788528e-02,
0.40110687240750233989e-02, 0.38516876166398709241e-02,
0.36933779170256508183e-02, 0.35362449977167777340e-02,
0.33803979910869203823e-02, 0.32259500250878684614e-02,
0.30730184347025783234e-02, 0.29217249379178197538e-02,
0.27721957645934509940e-02, 0.26245617274044295626e-02,
0.24789582266575679307e-02, 0.23355251860571608737e-02,
0.21944069253638388388e-02, 0.20557519893273465236e-02,
0.19197129710138724125e-02, 0.17864463917586498247e-02,
0.16561127281544526052e-02, 0.15288767050877655684e-02,
0.14049079956551446427e-02, 0.12843824718970101768e-02,
0.11674841174299594077e-02, 0.10544076228633167722e-02,
0.94536151685852538246e-03, 0.84057143271072246365e-03,
0.74028280424450333046e-03, 0.64476204130572477933e-03,
0.55429531493037471492e-03, 0.46918492424785040975e-03,
0.38974528447328229322e-03, 0.31630366082226447689e-03,
0.24921240048299729402e-03, 0.18887326450650491366e-03,
0.13575491094922871973e-03, 0.90372734658751149261e-04,
0.53275293669780613125e-04, 0.25157870384280661489e-04,
0.69379364324108267170e-05,
)),
511 : ((
-0.999999672956734384381e+00, -0.999997596379748464620e+00,
-0.999992298136257588028e+00, -0.999982430354891598580e+00,
-0.999966730098486276883e+00, -0.999943996207054375764e+00,
-0.999913081144678282800e+00, -0.999872888120357611938e+00,
-0.999822363679787739196e+00, -0.999760490924432047330e+00,
-0.999686286448317731776e+00,
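# A minimal sketch of how paired abscissa/weight tables like the ones above
# are typically consumed (assuming they are quadrature rules over [-1, 1];
# fixed_quad and the 3-point rule below are illustrative, not from this file):
import math

def fixed_quad(f, nodes, weights):
    """Approximates the integral of f over [-1, 1] as sum(w_i * f(x_i))."""
    return sum(w * f(x) for x, w in zip(nodes, weights))

# 3-point Gauss-Legendre rule, exact for polynomials up to degree 5.
nodes = (-math.sqrt(3.0 / 5.0), 0.0, math.sqrt(3.0 / 5.0))
weights = (5.0 / 9.0, 8.0 / 9.0, 5.0 / 9.0)
assert abs(fixed_quad(lambda x: x * x, nodes, weights) - 2.0 / 3.0) < 1e-12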
number
literals are agnostic of their bit width; this allows us to create the
proper-width value.
Returns:
The resulting interpreter value.
Raises:
EvaluateError: If the type context is missing or inappropriate (e.g. a
tuple cannot be the type for a number).
"""
logging.vlog(4, 'number: %s @ %s', expr, expr.span)
if expr.tok.is_keyword_in((Keyword.TRUE, Keyword.FALSE)):
type_context = type_context or ConcreteType.U1
if not type_context and expr.tok.kind == TokenKind.CHARACTER:
type_context = ConcreteType.U8
if not type_context and expr.tok.kind == TokenKind.KEYWORD:
type_context = ConcreteType.U1 # Boolean.
if not type_context and expr.type_ is None:
raise EvaluateError(
expr.span,
'Internal error: no type context for expression, should be caught '
'by type inference!')
type_context = type_context or self._evaluate_TypeAnnotation(
expr.type_, bindings)
if type_context is None:
raise EvaluateError(
expr.span, 'Missing type context for number @ {}'.format(expr.span))
elif isinstance(type_context, TupleType):
raise EvaluateError(
expr.span, 'Type context for number is a tuple type {} @ {}'.format(
type_context, expr.span))
bit_count = type_context.get_total_bit_count()
signed = type_context.signed
constructor = Value.make_sbits if signed else Value.make_ubits
return constructor(bit_count, expr.get_value_as_int())
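# Hedged sketch of the width-aware construction implied by the
# signed/unsigned constructor split above (make_ubits/make_sbits here are
# plain-int stand-ins, not the interpreter's Value API):
def make_ubits(bit_count, value):
    # Truncate to bit_count bits, interpreted as unsigned.
    return value & ((1 << bit_count) - 1)

def make_sbits(bit_count, value):
    # Truncate to bit_count bits, then sign-extend (two's complement).
    masked = value & ((1 << bit_count) - 1)
    return masked - (1 << bit_count) if masked & (1 << (bit_count - 1)) else masked

assert make_ubits(8, 0xFF) == 255
assert make_sbits(8, 0xFF) == -1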
def _evaluate_to_struct_or_enum_or_annotation(
self, node: Union[ast.TypeDef, ast.ModRef, ast.Struct],
bindings: Bindings) -> Union[ast.Struct, ast.Enum, ast.TypeAnnotation]:
"""Returns the node dereferenced into a Struct or Enum or TypeAnnotation.
Will produce TypeAnnotation in the case we bottom out in a tuple, for
example.
Args:
node: Node to resolve to a struct/enum/annotation.
bindings: Current bindings for evaluating the node.
"""
while isinstance(node, ast.TypeDef):
annotation = node.type_
if not annotation.is_typeref():
return annotation
node = annotation.typeref.type_def
if isinstance(node, (ast.Struct, ast.Enum)):
return node
assert isinstance(node, ast.ModRef)
imported_module = bindings.resolve_mod(node.mod.identifier)
td = imported_module.get_typedef(node.value_tok.value)
# Recurse to dereference it if it's a typedef in the imported module.
td = self._evaluate_to_struct_or_enum_or_annotation(
td, self._make_top_level_bindings(imported_module))
assert isinstance(td, (ast.Struct, ast.Enum, ast.TypeAnnotation)), td
return td
def _evaluate_to_enum(self, node: Union[ast.TypeDef, ast.Enum],
bindings: Bindings) -> ast.Enum:
type_definition = self._evaluate_to_struct_or_enum_or_annotation(
node, bindings)
assert isinstance(type_definition, ast.Enum), type_definition
return type_definition
def _evaluate_to_struct(self, node: Union[ast.ModRef, ast.Struct],
bindings: Bindings) -> ast.Struct:
"""Evaluates potential module-reference-to-struct to a struct."""
type_definition = self._evaluate_to_struct_or_enum_or_annotation(
node, bindings)
assert isinstance(type_definition, ast.Struct), type_definition
return type_definition
def _evaluate_StructInstance( # pylint: disable=invalid-name
self,
expr: ast.StructInstance,
bindings: Bindings,
type_context: Optional[ConcreteType] # pylint: disable=unused-argument
) -> Value:
"""Evaluates a struct instance AST node to a value."""
struct = self._evaluate_to_struct(expr.struct, bindings)
result = Value.make_tuple(
tuple(
self._evaluate(e, bindings)
for _, e in expr.get_ordered_members(struct)))
return result
def _evaluate_SplatStructInstance( # pylint: disable=invalid-name
self,
expr: ast.SplatStructInstance,
bindings: Bindings,
type_context: Optional[ConcreteType] # pylint: disable=unused-argument
) -> Value:
"""Evaluates a 'splat' struct instance AST node to a value."""
named_tuple = self._evaluate(expr.splatted, bindings)
struct = self._evaluate_to_struct(expr.struct, bindings)
for k, v in expr.members:
new_value = self._evaluate(v, bindings)
i = struct.member_names.index(k)
current_type = concrete_type_from_value(named_tuple.tuple_members[i])
new_type = concrete_type_from_value(new_value)
if new_type != current_type:
raise EvaluateError(
v.span,
f'type error found at interpreter runtime! struct member {k} changing from type {current_type} to {new_type}'
)
named_tuple = named_tuple.tuple_replace(i, new_value)
assert isinstance(named_tuple, Value), named_tuple
return named_tuple
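# Plain-tuple sketch of the splat-update semantics above (tuple_replace is a
# hypothetical stand-in for Value.tuple_replace):
def tuple_replace(t, i, v):
    return t[:i] + (v,) + t[i + 1:]

member_names = ['a', 'b', 'c']
splatted = (1, 2, 3)
overrides = {'b': 20}
result = splatted
for name, value in overrides.items():
    result = tuple_replace(result, member_names.index(name), value)
assert result == (1, 20, 3)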
def _evaluate_Attr( # pylint: disable=invalid-name
self,
expr: ast.Attr,
bindings: Bindings,
type_context: Optional[ConcreteType] # pylint: disable=unused-argument
) -> Value:
"""Evaluates an attribute-accessing AST node to a value."""
lhs_value = self._evaluate(expr.lhs, bindings)
index = next(
i for i, name in enumerate(self._node_to_type[expr.lhs].tuple_names)
if name == expr.attr.identifier)
return lhs_value.tuple_members[index]
def _evaluate_XlsTuple( # pylint: disable=invalid-name
self, expr: ast.XlsTuple, bindings: Bindings,
type_context: Optional[ConcreteType]) -> Value:
"""Evaluates an XlsTuple expression AST node to a value."""
def get_type_context(i: int) -> Optional[ConcreteType]:
"""Retrieves the type context for a tuple member.
Args:
i: Which tuple member.
Returns:
The type context for the ith tuple member, if a type context is
available at all.
"""
if type_context is None:
return None
return type_context.get_tuple_member(i)
result = Value.make_tuple(
tuple(
self._evaluate(e, bindings, get_type_context(i))
for i, e in enumerate(expr.members)))
logging.vlog(3, 'tuple: %s', result)
return result
def _evaluate_Ternary( # pylint: disable=invalid-name
self, expr: ast.Ternary, bindings: Bindings,
_: Optional[ConcreteType]) -> Value:
test_value = self._evaluate(expr.test, bindings)
if test_value.is_true():
return self._evaluate(expr.consequent, bindings)
else:
return self._evaluate(expr.alternate, bindings)
def _evaluate_Unop( # pylint: disable=invalid-name
self, expr: ast.Unop, bindings: Bindings,
_: Optional[ConcreteType]) -> Value:
"""Evaluates a 'Unop' AST node to a Value."""
operand_value = self._evaluate(expr.operand, bindings)
if expr.operator.kind == ast.Unop.INV:
return operand_value.bitwise_negate()
if expr.operator.kind == ast.Unop.NEG:
return operand_value.arithmetic_negate()
raise NotImplementedError('Unimplemented unop.', expr.operator)
def _evaluate_Binop( # pylint: disable=invalid-name
self, expr: ast.Binop, bindings: Bindings,
_: Optional[ConcreteType]) -> Value:
"""Evaluates a 'Binop' AST node to a value."""
lhs_value = self._evaluate(expr.lhs, bindings)
rhs_value = self._evaluate(expr.rhs, bindings)
if expr.operator.kind == ast.Binop.ADD:
result = lhs_value.add(rhs_value)
elif expr.operator.kind == ast.Binop.SUB:
result = lhs_value.sub(rhs_value)
elif expr.operator.kind == ast.Binop.CONCAT:
result = lhs_value.concat(rhs_value)
elif expr.operator.kind == ast.Binop.MUL:
result = lhs_value.mul(rhs_value)
elif expr.operator.kind == ast.Binop.DIV:
result = lhs_value.floordiv(rhs_value)
elif expr.operator.get_kind_or_keyword() in (ast.Binop.OR,
ast.Binop.LOGICAL_OR):
result = lhs_value.bitwise_or(rhs_value)
elif expr.operator.get_kind_or_keyword() in (ast.Binop.AND,
ast.Binop.LOGICAL_AND):
result = lhs_value.bitwise_and(rhs_value)
elif expr.operator.get_kind_or_keyword() == ast.Binop.XOR:
result = lhs_value.bitwise_xor(rhs_value)
elif expr.operator.kind == ast.Binop.SHLL: # <<
result = lhs_value.shll(rhs_value)
elif expr.operator.kind == ast.Binop.SHRL: # >>
result = lhs_value.shrl(rhs_value)
elif expr.operator.kind == ast.Binop.SHRA: # >>>
result = lhs_value.shra(rhs_value)
elif expr.operator.kind == ast.Binop.EQ: # ==
result = lhs_value.eq(rhs_value)
elif expr.operator.kind == ast.Binop.NE: # !=
result = lhs_value.ne(rhs_value)
elif expr.operator.kind == ast.Binop.GT: # >
result = lhs_value.gt(rhs_value)
elif expr.operator.kind == ast.Binop.LT: # <
result = lhs_value.lt(rhs_value)
elif expr.operator.kind == ast.Binop.LE: # <=
result = lhs_value.le(rhs_value)
elif expr.operator.kind == ast.Binop.GE: # >=
result = lhs_value.ge(rhs_value)
else:
raise NotImplementedError('Unimplemented binop', expr.operator)
return result
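# One possible table-driven alternative to the elif chain above (a sketch
# only; the string keys stand in for ast.Binop kinds and assume each kind
# maps to a same-named Value method):
_BINOP_METHODS = {
    'ADD': 'add', 'SUB': 'sub', 'MUL': 'mul', 'DIV': 'floordiv',
    'CONCAT': 'concat', 'SHLL': 'shll', 'SHRL': 'shrl', 'SHRA': 'shra',
    'EQ': 'eq', 'NE': 'ne', 'GT': 'gt', 'LT': 'lt', 'LE': 'le', 'GE': 'ge',
}

def dispatch_binop(kind, lhs_value, rhs_value):
    try:
        return getattr(lhs_value, _BINOP_METHODS[kind])(rhs_value)
    except KeyError:
        raise NotImplementedError('Unimplemented binop', kind)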
def _evaluate_For( # pylint: disable=invalid-name
self, expr: ast.For, bindings: Bindings,
_: Optional[ConcreteType]) -> Value:
"""Evaluates a 'For' AST node to a value."""
iterable = self._evaluate(expr.iterable, bindings)
concrete_iteration_type = self._concretize(expr.type_, bindings)
carry = self._evaluate(expr.init, bindings)
for i, x in enumerate(iterable):
iteration = Value.make_tuple((x, carry))
if not concrete_type_accepts_value(concrete_iteration_type, iteration):
raise EvaluateError(
expr.type_.span,
'type error found at interpreter runtime! iteration value does not conform to type annotation '
'at top of iteration {}:\n got value: {}\n type: {};\n want: {}'
.format(i, iteration, concrete_type_from_value(iteration),
concrete_iteration_type))
new_bindings = bindings.clone_with(expr.names, iteration)
carry = self._evaluate(expr.body, new_bindings)
return carry
# This function signature conforms to an abstract interface.
# pylint: disable=unused-argument
def _evaluate_Carry( # pylint: disable=invalid-name
self, expr: ast.Carry, bindings: Bindings,
type_context: Optional[ConcreteType]) -> Value:
assert isinstance(expr, ast.Carry), expr
return bindings.resolve_value_from_identifier('carry')
def _evaluate_While( # pylint: disable=invalid-name
self, expr: ast.While, bindings: Bindings,
type_context: Optional[ConcreteType]) -> Value:
carry = self._evaluate(expr.init, bindings)
new_bindings = Bindings(bindings)
new_bindings.add_value('carry', carry)
while self._evaluate(expr.test, new_bindings).is_true():
carry = self._evaluate(expr.body, new_bindings)
new_bindings.add_value('carry', carry)
return carry
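# The carry-threading loop above, restated with plain Python callables
# (illustrative only; run_while is not part of the interpreter):
def run_while(init, test, body):
    carry = init
    while test(carry):
        carry = body(carry)
    return carry

assert run_while(0, lambda c: c < 5, lambda c: c + 2) == 6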
def _evaluate_Array( # pylint: disable=invalid-name
self, expr: ast.Array, bindings: Bindings,
type_context: Optional[ConcreteType]) -> Value:
"""Evaluates an 'Array' AST node to a value."""
element_type = None
if type_context is None and expr.type_:
type_context = self._evaluate_TypeAnnotation(expr.type_, bindings)
if type_context is not None:
element_type = type_context.get_element_type()
logging.vlog(3, 'element type for array members: %s @ %s', element_type,
expr.span)
elements = tuple(
self._evaluate(e, bindings, element_type) for e in expr.members)
if expr.has_ellipsis:
assert type_context is not None, type_context
elements = elements + elements[-1:] * (type_context.size - len(elements))
return Value.make_array(elements)
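# Standalone sketch of the ellipsis fill above: the trailing element is
# replicated until the array reaches the size the type context demands.
def fill_ellipsis(elements, target_size):
    return tuple(elements) + tuple(elements[-1:]) * (target_size - len(elements))

assert fill_ellipsis((1, 2), 5) == (1, 2, 2, 2, 2)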
def _evaluate_ConstantArray( # pylint: disable=invalid-name
self, expr: ast.ConstantArray, bindings: Bindings,
type_context: Optional[ConcreteType]) -> Value:
"""Evaluates a 'ConstantArray' AST node to a value."""
return self._evaluate_Array(expr, bindings, type_context)
def _evaluate_ModRef( # pylint: disable=invalid-name
self, expr: ast.ModRef, bindings: Bindings,
_: Optional[ConcreteType]) -> Value:
"""Evaluates a 'ModRef' AST node to a value."""
mod = bindings.resolve_mod(expr.mod.identifier)
f = mod.get_function(expr.value_tok.value)
return Value.make_function(functools.partial(self._evaluate_fn, f, mod))
def _evaluate_Invocation( # pylint: disable=invalid-name
self, expr: ast.Invocation, bindings: Bindings,
_: Optional[ConcreteType]) -> Optional[Value]:
"""Evaluates an 'Invocation' AST node to a value."""
if self._trace_all and isinstance(
expr.callee,
ast.NameRef) and expr.callee.name_def.identifier == 'trace':
# Safe to skip this and return nothing if this is a trace invocation;
# trace isn't an input to any downstream expressions.
return None
arg_values = [self._evaluate(arg, bindings) for arg in expr.args]
callee_value = self._evaluate(expr.callee, bindings)
if not callee_value.is_function():
raise EvaluateError(
expr.callee.span,
'Callee value is not a function (should have been determined during type inference); got: {}'
.format(callee_value))
fn_symbolic_bindings = ()
if bindings.fn_ctx:
# The symbolic bindings of this invocation were already computed during
# typechecking.
fn_symbolic_bindings = expr.symbolic_bindings.get(bindings.fn_ctx, ())
return callee_value.function_payload(
arg_values, expr.span, expr, symbolic_bindings=fn_symbolic_bindings)
def _perform_trace(self, lhs: Text, span: Span, value: Value) -> None:
"""Actually writes the tracing output to stderr."""
leader = 'trace of {} @ {}:'.format(lhs, span)
if sys.stderr.isatty():
print(termcolor.colored(leader, color='blue'), value, file=sys.stderr)
else:
print(leader, value, file=sys.stderr)
def _optional_trace(self, expr: ast.Expr, result: Value) -> None:
# tests/validation/tests/rke/test_update_roles.py
from .conftest import * # NOQA
from .common import * # NOQA
def test_update_roles_1(test_name, cloud_provider, rke_client, kubectl):
"""
Update cluster adding a worker node
Before Update: Create three node cluster, each node with a single role
node0 - controlplane
node1 - etcd
node2 - worker
After Update: Adds a worker
node0 - controlplane
node1 - etcd
node2 - worker
node3 - worker
"""
all_nodes = cloud_provider.create_multiple_nodes(4, test_name)
# Only use three nodes at first
before_update_nodes = all_nodes[0:-1]
rke_template = 'cluster_update_roles_1_1.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, before_update_nodes,
base_namespace='beforeupdate')
# Update adding worker node, rerun on existing validation pods
rke_template = 'cluster_update_roles_1_2.yml.j2'
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
validate_rke_cluster(rke_client, kubectl, all_nodes, 'afterupdate')
delete_nodes(cloud_provider, all_nodes)
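# The node-slicing convention shared by these tests, shown standalone:
nodes = ['node0', 'node1', 'node2', 'node3']
assert nodes[0:-1] == ['node0', 'node1', 'node2']             # drop the last node
assert nodes[0:2] + nodes[3:] == ['node0', 'node1', 'node3']  # drop node2
assert nodes[1:] == ['node1', 'node2', 'node3']               # drop node0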
def test_update_roles_2(test_name, cloud_provider, rke_client, kubectl):
"""
Update cluster adding a worker node, then remove original worker node
Before Update: Create three node cluster, each node with a single role
node0 - controlplane
node1 - etcd
node2 - worker
After Update: Adds a worker
node0 - controlplane
node1 - etcd
node2 - worker <- will be deleted on next update
node3 - worker
After 2nd Update: Deletes original worker
node0 - controlplane
node1 - etcd
node3 - worker
"""
all_nodes = cloud_provider.create_multiple_nodes(4, test_name)
before_update_nodes = all_nodes[0:-1]
removed_node_nodes = all_nodes[0:2] + all_nodes[3:]
# all_nodes[0:2] = [node0, node1]
# all_nodes[3:] = [node3]
# Only use three nodes at first
rke_template = 'cluster_update_roles_2_1.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, before_update_nodes,
base_namespace='beforeupdate')
# Update adding worker node, rerun on existing validation pods
rke_template = 'cluster_update_roles_2_2.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
network_update1, dns_discovery_update1 = validate_rke_cluster(
rke_client, kubectl, all_nodes, 'afterupdate1')
# Update removing original worker node, rerun on existing validation pods
rke_template = 'cluster_update_roles_2_3.yml.j2'
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, removed_node_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
validate_rke_cluster(
rke_client, kubectl, removed_node_nodes, 'afterupdate1',
network_validation=network_update1,
dns_validation=dns_discovery_update1)
validate_rke_cluster(
rke_client, kubectl, removed_node_nodes, 'afterupdate2')
delete_nodes(cloud_provider, all_nodes)
def test_update_roles_3(test_name, cloud_provider, rke_client, kubectl):
"""
Update cluster adding a controlplane node
Before Update: Create three node cluster, each node with a single role
node0 - controlplane
node1 - etcd
node2 - worker
After Update: Adds a controlplane
node0 - controlplane
node1 - etcd
node2 - worker
node3 - controlplane
"""
all_nodes = cloud_provider.create_multiple_nodes(4, test_name)
before_update_nodes = all_nodes[0:-1] # only use three nodes at first
rke_template = 'cluster_update_roles_3_1.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, before_update_nodes,
base_namespace='beforeupdate')
# Update adding controlplane node, rerun on existing validation pods
rke_template = 'cluster_update_roles_3_2.yml.j2'
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
validate_rke_cluster(rke_client, kubectl, all_nodes, 'afterupdate')
delete_nodes(cloud_provider, all_nodes)
def test_update_roles_4(test_name, cloud_provider, rke_client, kubectl):
"""
Update cluster adding a controlplane node, remove original controlplane
Before Update: Create three node cluster, each node with a single role
node0 - controlplane
node1 - etcd
node2 - worker
After Update: Adds a controlplane
node0 - controlplane <- will be deleted on next update
node1 - etcd
node2 - worker
node3 - controlplane
After 2nd Update: Deletes original controlplane
node1 - etcd
node2 - worker
node3 - controlplane
"""
all_nodes = cloud_provider.create_multiple_nodes(4, test_name)
before_update_nodes = all_nodes[0:-1] # only use three nodes at first
removed_node_nodes = all_nodes[1:]
rke_template = 'cluster_update_roles_4_1.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, before_update_nodes,
base_namespace='beforeupdate')
# Update adding controlplane node, rerun on existing validation pods
rke_template = 'cluster_update_roles_4_2.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
network_update1, dns_discovery_update1 = validate_rke_cluster(
rke_client, kubectl, all_nodes, 'afterupdate1')
# Update removing original controlplane node
# rerun on existing validation pods
# all_nodes[1:] = [node1, node2, node3]
rke_template = 'cluster_update_roles_4_3.yml.j2'
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, removed_node_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
validate_rke_cluster(
rke_client, kubectl, removed_node_nodes, 'afterupdate1',
network_validation=network_update1,
dns_validation=dns_discovery_update1)
validate_rke_cluster(
rke_client, kubectl, removed_node_nodes, 'afterupdate2')
delete_nodes(cloud_provider, all_nodes)
def test_update_roles_5(test_name, cloud_provider, rke_client, kubectl):
"""
Update cluster adding an etcd node to a single node cluster
Before Update: Create single node cluster with all roles
node0 - controlplane, etcd, worker
After Update: Adds an etcd node
node0 - controlplane, etcd, worker
node1 - etcd
"""
all_nodes = cloud_provider.create_multiple_nodes(2, test_name)
before_update_nodes = all_nodes[0:-1] # only use one node at first
rke_template = 'cluster_update_roles_5_1.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, before_update_nodes,
base_namespace='beforeupdate')
# Update adding an etcd node, rerun on existing validation pods
rke_template = 'cluster_update_roles_5_2.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
validate_rke_cluster(rke_client, kubectl, all_nodes, 'afterupdate')
delete_nodes(cloud_provider, all_nodes)
def test_update_roles_6(test_name, cloud_provider, rke_client, kubectl):
"""
Update cluster adding two etcd nodes to a single node cluster
Before Update: Create single node cluster with all roles
node0 - controlplane, etcd, worker
After Update: Adds two etcd nodes
node0 - controlplane, etcd, worker
node1 - etcd
node2 - etcd
"""
all_nodes = cloud_provider.create_multiple_nodes(3, test_name)
before_update_nodes = all_nodes[0:-2] # only use one node at first
rke_template = 'cluster_update_roles_6_1.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, before_update_nodes,
base_namespace='beforeupdate')
# Update adding 2 etcd nodes, rerun on existing validation pods
rke_template = 'cluster_update_roles_6_2.yml.j2'
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
validate_rke_cluster(rke_client, kubectl, all_nodes, 'afterupdate')
delete_nodes(cloud_provider, all_nodes)
def test_update_roles_7(test_name, cloud_provider, rke_client, kubectl):
"""
Update cluster deleting one node in a three node cluster covering all roles
Before Update: Create a three node cluster covering all roles
node0 - controlplane, etcd, worker
node1 - worker
node2 - etcd
After Update: Remove last node
node0 - controlplane, etcd, worker
node1 - worker
"""
all_nodes = cloud_provider.create_multiple_nodes(3, test_name)
removed_node_nodes = all_nodes[0:-1]
rke_template = 'cluster_update_roles_7_1.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate')
# Update remove etcd node, rerun on existing validation pods
rke_template = 'cluster_update_roles_7_2.yml.j2'
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, removed_node_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
validate_rke_cluster(
rke_client, kubectl, removed_node_nodes, 'afterupdate')
delete_nodes(cloud_provider, all_nodes)
def test_update_roles_8(test_name, cloud_provider, rke_client, kubectl):
"""
Create a single node cluster, add second node with all roles, and then
delete the original node
Before Update: Create single node cluster with all roles
node0 - controlplane, etcd, worker
After Update: Add second node with all roles
node0 - controlplane, etcd, worker
node1 - controlplane, etcd, worker
After second Update: Remove original node0
node1 - controlplane, etcd, worker
"""
all_nodes = cloud_provider.create_multiple_nodes(2, test_name)
before_update_nodes = all_nodes[0:-1]
removed_node_nodes = all_nodes[1:]
# Initial cluster
rke_template = 'cluster_update_roles_8_1.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, before_update_nodes,
base_namespace='beforeupdate')
# Update: create a second node with all roles
rke_template = 'cluster_update_roles_8_2.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
network_update1, dns_discovery_update1 = validate_rke_cluster(
rke_client, kubectl, all_nodes, 'afterupdate1')
# Update remove original node with all roles
rke_template = 'cluster_update_roles_8_1.yml.j2'
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, removed_node_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
validate_rke_cluster(
rke_client, kubectl, removed_node_nodes, 'afterupdate1',
network_validation=network_update1,
dns_validation=dns_discovery_update1)
# Create another validation setup on updated cluster
validate_rke_cluster(
rke_client, kubectl, removed_node_nodes, 'afterupdate2')
delete_nodes(cloud_provider, all_nodes)
def test_update_roles_9(test_name, cloud_provider, rke_client, kubectl):
"""
Update cluster adding a controlplane,worker node
Before Update: Create three node cluster, each node with a single role
node0 - controlplane
node1 - etcd
node2 - worker
After Update: Adds a controlplane, worker
node0 - controlplane
node1 - etcd
node2 - worker
node3 - controlplane, worker
"""
all_nodes = cloud_provider.create_multiple_nodes(4, test_name)
before_update_nodes = all_nodes[0:-1] # only use three nodes at first
# Initial cluster
rke_template = 'cluster_update_roles_9_1.yml.j2'
network, dns_discovery = create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, before_update_nodes,
base_namespace='beforeupdate')
# Update adds node with roles [controlplane,worker]
rke_template = 'cluster_update_roles_9_2.yml.j2'
create_and_validate(
cloud_provider, rke_client, kubectl, rke_template, all_nodes,
base_namespace='beforeupdate', network_validation=network,
dns_validation=dns_discovery)
# Create another validation setup on updated cluster
validate_rke_cluster(rke_client, kubectl, all_nodes, 'afterupdate')
delete_nodes(cloud_provider, all_nodes)
def test_update_roles_10(test_name, cloud_provider, rke_client, kubectl):
"""
Update cluster adding a controlplane,worker node
Before Update: Create three | |
<reponame>f-tonini/py-spatial<gh_stars>10-100
# coding: utf-8
"""
Tool Name: Exploratory Regression
Source Name: ExploratoryRegression.py
Version: ArcGIS 10.0
Author: Environmental Systems Research Institute Inc.
"""
################ Imports ####################
import sys as SYS
import copy as COPY
import os as OS
import collections as COLL
import operator as OP
import locale as LOCALE
import numpy as NUM
import math as MATH
import numpy.linalg as LA
import arcpy as ARCPY
import arcpy.management as DM
import arcpy.da as DA
import ErrorUtils as ERROR
import SSDataObject as SSDO
import SSUtilities as UTILS
import Stats as STATS
import MoransI_Step as GI
import WeightsUtilities as WU
import gapy as GAPY
import itertools as ITER
LOCALE.setlocale(LOCALE.LC_ALL, '')
################ Output Field Names #################
erFieldNames = ["RunID", "AdjR2", "AICc", "JB",
"K_BP", "MaxVIF", "SA", "NumVars"]
############## Helper Functions ##############
masterJustify = ["right"] * 6 + ["left"]
def returnPerc(numer, denom):
if numer == 0:
return 0.0
else:
return ( (numer * 1.0) / denom) * 100.
def runMoransI(ssdo, residuals, weightsMatrix, weightsType = "SWM",
silent = True):
mi = GI.GlobalI_Step(ssdo, residuals, weightsMatrix,
weightsType = weightsType,
silent = silent)
return mi
def nChooseK(n, k):
top = MATH.factorial(n)
left = MATH.factorial(k)
right = MATH.factorial(n - k)
return (top * 1.0) / (left * right)
def inSameCombo(n, k):
top = MATH.factorial(n - 2)
left = MATH.factorial(k - 2)
right = MATH.factorial(n - k)
return (top * 1.0) / (left * right)
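# Cross-checks for the counting helpers above (math.comb requires
# Python >= 3.8; the factorial form targets older ArcGIS runtimes):
# nChooseK(n, k) is the binomial coefficient C(n, k), and inSameCombo(n, k)
# counts the combinations containing two fixed variables, i.e. C(n - 2, k - 2).
import math
assert nChooseK(5, 2) == math.comb(5, 2) == 10
assert inSameCombo(5, 3) == math.comb(5 - 2, 3 - 2) == 3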
################ Interfaces ##################
def runExploratoryRegression():
"""Retrieves the parameters from the User Interface and executes the
appropriate commands."""
#### Get User Provided Inputs ####
ARCPY.env.overwriteOutput = True
inputFC = ARCPY.GetParameterAsText(0)
dependentVar = ARCPY.GetParameterAsText(1).upper()
independentVarsReg = ARCPY.GetParameterAsText(2)
independentVars = independentVarsReg.upper().split(";")
weightsFile = UTILS.getTextParameter(3)
#### Derived Output ####
outputReportFile = OS.path.join(ARCPY.env.scratchFolder, "ModelSelectionOLS.txt")
#### Search Criterion ####
maxIndVars = UTILS.getNumericParameter(5)
minIndVars = UTILS.getNumericParameter(6)
minR2 = UTILS.getNumericParameter(7)
maxCoef = UTILS.getNumericParameter(8)
maxVIF = UTILS.getNumericParameter(9)
minJB = UTILS.getNumericParameter(10)
minMI = UTILS.getNumericParameter(11)
#### Create a Spatial Stats Data Object (SSDO) ####
ssdo = SSDO.SSDataObject(inputFC)
#### Set Unique ID Field ####
masterField = UTILS.setUniqueIDField(ssdo, weightsFile = weightsFile)
#### MasterField Can Not Be The Dependent Variable ####
if masterField == dependentVar:
ARCPY.AddIDMessage("ERROR", 945, masterField,
ARCPY.GetIDMessage(84112))
raise SystemExit()
#### Remove the MasterField from Independent Vars ####
if masterField in independentVars:
independentVars.remove(masterField)
ARCPY.AddIDMessage("WARNING", 736, masterField)
#### Remove the Dependent Variable from Independent Vars ####
if dependentVar in independentVars:
independentVars.remove(dependentVar)
ARCPY.AddIDMessage("WARNING", 850, dependentVar)
#### Raise Error If No Independent Vars ####
if not len(independentVars):
ARCPY.AddIDMessage("ERROR", 737)
raise SystemExit()
#### Obtain Data ####
allVars = [dependentVar] + independentVars
#### Populate SSDO with Data ####
if not weightsFile:
ssdo.obtainDataGA(masterField, allVars, minNumObs = 5,
warnNumObs = 30)
else:
ssdo.obtainData(masterField, allVars, minNumObs = 5,
warnNumObs = 30)
ExploratoryRegression(ssdo, dependentVar,
independentVars,
weightsFile = weightsFile,
outputReportFile = outputReportFile,
maxIndVars = maxIndVars,
minIndVars = minIndVars,
minR2 = minR2, maxCoef = maxCoef,
maxVIF = maxVIF, minJB = minJB,
minMI = minMI)
#### Send Derived Output back to the tool ####
ARCPY.SetParameterAsText(4, outputReportFile)
################## Classes ###################
class ResultHandler(object):
"""Handles result information for Exploratory Regression."""
def __init__(self, allVarNames, numChoose, ssdo,
weightMatrix, weightsType = "SWM",
minR2 = .5, maxCoef = .01, maxVIF = 5.0,
minJB = .1, minMI = .1, silent = False):
#### Set Initial Attributes ####
UTILS.assignClassAttr(self, locals())
#### Set Label ####
self.numVars = len(self.allVarNames)
self.label = ARCPY.GetIDMessage(84283).format(numChoose, self.numVars)
if numChoose <= 2:
self.eachAppears = 1
else:
self.eachAppears = nChooseK(self.numVars - 2, numChoose - 2)
#### Set Result Structures ####
self.varSignDict = {}
self.signDict = {}
self.vifDict = {}
for varName in self.allVarNames:
self.varSignDict[varName] = [0, 0]
self.signDict[varName] = [0, 0]
self.vifDict[varName] = [0, []]
self.olsResults = {}
self.bestR2Vals = []
self.bestR2Res = []
self.passTable = []
self.passBools = []
self.r2Residuals = NUM.empty((self.ssdo.numObs, 3), dtype = float)
self.allJBPass = UTILS.compareFloat(0.0, self.minJB, rTol = .00000001)
self.allMIPass = UTILS.compareFloat(0.0, self.minMI, rTol = .00000001)
self.miVals = []
def returnSilentBool(self):
"""Returns whether SWM neighbor warnings should be printed."""
if not self.silent and not len(self.miVals):
#### Only Return Neighbor Warnings Once ####
self.silent = True
return False
else:
return True
def runR2Moran(self):
"""Runs Moran's I for highest R2 Models."""
resultList = []
for ind, olsID in enumerate(self.bestR2Res):
olsRes = self.olsResults[olsID]
if olsRes.miPVal is None:
silentBool = self.returnSilentBool()
residuals = self.r2Residuals[:,ind].flatten()
if not self.allMIPass:
mi = runMoransI(self.ssdo, residuals,
self.weightMatrix,
weightsType = self.weightsType,
silent = silentBool)
miPVal = mi.pVal
else:
miPVal = 1.0
olsRes.setMoransI(miPVal)
self.miVals.append(miPVal)
#### Allows the Update of Output Table ####
resultList.append( (olsID, olsRes.miPVal) )
return resultList
def evaluateResult(self, olsResult, residuals, keep = False):
"""Evaluates an OLS result in the context of search criteria."""
#### Evaluate R2 ####
r2Value = olsResult.r2
lenR2 = len(self.bestR2Vals)
inR2 = False
if lenR2 < 3:
self.bestR2Vals.append(r2Value)
self.bestR2Res.append(olsResult.id)
self.r2Residuals[:,lenR2] = residuals
inR2 = True
else:
minIndex = NUM.argsort(self.bestR2Vals)[0]
minValue = self.bestR2Vals[minIndex]
if r2Value > minValue:
self.bestR2Vals[minIndex] = r2Value
self.bestR2Res[minIndex] = olsResult.id
self.r2Residuals[:,minIndex] = residuals
inR2 = True
#### Add to Master List of OLS Results ####
keepBool = (keep or inR2)
if keepBool:
self.olsResults[olsResult.id] = olsResult
#### Evaluate p-values ####
pValVars = olsResult.evaluatePVals(maxCoef = self.maxCoef)
#### Evaluate VIF ####
vifVars = olsResult.evaluateVIF(maxVIF = self.maxVIF)
#### Populate Result Structures ####
for ind, varName in enumerate(olsResult.varNames):
self.signDict[varName][0] += 1
if olsResult.coef[ind] < 0.0:
self.varSignDict[varName][0] += 1
else:
self.varSignDict[varName][1] += 1
for varName in pValVars:
self.signDict[varName][1] += 1
for varName in vifVars:
self.vifDict[varName][0] += 1
self.vifDict[varName][1] += list(vifVars)
#### Obtain Bools ####
pvBool = len(pValVars) == self.numChoose
vifBool = len(vifVars) == 0
r2Bool = olsResult.r2 >= self.minR2
if not self.allJBPass:
jbBool = olsResult.jb > self.minJB
else:
jbBool = True
#### Decision Based on Bools ####
tableBool = pvBool and vifBool
if tableBool:
self.passTable.append(olsResult.id)
allBool = pvBool and vifBool and r2Bool and jbBool
miBool = False
if allBool:
silentBool = self.returnSilentBool()
if not self.allMIPass:
mi = runMoransI(self.ssdo, residuals, self.weightMatrix,
weightsType = self.weightsType,
silent = silentBool)
miPVal = mi.pVal
else:
miPVal = 1.0
olsResult.setMoransI(miPVal)
self.miVals.append(miPVal)
if miPVal > self.minMI:
self.passBools.append(olsResult.id)
self.olsResults[olsResult.id] = olsResult
miBool = True
return r2Bool, pvBool, vifBool, jbBool, miBool, keepBool
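# Standalone sketch of the keep-top-3 bookkeeping used above: collect the
# first three results, then displace the current minimum whenever beaten.
import numpy as NUM  # matches the alias this file already uses

def keep_top3(best_vals, best_ids, new_val, new_id):
    if len(best_vals) < 3:
        best_vals.append(new_val)
        best_ids.append(new_id)
        return True
    min_index = int(NUM.argsort(best_vals)[0])
    if new_val > best_vals[min_index]:
        best_vals[min_index] = new_val
        best_ids[min_index] = new_id
        return True
    return False

vals, ids = [], []
for val, id_ in [(0.3, 'a'), (0.5, 'b'), (0.4, 'c'), (0.6, 'd')]:
    keep_top3(vals, ids, val, id_)
assert sorted(vals) == [0.4, 0.5, 0.6]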
def report(self):
"""Reports the results from exploratory regression analysis."""
#### Set Title ####
title = self.label
#### Column Labels ####
labs = [ARCPY.GetIDMessage(84021), ARCPY.GetIDMessage(84249),
ARCPY.GetIDMessage(84042), ARCPY.GetIDMessage(84036),
ARCPY.GetIDMessage(84284), ARCPY.GetIDMessage(84292),
ARCPY.GetIDMessage(84286)]
r2Info = [ labs ]
#### Adjusted R2, Sorted Highest to Lowest with ID Tie Breaks ####
header = ARCPY.GetIDMessage(84287)
numRes = UTILS.ssRange(len(self.bestR2Res))
r2Data = []
for i in numRes:
r2Val = self.bestR2Vals[i]
idVal = int(self.bestR2Res[i].split(":")[-1])
r2Data.append((r2Val, idVal))
r2Data = NUM.array(r2Data, dtype = [('r2', float), ('ids', int)])
r2SortedInds = r2Data.argsort(order = ('r2', 'ids'))
sortIndex = reversed(r2SortedInds)
for ind in sortIndex:
olsID = self.bestR2Res[ind]
olsRes = self.olsResults[olsID]
olsOut = olsRes.report(formatStr = "%0.2f")
r2Info.append(olsOut)
r2Report = UTILS.outputTextTable(r2Info, header = header,
justify = masterJustify)
#### Passing Models ####
header = ARCPY.GetIDMessage(84288)
passList = [ labs ]
r2Values = []
olsIDs = []
for olsID in self.passBools:
olsRes = self.olsResults[olsID]
r2Values.append(olsRes.r2)
olsIDs.append(olsID)
sortIndex = NUM.argsort(r2Values).tolist()
sortIndex.reverse()
for ind in sortIndex:
olsID = olsIDs[ind]
olsRes = self.olsResults[olsID]
olsOut = olsRes.report(formatStr = "%0.6f")
passList.append(olsOut)
passingReport = UTILS.outputTextTable(passList, header = header)
#### Print Report ####
starMess = ARCPY.GetIDMessage(84289) * 78
finalReport = [starMess, title, r2Report, passingReport]
finalReport = "\n".join(finalReport)
finalReport = finalReport + "\n"
ARCPY.AddMessage(finalReport)
return finalReport
class OLSResult(object):
"""Holds OLS Result Info for Exploratory Regression."""
def __init__(self, id, varNames, coef, pVals, vifVals,
r2, aic, jb, bp, allMIPass = False):
#### Set Initial Attributes ####
UTILS.assignClassAttr(self, locals())
self.pVals = NUM.array(pVals)
self.varNameArray = NUM.array(self.varNames)
self.miPVal = None
self.k = len(varNames)
#### Create Model to Print ####
self.createModel()
def evaluateVIF(self, maxVIF = 5.0):
"""Evaluates VIF values."""
if self.k >= 2:
self.maxVIFValue = self.vifVals.max()
overIndices = NUM.where(self.vifVals >= maxVIF)
return self.varNameArray[overIndices]
else:
self.maxVIFValue = 1.0
return NUM.array([])
def evaluatePVals(self, maxCoef = .01):
"""Evaluates coefficient p-values."""
overIndices = NUM.where(self.pVals <= maxCoef)
return self.varNameArray[overIndices]
def createModel(self):
model = []
for ind, varName in enumerate(self.varNames):
pVal = self.pVals[ind]
coefVal = self.coef[ind]
#### Determine Variable Sign ####
if coefVal < 0:
vRes = " -"
else:
vRes = " +"
#### Determine Significance Level ####
if pVal <= .1 and pVal > .05:
vRes += varName + "*"
elif pVal <= .05 and pVal > .01:
vRes += varName + "**"
elif pVal <= .01:
vRes += varName + "***"
else:
vRes += varName
#### Add to Model ####
model.append(vRes)
#### Set Attribute ####
self.model = "".join(model)
gaw_pagename = '0-20008-0-{}'.format(self.gaw_id)
return {
'id': self.station_id,
'type': 'Feature',
'geometry': point2geojsongeometry(self.x, self.y, self.z),
'properties': {
'woudc_id': self.station_id,
'gaw_id': self.gaw_id,
'name': self.station_name.name,
'type': self.station_type,
'country_name_en': self.country.name_en,
'country_name_fr': self.country.name_fr,
'wmo_region_id': self.wmo_region_id,
'active': self.active,
'start_date': strftime_rfc3339(self.start_date),
'end_date': strftime_rfc3339(self.end_date),
'last_validated_datetime':
strftime_rfc3339(self.last_validated_datetime),
'gaw_url': '{}/{}'.format(gaw_baseurl, gaw_pagename)
}
}
def __repr__(self):
return 'Station ({}, {})'.format(self.station_id,
self.station_name.name)
class StationName(base):
"""Data Registry Station Alternative Name"""
__tablename__ = 'station_names'
__table_args__ = (UniqueConstraint('station_name_id'),)
id_field = 'station_name_id'
id_dependencies = ['station_id', 'name']
station_name_id = Column(String, primary_key=True)
station_id = Column(String, nullable=False)
name = Column(String, nullable=False)
def __init__(self, dict_):
self.station_id = dict_['station_id']
self.name = dict_['name']
self.generate_ids()
def __repr__(self):
return 'Station name ({}, {})'.format(self.station_id, self.name)
def generate_ids(self):
"""Builds and sets class ID field from other attributes"""
if all([hasattr(self, field) and getattr(self, field) is not None
for field in self.id_dependencies]):
components = [getattr(self, field)
for field in self.id_dependencies]
self.station_name_id = ':'.join(map(str, components))
class Deployment(base):
"""Data Registry Deployment"""
__tablename__ = 'deployments'
__table_args__ = (UniqueConstraint('deployment_id'),)
id_field = 'deployment_id'
id_dependencies = ['station_id', 'contributor_id']
deployment_id = Column(String, primary_key=True)
station_id = Column(String, ForeignKey('stations.station_id'),
nullable=False)
contributor_id = Column(String, ForeignKey('contributors.contributor_id'),
nullable=False)
start_date = Column(Date, nullable=False)
end_date = Column(Date, nullable=True)
# relationships
station = relationship('Station', backref=__tablename__)
contributor = relationship('Contributor', backref=__tablename__)
def __init__(self, dict_):
"""serializer"""
self.station_id = dict_['station_id']
self.contributor_id = dict_['contributor_id']
self.generate_ids()
try:
if isinstance(dict_['start_date'], datetime.date):
self.start_date = dict_['start_date']
else:
self.start_date = datetime.datetime.strptime(
dict_['start_date'], '%Y-%m-%d').date()
if dict_['end_date'] is None \
or isinstance(dict_['end_date'], datetime.date):
self.end_date = dict_['end_date']
elif dict_['end_date']:
self.end_date = datetime.datetime.strptime(
dict_['end_date'], '%Y-%m-%d').date()
except Exception as err:
LOGGER.error(err)
@property
def __geo_interface__(self):
if self.station is None:
geom = None
else:
geom = point2geojsongeometry(self.station.x, self.station.y,
self.station.z)
return {
'id': self.deployment_id,
'type': 'Feature',
'geometry': geom,
'properties': {
'identifier': self.deployment_id,
'station_id': self.station_id,
'station_type': self.station.station_type,
'station_name': self.station.station_name.name,
'station_country_en': self.station.country.name_en,
'station_country_fr': self.station.country.name_fr,
'contributor': self.contributor.acronym,
'contributor_name': self.contributor.name,
'contributor_project': self.contributor.project_id,
'contributor_url': self.contributor.url,
'start_date': strftime_rfc3339(self.start_date),
'end_date': strftime_rfc3339(self.end_date)
}
}
def __repr__(self):
return 'Deployment ({})'.format(self.deployment_id)
def generate_ids(self):
"""Builds and sets class ID field from other attributes"""
if all([hasattr(self, field) and getattr(self, field) is not None
for field in self.id_dependencies]):
components = [getattr(self, field)
for field in self.id_dependencies]
self.deployment_id = ':'.join(map(str, components))
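# The ':'-joined ID convention shared by these models, standalone (build_id
# and the sample row are illustrative, not part of the registry API):
from types import SimpleNamespace

def build_id(obj, dependencies):
    if all(getattr(obj, field, None) is not None for field in dependencies):
        return ':'.join(str(getattr(obj, field)) for field in dependencies)
    return None

row = SimpleNamespace(station_id='077', contributor_id='MSC:WOUDC')
assert build_id(row, ['station_id', 'contributor_id']) == '077:MSC:WOUDC'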
class DataRecord(base):
"""Data Registry Data Record"""
__tablename__ = 'data_records'
__table_args__ = (
ForeignKeyConstraint(
['data_generation_agency', 'content_class'],
['contributors.acronym', 'contributors.project_id']),
)
id_field = 'data_record_id'
id_dependencies = [
'content_class',
'content_category',
'content_level',
'content_form',
'data_generation_agency',
'platform_type',
'station_id',
'instrument_name',
'instrument_model',
'instrument_number',
'timestamp_date',
'data_generation_version'
]
data_record_id = Column(String, primary_key=True)
# Extended CSV core fields
content_class = Column(String, ForeignKey('projects.project_id'),
nullable=False)
content_category = Column(String, ForeignKey('datasets.dataset_id'),
nullable=False)
content_level = Column(String, nullable=False)
content_form = Column(String, nullable=False)
data_generation_date = Column(Date, nullable=False)
data_generation_agency = Column(String, nullable=False)
data_generation_version = Column(String, nullable=False)
data_generation_scientific_authority = Column(String, nullable=True)
station_id = Column(String, ForeignKey('stations.station_id'),
nullable=False)
instrument_id = Column(String, ForeignKey('instruments.instrument_id'),
nullable=False)
x = Column(Float, nullable=False)
y = Column(Float, nullable=False)
z = Column(Float, nullable=True)
timestamp_utcoffset = Column(String, nullable=False)
timestamp_date = Column(Date, nullable=False)
timestamp_time = Column(Time, nullable=True)
number_of_observations = Column(Integer, nullable=True)
# data management fields
published = Column(Boolean, nullable=False, default=False)
# Pass the callable (not its call result) so each record gets a fresh
# timestamp at insert time rather than the module-import time.
received_datetime = Column(DateTime, nullable=False,
default=datetime.datetime.utcnow)
inserted_datetime = Column(DateTime, nullable=False,
default=datetime.datetime.utcnow)
processed_datetime = Column(DateTime, nullable=False,
default=datetime.datetime.utcnow)
published_datetime = Column(DateTime, nullable=False,
default=datetime.datetime.utcnow)
ingest_filepath = Column(String, nullable=False)
filename = Column(String, nullable=False)
output_filepath = Column(String, nullable=False)
url = Column(String, nullable=False)
es_id = Column(String, nullable=False)
# Relationships
station = relationship('Station', backref=__tablename__)
instrument = relationship('Instrument', backref=__tablename__)
def __init__(self, ecsv):
"""serializer"""
self.content_class = ecsv.extcsv['CONTENT']['Class']
self.content_category = ecsv.extcsv['CONTENT']['Category']
self.content_level = ecsv.extcsv['CONTENT']['Level']
self.content_form = ecsv.extcsv['CONTENT']['Form']
self.data_generation_date = ecsv.extcsv['DATA_GENERATION']['Date']
self.data_generation_agency = ecsv.extcsv['DATA_GENERATION']['Agency']
self.data_generation_version = \
ecsv.extcsv['DATA_GENERATION']['Version']
if 'ScientificAuthority' in ecsv.extcsv['DATA_GENERATION']:
self.data_generation_scientific_authority = \
ecsv.extcsv['DATA_GENERATION']['ScientificAuthority']
self._platform_type = ecsv.extcsv['PLATFORM']['Type']
self._platform_name = ecsv.extcsv['PLATFORM']['Name']
self._platform_country = ecsv.extcsv['PLATFORM']['Country']
self._platform_gaw_id = ecsv.extcsv['PLATFORM'].get('GAW_ID', None)
self.station_id = str(ecsv.extcsv['PLATFORM']['ID'])
self._instrument_name = ecsv.extcsv['INSTRUMENT']['Name']
self._instrument_model = str(ecsv.extcsv['INSTRUMENT']['Model'])
self._instrument_number = str(ecsv.extcsv['INSTRUMENT']['Number'])
self.deployment_id = ':'.join([
self.station_id,
self.data_generation_agency,
self.content_class
])
self.instrument_id = ':'.join([
self.instrument_name,
self.instrument_model,
self.instrument_number,
self.content_category,
self.deployment_id
])
self.timestamp_utcoffset = ecsv.extcsv['TIMESTAMP']['UTCOffset']
self.timestamp_date = ecsv.extcsv['TIMESTAMP']['Date']
if 'Time' in ecsv.extcsv['TIMESTAMP']:
self.timestamp_time = ecsv.extcsv['TIMESTAMP']['Time']
self.x = ecsv.extcsv['LOCATION']['Longitude']
self.y = ecsv.extcsv['LOCATION']['Latitude']
self.z = ecsv.extcsv['LOCATION']['Height']
self.generate_ids()
self.extcsv = ecsv.extcsv
self.number_of_observations = ecsv.number_of_observations()
@property
def platform_type(self):
if hasattr(self, '_platform_type'):
return self._platform_type
else:
return self.station.station_type
@property
def platform_name(self):
if hasattr(self, '_platform_name'):
return self._platform_name
else:
return self.station.name
@property
def platform_country(self):
if hasattr(self, '_platform_country'):
return self._platform_country
else:
return self.station.country_id
@property
def platform_gaw_id(self):
if hasattr(self, '_platform_gaw_id'):
return self._platform_gaw_id
else:
return self.station.gaw_id
@property
def instrument_name(self):
if hasattr(self, '_instrument_name'):
return self._instrument_name
else:
return self.instrument.name
@property
def instrument_model(self):
if hasattr(self, '_instrument_model'):
return self._instrument_model
else:
return self.instrument.model
@property
def instrument_number(self):
if hasattr(self, '_instrument_number'):
return self._instrument_number
else:
return self.instrument.serial
def generate_ids(self):
"""Builds and sets class ID fields from other attributes"""
self.data_record_id = self.get_urn()
self.es_id = self.get_esid()
def get_urn(self):
"""generate data record URN"""
if all([hasattr(self, field) for field in self.id_dependencies]):
tokens = [getattr(self, field) for field in self.id_dependencies]
return ':'.join(map(str, tokens)).lower()
else:
return None
def get_esid(self):
"""generate data record ES identifier"""
dependencies = self.id_dependencies[:-1]
if all([hasattr(self, field) for field in dependencies]):
tokens = [getattr(self, field) for field in dependencies]
return ':'.join(map(str, tokens)).lower()
else:
return None
def get_waf_path(self, basepath):
"""generate WAF URL"""
datasetdirname = '{}_{}_{}'.format(self.content_category,
self.content_level,
self.content_form)
url_tokens = [
basepath.rstrip('/'),
'Archive-NewFormat',
datasetdirname,
'{}{}'.format(self.platform_type.lower(), self.station_id),
self.instrument_name.lower(),
self.timestamp_date.strftime('%Y'),
self.filename
]
return '/'.join(url_tokens)
@property
def __geo_interface__(self):
return {
'id': self.es_id,
'type': 'Feature',
'geometry': point2geojsongeometry(self.x, self.y, self.z),
'properties': {
'identifier': self.es_id,
'content_class': self.content_class,
'content_category': self.content_category,
'content_level': self.content_level,
'content_form': self.content_form,
'data_generation_date':
strftime_rfc3339(self.data_generation_date),
'data_generation_agency': self.data_generation_agency,
'data_generation_version': self.data_generation_version,
'data_generation_scientific_authority': self.data_generation_scientific_authority, # noqa
'platform_type': self.platform_type,
'platform_id': self.station_id,
'platform_name': self.platform_name,
'platform_country': self.platform_country,
'platform_gaw_id': self.platform_gaw_id,
'instrument_name': self.instrument_name,
'instrument_model': self.instrument_model,
'instrument_number': self.instrument_number,
'timestamp_utcoffset': self.timestamp_utcoffset,
'timestamp_date': strftime_rfc3339(self.timestamp_date),
'timestamp_time': None if self.timestamp_time is None \
else self.timestamp_time.isoformat(),
'published': self.published,
'received_datetime': strftime_rfc3339(self.received_datetime),
'inserted_datetime': strftime_rfc3339(self.inserted_datetime),
'processed_datetime':
strftime_rfc3339(self.processed_datetime),
'published_datetime':
strftime_rfc3339(self.published_datetime),
'number_of_observations': self.number_of_observations,
'ingest_filepath': self.ingest_filepath,
'filename': self.filename,
'output_filepath': self.output_filepath,
'url': self.url
}
}
def __repr__(self):
return 'DataRecord({}, {})'.format(self.data_record_id, self.url)
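# Shape of the WAF path produced by get_waf_path above, with hypothetical
# values (the real base path and filenames come from configuration):
tokens = [
    'https://example.org/archive',  # basepath, already rstrip('/')-ed
    'Archive-NewFormat',
    'OzoneSonde_1.0_1',             # <Category>_<Level>_<Form>
    'stn077',                       # <platform type, lowercased><station id>
    'ecc',                          # instrument name, lowercased
    '2019',                         # timestamp year
    'example.csv',
]
assert '/'.join(tokens) == ('https://example.org/archive/Archive-NewFormat/'
                            'OzoneSonde_1.0_1/stn077/ecc/2019/example.csv')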
class Contribution(base):
"""Data Registry Contribution"""
__tablename__ = 'contributions'
id_field = 'contribution_id'
id_dependencies = ['project_id', 'dataset_id', 'station_id',
'instrument_name']
project_id = Column(String, ForeignKey('projects.project_id'),
nullable=False, default='WOUDC')
contribution_id = Column(String, primary_key=True)
dataset_id = Column(String, ForeignKey('datasets.dataset_id'),
nullable=False)
station_id = Column(String, ForeignKey('stations.station_id'),
nullable=False)
country_id = Column(String, ForeignKey('countries.country_id'),
nullable=False)
instrument_name = Column(String, nullable=False)
contributor_name = Column(String, nullable=False)
start_date = Column(Date, nullable=False)
end_date = Column(Date, nullable=True)
station = relationship('Station', backref=__tablename__)
country = relationship('Country', backref=__tablename__)
dataset = relationship('Dataset', backref=__tablename__)
def __init__(self, dict_):
self.project_id = dict_['project_id']
self.contribution_id = dict_['contribution_id']
self.station_id = dict_['station_id']
self.instrument_name = dict_['instrument_name']
self.contributor_name = dict_['contributor_name']
self.country_id = dict_['country_id']
self.dataset_id = dict_['dataset_id']
self.start_date = dict_['start_date']
self.end_date = dict_['end_date']
self.generate_ids()
try:
if isinstance(dict_['start_date'], datetime.date):
self.start_date = dict_['start_date']
else:
self.start_date = datetime.datetime.strptime(
dict_['start_date'], '%Y-%m-%d').date()
if dict_['end_date'] is None \
or isinstance(dict_['end_date'], datetime.date):
self.end_date = dict_['end_date']
elif dict_['end_date']:
self.end_date = datetime.datetime.strptime(
dict_['end_date'], '%Y-%m-%d').date()
except Exception as err:
LOGGER.error(err)
@property
def __geo_interface__(self):
return {
'id': self.contribution_id,
'type': 'Feature',
'geometry': point2geojsongeometry(self.station.x,
self.station.y, self.station.z),
'properties': {
'identifier': self.contribution_id,
'project_id': self.project_id,
'dataset_id': self.dataset_id,
'station_id': self.station_id,
'station_name': self.station.station_name.name,
'country_id': self.station.country_id,
'country_name_en': self.station.country.name_en,
'country_name_fr': self.station.country.name_fr,
'instrument_name': self.instrument_name,
'contributor_name': self.contributor_name,
'start_date': self.start_date,
'end_date': self.end_date
}
}
def __repr__(self):
return 'Contribution ({})'.format(self.contribution_id)
def generate_ids(self):
"""Builds and sets class ID field from other attributes"""
if all([hasattr(self, field) and getattr(self, field) is not None
for field in self.id_dependencies]):
components = [getattr(self, field)
for field in self.id_dependencies]
self.contribution_id = ':'.join(map(str, components))
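# Derived example (values hypothetical): project_id='WOUDC',
# dataset_id='OzoneSonde', station_id='077', instrument_name='ECC'
# yields contribution_id 'WOUDC:OzoneSonde:077:ECC'. Unlike DataRecord
# URNs, the tokens are not lower-cased here.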
class Notification(base):
"""Data Registry News Item"""
__tablename__ = 'notifications'
id_field = 'notification_id'
id_dependencies = ['title_en', 'published_date']
notification_id = Column(String, primary_key=True)
title_en = Column(String, nullable=False)
title_fr = Column(String, nullable=False)
description_en = Column(String, nullable=False)
description_fr = Column(String, nullable=False)
keywords_en = Column(String, nullable=False)
keywords_fr = Column(String, nullable=False)
published_date = Column(Date, nullable=False)
banner = Column(Boolean, nullable=False, default=False)
visible = Column(Boolean, nullable=False, default=True)
x = Column(Float, nullable=False)
y = Column(Float, nullable=False)
def __init__(self, dict_):
"""serializer"""
self.title_en = dict_['title_en']
self.title_fr = dict_['title_fr']
self.description_en = dict_['description_en']
self.description_fr = dict_['description_fr']
self.set_keywords_en(dict_['keywords_en'])
self.set_keywords_fr(dict_['keywords_fr'])
self.published_date = dict_['published']
self.banner = dict_.get('banner', False)
self.visible = dict_.get('visible', True)
self.x = dict_['x']
self.y = dict_['y']
year_month_day = datetime.datetime.strptime(
self.published_date[0:10], '%Y-%m-%d')
self.notification_id = strftime_rfc3339(year_month_day)
def get_keywords_en(self):
return self.keywords_en.split(',')
def set_keywords_en(self, keywords):
self.keywords_en = ','.join(keywords)
def get_keywords_fr(self):
return self.keywords_fr.split(',')
def set_keywords_fr(self, keywords):
self.keywords_fr = ','.join(keywords)
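# The keyword accessors round-trip a list through a comma-joined string
# column: set_keywords_en(['ozone', 'uv']) stores 'ozone,uv' and
# get_keywords_en() returns ['ozone', 'uv']. A keyword containing a comma
# would not survive this encoding.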
@property
def __geo_interface__(self):
return {
'id': self.notification_id,
'type': 'Feature',
'geometry': point2geojsongeometry(self.x, self.y),
'properties': {
'title_en': self.title_en,
'title_fr': self.title_fr,
'description_en': self.description_en,
'description_fr': self.description_fr,
'keywords_en': self.get_keywords_en(),
'keywords_fr': self.get_keywords_fr(),
'published_date': strftime_rfc3339(self.published_date),
'banner': self.banner,
'visible': self.visible
}
}
def __repr__(self):
return 'Notification ({})'.format(self.notification_id)
def build_contributions(instrument_models):
"""function that forms contributions from other model lists"""
# List to store the final contribution_models
contribution_models = []
# contribution dict
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslo_log import log as logging
from oslo_utils import versionutils
from sqlalchemy import orm
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy import sql
from sqlalchemy.sql import func
from nova import context as nova_context
from nova.db.api import api as api_db_api
from nova.db.api import models as api_models
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base
from nova.objects import cell_mapping
from nova.objects import fields
from nova.objects import virtual_interface
LOG = logging.getLogger(__name__)
@base.NovaObjectRegistry.register
class InstanceMapping(base.NovaTimestampObject, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Add queued_for_delete
# Version 1.2: Add user_id
VERSION = '1.2'
fields = {
'id': fields.IntegerField(read_only=True),
'instance_uuid': fields.UUIDField(),
'cell_mapping': fields.ObjectField('CellMapping', nullable=True),
'project_id': fields.StringField(),
'user_id': fields.StringField(),
'queued_for_delete': fields.BooleanField(default=False),
}
def obj_make_compatible(self, primitive, target_version):
super(InstanceMapping, self).obj_make_compatible(primitive,
target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 2) and 'user_id' in primitive:
del primitive['user_id']
if target_version < (1, 1):
if 'queued_for_delete' in primitive:
del primitive['queued_for_delete']
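# Derived example: when this object is serialized for a 1.0-level peer,
# both newer fields are stripped from the primitive, e.g.
#   {'instance_uuid': u, 'user_id': 'abc', 'queued_for_delete': False}
# is trimmed down to {'instance_uuid': u} before being sent over RPC.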
def obj_load_attr(self, attrname):
if attrname == 'user_id':
LOG.error('The unset user_id attribute of an unmigrated instance '
'mapping should not be accessed.')
raise exception.ObjectActionError(
action='obj_load_attr',
reason=_('attribute user_id is not lazy-loadable'))
super(InstanceMapping, self).obj_load_attr(attrname)
def _update_with_cell_id(self, updates):
cell_mapping_obj = updates.pop("cell_mapping", None)
if cell_mapping_obj:
updates["cell_id"] = cell_mapping_obj.id
return updates
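# Derived example: the object reference is swapped for its database id, so
#   {'cell_mapping': <CellMapping id=3>, 'project_id': 'p'}
# becomes
#   {'cell_id': 3, 'project_id': 'p'}
# before the updates dict reaches the DB layer.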
@staticmethod
def _from_db_object(context, instance_mapping, db_instance_mapping):
for key in instance_mapping.fields:
db_value = db_instance_mapping.get(key)
if key == 'cell_mapping':
# cell_mapping can be None indicating that the instance has
# not been scheduled yet.
if db_value:
db_value = cell_mapping.CellMapping._from_db_object(
context, cell_mapping.CellMapping(), db_value)
if key == 'user_id' and db_value is None:
# NOTE(melwitt): If user_id is NULL, we can't set the field
# because it's non-nullable. We don't plan for any code to read
# the user_id field at this time, so skip setting it.
continue
setattr(instance_mapping, key, db_value)
instance_mapping.obj_reset_changes()
instance_mapping._context = context
return instance_mapping
@staticmethod
@api_db_api.context_manager.reader
def _get_by_instance_uuid_from_db(context, instance_uuid):
db_mapping = context.session.query(api_models.InstanceMapping)\
.options(orm.joinedload(api_models.InstanceMapping.cell_mapping))\
.filter(api_models.InstanceMapping.instance_uuid == instance_uuid)\
.first()
if not db_mapping:
raise exception.InstanceMappingNotFound(uuid=instance_uuid)
return db_mapping
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_mapping = cls._get_by_instance_uuid_from_db(context, instance_uuid)
return cls._from_db_object(context, cls(), db_mapping)
@staticmethod
@api_db_api.context_manager.writer
def _create_in_db(context, updates):
db_mapping = api_models.InstanceMapping()
db_mapping.update(updates)
db_mapping.save(context.session)
# NOTE: This is done because a later access will trigger a lazy load
# outside of the db session so it will fail. We don't lazy load
# cell_mapping on the object later because we never need an
# InstanceMapping without the CellMapping.
db_mapping.cell_mapping
return db_mapping
@base.remotable
def create(self):
changes = self.obj_get_changes()
changes = self._update_with_cell_id(changes)
if 'queued_for_delete' not in changes:
# NOTE(danms): If we are creating a mapping, it should be
# not queued_for_delete (unless we are being asked to
# create one in deleted state for some reason).
changes['queued_for_delete'] = False
db_mapping = self._create_in_db(self._context, changes)
self._from_db_object(self._context, self, db_mapping)
@staticmethod
@api_db_api.context_manager.writer
def _save_in_db(context, instance_uuid, updates):
db_mapping = context.session.query(
api_models.InstanceMapping).filter_by(
instance_uuid=instance_uuid).first()
if not db_mapping:
raise exception.InstanceMappingNotFound(uuid=instance_uuid)
db_mapping.update(updates)
# NOTE: This is done because a later access will trigger a lazy load
# outside of the db session so it will fail. We don't lazy load
# cell_mapping on the object later because we never need an
# InstanceMapping without the CellMapping.
db_mapping.cell_mapping
context.session.add(db_mapping)
return db_mapping
@base.remotable
def save(self):
changes = self.obj_get_changes()
changes = self._update_with_cell_id(changes)
try:
db_mapping = self._save_in_db(self._context, self.instance_uuid,
changes)
except orm_exc.StaleDataError:
# NOTE(melwitt): If the instance mapping has been deleted out from
# under us by conductor (delete requested while booting), we will
# encounter a StaleDataError after we retrieved the row and try to
# update it after it's been deleted. We can treat this like an
# instance mapping not found and allow the caller to handle it.
raise exception.InstanceMappingNotFound(uuid=self.instance_uuid)
self._from_db_object(self._context, self, db_mapping)
self.obj_reset_changes()
@staticmethod
@api_db_api.context_manager.writer
def _destroy_in_db(context, instance_uuid):
result = context.session.query(api_models.InstanceMapping).filter_by(
instance_uuid=instance_uuid).delete()
if not result:
raise exception.InstanceMappingNotFound(uuid=instance_uuid)
@base.remotable
def destroy(self):
self._destroy_in_db(self._context, self.instance_uuid)
@api_db_api.context_manager.writer
def populate_queued_for_delete(context, max_count):
cells = objects.CellMappingList.get_all(context)
processed = 0
for cell in cells:
ims = (
# Get a direct list of instance mappings for this cell which
# have not yet received a defined value decision for
# queued_for_delete
context.session.query(api_models.InstanceMapping)
.filter(
api_models.InstanceMapping.queued_for_delete == None) # noqa
.filter(api_models.InstanceMapping.cell_id == cell.id)
.limit(max_count).all())
ims_by_inst = {im.instance_uuid: im for im in ims}
if not ims_by_inst:
# If there is nothing from this cell to migrate, move on.
continue
with nova_context.target_cell(context, cell) as cctxt:
filters = {'uuid': list(ims_by_inst.keys()),
'deleted': True,
'soft_deleted': True}
instances = objects.InstanceList.get_by_filters(
cctxt, filters, expected_attrs=[])
# Walk through every deleted instance that has a mapping needing
# to be updated and update it
for instance in instances:
im = ims_by_inst.pop(instance.uuid)
im.queued_for_delete = True
context.session.add(im)
processed += 1
# Any instances we did not just hit must be not-deleted, so
# update the remaining mappings
for non_deleted_im in ims_by_inst.values():
non_deleted_im.queued_for_delete = False
context.session.add(non_deleted_im)
processed += 1
max_count -= len(ims)
if max_count <= 0:
break
return processed, processed
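# Usage sketch (not from this file): a nova-manage style batched driver.
# Assumes a valid admin RequestContext `ctx`; keep migrating in chunks
# until a batch finds nothing left to process.
#
#     while True:
#         found, done = populate_queued_for_delete(ctx, 50)
#         if not found:
#             break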
@api_db_api.context_manager.writer
def populate_user_id(context, max_count):
cells = objects.CellMappingList.get_all(context)
cms_by_id = {cell.id: cell for cell in cells}
done = 0
unmigratable_ims = False
ims = (
# Get a list of instance mappings which do not have user_id populated.
# We need to include records with queued_for_delete=True because they
# include SOFT_DELETED instances, which could be restored at any time
# in the future. If we don't migrate SOFT_DELETED instances now, we
# wouldn't be able to retire this migration code later. Also filter
# out the marker instance created by the virtual interface migration.
context.session.query(api_models.InstanceMapping)
.filter_by(user_id=None)
.filter(api_models.InstanceMapping.project_id !=
virtual_interface.FAKE_UUID)
.limit(max_count).all())
found = len(ims)
ims_by_inst_uuid = {}
inst_uuids_by_cell_id = collections.defaultdict(set)
for im in ims:
ims_by_inst_uuid[im.instance_uuid] = im
inst_uuids_by_cell_id[im.cell_id].add(im.instance_uuid)
for cell_id, inst_uuids in inst_uuids_by_cell_id.items():
# We cannot migrate instance mappings that don't have a cell yet.
if cell_id is None:
unmigratable_ims = True
continue
with nova_context.target_cell(context, cms_by_id[cell_id]) as cctxt:
# We need to migrate SOFT_DELETED instances because they could be
# restored at any time in the future, preventing us from being able
# to remove any other interim online data migration code we have,
# if we don't migrate them here.
# NOTE: it's not possible to query only for SOFT_DELETED instances.
# We must query for both deleted and SOFT_DELETED instances.
filters = {'uuid': inst_uuids}
try:
instances = objects.InstanceList.get_by_filters(
cctxt, filters, expected_attrs=[])
except Exception as exp:
LOG.warning('Encountered exception: "%s" while querying '
'instances from cell: %s. Continuing to the next '
'cell.', str(exp),
cms_by_id[cell_id].identity)
continue
# Walk through every instance that has a mapping needing to be updated
# and update it.
for instance in instances:
im = ims_by_inst_uuid.pop(instance.uuid)
im.user_id = instance.user_id
context.session.add(im)
done += 1
if ims_by_inst_uuid:
unmigratable_ims = True
if done >= max_count:
break
if unmigratable_ims:
LOG.warning('Some instance mappings were not migratable. This may '
'be transient due to in-flight instance builds, or could '
'be due to stale data that will be cleaned up after '
'running "nova-manage db archive_deleted_rows --purge".')
return found, done
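# Contract of the (found, done) pair returned above: `found` counts the
# mappings selected for this batch, `done` counts those actually migrated;
# the online migration is complete once a run reports found == 0.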
@base.NovaObjectRegistry.register
class InstanceMappingList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added get_by_cell_id method.
# Version 1.2: Added get_by_instance_uuids method
# Version 1.3: Added get_counts()
VERSION = '1.3'
fields = {
'objects': fields.ListOfObjectsField('InstanceMapping'),
}
@staticmethod
@api_db_api.context_manager.reader
def _get_by_project_id_from_db(context, project_id):
return context.session.query(api_models.InstanceMapping)\
.options(orm.joinedload(api_models.InstanceMapping.cell_mapping))\
.filter(api_models.InstanceMapping.project_id == project_id).all()
@base.remotable_classmethod
def get_by_project_id(cls, context, project_id):
db_mappings = cls._get_by_project_id_from_db(context, project_id)
return base.obj_make_list(context, cls(), objects.InstanceMapping,
db_mappings)
@staticmethod
@api_db_api.context_manager.reader
def _get_by_cell_id_from_db(context, cell_id):
return context.session.query(api_models.InstanceMapping)\
.options(orm.joinedload(api_models.InstanceMapping.cell_mapping))\
.filter(api_models.InstanceMapping.cell_id == cell_id).all()
@base.remotable_classmethod
def get_by_cell_id(cls, context, cell_id):
db_mappings = cls._get_by_cell_id_from_db(context, cell_id)
return base.obj_make_list(context, cls(), objects.InstanceMapping,
db_mappings)
@staticmethod
@api_db_api.context_manager.reader
def _get_by_instance_uuids_from_db(context, uuids):
return context.session.query(api_models.InstanceMapping)\
.options(orm.joinedload(api_models.InstanceMapping.cell_mapping))\
.filter(api_models.InstanceMapping.instance_uuid.in_(uuids))\
.all()
@base.remotable_classmethod
def get_by_instance_uuids(cls, context, uuids):
db_mappings = cls._get_by_instance_uuids_from_db(context, uuids)
return base.obj_make_list(context, cls(), objects.InstanceMapping,
db_mappings)
@staticmethod
@api_db_api.context_manager.writer
def _destroy_bulk_in_db(context, instance_uuids):
return context.session.query(api_models.InstanceMapping).filter(
api_models.InstanceMapping.instance_uuid.in_(instance_uuids)).\
delete(synchronize_session=False)
@classmethod
def destroy_bulk(cls, context, instance_uuids):
return cls._destroy_bulk_in_db(context, instance_uuids)
@staticmethod
@api_db_api.context_manager.reader
def _get_not_deleted_by_cell_and_project_from_db(context, cell_uuid,
project_id, limit):
query = context.session.query(api_models.InstanceMapping)
if project_id is not None:
query = query.filter_by(project_id=project_id)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import datetime
import os
import re
import shutil
import subprocess
import sys
import time
import urllib.parse
import xbmc
import xbmcgui
import xbmcplugin
import xbmcaddon
import xbmcvfs
__PLUGIN_ID__ = "plugin.picture.sane-scanner"
_PLUGIN_NAME = "Kodi Sane Scanner"
_TMP_FOLDER = "/tmp/"
_IMG_FILE = "kodi-sane-scanner-img"
_PDF_PREVIEW_FILE = "kodi-sane-scanner-pdf"
_SCANNER_MODES = [
["--mode", "Lineart"],
["--mode", "Gray"],
["--mode", "Color"]
]
_SCANNER_RESOLUTIONS = [
["--resolution", "150"],
["--resolution", "200"],
["--resolution", "300"],
["--resolution", "600"]
]
_ARCHIVE_RESOLUTIONS = [
"150",
"200",
"300",
"600"
]
_SCANNER_DIMENSIONS = [
[],
["-l", "0", "-t", "0", "-x", "216mm", "-y", "279mm"],
["-l", "0", "-t", "0", "-x", "210mm", "-y", "297mm"],
["-l", "0", "-t", "0", "-x", "148mm", "-y", "210mm"],
["-l", "0", "-t", "0", "-x", "105mm", "-y", "148mm"],
]
_SCANNER_FORMAT = [
"png",
"jpeg"
]
settings = xbmcaddon.Addon(id=__PLUGIN_ID__)
addon_dir = xbmcvfs.translatePath(settings.getAddonInfo('path'))
_menu = []
class ScanException(Exception):
pass
def find_scanner():
p1 = subprocess.Popen(["scanimage", "-f" "%d %v %m %t%n"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p1.communicate()
i = 0
for match in re.finditer('([^ ]+) (.+)', out.decode("utf-8")):
settings.setSetting("scanner_%i" % i, "%s|%s" %
(match.group(2), match.group(1)))
i = i + 1
p1.stdout.close()
for j in range(i, 2):
settings.setSetting("scanner_%i" % j, "")
if i == 0:
xbmc.executebuiltin(
"Notification(No scanner found, "
"Check if scanner is connected!)")
else:
xbmc.executebuiltin(
"Notification(Scanners found, "
"%i scanners added to device list)" % i)
def find_printer():
p1 = subprocess.Popen(["lpstat", "-e"],
stdout=subprocess.PIPE)
out, err = p1.communicate()
i = 0
for printer in out.decode("utf-8").splitlines():
settings.setSetting("printer_%i" % (i + 1), "%s"
% printer)
i = i + 1
p1.stdout.close()
for j in range(i, 5):
settings.setSetting("printer_%i" % (j + 1), "")
if i == 0:
xbmc.executebuiltin(
"Notification(No printer found, "
"Check if printer is connected!)")
else:
xbmc.executebuiltin(
"Notification(Printers found, "
"%i printers added to device list)" % i)
def _get_scanner():
scanner = settings.getSetting("scanner_scanner")
if scanner == "2":
return None
else:
return settings.getSetting("scanner_%s" % scanner).split("|")
def _get_printer():
printer = settings.getSetting("output_printer")
if printer != "0":
return settings.getSetting("printer_%s" % printer)
else:
return ""
def _get_format():
return _SCANNER_FORMAT[int(settings.getSetting("scanner_format"))]
def _build_param_string(param, values, current=""):
if values == None:
return current
for v in values:
current += "?" if len(current) == 0 else "&"
current += param + "=" + str(v)
return current
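# Derived examples of the query-string builder above:
#   _build_param_string("exec", ["scan"])             -> "?exec=scan"
#   _build_param_string("exec", ["a", "b"])           -> "?exec=a&exec=b"
#   _build_param_string("msg", ["hi"], "?exec=scan")  -> "?exec=scan&msg=hi"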
def _add_list_item(entry, path):
if path == "/":
path = ""
item_path = path + "/" + entry["path"]
param_string = ""
if "exec" in entry:
param_string = _build_param_string(
param="exec",
values=entry["exec"],
current=param_string)
if "param" in entry:
param_string = _build_param_string(
param=entry["param"][0],
values=[entry["param"][1]],
current=param_string)
if "msg" in entry:
param_string = _build_param_string(
param="msg",
values=[entry["msg"]],
current=param_string)
if "node" in entry:
is_folder = True
else:
is_folder = False
label = entry["name"]
if "image" in entry:
icon_file = entry["image"]
elif "icon" in entry:
icon_file = os.path.join(addon_dir,
"resources", "assets",
entry["icon"] + ".png")
else:
icon_file = None
li = xbmcgui.ListItem(label)
li.setArt({"icon": icon_file})
if "image" in entry:
li.setAvailableFanart([
{"image": icon_file, "preview": icon_file}
])
if "contextItems" in entry:
commands = []
for ci in entry["contextItems"]:
p = _build_param_string(
param="exec",
values=[ci[1]],
current="")
url = "plugin://%s%s%s" % (__PLUGIN_ID__, item_path, p)
commands.append((ci[0], 'XBMC.RunPlugin(%s)' % url, ))
li.addContextMenuItems(commands)
xbmcplugin.addDirectoryItem(handle=addon_handle,
listitem=li,
url="plugin://" + __PLUGIN_ID__
+ item_path
+ param_string,
isFolder=is_folder)
def _build_pdf_preview(filename):
xbmc.executebuiltin("Notification(%s, %s, %s/icon.png)"
% (_PLUGIN_NAME, "Rendering preview... be patient!", addon_dir))
_clean_preview()
_convert_for_preview(filename)
preview_entries = []
i = 0
for f in _get_preview_files():
i = i + 1
preview_entries += [
{
"path": "/%s" % f,
"name": "Page %i" % i,
"image": "%s%s" % (_TMP_FOLDER, f),
"exec": ["preview"]
}
]
entries = [
{
"path": "archive",
"name": "Archive",
"node": preview_entries
}
]
return entries
def _build_archive():
_clean_preview()
pdf_files = _get_pdf_files()
if settings.getSetting("archive_operations") == "false":
contextItems = []
else:
contextItems = [
("Rename PDF file", "rename"),
("Delete PDF file", "delete")
]
pdf_entries = []
for filename in pdf_files:
pdf_entries += [
{
"path": filename,
"name": filename,
"contextItems": contextItems,
"node": []
}
]
entries = [
{
"path": "archive",
"name": "Archive",
"node": pdf_entries
}
]
return entries
def _build_root():
tmp_files = _get_tmp_files()
entries = [
{
"path": "/",
"name": "scan image",
"icon": "icon_scan",
"exec": ["scan"],
"msg": "Scanning page... be patient!",
"node": []
}
]
if len(tmp_files) > 0:
entries += [
{
"path": "/",
"name": "create PDF",
"icon": "icon_pdf",
"exec": ["pdf"],
"msg": "Creating PDF file",
"node": []
}
]
if len(tmp_files) > 0 and settings.getSetting("output_email") == "1":
entries += [
{
"path": "/",
"name": "create PDF and send email to %s" % settings.getSetting("output_emailaddress"),
"icon": "icon_email",
"exec": ["email"],
"msg": "Sending to %s" % settings.getSetting("output_emailaddress"),
"node": []
}
]
if len(tmp_files) > 0 and settings.getSetting("output_printer") != "0":
entries += [
{
"path": "/",
"name": "create PDF and print on %s" % _get_printer(),
"icon": "icon_print",
"exec": ["print"],
"msg": "Printing on %s" % _get_printer(),
"node": []
}
]
i = 0
for f in tmp_files:
i = i + 1
entries += [
{
"path": "/%s" % f,
"name": "preview page %i" % i,
"image": "%s%s" % (_TMP_FOLDER, f),
"exec": ["preview"]
}
]
if len(tmp_files) > 0:
entries += [
{
"path": "/",
"name": "remove latest page",
"icon": "icon_undo",
"exec": ["undo"],
"msg": "removing latest page",
"node": []
},
{
"path": "/",
"name": "clean whole filing",
"icon": "icon_trash",
"exec": ["clean"],
"msg": "Cleaning all pages",
"node": []
}
]
if settings.getSetting("archive") == "true":
entries += [
{
"path": "archive",
"name": "Archive",
"node": []
}
]
return entries
def _build_dir_structure(path, url_params):
global _menu
splitted_path = path.split("/")
splitted_path.pop(0)
entries = []
if path == "/":
entries = _build_root()
elif path == "/archive":
entries = _build_archive()
elif path.startswith("/archive") and len(splitted_path) == 2:
entries = _build_pdf_preview(splitted_path[1])
_menu = [
{
"path": "",
"node": entries
}
]
def _get_directory_by_path(path):
if path == "/":
return _menu[0]
tokens = path.split("/")[1:]
directory = _menu[0]
while len(tokens) > 0:
path = tokens.pop(0)
for node in directory["node"]:
if node["path"] == path:
directory = node
break
return directory
def browse(path, url_params):
try:
_build_dir_structure(path, url_params)
directory = _get_directory_by_path(path)
for entry in directory["node"]:
_add_list_item(entry, path)
xbmcplugin.endOfDirectory(addon_handle, cacheToDisc=False)
except ScanException:
xbmc.executebuiltin("Notification(%s, %s, %s/icon.png)"
% ("Execution failed!",
"Try again!", addon_dir))
def _get_tmp_files():
return _get_files(_TMP_FOLDER, "^" + _IMG_FILE)
def _get_pdf_files():
return _get_files(settings.getSetting("output_folder"), "^.+\.pdf$")
def _get_preview_files():
return _get_files(_TMP_FOLDER, "^" + _PDF_PREVIEW_FILE)
def _get_files(dir, pattern):
p = re.compile(pattern, re.IGNORECASE)
files = os.listdir(dir)
result = []
for s in files:
m = p.match(s)
if m:
result += [s]
result.sort()
return result
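# Derived example: _get_files(_TMP_FOLDER, "^" + _IMG_FILE) returns the
# pending page scans (e.g. 'kodi-sane-scanner-img.1610000000.png'), sorted
# so the pages come back in the order they were scanned.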
def _scan():
call = ["scanimage",
"--format=%s" % _get_format(),
"--brightness", settings.getSetting("scanner_brightness"),
"--contrast", settings.getSetting("scanner_contrast")
]
_scanner = _get_scanner()
if _scanner != None and len(_scanner) == 2:
call += ["--device-name=%s" % _scanner[1]]
call += _SCANNER_DIMENSIONS[
int(settings.getSetting("scanner_dimension"))]
call += _SCANNER_MODES[
int(settings.getSetting("scanner_mode"))]
call += _SCANNER_RESOLUTIONS[
int(settings.getSetting("scanner_resolution"))]
tmp_file = open("%s%s.%i.%s" % (_TMP_FOLDER, _IMG_FILE,
time.time(),
_get_format()),
"w")
p = subprocess.Popen(call, stdout=tmp_file)
p.wait()
tmp_file.close()
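# The assembled command resembles (settings-dependent, values illustrative):
#   scanimage --format=png --brightness 0 --contrast 0 \
#       --device-name=<device> -l 0 -t 0 -x 210mm -y 297mm \
#       --mode Color --resolution 300 > /tmp/kodi-sane-scanner-img.<epoch>.png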
def _pdf():
tmp_files = _get_tmp_files()
full_path = []
for f in tmp_files:
full_path += ["%s%s" % (_TMP_FOLDER, f)]
pdf_file = "%s.scan.pdf" % datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
call = ["convert"]
call += full_path
call += ["%s%s" % (_TMP_FOLDER, pdf_file)]
p = subprocess.Popen(call, stdout=subprocess.PIPE)
p.wait()
p.stdout.close()
return pdf_file
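# ImageMagick's convert merges the page images into a single PDF, roughly:
#   convert /tmp/kodi-sane-scanner-img.<t1>.png /tmp/kodi-sane-scanner-img.<t2>.png \
#       /tmp/<timestamp>.scan.pdf
# (illustrative; the real call passes each page path explicitly).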
def _convert_for_preview(input_file):
call = ["convert",
"-density", _ARCHIVE_RESOLUTIONS[
int(settings.getSetting("archive_resolution"))],
"-quality", "90",
"-background", "white", "-alpha", "background", "-alpha", "off",
"%s%s" % (settings.getSetting("output_folder"), input_file),
"%s%s.%s%s" % (_TMP_FOLDER, _PDF_PREVIEW_FILE, input_file, ".png")
]
p = subprocess.Popen(call, stdout=subprocess.PIPE)
p.wait()
p.stdout.close()
def _ocr(pdf_file):
pdf_file = "%s%s" % (_TMP_FOLDER, pdf_file)
ocr_file = "%s.ocr" % pdf_file
call = [os.path.join(addon_dir, "resources", "lib", "ocrmypdf_wrapper"),
pdf_file,
ocr_file]
p = subprocess.Popen(call, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
p.stdout.close()
os.remove(pdf_file)
shutil.move(ocr_file, pdf_file)
def _rename_pdf(path):
splitted_path = path.split("/")
filename = splitted_path[-1]
renamed_file = xbmcgui.Dialog().input("Rename PDF file",
filename,
xbmcgui.INPUT_ALPHANUM)
if renamed_file != "":
archive = settings.getSetting("output_folder")
shutil.move("%s%s" % (archive, filename),
"%s%s" % (archive, renamed_file))
def _delete_pdf(path):
splitted_path = path.split("/")
file_to_delete = splitted_path[-1]
ret = xbmcgui.Dialog().yesno(_PLUGIN_NAME, "Do you want to delete %s?"
% file_to_delete)
if ret:
os.remove("%s%s" % (settings.getSetting("output_folder"),
file_to_delete))
def _lampoff():
call = ["scanimage",
"-n", "--lamp-switch=no"]
_scanner = _get_scanner()
if _scanner != None and len(_scanner) == 2:
call += ["--device-name=%s" % _scanner[1]]
p = subprocess.Popen(call, stdout=subprocess.PIPE)
p.wait()
p.stdout.close()
def _email(pdf_file):
call = ["mail",
"-A", "%s%s" % (settings.getSetting("output_folder"),
pdf_file),
"-s", "%s: %s" % (_PLUGIN_NAME, pdf_file),
settings.getSetting("output_emailaddress")
]
p = subprocess.Popen(call, stdout=subprocess.PIPE)
p.wait()
p.stdout.close()
def _print(pdf_file):
call = ["lp",
"-t", "%s: %s" % (_PLUGIN_NAME, pdf_file),
]
if _get_printer() != "":
call += ["-d", _get_printer()]
call += ["%s%s" % (settings.getSetting("output_folder"), pdf_file)]
p = subprocess.Popen(call, stdout=subprocess.PIPE)
p.wait()
p.stdout.close()
def _undo():
tmp_files = _get_tmp_files()
os.remove("%s%s" % (_TMP_FOLDER, tmp_files[-1]))
def _clean():
tmp_files = _get_tmp_files()
for f in tmp_files:
os.remove("%s%s" % (_TMP_FOLDER, f))
def _clean_preview():
for f in _get_preview_files():
os.remove("%s%s" % (_TMP_FOLDER, f))
def _preview(path):
pass
@params(api='gl', prms=['xfb', 'pname', 'index', 'param'])
def glGetTransformFeedbacki64_v(xfb, pname, index, param):
pass
@params(api='gl', prms=['mode', 'count', 'type', 'constindices', 'drawcount'])
def glMultiDrawElements(mode, count, type, constindices, drawcount):
pass
@params(api='gl', prms=['n', 'bufs'])
def glDrawBuffers(n, bufs):
pass
@params(api='gl', prms=['framebuffer', 'src'])
def glNamedFramebufferReadBuffer(framebuffer, src):
pass
@params(api='gl', prms=['coord', 'pname', 'params'])
def glGetTexGenfv(coord, pname, params):
pass
@params(api='gl', prms=['target', 'id'])
def glBindTransformFeedback(target, id):
pass
@params(api='gl', prms=['target', 'v'])
def glMultiTexCoord2iv(target, v):
pass
@params(api='gl', prms=['red', 'green', 'blue'])
def glSecondaryColor3f(red, green, blue):
pass
@params(api='gl', prms=['v'])
def glRasterPos3iv(v):
pass
@params(api='gl', prms=['type', 'value'])
def glVertexP2ui(type, value):
pass
@params(api='gl', prms=['target', 'format', 'type', 'bufSize', 'image'])
def glGetnConvolutionFilter(target, format, type, bufSize, image):
pass
@params(api='gl', prms=['red', 'green', 'blue'])
def glSecondaryColor3b(red, green, blue):
pass
@params(api='gl', prms=['v'])
def glTexCoord4sv(v):
pass
@params(api='gl', prms=['location', 'count', 'value'])
def glUniform2uiv(location, count, value):
pass
@params(api='gl', prms=[])
def glFinish():
pass
@params(api='gl', prms=['x', 'y'])
def glRasterPos2s(x, y):
pass
@params(api='gl', prms=['location', 'count', 'value'])
def glUniform1uiv(location, count, value):
pass
@params(api='gl', prms=['location', 'count', 'transpose', 'value'])
def glUniformMatrix2dv(location, count, transpose, value):
pass
@params(api='gl', prms=['c'])
def glIndexdv(c):
pass
@params(api='gl', prms=['v'])
def glTexCoord3iv(v):
pass
@params(api='gl', prms=['depth'])
def glClearDepth(depth):
pass
@params(api='gl', prms=['location', 'count', 'transpose', 'value'])
def glUniformMatrix4dv(location, count, transpose, value):
pass
@params(api='gl', prms=['program', 'location', 'count', 'transpose', 'value'])
def glProgramUniformMatrix4x3dv(program, location, count, transpose, value):
pass
@params(api='gl', prms=['v'])
def glVertex4dv(v):
pass
@params(api='gl', prms=['target', 'n', 'textures'])
def glCreateTextures(target, n, textures):
pass
@params(api='gl', prms=['n', 'buffers'])
def glCreateBuffers(n, buffers):
pass
@params(api='gl', prms=['m'])
def glMultTransposeMatrixf(m):
pass
@params(api='gl', prms=['flag'])
def glEdgeFlagv(flag):
pass
@params(api='gl', prms=['location', 'count', 'transpose', 'value'])
def glUniformMatrix4x3dv(location, count, transpose, value):
pass
@params(api='gl', prms=['n', 'ids'])
def glDeleteQueries(n, ids):
pass
@params(api='gl', prms=['type', 'coords'])
def glNormalP3uiv(type, coords):
pass
@params(api='gl', prms=['x', 'y'])
def glRasterPos2d(x, y):
pass
@params(api='gl', prms=[])
def glInitNames():
pass
@params(api='gl', prms=['v'])
def glColor3dv(v):
pass
@params(api='gl', prms=['target', 'reset', 'format', 'type', 'bufSize', 'values'])
def glGetnMinmax(target, reset, format, type, bufSize, values):
pass
@params(api='gl', prms=['framebuffer', 'buffer', 'drawbuffer', 'value'])
def glClearNamedFramebufferuiv(framebuffer, buffer, drawbuffer, value):
pass
@params(api='gl', prms=['index', 'pname', 'params'])
def glGetVertexAttribfv(index, pname, params):
pass
@params(api='gl', prms=['num_groups_x', 'num_groups_y', 'num_groups_z'])
def glDispatchCompute(num_groups_x, num_groups_y, num_groups_z):
pass
@params(api='gl', prms=['program', 'index', 'bufSize', 'length', 'size', 'type', 'name'])
def glGetActiveAttrib(program, index, bufSize, length, size, type, name):
pass
@params(api='gl', prms=['location', 'v0', 'v1', 'v2'])
def glUniform3i(location, v0, v1, v2):
pass
@params(api='gl', prms=['target', 'level', 'xoffset', 'yoffset', 'width', 'height', 'format', 'type', 'pixels'])
def glTexSubImage2D(target, level, xoffset, yoffset, width, height, format, type, pixels):
pass
@params(api='gl', prms=['opcode'])
def glLogicOp(opcode):
pass
@params(api='gl', prms=['program', 'location', 'count', 'transpose', 'value'])
def glProgramUniformMatrix3x4fv(program, location, count, transpose, value):
pass
@params(api='gl', prms=['pname', 'param'])
def glPixelTransferf(pname, param):
pass
@params(api='gl', prms=['texture', 'pname', 'params'])
def glGetTextureParameterIuiv(texture, pname, params):
pass
@params(api='gl', prms=['program', 'location', 'count', 'transpose', 'value'])
def glProgramUniformMatrix4dv(program, location, count, transpose, value):
pass
@params(api='gl', prms=['location', 'count', 'transpose', 'value'])
def glUniformMatrix3x4dv(location, count, transpose, value):
pass
@params(api='gl', prms=['mode', 'id', 'stream'])
def glDrawTransformFeedbackStream(mode, id, stream):
pass
@params(api='gl', prms=['location', 'v0', 'v1', 'v2'])
def glUniform3ui(location, v0, v1, v2):
pass
@params(api='gl', prms=['mode'])
def glProvokingVertex(mode):
pass
@params(api='gl', prms=['count', 'shaders', 'binaryformat', 'binary', 'length'])
def glShaderBinary(count, shaders, binaryformat, binary, length):
pass
@params(api='gl', prms=['coord', 'pname', 'params'])
def glTexGeniv(coord, pname, params):
pass
@params(api='gl', prms=['mode', 'count', 'type', 'indices'])
def glDrawElements(mode, count, type, indices):
pass
@params(api='gl', prms=['program', 'location', 'count', 'value'])
def glProgramUniform4iv(program, location, count, value):
pass
@params(api='gl', prms=['texture'])
def glClientActiveTexture(texture):
pass
@params(api='gl', prms=['location', 'count', 'value'])
def glUniform1iv(location, count, value):
pass
@params(api='gl', prms=['mode', 'first', 'count', 'instancecount'])
def glDrawArraysInstanced(mode, first, count, instancecount):
pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttrib4uiv(index, v):
pass
@params(api='gl', prms=['target', 'index'])
def glEndQueryIndexed(target, index):
pass
@params(api='gl', prms=['program', 'location', 'count', 'value'])
def glProgramUniform1iv(program, location, count, value):
pass
@params(api='gl', prms=['target', 'renderbuffer'])
def glBindRenderbuffer(target, renderbuffer):
pass
@params(api='gl', prms=['face', 'pname', 'params'])
def glMaterialiv(face, pname, params):
pass
@params(api='gl', prms=['program'])
def glIsProgram(program):
pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttrib4fv(index, v):
pass
@params(api='gl', prms=['program', 'location', 'count', 'transpose', 'value'])
def glProgramUniformMatrix2x3dv(program, location, count, transpose, value):
pass
@params(api='gl', prms=['map', 'bufSize', 'values'])
def glGetnPixelMapfv(map, bufSize, values):
pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttrib2fv(index, v):
pass
@params(api='gl', prms=['array'])
def glDisableClientState(array):
pass
@params(api='gl', prms=['v'])
def glColor4uiv(v):
pass
@params(api='gl', prms=['program', 'location', 'v0', 'v1', 'v2'])
def glProgramUniform3i(program, location, v0, v1, v2):
pass
@params(api='gl', prms=['mode', 'i1', 'i2', 'j1', 'j2'])
def glEvalMesh2(mode, i1, i2, j1, j2):
pass
@params(api='gl', prms=['mode', 'i1', 'i2'])
def glEvalMesh1(mode, i1, i2):
pass
@params(api='gl', prms=['program', 'location', 'v0', 'v1', 'v2'])
def glProgramUniform3d(program, location, v0, v1, v2):
pass
@params(api='gl', prms=['u'])
def glEvalCoord2fv(u):
pass
@params(api='gl', prms=['m'])
def glLoadTransposeMatrixd(m):
pass
@params(api='gl', prms=['m'])
def glLoadTransposeMatrixf(m):
pass
@params(api='gl', prms=['index', 'x'])
def glVertexAttribI1ui(index, x):
pass
@params(api='gl', prms=['bufSize', 'pattern'])
def glGetnPolygonStipple(bufSize, pattern):
pass
@params(api='gl', prms=['texture', 'level', 'xoffset', 'yoffset', 'zoffset', 'width', 'height', 'depth'])
def glInvalidateTexSubImage(texture, level, xoffset, yoffset, zoffset, width, height, depth):
pass
@params(api='gl', prms=['pname', 'data'])
def glGetInteger64v(pname, data):
pass
@params(api='gl', prms=['plane', 'equation'])
def glClipPlane(plane, equation):
pass
@params(api='gl', prms=['c'])
def glIndexub(c):
pass
@params(api='gl', prms=['framebuffer', 'attachment', 'renderbuffertarget', 'renderbuffer'])
def glNamedFramebufferRenderbuffer(framebuffer, attachment, renderbuffertarget, renderbuffer):
pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttrib4Niv(index, v):
pass
@params(api='gl', prms=['buffer', 'drawbuffer', 'value'])
def glClearBufferiv(buffer, drawbuffer, value):
pass
@params(api='gl', prms=['type', 'color'])
def glColorP4uiv(type, color):
pass
@params(api='gl', prms=['texture', 'level', 'pname', 'params'])
def glGetTextureLevelParameterfv(texture, level, pname, params):
pass
@params(api='gl', prms=['target', 'v'])
def glMultiTexCoord1fv(target, v):
pass
@params(api='gl', prms=['sampler', 'pname', 'params'])
def glGetSamplerParameterIuiv(sampler, pname, params):
pass
@params(api='gl', prms=['type', 'coords'])
def glTexCoordP3ui(type, coords):
pass
@params(api='gl', prms=['location', 'v0', 'v1'])
def glUniform2f(location, v0, v1):
pass
@params(api='gl', prms=['texture', 'level', 'xoffset', 'width', 'format', 'type', 'pixels'])
def glTextureSubImage1D(texture, level, xoffset, width, format, type, pixels):
pass
@params(api='gl', prms=['x', 'y', 'z'])
def glWindowPos3s(x, y, z):
pass
@params(api='gl', prms=['d'])
def glClearDepthf(d):
pass
@params(api='gl', prms=['texture', 'internalformat', 'buffer', 'offset', 'size'])
def glTextureBufferRange(texture, internalformat, buffer, offset, size):
pass
@params(api='gl', prms=['x', 'y', 'z'])
def glWindowPos3i(x, y, z):
pass
@params(api='gl', prms=['x', 'y', 'z'])
def glWindowPos3d(x, y, z):
pass
@params(api='gl', prms=['texture', 'type', 'coords'])
def glMultiTexCoordP4ui(texture, type, coords):
pass
@params(api='gl', prms=['red', 'green', 'blue'])
def glColor3us(red, green, blue):
pass
@params(api='gl', prms=['light', 'pname', 'params'])
def glGetLightiv(light, pname, params):
pass
@params(api='gl', prms=['target', 's', 't', 'r', 'q'])
def glMultiTexCoord4f(target, s, t, r, q):
pass
@params(api='gl', prms=['red', 'green', 'blue'])
def glColor3ub(red, green, blue):
pass
@params(api='gl', prms=['target', 's', 't', 'r', 'q'])
def glMultiTexCoord4d(target, s, t, r, q):
pass
@params(api='gl', prms=['red', 'green', 'blue'])
def glColor3ui(red, green, blue):
pass
@params(api='gl', prms=['target', 's', 't', 'r', 'q'])
def glMultiTexCoord4i(target, s, t, r, q):
pass
@params(api='gl', prms=['mask'])
def glGetPolygonStipple(mask):
pass
@params(api='gl', prms=['location', 'x', 'y'])
def glUniform2d(location, x, y):
pass
@params(api='gl', prms=['index', 'x', 'y', 'z', 'w'])
def glVertexAttribI4ui(index, x, y, z, w):
pass
@params(api='gl', prms=['red', 'green', 'blue', 'alpha'])
def glColorMask(red, green, blue, alpha):
pass
@params(api='gl', prms=['target', 'level', 'format', 'type', 'bufSize', 'pixels'])
def glGetnTexImage(target, level, format, type, bufSize, pixels):
pass
@params(api='gl', prms=['mode'])
def glBlendEquation(mode):
pass
@params(api='gl', prms=['target', 'v'])
def glMultiTexCoord3dv(target, v):
pass
@params(api='gl', prms=['v'])
def glColor4sv(v):
pass
@params(api='gl', prms=['program', 'programInterface', 'index', 'propCount', 'props', 'bufSize', 'length', 'params'])
def glGetProgramResourceiv(program, programInterface, index, propCount, props, bufSize, length, params):
pass
@params(api='gl', prms=['target', 'internalformat', 'format', 'type', 'data'])
def glClearBufferData(target, internalformat, format, type, data):
pass
@params(api='gl', prms=['primitiveMode'])
def glBeginTransformFeedback(primitiveMode):
pass
@params(api='gl', prms=['v'])
def glColor3iv(v):
pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttrib3sv(index, v):
pass
@params(api='gl', prms=['target', 'level', 'internalformat', 'width', 'border', 'imageSize', 'data'])
def glCompressedTexImage1D(target, level, internalformat, width, border, imageSize, data):
pass
@params(api='gl', prms=['n', 'ids'])
def glDeleteTransformFeedbacks(n, ids):
pass
@params(api='gl', prms=['mode', 'start', 'end', 'count', 'type', 'indices', 'basevertex'])
def glDrawRangeElementsBaseVertex(mode, start, end, count, type, indices, basevertex):
pass
@params(api='gl', prms=['program', 'index', 'name'])
def glBindAttribLocation(program, index, name):
pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttrib1dv(index, v):
pass
@params(api='gl', prms=['buf', 'srcRGB', 'dstRGB', 'srcAlpha', 'dstAlpha'])
def glBlendFuncSeparatei(buf, srcRGB, dstRGB, srcAlpha, dstAlpha):
pass
@params(api='gl', prms=['location', 'v0', 'v1'])
def glUniform2ui(location, v0, v1):
pass
@params(api='gl', prms=['pname', 'param'])
def glPixelTransferi(pname, param):
pass
@params(api='gl', prms=['v'])
def glWindowPos2fv(v):
pass
@params(api='gl', prms=['target', 'index'])
def glDisablei(target, index):
pass
@params(api='gl', prms=['sync', 'pname', 'bufSize', 'length', 'values'])
def glGetSynciv(sync, pname, bufSize, length, values):
pass
@params(api='gl', prms=['program', 'location', 'v0', 'v1'])
def glProgramUniform2i(program, location, v0, v1):
pass
@params(api='gl', prms=['program', 'bufSize', 'length', 'binaryFormat', 'binary'])
def glGetProgramBinary(program, bufSize, length, binaryFormat, binary):
pass
@params(api='gl', prms=['i'])
def glEvalPoint1(i):
pass
@params(api='gl', prms=['i', 'j'])
def glEvalPoint2(i, j):
pass
@params(api='gl', prms=[])
def glPauseTransformFeedback():
pass
@params(api='gl', prms=['n', 'ids'])
def glCreateTransformFeedbacks(n, ids):
pass
@params(api='gl', prms=['target', 'level', 'xoffset', 'width', 'format', 'type', 'pixels'])
def glTexSubImage1D(target, level, xoffset, width, format, type, pixels):
pass
@params(api='gl', prms=['index', 'type', 'normalized', 'value'])
def glVertexAttribP3uiv(index, type, normalized, value):
pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttribI4iv(index, v):
pass
@params(api='gl', prms=['vaobj', 'pname', 'param'])
def glGetVertexArrayiv(vaobj, pname, param):
pass
@params(api='gl', prms=['name'])
def glLoadName(name):
pass
@params(api='gl', prms=['m'])
def glLoadMatrixf(m):
pass
@params(api='gl', prms=['m'])
def glLoadMatrixd(m):
pass
@params(api='gl', prms=['target', 'pname', 'params'])
def glTexParameterfv(target, pname, params):
pass
@params(api='gl', prms=['location', 'count', 'value'])
def glUniform3dv(location, count, value):
pass
@params(api='gl', prms=['face', 'func', 'ref', 'mask'])
def glStencilFuncSeparate(face, func, ref, mask):
pass
@params(api='gl', prms=['program', 'location', 'count', 'value'])
def glProgramUniform3fv(program, location, count, value):
pass
@params(api='gl', prms=['first', 'count', 'samplers'])
def glBindSamplers(first, count, samplers):
pass
@params(api='gl', prms=['id', 'pname', 'params'])
def glGetQueryObjectui64v(id, pname, params):
pass
@params(api='gl', prms=['texture', 'level', 'format', 'type', 'bufSize', 'pixels'])
def glGetTextureImage(texture, level, format, type, bufSize, pixels):
pass
@params(api='gl', prms=['program', 'location', 'count', 'value'])
def glProgramUniform1fv(program, location, count, value):
pass
@params(api='gl', prms=['location', 'count', 'transpose', 'value'])
def glUniformMatrix4fv(location, count, transpose, value):
pass
@params(api='gl', prms=['n', 'pipelines'])
def glDeleteProgramPipelines(n, pipelines):
pass
@params(api='gl', prms=['v'])
def glVertex3fv(v):
pass
@params(api='gl', prms=['x', 'y'])
def glWindowPos2s(x, y):
pass
@params(api='gl', prms=['x', 'y'])
def glWindowPos2i(x, y):
pass
@params(api='gl', prms=['x', 'y'])
def glWindowPos2f(x, y):
pass
@params(api='gl', prms=['x', 'y'])
def glWindowPos2d(x, y):
pass
@params(api='gl', prms=['shadertype', 'count', 'indices'])
def glUniformSubroutinesuiv(shadertype, count, indices):
pass
@params(api='gl', prms=['v1', 'v2'])
def glRectdv(v1, v2):
pass
@params(api='gl', prms=['type', 'color'])
def glColorP3uiv(type, color):
pass
@params(api='gl', prms=['coord'])
def glFogCoordfv(coord):
pass
@params(api='gl', prms=['shader'])
def glCompileShader(shader):
pass
@params(api='gl', prms=['c'])
def glIndexfv(c):
pass
@params(api='gl', prms=['texture', 'type', 'coords'])
def glMultiTexCoordP3ui(texture, type, coords):
pass
@params(api='gl', prms=['v'])
def glNormal3sv(v):
pass
@params(api='gl', prms=['target', 'numAttachments', 'attachments'])
def glInvalidateFramebuffer(target, numAttachments, attachments):
pass
@params(api='gl', prms=['target', 'level', 'internalformat', 'width', 'height', 'border', 'imageSize', 'data'])
def glCompressedTexImage2D(target, level, internalformat, width, height, border, imageSize, data):
pass
@params(api='gl', prms=['index', 'x'])
def glVertexAttrib1f(index, x):
pass
@params(api='gl', prms=['v'])
def glVertex4fv(v):
pass
@params(api='gl', prms=['framebuffer', 'buffer', 'drawbuffer', 'depth', 'stencil'])
def glClearNamedFramebufferfi(framebuffer, buffer, drawbuffer, depth, stencil):
pass
@params(api='gl', prms=['id', 'buffer', 'pname', 'offset'])
def glGetQueryBufferObjectuiv(id, buffer, pname, offset):
pass
@params(api='gl', prms=['framebuffer', 'buffer', 'drawbuffer', 'value'])
def glClearNamedFramebufferfv(framebuffer, buffer, drawbuffer, value):
pass
@params(api='gl', prms=['index', 'x'])
def glVertexAttrib1s(index, x):
pass
@params(api='gl', prms=['target', 'v'])
def glMultiTexCoord1sv(target, v):
pass
@params(api='gl', prms=['program'])
def glDeleteProgram(program):
pass
@params(api='gl', prms=['v'])
def glColor4bv(v):
pass
@params(api='gl', prms=['x', 'y'])
def glRasterPos2f(x, y):
pass
@params(api='gl', prms=[])
def glLoadIdentity():
pass
@params(api='gl', prms=['v'])
def glRasterPos4iv(v):
pass
@params(api='gl', prms=['location', 'count', 'transpose', 'value'])
def glUniformMatrix4x3fv(location, count, transpose, value):
pass
@params(api='gl', prms=['buffer', 'drawbuffer', 'value'])
def glClearBufferfv(buffer, drawbuffer, value):
pass
@params(api='gl', prms=[])
def glTextureBarrier():
pass
@params(api='gl', prms=['buffer', 'drawbuffer', 'depth', 'stencil'])
def glClearBufferfi(buffer, drawbuffer, depth, stencil):
pass
@params(api='gl', prms=['mode', 'indirect'])
def glDrawArraysIndirect(mode, indirect):
pass
@params(api='gl', prms=['n', 'arrays'])
def glGenVertexArrays(n, arrays):
pass
@params(api='gl', prms=['vaobj', 'index'])
def glEnableVertexArrayAttrib(vaobj, index):
pass
@params(api='gl', prms=['program', 'location', 'count', 'transpose', 'value'])
def glProgramUniformMatrix3x2dv(program, location, count, transpose, value):
pass
@params(api='gl', prms=['bindingindex', 'divisor'])
def glVertexBindingDivisor(bindingindex, divisor):
pass
@params(api='gl', prms=['sampler', 'pname', 'params'])
def glGetSamplerParameterIiv(sampler, pname, params):
pass
@params(api='gl', prms=['location', 'count', 'transpose', 'value'])
def glUniformMatrix4x2fv(location, count, transpose, value):
pass
@params(api='gl', prms=['index', 'x', 'y', 'z'])
def glVertexAttrib3f(index, x, y, z):
pass
@params(api='gl', prms=['id', 'buffer', 'pname', 'offset'])
def glGetQueryBufferObjecti64v(id, buffer, pname, offset):
pass
@params(api='gl', prms=['index', 'pname', 'params'])
def glGetVertexAttribdv(index, pname, params):
pass
@params(api='gl', prms=['location', 'v0'])
def glUniform1ui(location, v0):
pass
@params(api='gl', prms=['readFramebuffer', 'drawFramebuffer', 'srcX0', 'srcY0', 'srcX1', 'srcY1', 'dstX0', 'dstY0', 'dstX1', 'dstY1', 'mask', 'filter'])
def glBlitNamedFramebuffer(readFramebuffer, drawFramebuffer, srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter):
pass
@params(api='gl', prms=['index', 'x', 'y', 'z'])
def glVertexAttrib3d(index, x, y, z):
pass
@params(api='gl', prms=['barriers'])
def glMemoryBarrier(barriers):
pass
@params(api='gl', prms=['program', 'name'])
def glGetFragDataLocation(program, name):
pass
@params(api='gl', prms=['face', 'pname', 'params'])
def glGetMaterialfv(face, pname, params):
pass
@params(api='gl', prms=['map', 'mapsize', 'values'])
def glPixelMapuiv(map, mapsize, values):
pass
@params(api='gl', prms=['texture', 'level', 'xoffset', 'yoffset', 'zoffset', 'width', 'height', 'depth', 'format', 'type', 'data'])
def glClearTexSubImage(texture, level, xoffset, yoffset, zoffset, width, height, depth, format, type, data):
pass
@params(api='gl', prms=['texture', 'pname', 'params'])
def glGetTextureParameterIiv(texture, pname, params):
pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttribI4ubv(index, v):
pass
@params(api='gl', prms=['program', 'location', 'count', 'transpose', 'value'])
def glProgramUniformMatrix4x2dv(program, location, count, transpose, value):
pass
@params(api='gl', prms=['shader'])
def glIsShader(shader):
pass
@params(api='gl', prms=['cap'])
def glEnable(cap):
pass
@params(api='gl', prms=['program', 'uniformCount', 'uniformIndices', 'pname', 'params'])
def glGetActiveUniformsiv(program, uniformCount, uniformIndices, pname, params):
pass
@params(api='gl', prms=['buf', 'mode'])
def glBlendEquationi(buf, mode):
pass
@params(api='gl', prms=['program', 'name'])
def glGetAttribLocation(program, name):
pass
@params(api='gl', prms=['index', 'v'])
def glVertexAttrib4dv(index, v):
pass
@params(api='gl', prms=['texture', 'pname', 'params'])
def glGetTextureParameteriv(texture, pname, params):
pass
@params(api='gl', prms=['program', 'location', 'v0', 'v1', 'v2'])
def glProgramUniform3ui(program, location, v0, v1, v2):
pass
@params(api='gl', prms=['program', 'location', 'count', 'transpose', 'value'])
def glProgramUniformMatrix2x3fv(program, location, count, transpose, value):
pass
@params(api='gl', prms=[])
def glPushMatrix():
pass
@params(api='gl', prms=['program', 'location', 'v0'])
def glProgramUniform1i(program, location, v0):
pass
@params(api='gl', prms=['program', 'location',
nothing.',
'Most people think neither of Death nor nothingness.',
'Until they stand on the edge of life and see the Darkness.',
'I see. We must make an idol of our fear, and call it God.',
'You are uneasy.',
'Death visited me this morning. We are playing chess. This respite enables me to perform a vital errand.',
'My whole life has been a meaningless search. I say it without bitterness or self-reproach. I know it is the same for all. But I want to use my respite for one significant action.',
'So you play chess with Death?',
'He is a skillful tactician, but I have not yet lost one piece.',
'How can you outwit Death?',
'By a combination of bishop and knight. I will break his flank.',
'I shall remember that.',
'I will not lie below',
'Will you let me?',
'The Dead do not suffer the living to pass.',
'You will suffer me',
'The way is shut. It was made by those who are dead, and the dead keep it.',
'I summon you to fulfill your oath!',
'The secret is in a kiss. Here are no kisses. Here great Artemis Rules; only in the woodland may a man Hide his eyes from her, pledge himself to Pan. Come! through the tangled arches Of cypresses and larches, Stoop; under Artemis we walked upright; But this is Pans home, and the House of Night.',
'There is some gloom or doom, A bitter harsh ingredient In these my sorceries Of animal scent.',
'Yes! there is fear mixed with the fascination. It is the reverence that chastity, be sure! Gains from the impure.',
'O virtuous nation! It is the fear of the uninitiate Before the throne of Fate The hierophant.',
"- And no more? - What else? Forgiveness? Go to your Roman church for that, you'll find none here.",
"Have you imagined for one moment what this has been for me? An unforgivable transgression that has marked me for life.",
"You think you've suffered.",
"You think you know blood.",
"You think you've walked on corpses.",
"Spread them from here to the horizon and I have walked further! You weak, foul, lustful, vainglorious man.",
"How dare you presume to speak to me of death? Then we shall speak of it together.",
"Every man and every woman is a star.",
"These are fools that men adore; both their Gods & their men are fools.",
"I am above you and in you. My ecstasy is in yours. My joy is to see your joy.",
"Then saith the prophet and slave of the beauteous one: Who am I, and what shall be the sign? So she answered him, bending down, a lambent flame of blue, all-touching, all penetrant, her lovely hands upon the black earth, & her lithe body arched for love, and her soft feet not hurting the little flowers: Thou knowest! And the sign shall be my ecstasy, the consciousness of the continuity of existence, the omnipresence of my body.",
"I am divided for love's sake, for the chance of union.",
"Do what thou wilt shall be the whole of the Law.",
"I:40 This famous statement derives from several historic precedents, including that of François Rabelais in describing the rule of his Abbey of Thélème in Gargantua and Pantagruel: Fait ce que vouldras (Do what thou wilt), which was later used by the Hellfire Club established by Sir Francis Dashwood. It is also similar to the Wiccan proverb: An ye harm none, do what thou wilt; but the oldest known statement of a similar assertion is that of St. Augustine of Hippo: Love, and do what thou wilt.",
"Love is the law, love under will.",
"Sing the rapturous love-song unto me! Burn to me perfumes! Wear to me jewels! Drink to me, for I love you! I love you!",
"I am the blue-lidded daughter of Sunset; I am the naked brilliance of the voluptuous night-sky.",
"To me! To me!",
"The Manifestation of Nuit is at an end.",
"I am alone. There is no God where I am.",
"Now a curse upon Because and his kin!",
"May Because be accursed for ever!",
"...Wisdom says: be strong! Then canst thou bear more joy. Be not animal; refine thy rapture! If thou drink, drink by the eight and ninety rules of art: if thou love, exceed by delicacy; and if thou do aught joyous, let there be subtlety therein!",
"But exceed! exceed!",
"There is no law beyond Do what thou wilt.",
"There is a splendour in my name hidden and glorious, as the sun of midnight is ever the son.",
"The ending of the words is the Word Abrahadabra.",
"The Book of the Law is Written and Concealed.",
"Do what thou wilt shall be the whole of the Law. ",
"The study of this Book is forbidden. It is wise to destroy this copy after the first reading.",
"Whosoever disregards this does so at his own risk and peril. These are most dire.",
"The Comment, This section restates several phrases of the work as a whole, in a summary way.",
"There is no law beyond Do what thou wilt. ",
"Love is the law, love under will.",
"Nature always avenges herself on those who insult her",
"Nosferatu, the undead. He drinks the blood of his victims, and turns them into phantoms of the night.",
"Like a shadow, he has no reflection. He goes through walls and closed doors... as if they never existed.",
"Like a bat, he wafts into dark bedrooms. Masquerading as a black wolf, he hunts his fleeing victims.",
"Abandon all hope, you who cross his path.",
"He's bringing with him thousands and thousands of rats."
"From the seed of Belial is the vampire born.",
"He who feeds on the blood of mankind... who, unredeemed, taketh his refuge in caves, tombs... coffins filled with the unblessed soil of cemeteries... wherein the black death has reaped its harvest...the plague.",
"Blood is life.",
"I love the darkness and the shadows. Where I can be alone with my thoughts.",
"I am the descendent of an old family. Time is an abyss, profound as a thousand nights.",
"Centuries come and go... to be unable to grow old is terrible.",
"That is not the worst. There are things more horrible than that.",
"Can you imagine enduring centuries...experiencing the same futility each day?",
"And whoever enters into that land of phantoms is lost... and never returns.",
"Of vampires and bloodsuckers.",
"Of corpses which devour their own flesh.",
"Of incubuses and succubuses.",
"Of the living dead who follow strangers",
"in the night...and attack them.",
"Hmm...",
"beyond death... a curse that will last till the end of time.",
]
def randomResponse():
    randomChoice = random.choice(list)  # 'list' here is the quote list defined above
    return randomChoice
bot = ChatBot(
'Goody',
filters=["chatterbot.filters.RepetitiveResponseFilter"],
logic_adapters=[
'chatterbot.logic.BestMatch'
],
storage_adapter='chatterbot.storage.MongoDatabaseAdapter',
database='GoodyCluster',
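    # Note: the URI below embeds credentials inline; reading them from an
    # environment variable would be safer than hardcoding them here.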
database_uri='mongodb://goody_user:WitchBitch808@goodycluster-shard-00-00-5ftk2.mongodb.net:27017,goodycluster-shard-00-01-5ftk2.mongodb.net:27017,goodycluster-shard-00-02-5ftk2.mongodb.net:27017/test?ssl=true&replicaSet=GoodyCluster-shard-0&authSource=admin&retryWrites=true&w=majority',
)
from chatterbot.trainers import ListTrainer, ChatterBotCorpusTrainer
# First, let's train our bot with some data
trainer = ChatterBotCorpusTrainer(bot)
# trainer.train(
# #"chatterbot.corpus.english.greetings",
# "chatterbot.corpus.english.conversations"
# )
trainer = ListTrainer(bot)
trainer.train([
'When shall we three meet again? In thunder, lightning, or in rain? ',
'When the hurly-burly’s done, When the battle’s lost and won. ',
'That will be ere the set of sun. ',
'Where the place? ',
'Upon the heath. ',
'How are you?',
'By the pricking of my thumbs, Something wicked this way comes.',
'How?',
'Double, double, toil and trouble fire burn and cauldron bubble.',
' - This is unnatural providence.',
' - I know not that.',
' Does this not look like witchcraft?',
'Who does this then? Who?',
' <NAME> is a merry, merry king - He rules the land with mirth...',
'Please you, Mercy.',
' <NAME> has a mighty, mighty sting. He will knock thee to the Earth. Sing bah, bah. <NAME>, the black- Sing bah, bah, bah, bah, bah',
' Mercy.',
' Sing bah, bah, King Phillip, the black -',
])
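# Once trained, the bot can be queried with the standard ChatterBot API:
# response = bot.get_response('When shall we three meet again?')
# print(response)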
# just 6 linear layers.
# if model_type == "CGBERT":
# optimizer_parameters.append(
# {'params': model.classifier.parameters(), 'lr': 1e-4},
# )
# elif model_type == "QACGBERT":
# new_lr = 2e-4
# optimizer_parameters.append(
# {'params': model.classifier.parameters(), 'lr': new_lr},
# )
# for layer_module in model.bert.encoder.layer:
# optimizer_parameters.extend([
# {'params': layer_module.attention.self.context_for_q.parameters(), 'lr': new_lr},
# {'params': layer_module.attention.self.context_for_k.parameters(), 'lr': new_lr},
# {'params': layer_module.attention.self.lambda_q_context_layer.parameters(), 'lr': new_lr},
# {'params': layer_module.attention.self.lambda_k_context_layer.parameters(), 'lr': new_lr},
# {'params': layer_module.attention.self.lambda_q_query_layer.parameters(), 'lr': new_lr},
# {'params': layer_module.attention.self.lambda_k_key_layer.parameters(), 'lr': new_lr},
# ])
# else:
# assert False
optimizer = BERTAdam(optimizer_parameters,
lr=learning_rate,
warmup=warmup_proportion,
t_total=num_train_steps)
return model, optimizer, tokenizer
def system_setups(args):
# system related setups
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device %s n_gpu %d distributed training %r", device, n_gpu, bool(args.local_rank != -1))
if args.accumulate_gradients < 1:
raise ValueError("Invalid accumulate_gradients parameter: {}, should be >= 1".format(
args.accumulate_gradients))
args.train_batch_size = int(args.train_batch_size / args.accumulate_gradients)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if args.bert_config_file is not None:
bert_config = BertConfig.from_json_file(args.bert_config_file)
if args.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length {} because the BERT model was only trained up to sequence length {}".format(
args.max_seq_length, bert_config.max_position_embeddings))
# not preloading
# if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
# raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
# os.makedirs(args.output_dir, exist_ok=True)
output_log_file = os.path.join(args.output_dir, "log.txt")
print("output_log_file=",output_log_file)
if args.task_name == "sentihood_NLI_M":
with open(output_log_file, "w") as writer:
writer.write("epoch\tglobal_step\tloss\tt_loss\tt_acc\tstrict_acc\tf1\tauc\ts_acc\ts_auc\n")
else:
with open(output_log_file, "w") as writer:
writer.write("epoch\tglobal_step\tloss\tt_loss\tt_acc\taspect_P\taspect_R\taspect_F\ts_acc_4\ts_acc_3\ts_acc_2\n")
return device, n_gpu, output_log_file
def data_and_model_loader(device, n_gpu, args, sampler="randomWeight"):
processor = processors[args.task_name]()
label_list = processor.get_labels()
# training setup
train_examples = None
num_train_steps = None
train_examples = processor.get_train_examples(args.data_dir)
num_train_steps = int(
len(train_examples) / args.train_batch_size * args.num_train_epochs)
# model and optimizer
model, optimizer, tokenizer = \
getModelOptimizerTokenizer(model_type=args.model_type,
vocab_file=args.vocab_file,
bert_config_file=args.bert_config_file,
init_checkpoint=args.init_checkpoint,
label_list=label_list,
do_lower_case=True,
num_train_steps=num_train_steps,
learning_rate=args.learning_rate,
base_learning_rate=args.base_learning_rate,
warmup_proportion=args.warmup_proportion)
# training set
train_features = convert_examples_to_features(
train_examples, label_list, args.max_seq_length,
tokenizer, args.max_context_length,
args.context_standalone, args)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_steps)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
all_seq_len = torch.tensor([[f.seq_len] for f in train_features], dtype=torch.long)
all_context_ids = torch.tensor([f.context_ids for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
all_label_ids, all_seq_len, all_context_ids)
if args.local_rank == -1:
if sampler == "random":
train_sampler = RandomSampler(train_data)
else:
# default: use a weighted sampler to balance classes
if args.task_name == "semeval_NLI_M":
sampler_weights = make_weights_for_balanced_classes(all_label_ids, 5)
else:
sampler_weights = make_weights_for_balanced_classes(all_label_ids, 2)
train_sampler = WeightedRandomSampler(sampler_weights, len(train_data), replacement=True)
else:
train_sampler = DistributedSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler,
batch_size=args.train_batch_size)
# test set
test_examples = processor.get_test_examples(args.data_dir)
test_features = convert_examples_to_features(
test_examples, label_list, args.max_seq_length,
tokenizer, args.max_context_length,
args.context_standalone, args)
all_input_ids = torch.tensor([f.input_ids for f in test_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in test_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in test_features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in test_features], dtype=torch.long)
all_seq_len = torch.tensor([[f.seq_len] for f in test_features], dtype=torch.long)
all_context_ids = torch.tensor([f.context_ids for f in test_features], dtype=torch.long)
test_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
all_label_ids, all_seq_len, all_context_ids)
test_dataloader = DataLoader(test_data, batch_size=args.eval_batch_size, shuffle=False)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
model.to(device)
return model, optimizer, train_dataloader, test_dataloader
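# A minimal sketch of the balanced-class weighting used above. The real
# make_weights_for_balanced_classes is defined elsewhere in this codebase;
# this assumed version weights each example by the inverse frequency of its
# class so WeightedRandomSampler draws all classes roughly uniformly.
def make_weights_for_balanced_classes_sketch(labels, num_classes):
    counts = [0] * num_classes
    for label in labels:
        counts[int(label)] += 1
    total = float(sum(counts))
    per_class = [total / c if c > 0 else 0.0 for c in counts]
    return [per_class[int(label)] for label in labels]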
def evaluate_fast(test_dataloader, model, device, n_gpu, args):
"""
    evaluate only, without recording anything
"""
model.eval()
test_loss, test_accuracy = 0, 0
nb_test_steps, nb_test_examples = 0, 0
pbar = tqdm(test_dataloader, desc="Iteration")
y_true, y_pred, score = [], [], []
# we don't need gradient in this case.
with torch.no_grad():
for _, batch in enumerate(pbar):
if torch.cuda.is_available():
torch.cuda.empty_cache()
            # truncate to save space and computing resources
input_ids, input_mask, segment_ids, label_ids, seq_lens, \
context_ids = batch
max_seq_lens = max(seq_lens)[0]
input_ids = input_ids[:,:max_seq_lens]
input_mask = input_mask[:,:max_seq_lens]
segment_ids = segment_ids[:,:max_seq_lens]
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
seq_lens = seq_lens.to(device)
context_ids = context_ids.to(device)
            # forward pass; gradients are disabled by the enclosing torch.no_grad()
tmp_test_loss, logits, _, _, _, _ = \
model(input_ids, segment_ids, input_mask, seq_lens,
device=device, labels=label_ids,
context_ids=context_ids)
logits = F.softmax(logits, dim=-1)
logits = logits.detach().cpu().numpy()
label_ids = label_ids.to('cpu').numpy()
outputs = np.argmax(logits, axis=1)
tmp_test_accuracy=np.sum(outputs == label_ids)
y_true.append(label_ids)
y_pred.append(outputs)
score.append(logits)
test_loss += tmp_test_loss.mean().item()
test_accuracy += tmp_test_accuracy
nb_test_examples += input_ids.size(0)
nb_test_steps += 1
test_loss = test_loss / nb_test_steps
test_accuracy = test_accuracy / nb_test_examples
# we follow previous works in calculating the metrics
y_true = np.concatenate(y_true, axis=0)
y_pred = np.concatenate(y_pred, axis=0)
score = np.concatenate(score, axis=0)
logger.info("***** Fast Evaluation results *****")
    result = {'test_loss': test_loss,
              'test_accuracy': test_accuracy}
# for ABSA tasks, we need more evaluations
if args.task_name == "sentihood_NLI_M":
aspect_strict_Acc = sentihood_strict_acc(y_true, y_pred)
aspect_Macro_F1 = sentihood_macro_F1(y_true, y_pred)
aspect_Macro_AUC, sentiment_Acc, sentiment_Macro_AUC = sentihood_AUC_Acc(y_true, score)
result = {'aspect_strict_Acc': aspect_strict_Acc,
'aspect_Macro_F1': aspect_Macro_F1,
'aspect_Macro_AUC': aspect_Macro_AUC,
'sentiment_Acc': sentiment_Acc,
'sentiment_Macro_AUC': sentiment_Macro_AUC}
else:
aspect_P, aspect_R, aspect_F = semeval_PRF(y_true, y_pred)
sentiment_Acc_4_classes = semeval_Acc(y_true, y_pred, score, 4)
sentiment_Acc_3_classes = semeval_Acc(y_true, y_pred, score, 3)
sentiment_Acc_2_classes = semeval_Acc(y_true, y_pred, score, 2)
result = {'aspect_P': aspect_P,
'aspect_R': aspect_R,
'aspect_F': aspect_F,
'sentiment_Acc_4_classes': sentiment_Acc_4_classes,
'sentiment_Acc_3_classes': sentiment_Acc_3_classes,
'sentiment_Acc_2_classes': sentiment_Acc_2_classes}
for key in result.keys():
logger.info(" %s = %s\n", key, str(result[key]))
    return -1  # the fast path does not track a best accuracy
def evaluate(test_dataloader, model, device, n_gpu, nb_tr_steps, tr_loss, epoch,
global_step, output_log_file, global_best_acc, args):
model.eval()
test_loss, test_accuracy = 0, 0
nb_test_steps, nb_test_examples = 0, 0
pbar = tqdm(test_dataloader, desc="Iteration")
y_true, y_pred, score = [], [], []
# we don't need gradient in this case.
with torch.no_grad():
for _, batch in enumerate(pbar):
if torch.cuda.is_available():
torch.cuda.empty_cache()
            # truncate to save space and computing resources
input_ids, input_mask, segment_ids, label_ids, seq_lens, \
context_ids = batch
max_seq_lens = max(seq_lens)[0]
input_ids = input_ids[:,:max_seq_lens]
input_mask = input_mask[:,:max_seq_lens]
segment_ids = segment_ids[:,:max_seq_lens]
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
seq_lens = seq_lens.to(device)
context_ids = context_ids.to(device)
            # forward pass; gradients are disabled by the enclosing torch.no_grad()
tmp_test_loss, logits, _, _, _, _ = \
model(input_ids, segment_ids, input_mask, seq_lens,
device=device, labels=label_ids,
context_ids=context_ids)
logits = F.softmax(logits, dim=-1)
logits = logits.detach().cpu().numpy()
label_ids = label_ids.to('cpu').numpy()
outputs = np.argmax(logits, axis=1)
tmp_test_accuracy=np.sum(outputs == label_ids)
y_true.append(label_ids)
y_pred.append(outputs)
score.append(logits)
test_loss += tmp_test_loss.mean().item()
test_accuracy += tmp_test_accuracy
nb_test_examples += input_ids.size(0)
nb_test_steps += 1
test_loss = test_loss / nb_test_steps
test_accuracy = test_accuracy / nb_test_examples
# we follow previous works in calculating the metrics
y_true = np.concatenate(y_true, axis=0)
y_pred = np.concatenate(y_pred, axis=0)
score = np.concatenate(score, axis=0)
logger.info("***** Evaluation results *****")
result = collections.OrderedDict()
# handling corner case for a checkpoint start
if nb_tr_steps == 0:
loss_tr = 0.0
else:
loss_tr = tr_loss/nb_tr_steps
# for ABSA tasks, we need more evaluations
if args.task_name == "sentihood_NLI_M":
aspect_strict_Acc = sentihood_strict_acc(y_true, y_pred)
aspect_Macro_F1 = sentihood_macro_F1(y_true, y_pred)
aspect_Macro_AUC, sentiment_Acc, sentiment_Macro_AUC = sentihood_AUC_Acc(y_true, score)
result = {'epoch': epoch,
'global_step': global_step,
'loss': loss_tr,
'test_loss': test_loss,
'test_accuracy': test_accuracy,
'aspect_strict_Acc': aspect_strict_Acc,
'aspect_Macro_F1': aspect_Macro_F1,
'aspect_Macro_AUC': aspect_Macro_AUC,
'sentiment_Acc': sentiment_Acc,
'sentiment_Macro_AUC': sentiment_Macro_AUC}
elif args.task_name in ["fiqa_headline", "fiqa_post", "fiqa_acd"]:
p, r, f = fiqa_PRF(y_true, y_pred)
result = {'epoch': epoch,
'global_step': global_step,
'loss': loss_tr,
'test_loss': test_loss,
'test_accuracy': test_accuracy,
'P': p,
'R': r,
'F1': f
}
else:
aspect_P, aspect_R, aspect_F = semeval_PRF(y_true, y_pred)
sentiment_Acc_4_classes = semeval_Acc(y_true, y_pred, score, 4)
sentiment_Acc_3_classes = semeval_Acc(y_true, y_pred, score, 3)
sentiment_Acc_2_classes = semeval_Acc(y_true, y_pred, score, 2)
result = {'epoch': epoch,
'global_step': global_step,
'loss': loss_tr,
'test_loss': test_loss,
'test_accuracy': test_accuracy,
'aspect_P': aspect_P,
'aspect_R': aspect_R,
'aspect_F': aspect_F,
'sentiment_Acc_4_classes': sentiment_Acc_4_classes,
'sentiment_Acc_3_classes': sentiment_Acc_3_classes,
'sentiment_Acc_2_classes': sentiment_Acc_2_classes}
with open(output_log_file, "a+") as writer:
for key in result.keys():
logger.info(" %s = %s\n", key, str(result[key]))
writer.write("%s\t" % (str(result[key])))
writer.write("\n")
    # save a checkpoint at each evaluation point
    if args.output_dir:
        torch.save(model.state_dict(),
                   os.path.join(args.output_dir, "checkpoint_" + str(global_step) + ".bin"))
        if args.task_name == "sentihood_NLI_M":
            if aspect_strict_Acc > global_best_acc:
                torch.save(model.state_dict(), os.path.join(args.output_dir, "best_checkpoint.bin"))
                global_best_acc = aspect_strict_Acc
        elif args.task_name in ["fiqa_headline", "fiqa_post", "fiqa_acd"]:
            if f > global_best_acc:
                torch.save(model.state_dict(), os.path.join(args.output_dir, "best_checkpoint.bin"))
                global_best_acc = f
        else:
            if aspect_F > global_best_acc:
                torch.save(model.state_dict(), os.path.join(args.output_dir, "best_checkpoint.bin"))
                global_best_acc = aspect_F
return global_best_acc
def step_train(train_dataloader, test_dataloader, model, optimizer,
device, n_gpu, evaluate_interval, global_step,
output_log_file, epoch, global_best_acc, args):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
pbar = tqdm(train_dataloader, desc="Iteration")
for step, batch in enumerate(pbar):
model.train()
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math, copy, time
from torch.autograd import Variable
from utils import outputActivation
import pdb
# Customizations
# - DONE Embeddings: linear transform d_feats -> d_model features
# - DONE Generator
# - DONE Batching
# DONE: add social context
# DONE : use maneuvers
# - GeneratorLat and GeneratorLon DONE
# - Embeddings with traj/grid/lat/lon features DONE
# ---------- EMBEDDINGS ----------
class Embeddings(nn.Module):
def __init__(self, d_model, src_feats, src_ngrid=0, src_grid=(13,3), src_lon=0, src_lat=0, soc_emb_size=0):
super(Embeddings, self).__init__()
#self.lut = nn.Embedding(vocab, d_model)
self.d_model = copy.copy(d_model)
self.traj_emb = None
self.grid_emb = None
self.lat_emb = None
self.lon_emb = None
self.soc_emb = None
self.soc_emb_size = soc_emb_size
        # Basically, out of the 512 features for the d_model encoding we split as:
        #   256 features for ego traj inputs
        #   256 features for social context (occupancy grid) inputs
        # Additionally we may reserve 20 features (3*4 + 2*4) for maneuvers used as inputs,
        # or just use all 512 features for traj_emb (e.g. at the output)
if src_ngrid > 0: # handle 2D input features with conv net
d_model_grid = d_model//2
d_model -= d_model_grid
# We start with [Batch, src_ngrid, 13, 3]
self.conv1 = torch.nn.Conv2d(src_ngrid, 64, 3) # => [64, 11, 1]
self.conv2 = torch.nn.Conv2d(64, 16, (3,1)) # => [16, 9, 1]
self.maxpool = torch.nn.MaxPool2d((2,1),padding = (1,0)) # => [16, 5, 1]
self.leaky_relu = torch.nn.LeakyReLU(0.1)
self.grid_emb = torch.nn.Linear(5, d_model_grid) # 5 from [16, 5, 1]
if soc_emb_size > 0:
self.soc_emb = torch.nn.Linear(soc_emb_size, d_model_grid) # projection
if src_lon > 0:
d_model_lon = src_lon * 4
d_model -= d_model_lon
self.lon_emb = torch.nn.Linear(src_lon, d_model_lon)
if src_lat > 0:
d_model_lat = src_lat * 4
d_model -= d_model_lat
self.lat_emb = torch.nn.Linear(src_lat, d_model_lat)
self.traj_emb = torch.nn.Linear(src_feats, d_model)
def forward(self, x):
# workaround to make nn.Sequential work with multiple inputs
# cf https://discuss.pytorch.org/t/nn-sequential-layers-forward-with-multiple-inputs-error/35591/3
#x, soc = x[0], x[1]
traj, grid, lon, lat = x
emb = self.traj_emb(traj) # * math.sqrt(self.d_model)
if grid is not None:
if len(grid.shape) == 3: # 1D input
assert self.soc_emb is not None
soc_emb = self.soc_emb(grid) # * math.sqrt(self.d_model)
emb = torch.cat((emb, soc_emb), dim=-1)
else: # 2D input
assert self.grid_emb is not None
## Apply convolutional social pooling: => [128, 16, 5, 1]
grid_enc = self.maxpool(self.leaky_relu(self.conv2(self.leaky_relu(self.conv1(grid)))))
grid_enc = torch.squeeze(grid_enc) # [128, 16, 5]
grid_emb = self.grid_emb(grid_enc)
emb = torch.cat((emb, grid_emb), dim=-1)
if lon is not None:
assert self.lon_emb is not None
lon_emb = self.lon_emb(lon) # * math.sqrt(self.d_model)
emb = torch.cat((emb, lon_emb), dim=-1)
if lat is not None:
assert self.lat_emb is not None
lat_emb = self.lat_emb(lat) # * math.sqrt(self.d_model)
emb = torch.cat((emb, lat_emb), dim=-1)
#print("EMB:", emb.shape)
return emb # * math.sqrt(self.d_model)
#return self.lut(x) * math.sqrt(self.d_model)
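# Usage sketch (illustrative shapes, assuming Tx == 16 so the grid branch's
# 16 conv channels line up with the trajectory time axis for the concat):
# emb = Embeddings(d_model=512, src_feats=2, src_ngrid=3)
# traj = torch.zeros(128, 16, 2)        # [Batch, Tx, x/y]
# grid = torch.zeros(128, 3, 13, 3)     # [Batch, src_ngrid, 13, 3]
# out = emb((traj, grid, None, None))   # -> [128, 16, 512]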
class PositionalEncoding(nn.Module):
"Implement the PE function."
def __init__(self, d_model, dropout, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0., max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0., d_model, 2) *
-(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + Variable(self.pe[:, :x.size(1)],
requires_grad=False)
return self.dropout(x)
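# Equivalent closed form of the buffer computed above (Vaswani et al. 2017):
#   PE(pos, 2i)   = sin(pos / 10000^(2i / d_model))
#   PE(pos, 2i+1) = cos(pos / 10000^(2i / d_model))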
# ---------- COMMON LAYERS for encoder/decoder ----------
def attention(query, key, value, mask=None, dropout=None):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) \
/ math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
p_attn = F.softmax(scores, dim = -1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
"Implements Figure 2"
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = \
[l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask=mask,
dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous() \
.view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
class LayerNorm(nn.Module):
"Construct a layernorm module (See citation for details)."
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class SublayerConnection(nn.Module):
"""
    A residual connection followed by a layer norm.
    Note: this variant applies the norm after the residual connection, as in
    the original paper; a pre-norm version is kept commented out in forward.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
# XXX return x + self.dropout(sublayer(self.norm(x)))
# XXX Normalize after residual cnx like in the paper
return self.norm(x + self.dropout(sublayer(x)))
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
def clones(module, N):
"Produce N identical layers."
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
# ---------- ENCODER ----------
class EncoderLayer(nn.Module):
"Encoder is made up of self-attn and feed forward (defined below)"
def __init__(self, size, self_attn, feed_forward, dropout):
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, x, mask):
"Follow Figure 1 (left) for connections."
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
class Encoder(nn.Module):
"Core encoder is a stack of N layers"
def __init__(self, layer, N):
super(Encoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, mask):
"Pass the input (and mask) through each layer in turn."
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
# ---------- DECODER ----------
class DecoderLayer(nn.Module):
"Decoder is made of self-attn, src-attn, and feed forward (defined below)"
def __init__(self, size, self_attn, src_attn, feed_forward, dropout):
super(DecoderLayer, self).__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 3)
def forward(self, x, memory, src_mask, tgt_mask):
"Follow Figure 1 (right) for connections."
m = memory
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
return self.sublayer[2](x, self.feed_forward)
class Decoder(nn.Module):
"Generic N layer decoder with masking."
def __init__(self, layer, N):
super(Decoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, memory, src_mask, tgt_mask):
for layer in self.layers:
x = layer(x, memory, src_mask, tgt_mask)
return self.norm(x)
# ---------- ENCODER/DECODER ----------
class EncoderDecoder(nn.Module):
"""
A standard Encoder-Decoder architecture. Base for this and many
other models.
"""
def __init__(self, encoder, decoder, src_embed, tgt_embed, generator=None, generator_lat=None, generator_lon=None):
super(EncoderDecoder, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.src_embed = src_embed
self.tgt_embed = tgt_embed
self.generator = generator
self.generator_lat = generator_lat
self.generator_lon = generator_lon
def forward(self, src, tgt, src_mask, tgt_mask, src_grid=None, src_lon=None, src_lat=None):
"Take in and process masked src and target sequences."
return self.decode(self.encode(src, src_mask, src_grid, src_lon, src_lat), src_mask,
tgt, tgt_mask)
def encode(self, src, src_mask, src_grid=None, src_lon=None, src_lat=None):
return self.encoder(self.src_embed((src, src_grid, src_lon, src_lat)), src_mask)
def decode(self, memory, src_mask, tgt, tgt_mask):
return self.decoder(self.tgt_embed((tgt, None, None, None)), memory, src_mask, tgt_mask)
#def prepare_infer(self, Ty, batch_size):
# self.ys_masks = []
# self.Ty = Ty
# for i in range(Ty):
# ys_mask = np.ones( (i+1, i+1), dtype='uint8')
# ys_mask = np.tril(ys_mask, 0)
# ys_mask = np.repeat(ys_mask[np.newaxis, :, :], batch_size, axis=0)
# ys_mask = torch.from_numpy(ys_mask)
# if torch.cuda.is_available():
# ys_mask = ys_mask.cuda()
# self.ys_masks.append(ys_mask)
def infer(self, model, src, src_mask, Ty, src_grid=None, src_lon=None, src_lat=None):
m, Tx, nx = src.shape
memory = model.encode(src, src_mask, src_grid, src_lon, src_lat) # [Batch 128, Tx 16, d_model 512]
ys = src[:, -1, 0:2].unsqueeze(1) # [Batch 128, ys.size(1) 1, X/Y 2]
for i in range(Ty):
ys_mask = np.ones( (ys.size(1), ys.size(1)), dtype='uint8')
ys_mask = np.tril(ys_mask, 0)
ys_mask = np.repeat(ys_mask[np.newaxis, :, :], m, axis=0)
ys_mask = torch.from_numpy(ys_mask)
if torch.cuda.is_available():
ys_mask = ys_mask.cuda()
#out = model.decode(memory, src_mask, ys, self.ys_masks[i]) # [Batch 128, ys.size(1), d_model 512]
# Last batch is usually not of size batch_size ...
out = model.decode(memory, src_mask, ys, ys_mask) # [Batch , ys.size(1), d_model 512]
fut_pred = model.generator(out) # [ys.size(1), Batch 128, gaussian_params 5]
fut_pred = fut_pred.permute(1, 0, 2) # [Batch 128, ys.size(1), gaussian_params 5]
next_y = fut_pred[:, -1, 0:2].unsqueeze(1) # [Batch 128, 1, muX/muY 2]
ys = torch.cat( (ys, next_y), dim=1) # [Batch 128, ys.size(1)+1, 2]
fut_pred = fut_pred.permute(1, 0, 2) # [Ty 25, Batch 128, 5]
return fut_pred
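    # Decoding sketch: infer() seeds the target sequence with the last
    # observed (x, y) position, then autoregressively appends the predicted
    # (muX, muY) for Ty steps, rebuilding the lower-triangular causal mask
    # at each step.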
# ---------- GENERATOR: for final output ----------
class Generator(nn.Module):
"Define standard linear + softmax generation step."
def __init__(self, d_model, tgt_params):
super(Generator, self).__init__()
self.proj = nn.Linear(d_model, tgt_params)
def forward(self, x):
        # x: [batch 128, Ty 25, d_model] -> fut_pred: [batch, Ty, bivariate gaussian params 5]
        fut_pred = self.proj(x)
# fut_pred: [Ty 25, batch 128, 5] via permute
fut_pred = fut_pred.permute(1, 0, 2)
fut_pred = outputActivation(fut_pred)
# fut_pred: [Ty 25, batch 128, bivariate gaussian params 5] via outputActivation which enforces pred constraints
return fut_pred
#return F.log_softmax(self.proj(x), dim=-1)
class GeneratorLat(nn.Module):
"Define standard linear + softmax generation step."
def __init__(self, d_model, tgt_lat_classes):
super(GeneratorLat, self).__init__()
# 3 classes: right, left, none
self.proj = nn.Linear(d_model, tgt_lat_classes)
def forward(self, x):
lat_pred = F.softmax(self.proj(x), dim=-1) # [Batch 128, Ty, 3]
lat_pred = lat_pred[:, -1, :]
lat_pred = torch.squeeze(lat_pred)
return lat_pred # [Batch 128, 3]
class GeneratorLon(nn.Module):
"Define standard linear + softmax generation step."
def __init__(self, d_model, tgt_lon_classes):
super(GeneratorLon, self).__init__()
        # 2 classes: braking or not
        self.proj = nn.Linear(d_model, tgt_lon_classes)
def forward(self, x):
lon_pred = F.softmax(self.proj(x), dim=-1)
lon_pred = lon_pred[:, -1, :]
lon_pred = torch.squeeze(lon_pred)
return lon_pred # [Batch 128, 2]
# ---------- FULL MODEL ----------
# This model does not use lon/lat
#
#-*- coding: utf-8 -*-
#
# -------------------------------------------------------------------------
#
# -------------------------------------------------------------------------
import math
import numpy as np
# import matplotlib
# matplotlib.use('agg')
# import matplotlib.pylab as plt
def solveForComponents(fc, pm, kphi, kvco, N, gamma, loop_type='passive2'):
"""
:Parameters:
loop_type (str) -
* passive2 - 2nd order passive
* passive3 - 3rd order passive
* passive4 - 4th order passive
* active2 - 2nd order active
* active3 - 3rd order active
* active4 - 4th order active
fc (float) - 0dB crossover frequency in Hz
pm (float) - phase margin in degrees
kphi (float) - charge pump gain in Amps per radian
kvco (float) - vco tuning sensitivity in Hz/V
N (int) - loop multiplication ratio
gamma (float) - optimization factor (1.024 default)
"""
if loop_type == 'passive2':
pll = PllSecondOrderPassive( fc,
pm,
kphi,
kvco,
N,
gamma=gamma )
d = pll.calc_components()
elif loop_type == 'passive3':
pll = PllThirdOrderPassive( fc,
pm,
kphi,
kvco,
N,
gamma=gamma )
d = pll.calc_components()
elif loop_type == 'passive4':
pll = PllFourthOrderPassive( fc,
pm,
kphi,
kvco,
N,
gamma=gamma )
        d = pll.calc_components()
    else:
        raise ValueError('unsupported loop_type: %s' % loop_type)
    return d
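# Example usage (illustrative numbers, mirroring the test at the bottom of
# this file): a 2nd order passive filter for a 10 kHz loop bandwidth.
# d = solveForComponents(fc=10e3, pm=45.0, kphi=4e-3, kvco=20e6,
#                        N=4500, gamma=1.024, loop_type='passive2')
# print(d['c1'], d['c2'], d['r2'])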
class PllSecondOrderPassive( object ):
""" The 2nd order passive phase locked loop object
"""
def __init__(self,
fc,
pm,
kphi,
kvco,
N,
gamma=1.024):
"""
:Parameters:
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
kphi (float) - charge pump gain in Amps per radian
kvco (float) - vco tuning sensitivity in Hz/V
N (int) - loop multiplication ratio
gamma (float) - optimization factor (default=1.024)
"""
self.fc = fc
self.pm = pm
self.kphi = kphi
self.kvco = kvco
self.N = N
self.gamma = gamma
def calc_components(self):
""" return a dict with the component values """
d = {}
d['t1'] = self.calc_t1(self.fc,
self.pm,
self.gamma)
d['t2'] = self.calc_t2(self.fc,
d['t1'],
self.gamma)
d['a0'] = self.calc_a0(self.kphi,
self.kvco,
self.N,
self.fc,
d['t1'],
d['t2'])
d['c1'] = self.calc_c1(d['a0'],
d['t1'],
d['t2'])
d['c2'] = self.calc_c2(d['a0'],
d['c1'])
d['r2'] = self.calc_r2(d['c2'],
d['t2'])
d['a1'] = self.calc_a1(d['c1'],
d['c2'],
d['r2'])
d['a2'] = 0
d['a3'] = 0
d['r3'] = 0
d['r4'] = 0
d['c3'] = 0
d['c4'] = 0
d['t3'] = 0
d['t4'] = 0
return d
def calc_t1(self, fc, pm, gamma=1.024):
"""
:Parameters:
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
gamma (float) - optimization factor (default=1.024)
"""
omega_c = 2*np.pi*fc
phi = np.pi*pm/180
t1 = (np.sqrt(((1+gamma)**2)*(np.tan(phi))**2 + 4*gamma) - (1+gamma)*np.tan(phi)) / (2*omega_c)
return t1
def calc_t2(self, fc, t1, gamma=1.024):
"""
:Parameters:
fc (float) - cutoff frequency in Hz
t1 (float) - time constant t1 in seconds
gamma (float) - optimization factor (default=1.024)
"""
omega_c = 2*np.pi*fc
return gamma/((omega_c**2)*t1)
def calc_a0(self, kphi, kvco, N, fc, t1, t2):
"""
:Parameters:
kphi (float) - charge pump gain in Amps per radian
kvco (float) - vco tuning sensitivity in Hz/V
N (int) - loop multiplication ratio
fc (float) - 0dB crossover frequency in Hz
t1 (float) - time constant t1 in seconds
t2 (float) - time constant t2 in seconds
"""
omega_c = 2*np.pi*fc
x = (kphi*kvco)/(N*omega_c**2)
y_num = np.sqrt(1+(omega_c**2)*(t2**2))
y_den = np.sqrt(1+(omega_c**2)*(t1**2))
a0 = x*y_num/y_den
return a0
def calc_c1(self, a0, t1, t2):
"""
:Parameters:
a0 (float) - loop filter coefficient
t1 (float) - time constant t1 in seconds
        t2 (float) - time constant t2 in seconds
"""
return a0*t1/t2
def calc_c2(self, a0, c1):
"""
:Parameters:
a0 (float) - loop filter coefficient
c1 (float) - capacitor in Farads
"""
return a0-c1
def calc_r2(self, c2, t2):
"""
:Parameters:
c2 (float) - capacitor in Farads
t2 (float) - time constant t2 in seconds
"""
return t2/c2
def calc_a1(self, c1, c2, r2):
"""
:Parameters:
c1 (float) - capacitor in Farads
c2 (float) - capacitor in Farads
r2 (float) - resistor in Ohms
"""
return c1*c2*r2
class PllThirdOrderPassive(PllSecondOrderPassive):
def __init__(self,
fc,
pm,
kphi,
kvco,
N,
gamma=1.136,
t31=0.6):
"""
:Parameters:
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
kphi (float) - charge pump gain in Amps per radian
kvco (float) - vco tuning sensitivity in Hz/V
N (int) - loop multiplication ratio
gamma (float) - optimization factor (default=1.136)
t31 (float) - ratio of T3 to T1 (default=0.6)
"""
self.fc = fc
self.pm = pm
self.kphi = kphi
self.kvco = kvco
self.N = N
self.gamma = gamma
self.t31 = t31
def calc_components(self):
""" return a dict with the component values and coefficients """
d = {}
omega_c = 2*np.pi*self.fc
# solve for time constants
d['t1'] = self.calc_t1(self.fc,
self.pm,
self.gamma)
d['t3'] = d['t1']*self.t31
d['t2'] = self.gamma/( (omega_c**2)*(d['t1'] + d['t3'] ) )
# solve for coefficients
d['a0'] = self.calc_a0(self.kphi,
self.kvco,
self.N,
self.fc,
d['t1'],
d['t2'],
d['t3'])
d['a1'] = d['a0']*(d['t1'] + d['t3'])
d['a2'] = d['a0']*d['t1']*d['t3']
# solve for components
d['c1'] = self.calc_c1(d['a0'],
d['a1'],
d['a2'],
d['t2'])
d['c3'] = self.calc_c3( d['a0'],
d['a1'],
d['a2'],
d['t2'],
d['c1'] )
d['c2'] = d['a0'] - d['c1'] - d['c3']
d['r2'] = d['t2']/d['c2']
d['r3'] = d['a2']/(d['c1']*d['c3']*d['t2'])
d['t4'] = 0
d['a3'] = 0
d['r4'] = 0
d['c4'] = 0
return d
def calc_c3( self,
a0,
a1,
a2,
t2,
c1 ):
return ( -(t2**2)*(c1**2) + t2*a1*c1 - a2*a0 )/( (t2**2)*c1 - a2 )
def calc_c1( self,
a0,
a1,
a2,
t2 ):
return (a2/(t2**2))*(1 + np.sqrt(1 + (t2/a2)*(t2*a0 - a1) ) )
def calc_a0( self,
kphi,
kvco,
N,
fc,
t1,
t2,
t3 ):
omega_c = 2*np.pi*fc
k1 = kphi*kvco/((omega_c**2)*(N))
k2 = np.sqrt( (1+(omega_c*t2)**2)/((1+(omega_c*t1)**2)*(1+(omega_c*t3)**2) ) )
return k1*k2
def calc_t1(self,
fc,
pm,
gamma,
t31=0.6,
num_iters=100):
""" numerically solve for t1 using the bisection method
see: https://en.wikibooks.org/wiki/Numerical_Methods/Equation_Solving
:Parameters:
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
        gamma (float) - optimization factor (1.136)
        t31 (float) - ratio of t3 to t1 (default=0.6)
        num_iters (int) - number of bisection iterations
"""
        a = 1e-15  # lower bracket for t1
        b = 1.0    # upper bracket for t1
        # func_t1 decreases monotonically in x, so the root is bracketed when
        # func_t1(a) > 0 > func_t1(b); fa/fb are computed as a sanity check
        fa = self.func_t1(a, fc, pm, t31=t31, gamma=gamma)
        fb = self.func_t1(b, fc, pm, t31=t31, gamma=gamma)
        for i in range(num_iters):
            guess = (a + b) / 2
            if self.func_t1(guess, fc, pm, t31=t31, gamma=gamma) < 0:
                b = guess
            else:
                a = guess
        return guess
def func_t1(self,
x,
fc,
pm,
t31=0.6,
gamma=1.136):
""" simulate t1. This function is used to
numerically solve for T1.
Equation 22.31 in Dean Banerjee's Book
:Parameters:
x (float) - guess at t1
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
t31 (float) - ratio of t3 to t1
gamma (float) - optimization factor (1.136)
:Returns:
            residual of the phase-margin equation at the guess (float); zero at the true t1
"""
omega_c = 2*np.pi*fc
phi = pm*np.pi/180
val = np.arctan( gamma/(omega_c*x*(1+t31)) ) - \
np.arctan( omega_c*x ) - \
np.arctan( omega_c*x*t31 ) - phi
return val
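# Usage sketch: the third order variant solves t1 numerically via the
# bisection above, then derives the remaining time constants and components.
# pll3 = PllThirdOrderPassive(fc=10e3, pm=47.8, kphi=4e-3, kvco=20e6, N=4500)
# d3 = pll3.calc_components()
# print(d3['c1'], d3['c2'], d3['c3'], d3['r2'], d3['r3'])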
def test4thOrderPassive( t31=0.4, t43=0.4 ):
fc = 10e3
pm = 47.8
kphi = 4e-3
kvco = 20e6
fout = 900e6
fpfd = 200e3
N = float(fout)/fpfd
fstart = 10
fstop = 100e6
ptsPerDec = 100
fref = 10e6
R = int(fref/fpfd)
# R = 1
pll = PllFourthOrderPassive( fc,
pm,
kphi,
kvco,
N,
gamma=1.115,
t31=t31,
t43=t43)
d = pll.calc_components()
# return d
flt = {
'c1':d['c1'],
'c2':d['c2'],
'c3':d['c3'],
'c4':d['c4'],
'r2':d['r2'],
'r3':d['r3'],
'r4':d['r4'],
'flt_type':"passive"
}
f,g,p,fz,pz,ref_cl,vco_cl = simulatePll( fstart,
fstop,
ptsPerDec,
kphi,
kvco,
N,
R,
filt=flt)
return d, fz, pz
class PllFourthOrderPassive( PllSecondOrderPassive ):
def __init__(self,
fc,
pm,
kphi,
kvco,
N,
gamma=1.115,
t31=0.107,
t43=0.107):
"""
:Parameters:
fc (float) - cutoff frequency in Hz
pm (float) - phase margin in degrees
kphi (float) - charge pump gain in Amps per radian
kvco (float) - vco tuning sensitivity in Hz/V
N (int) - loop multiplication ratio
gamma (float) - optimization factor (default=1.115)
        t31 (float) - ratio of T3 to T1 (default=0.107)
        t43 (float) - ratio of T4 to T3 (default=0.107)
note: for a realizable solution, t31 + t43 <= 1
"""
self.fc = fc
self.pm = pm
self.kphi = kphi
self.kvco = kvco
self.N = N
self.gamma = gamma
        self.t31 = t31
        self.t43 = t43
GAAGCAAAGT (non-indel_read_2)
For indel_read_1 and non-indel_read_1,
left edit_dist(TAGTAT, TAGTAT) = 0
right edit_dist(AAGCAA, GAAGCA) = 2
indel complexity = left edit_dist + right edit_dist
= 2
    Calculate for each (indel_read_i, non-indel_read_j) pair
    and take the minimum.
"""
# first compare with reference
# if indel complexity against ref is 0, return 0.
# -> O(N) (N: num of indel reads)
complexities = []
indel_reads = self.generate_indel_reads()
ref_reads = self.generate_ref_reads()
for idl, ref in zip(indel_reads, ref_reads):
if len(idl.lt_seq) >= n and len(idl.rt_seq) >= n:
lt = idl.lt_seq[-n:]
lt_ref = ref.lt_seq[-n:]
lt_edit_dist = editdistance(lt, lt_ref)
rt = idl.rt_seq[:n]
rt_ref = ref.rt_seq[:n]
rt_edit_dist = editdistance(rt, rt_ref)
complexity = lt_edit_dist + rt_edit_dist
complexities.append(complexity)
else:
pass
if complexities == []:
return 0
indel_complexity_against_ref = min(complexities)
if indel_complexity_against_ref == 0:
return 0
        # indel_complexity_against_ref > 0; check for SNP-induced complexity
# -> O(NxM) (M: num of non-indel reads)
complexities = []
indel_reads = self.generate_indel_reads()
non_reads = self.generate_non_indel_reads()
for idl in indel_reads:
if len(idl.lt_seq) >= n and len(idl.rt_seq) >= n:
for non in non_reads:
if len(non.lt_seq) >= n and len(non.rt_seq) >= n:
lt = idl.lt_seq[-n:]
lt_non = non.lt_seq[-n:]
lt_edit_dist = editdistance(lt, lt_non)
rt = idl.rt_seq[:n]
rt_non = non.rt_seq[:n]
rt_edit_dist = editdistance(rt, rt_non)
complexity = lt_edit_dist + rt_edit_dist
complexities.append(complexity)
if complexities == []:
return indel_complexity_against_ref
else:
refined_value = min(complexities)
return min(indel_complexity_against_ref, refined_value)
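# Self-contained sketch of the edit distance assumed above (the module's
# editdistance helper is imported elsewhere); with n = 6 this reproduces the
# docstring example: dist("TAGTAT", "TAGTAT") = 0, dist("AAGCAA", "GAAGCA") = 2,
# giving an indel complexity of 0 + 2 = 2.
def _levenshtein_sketch(s, t):
    # classic dynamic-programming Levenshtein distance
    prev = list(range(len(t) + 1))
    for i, cs in enumerate(s, 1):
        curr = [i]
        for j, ct in enumerate(t, 1):
            curr.append(min(prev[j] + 1,                 # deletion
                            curr[j - 1] + 1,             # insertion
                            prev[j - 1] + (cs != ct)))   # substitution
        prev = curr
    return prev[-1]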
class CodingSequenceWithIndel(SequenceWithIndel):
"""Represents indel annotated with gene info
Attributes:
strand (str): '+' for positive strand '-' for negative
accession (str): RefSeq accession number (e.g. NM_****)
gene_symbol (str): gene name
exon (int): exon number. 1 is the first exon
exon_start (int): the exon start pos on genome coordinate
exon_end (int): the exon end pos on genome coordinate
last_exon (int): 1 if the current exon is the last exon, 0 otherwise
cds_start (int): the pos of coding sequence (cds) starting at the exon_start
prev_exon_start (int): (current - 1) exon start pos on genome coordinate
-1 if current = 1 (first exon)
prev_exon_end (int): (current - 1) exon end pos on genome coordinate
-1 if current = 1
next_exon_start (int): (current + 1) exon start pos on genome coordinate
-1 if current = last exon
next_exon_end (int): (current + 1) exon end pos on genome coordinate
-1 if current = last exon
"""
def __init__(
self,
chr,
pos,
idl_type,
lt_seq,
idl_seq,
rt_seq,
strand,
accession,
gene_symbol,
exon,
exon_start,
exon_end,
last_exon,
cds_start,
prev_exon_start,
prev_exon_end,
next_exon_start,
next_exon_end,
):
SequenceWithIndel.__init__(self, chr, pos, idl_type, lt_seq, idl_seq, rt_seq)
self.strand = strand
self.accession = accession
self.gene_symbol = gene_symbol
self.exon = exon
self.exon_start = exon_start
self.exon_end = exon_end
self.last_exon = last_exon
self.cds_start = cds_start
self.prev_exon_start = prev_exon_start
self.prev_exon_end = prev_exon_end
self.next_exon_start = next_exon_start
self.next_exon_end = next_exon_end
def is_nmd_insensitive(self):
"""Nonsense-mediatate decay (NMD) insensitivity
Args:
None
Returns:
            is_insensitive (int): 1 if insensitive, 0 otherwise
"""
is_insensitive = 0
if self.exon == 1 or self.exon == self.last_exon:
is_insensitive = 1
return is_insensitive
def effect(self):
"""Report indel annotation based on the region where
indel is annotated.
Possible regions:
Exon,
            Splice site (0 < dist. to exon boundary < 3)
            Splice region (2 < dist. to exon boundary < 11)
Args:
None
Returns:
indel annotation (str): see Example
Example:
SDF4|NM_016547|167|frameshiftTruncating|0
Pipe-delimited string reports GeneName, Accession,
Codon pos, Effect and NMD-insensitivity.
"""
if self.strand == "+":
if self.exon_start <= self.pos <= self.exon_end:
return self.exonic_on_pos_strand()
elif (
0 < self.exon_start - self.pos <= 2 or 0 < self.pos - self.exon_end <= 2
):
return self.splice_site_on_pos_strand()
elif (
2 < self.exon_start - self.pos <= 11
or 2 < self.pos - self.exon_end <= 11
):
return self.splice_region_on_pos_strand()
else:
pass
else:
if self.exon_start <= self.pos <= self.exon_end:
return self.exonic_on_neg_strand()
elif (
0 < self.exon_start - self.pos <= 2 or 0 < self.pos - self.exon_end <= 2
):
return self.splice_site_on_neg_strand()
elif (
2 < self.exon_start - self.pos <= 11
or 2 < self.pos - self.exon_end <= 11
):
return self.splice_region_on_neg_strand()
else:
pass
def cds_pos_in_exonic_indels(self):
"""Report coding sequence (CDS) pos affected by indel
Args:
None
Returns:
cds pos (int): The first coding sequence base affected by the indel
Example: 1234567890123
CDS : ATGCTACGACTGA
del : ATGCTA---CTGA -> cds_pos = 7
123456 7890123
CDS : ATGCTA CGACTGA
ins : ATGCTATAGCGACTGA -> cds_pos = 7
            Note that the sequences are unaffected up to the first 6 bases.
"""
# insertion/deletion on positive strand
if self.strand == "+":
cds_pos = self.cds_start + self.pos - self.exon_start
else:
# insertion on negative strand
if self.idl_type == 1:
cds_pos = self.cds_start + self.exon_end - (self.pos - 1)
# deletion on negative strand
else:
cds_pos = (
self.cds_start + self.exon_end - self.pos - (len(self.idl_seq) - 1)
)
return cds_pos
def exonic_on_pos_strand(self):
"""Annotate coding exon indel on positve strand
Args:
None
Returns:
indel annotation (str): gene|acc|codon_pos|effect|nmd_insensitivity
possible effect: frameshiftTruncating
inframeDel
inframeIns
nonsenseTruncating
spliceTruncating (the GT-AG motif broken)
splicePreserving (the GT-AG motif preserved)
The splice effect is possible when insertion occurs at the 5'exon
boundary.
"""
# insertion at 5'exon_start
if self.idl_type == 1 and self.pos == self.exon_start:
cds_pos = self.cds_start - 1
codon_pos = int(cds_pos / 3) + 1
if len(self.idl_seq) > 1 and self.idl_seq[-2:] == "AG":
return codon_pos, "splicePreserving"
else:
return codon_pos, "spliceTruncating"
# indels within exon
else:
cds_pos = self.cds_pos_in_exonic_indels()
frame = (cds_pos - 1) % 3
if frame == 2:
codon_pos = int(cds_pos / 3)
else:
codon_pos = int(cds_pos / 3) + 1
# insertion
if self.idl_type == 1:
if frame == 0:
seq = self.idl_seq + self.rt_seq[:2]
elif frame == 1:
seq = self.lt_seq[-1:] + self.idl_seq + self.rt_seq[:1]
else:
seq = self.lt_seq[-2:] + self.idl_seq + self.rt_seq[:3]
# deletion
else:
if frame == 0:
seq = self.rt_seq[:3]
elif frame == 1:
seq = self.lt_seq[-1:] + self.rt_seq[:2]
else:
seq = self.lt_seq[-2:] + self.rt_seq[:1]
# check for stop codon
if exists_stop_codon(self.strand, seq):
return codon_pos, "nonsenseTruncating"
else:
if len(self.idl_seq) % 3 == 0 and self.idl_type == 1:
return codon_pos, "inframeIns"
elif len(self.idl_seq) % 3 == 0 and self.idl_type == 0:
return codon_pos, "inframeDel"
else:
return codon_pos, "frameshiftTruncating"
def splice_site_on_pos_strand(self):
"""Annotate indel within 2-nt to exon boundary on positve strand
Args:
None
Returns:
indel annotation (str): gene|acc|codon_pos|effect|nmd_insensitivity
possible effect:
spliceShortIntron (for intron <= 5-nt)
splicePreserving (the GT-AG motif preserved)
spliceTruncating (the GT-AG motif broken)
spliceRegion (for ins at 2-nt upstream of 5'splice site)
"""
# splicing motif + at least 1 base
# GT + (at least one) + AG
min_motif_len = 5
# 5'splice
if self.exon_start > self.pos:
cds_pos = self.cds_start - 1
codon_pos = int(cds_pos / 3) + 1
if (self.exon_start - 1) - self.prev_exon_end <= min_motif_len:
return codon_pos, "spliceShortIntron"
else:
# insertion at 1-nt upstream of exon start
if self.idl_type == 1 and self.exon_start - self.pos == 1:
if self.idl_seq[-1] == "A":
return codon_pos, "splicePreserving"
else:
return codon_pos, "spliceTruncating"
# insertion at 2-nt upstream of exon start
elif self.idl_type == 1 and self.exon_start - self.pos == 2:
return codon_pos, "spliceRegion"
# deletion at 1-nt upstream of exon start
elif self.idl_type == 0 and self.exon_start - self.pos == 1:
return codon_pos, "spliceTruncating"
# deletion at 2-nt upstream of exon start
elif self.idl_type == 0 and self.exon_start - self.pos == 2:
if len(self.idl_seq) == 1 and self.lt_seq[-1] == "A":
return codon_pos, "splicePreserving"
elif len(self.idl_seq) == 2 and self.lt_seq[-2:] == "AG":
return codon_pos, "splicePreserving"
else:
return codon_pos, "spliceTruncating"
else:
pass
# 3'splice
else:
cds_pos = self.cds_start + self.exon_end - self.exon_start
codon_pos = int(cds_pos / 3) + 1
if self.next_exon_start - self.exon_end <= min_motif_len:
return codon_pos, "spliceShortIntron"
else:
# insertion 1-nt downstream of exon end
if self.idl_type == 1 and self.pos - self.exon_end == 1:
if len(self.idl_seq) > 1 and self.idl_seq[:2] == "GT":
return codon_pos, "splicePreserving"
else:
return codon_pos, "spliceTruncating"
# insertion 2-nt downstream of exon end
elif self.idl_type == 1 and self.pos - self.exon_end == 2:
if self.idl_seq[0] == "T":
return codon_pos, "splicePreserving"
else:
return codon_pos, "spliceTruncating"
| |
Create user request
"""
def __init__(self, headers=None, avatar=None, description=None, email=None, nick_name=None, phone=None, role=None,
status=None, user_data=None, user_id=None, user_name=None):
self.headers = headers # type: Dict[str, str]
        # Avatar
        self.avatar = avatar # type: str
        # Description
        self.description = description # type: str
        # Email
        self.email = email # type: str
        # Nickname
        self.nick_name = nick_name # type: str
        # Phone number
        self.phone = phone # type: str
        # Role
        self.role = role # type: str
        # Status
        self.status = status # type: str
        # User-defined data in JSON format; may hold settings or small amounts
        # of temporary data, up to 1 KB
        self.user_data = user_data # type: dict
        # User ID
        self.user_id = user_id # type: str
        # User name
self.user_name = user_name # type: str
def validate(self):
if self.description is not None:
self.validate_max_length(self.description, 'description', 1024)
self.validate_required(self.user_id, 'user_id')
def to_map(self):
result = {}
if self.headers is not None:
result['headers'] = self.headers
if self.avatar is not None:
result['avatar'] = self.avatar
if self.description is not None:
result['description'] = self.description
if self.email is not None:
result['email'] = self.email
if self.nick_name is not None:
result['nick_name'] = self.nick_name
if self.phone is not None:
result['phone'] = self.phone
if self.role is not None:
result['role'] = self.role
if self.status is not None:
result['status'] = self.status
if self.user_data is not None:
result['user_data'] = self.user_data
if self.user_id is not None:
result['user_id'] = self.user_id
if self.user_name is not None:
result['user_name'] = self.user_name
return result
def from_map(self, map={}):
if map.get('headers') is not None:
self.headers = map.get('headers')
if map.get('avatar') is not None:
self.avatar = map.get('avatar')
if map.get('description') is not None:
self.description = map.get('description')
if map.get('email') is not None:
self.email = map.get('email')
if map.get('nick_name') is not None:
self.nick_name = map.get('nick_name')
if map.get('phone') is not None:
self.phone = map.get('phone')
if map.get('role') is not None:
self.role = map.get('role')
if map.get('status') is not None:
self.status = map.get('status')
if map.get('user_data') is not None:
self.user_data = map.get('user_data')
if map.get('user_id') is not None:
self.user_id = map.get('user_id')
if map.get('user_name') is not None:
self.user_name = map.get('user_name')
return self
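# Round-trip sketch: to_map() emits only the fields that are set, and
# from_map() restores them on a fresh instance.
# req = CreateUserRequest(user_id='u-001', nick_name='demo')
# payload = req.to_map()   # {'user_id': 'u-001', 'nick_name': 'demo'}
# clone = CreateUserRequest().from_map(payload)
# assert clone.user_id == 'u-001'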
class CreateUserResponse(TeaModel):
"""
Create user response
"""
def __init__(self, avatar=None, created_at=None, default_drive_id=None, description=None, domain_id=None,
email=None, nick_name=None, phone=None, role=None, status=None, updated_at=None, user_data=None,
user_id=None, user_name=None):
        # Avatar
        self.avatar = avatar # type: str
        # User creation time
        self.created_at = created_at # type: int
        # Default Drive ID
        self.default_drive_id = default_drive_id # type: str
        # User description
        self.description = description # type: str
        # Domain ID
        self.domain_id = domain_id # type: str
        # Email
        self.email = email # type: str
        # Nickname
        self.nick_name = nick_name # type: str
        # Phone
        self.phone = phone # type: str
        # Role
        self.role = role # type: str
        # User status
        self.status = status # type: str
        # Last modified time
        self.updated_at = updated_at # type: int
        # User-defined data in JSON format; may hold settings or small amounts
        # of temporary data, up to 1 KB
        self.user_data = user_data # type: dict
        # User ID
        self.user_id = user_id # type: str
        # User name
self.user_name = user_name # type: str
def validate(self):
pass
def to_map(self):
result = {}
if self.avatar is not None:
result['avatar'] = self.avatar
if self.created_at is not None:
result['created_at'] = self.created_at
if self.default_drive_id is not None:
result['default_drive_id'] = self.default_drive_id
if self.description is not None:
result['description'] = self.description
if self.domain_id is not None:
result['domain_id'] = self.domain_id
if self.email is not None:
result['email'] = self.email
if self.nick_name is not None:
result['nick_name'] = self.nick_name
if self.phone is not None:
result['phone'] = self.phone
if self.role is not None:
result['role'] = self.role
if self.status is not None:
result['status'] = self.status
if self.updated_at is not None:
result['updated_at'] = self.updated_at
if self.user_data is not None:
result['user_data'] = self.user_data
if self.user_id is not None:
result['user_id'] = self.user_id
if self.user_name is not None:
result['user_name'] = self.user_name
return result
def from_map(self, map={}):
if map.get('avatar') is not None:
self.avatar = map.get('avatar')
if map.get('created_at') is not None:
self.created_at = map.get('created_at')
if map.get('default_drive_id') is not None:
self.default_drive_id = map.get('default_drive_id')
if map.get('description') is not None:
self.description = map.get('description')
if map.get('domain_id') is not None:
self.domain_id = map.get('domain_id')
if map.get('email') is not None:
self.email = map.get('email')
if map.get('nick_name') is not None:
self.nick_name = map.get('nick_name')
if map.get('phone') is not None:
self.phone = map.get('phone')
if map.get('role') is not None:
self.role = map.get('role')
if map.get('status') is not None:
self.status = map.get('status')
if map.get('updated_at') is not None:
self.updated_at = map.get('updated_at')
if map.get('user_data') is not None:
self.user_data = map.get('user_data')
if map.get('user_id') is not None:
self.user_id = map.get('user_id')
if map.get('user_name') is not None:
self.user_name = map.get('user_name')
return self
class DeleteUserRequest(TeaModel):
"""
Delete user request
"""
def __init__(self, headers=None, user_id=None):
self.headers = headers # type: Dict[str, str]
        # User ID
self.user_id = user_id # type: str
def validate(self):
self.validate_required(self.user_id, 'user_id')
def to_map(self):
result = {}
if self.headers is not None:
result['headers'] = self.headers
if self.user_id is not None:
result['user_id'] = self.user_id
return result
def from_map(self, map={}):
if map.get('headers') is not None:
self.headers = map.get('headers')
if map.get('user_id') is not None:
self.user_id = map.get('user_id')
return self
class DeleteUserResponse(TeaModel):
"""
Delete user response
"""
def __init__(self):
pass
def validate(self):
pass
def to_map(self):
result = {}
return result
def from_map(self, map={}):
return self
class GetUserRequest(TeaModel):
"""
Get user request
"""
def __init__(self, headers=None, user_id=None):
self.headers = headers # type: Dict[str, str]
        # User ID. Required when accessing with an AccessKey (AK); when using
        # an access_token, defaults to the caller's own user info if omitted.
        self.user_id = user_id # type: str
def validate(self):
pass
def to_map(self):
result = {}
if self.headers is not None:
result['headers'] = self.headers
if self.user_id is not None:
result['user_id'] = self.user_id
return result
def from_map(self, map={}):
if map.get('headers') is not None:
self.headers = map.get('headers')
if map.get('user_id') is not None:
self.user_id = map.get('user_id')
return self
class GetUserResponse(TeaModel):
"""
Get user response
"""
def __init__(self, avatar=None, created_at=None, default_drive_id=None, description=None, domain_id=None,
email=None, nick_name=None, phone=None, role=None, status=None, updated_at=None, user_data=None,
user_id=None, user_name=None):
        # Avatar
        self.avatar = avatar # type: str
        # User creation time
        self.created_at = created_at # type: int
        # Default Drive ID
        self.default_drive_id = default_drive_id # type: str
        # User description
        self.description = description # type: str
        # Domain ID
        self.domain_id = domain_id # type: str
        # Email
        self.email = email # type: str
        # Nickname
        self.nick_name = nick_name # type: str
        # Phone
        self.phone = phone # type: str
        # Role
        self.role = role # type: str
        # User status
        self.status = status # type: str
        # Last modified time
        self.updated_at = updated_at # type: int
        # User-defined data in JSON format; may hold settings or small amounts
        # of temporary data, up to 1 KB
        self.user_data = user_data # type: dict
        # User ID
        self.user_id = user_id # type: str
        # User name
self.user_name = user_name # type: str
def validate(self):
pass
def to_map(self):
result = {}
if self.avatar is not None:
result['avatar'] = self.avatar
if self.created_at is not None:
result['created_at'] = self.created_at
if self.default_drive_id is not None:
result['default_drive_id'] = self.default_drive_id
if self.description is not None:
result['description'] = self.description
if self.domain_id is not None:
result['domain_id'] = self.domain_id
if self.email is not None:
result['email'] = self.email
if self.nick_name is not None:
result['nick_name'] = self.nick_name
if self.phone is not None:
result['phone'] = self.phone
if self.role is not None:
result['role'] = self.role
if self.status is not None:
result['status'] = self.status
if self.updated_at is not None:
result['updated_at'] = self.updated_at
if self.user_data is not None:
result['user_data'] = self.user_data
if self.user_id is not None:
result['user_id'] = self.user_id
if self.user_name is not None:
result['user_name'] = self.user_name
return result
def from_map(self, map={}):
if map.get('avatar') is not None:
self.avatar = map.get('avatar')
if map.get('created_at') is not None:
self.created_at = map.get('created_at')
if map.get('default_drive_id') is not None:
self.default_drive_id = map.get('default_drive_id')
if map.get('description') is not None:
self.description = map.get('description')
if map.get('domain_id') is not None:
self.domain_id = map.get('domain_id')
if map.get('email') is not None:
self.email = map.get('email')
if map.get('nick_name') is not None:
self.nick_name = map.get('nick_name')
if map.get('phone') is not None:
self.phone = map.get('phone')
if map.get('role') is not None:
self.role = map.get('role')
if map.get('status') is not None:
self.status = map.get('status')
if map.get('updated_at') is not None:
self.updated_at = map.get('updated_at')
if map.get('user_data') is not None:
self.user_data = map.get('user_data')
if map.get('user_id') is not None:
self.user_id = map.get('user_id')
if map.get('user_name') is not None:
self.user_name = map.get('user_name')
return self
class ListUserRequest(TeaModel):
"""
List user request
"""
def __init__(self, headers=None, limit=None, marker=None):
self.headers = headers # type: Dict[str, str]
        # Page size limit
        self.limit = limit # type: int
        # Pagination marker
self.marker = marker # type: str
def validate(self):
if self.limit is not None:
self.validate_maximum(self.limit, 'limit', 100)
self.validate_minimum(self.limit, 'limit', | |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""service provides funcs for working with ``Service`` instances.
:func:`extract_report_spec` obtains objects used to determine what metrics,
labels and logs are included in a report request.
:class:`MethodRegistry` obtains a registry of `MethodInfo` instances from the
data within a `Service` which can then be used to determine which methods get
tracked.
:class:`Loaders` enumerates the different ways in which to obtain a usable
``Service`` instance.
"""
from __future__ import absolute_import
import collections
import logging
import os
from apitools.base.py import encoding
from enum import Enum
from . import label_descriptor, metric_descriptor, messages, path_template
from google.api.config import service_config
logger = logging.getLogger(__name__)
CONFIG_VAR = 'ENDPOINTS_SERVICE_CONFIG_FILE'
def _load_from_well_known_env():
if CONFIG_VAR not in os.environ:
logger.info('did not load service; no environ var %s', CONFIG_VAR)
return None
config_file = os.environ[CONFIG_VAR]
if not os.path.exists(os.environ[CONFIG_VAR]):
logger.warn('did not load service; missing config file %s', config_file)
return None
try:
with open(config_file) as f:
return encoding.JsonToMessage(messages.Service, f.read())
except ValueError:
logger.warn('did not load service; bad json config file %s', config_file)
return None
_SIMPLE_CONFIG = """
{
"name": "allow-all",
"http": {
"rules": [{
"selector": "allow-all.GET",
"get": "**"
}, {
"selector": "allow-all.POST",
"post": "**"
}]
},
"usage": {
"rules": [{
"selector" : "allow-all.GET",
"allowUnregisteredCalls" : true
}, {
"selector" : "allow-all.POST",
"allowUnregisteredCalls" : true
}]
}
}
"""
_SIMPLE_CORE = encoding.JsonToMessage(messages.Service, _SIMPLE_CONFIG)
def _load_simple():
return encoding.CopyProtoMessage(_SIMPLE_CORE)
class Loaders(Enum):
"""Enumerates the functions used to load service configs."""
# pylint: disable=too-few-public-methods
ENVIRONMENT = (_load_from_well_known_env,)
SIMPLE = (_load_simple,)
FROM_SERVICE_MANAGEMENT = (service_config.fetch_service_config,)
def __init__(self, load_func):
"""Constructor.
load_func is used to load a service config
"""
self._load_func = load_func
def load(self, **kw):
return self._load_func(**kw)
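# Illustrative sketch (assumed usage, not in this module): pick a loader and
# fall back to the permissive allow-all config when the environment-based
# loader finds nothing.
def _load_service_example():
    service = Loaders.ENVIRONMENT.load()
    if service is None:
        service = Loaders.SIMPLE.load()
    return service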
class MethodRegistry(object):
"""Provides a registry of the api methods defined by a ``Service``.
During construction, ``MethodInfo`` instances are extracted from a
``Service``. They are subsequently accessible via the :func:`lookup` method.
"""
# pylint: disable=too-few-public-methods
_OPTIONS = 'OPTIONS'
def __init__(self, service):
"""Constructor.
Args:
service (:class:`google.api.gen.servicecontrol_v1_messages.Service`):
a service instance
"""
if not isinstance(service, messages.Service):
raise ValueError('service should be an instance of Service')
if not service.name:
raise ValueError('Bad service: the name is missing')
self._service = service # the service that provides the methods
self._extracted_methods = {} # tracks all extracted_methods by selector
self._auth_infos = self._extract_auth_config()
# tracks urls templates
self._templates_method_infos = collections.defaultdict(list)
self._extract_methods()
def lookup(self, http_method, path):
http_method = http_method.lower()
if path.startswith('/'):
path = path[1:]
tmi = self._templates_method_infos.get(http_method)
if not tmi:
logger.debug('No methods for http method %s in %s',
http_method,
self._templates_method_infos.keys())
return None
# pylint: disable=fixme
        # TODO: speed this up if it proves to be a bottleneck.
        #
        # There is a sophisticated trie-based solution in ESP; something
        # similar could be built around the path_template implementation.
for template, method_info in tmi:
logger.debug('trying %s with template %s', path, template)
try:
template.match(path)
logger.debug('%s matched template %s', path, template)
return method_info
except path_template.ValidationException:
logger.debug('%s did not match template %s', path, template)
continue
return None
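    # Illustrative sketch (hypothetical call site, not in this module): with
    # the allow-all config from _load_simple(), every GET path resolves to the
    # 'allow-all.GET' selector, e.g.
    #
    #   registry = MethodRegistry(_load_simple())
    #   info = registry.lookup('GET', '/users/alice')
    #   assert info.selector == 'allow-all.GET'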
def _extract_auth_config(self):
"""Obtains the authentication configurations."""
service = self._service
if not service.authentication:
return {}
auth_infos = {}
for auth_rule in service.authentication.rules:
selector = auth_rule.selector
provider_ids_to_audiences = {}
for requirement in auth_rule.requirements:
provider_id = requirement.providerId
if provider_id and requirement.audiences:
audiences = requirement.audiences.split(",")
provider_ids_to_audiences[provider_id] = audiences
auth_infos[selector] = AuthInfo(provider_ids_to_audiences)
return auth_infos
def _extract_methods(self):
"""Obtains the methods used in the service."""
service = self._service
all_urls = set()
urls_with_options = set()
if not service.http:
return
for rule in service.http.rules:
http_method, url = _detect_pattern_option(rule)
if not url or not http_method or not rule.selector:
logger.error('invalid HTTP binding encountered')
continue
# Obtain the method info
method_info = self._get_or_create_method_info(rule.selector)
if rule.body:
method_info.body_field_path = rule.body
if not self._register(http_method, url, method_info):
continue # detected an invalid url
all_urls.add(url)
if http_method == self._OPTIONS:
urls_with_options.add(url)
self._add_cors_options_selectors(all_urls - urls_with_options)
self._update_usage()
self._update_system_parameters()
def _register(self, http_method, url, method_info):
try:
http_method = http_method.lower()
template = path_template.PathTemplate(url)
self._templates_method_infos[http_method].append((template, method_info))
logger.debug('Registered template %s under method %s',
template,
http_method)
return True
except path_template.ValidationException:
logger.error('invalid HTTP template provided: %s', url)
return False
def _update_usage(self):
extracted_methods = self._extracted_methods
service = self._service
if not service.usage:
return
for rule in service.usage.rules:
selector = rule.selector
method = extracted_methods.get(selector)
if method:
method.allow_unregistered_calls = rule.allowUnregisteredCalls
else:
logger.error('bad usage selector: No HTTP rule for %s', selector)
def _get_or_create_method_info(self, selector):
extracted_methods = self._extracted_methods
info = self._extracted_methods.get(selector)
if info:
return info
auth_infos = self._auth_infos
        auth_info = auth_infos.get(selector)
info = MethodInfo(selector, auth_info)
extracted_methods[selector] = info
return info
def _add_cors_options_selectors(self, urls):
extracted_methods = self._extracted_methods
base_selector = '%s.%s' % (self._service.name, self._OPTIONS)
# ensure that no existing options selector is being used
options_selector = base_selector
n = 0
while extracted_methods.get(options_selector) is not None:
n += 1
options_selector = '%s.%d' % (base_selector, n)
method_info = self._get_or_create_method_info(options_selector)
method_info.allow_unregistered_calls = True
for u in urls:
self._register(self._OPTIONS, u, method_info)
def _update_system_parameters(self):
extracted_methods = self._extracted_methods
service = self._service
if not service.systemParameters:
return
rules = service.systemParameters.rules
for rule in rules:
selector = rule.selector
method = extracted_methods.get(selector)
if not method:
logger.error('bad system parameter: No HTTP rule for %s',
selector)
continue
for parameter in rule.parameters:
name = parameter.name
if not name:
logger.error('bad system parameter: no parameter name %s',
selector)
continue
if parameter.httpHeader:
method.add_header_param(name, parameter.httpHeader)
if parameter.urlQueryParameter:
method.add_url_query_param(name, parameter.urlQueryParameter)
class AuthInfo(object):
"""Consolidates auth information about methods defined in a ``Service``."""
def __init__(self, provider_ids_to_audiences):
"""Construct an AuthInfo instance.
Args:
provider_ids_to_audiences: a dictionary that maps from provider ids
to allowed audiences.
"""
self._provider_ids_to_audiences = provider_ids_to_audiences
def is_provider_allowed(self, provider_id):
return provider_id in self._provider_ids_to_audiences
def get_allowed_audiences(self, provider_id):
return self._provider_ids_to_audiences.get(provider_id, [])
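# Illustrative sketch (not in this module): AuthInfo answers the two questions
# checked during authentication. The provider ids and audiences are made up.
def _auth_info_example():
    info = AuthInfo({'my-issuer': ['aud-1', 'aud-2']})
    assert info.is_provider_allowed('my-issuer')
    assert info.get_allowed_audiences('other-issuer') == []
    return info.get_allowed_audiences('my-issuer')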
class MethodInfo(object):
"""Consolidates information about methods defined in a ``Service``."""
API_KEY_NAME = 'api_key'
# pylint: disable=too-many-instance-attributes
def __init__(self, selector, auth_info):
self.selector = selector
self.auth_info = auth_info
self.allow_unregistered_calls = False
self.backend_address = ''
self.body_field_path = ''
self._url_query_parameters = collections.defaultdict(list)
self._header_parameters = collections.defaultdict(list)
def add_url_query_param(self, name, parameter):
self._url_query_parameters[name].append(parameter)
def add_header_param(self, name, parameter):
self._header_parameters[name].append(parameter)
def url_query_param(self, name):
return tuple(self._url_query_parameters[name])
def header_param(self, name):
return tuple(self._header_parameters[name])
@property
def api_key_http_header(self):
return self.header_param(self.API_KEY_NAME)
@property
def api_key_url_query_params(self):
return self.url_query_param(self.API_KEY_NAME)
def extract_report_spec(
service,
label_is_supported=label_descriptor.KnownLabels.is_supported,
metric_is_supported=metric_descriptor.KnownMetrics.is_supported):
"""Obtains the used logs, metrics and labels from a service.
label_is_supported and metric_is_supported are filter functions used to
determine if label_descriptors or metric_descriptors found in the service
are supported.
Args:
service (:class:`google.api.gen.servicecontrol_v1_messages.Service`):
a service instance
label_is_supported (:func): determines if a given label is supported
metric_is_supported (:func): determines if a given metric is supported
Return:
tuple: (
logs (set[string}), # the logs to report to
metrics (list[string]), # the metrics to use
labels (list[string]) # the labels to add
)
"""
resource_descs = service.monitoredResources
labels_dict = {}
logs = set()
if service.logging:
logs = _add_logging_destinations(
service.logging.producerDestinations,
resource_descs,
service.logs,
labels_dict,
label_is_supported
)
metrics_dict = {}
monitoring = service.monitoring
if monitoring:
for destinations in (monitoring.consumerDestinations,
monitoring.producerDestinations):
_add_monitoring_destinations(destinations,
resource_descs,
service.metrics,
metrics_dict,
metric_is_supported,
labels_dict,
label_is_supported)
return logs, metrics_dict.keys(), labels_dict.keys()
def _add_logging_destinations(destinations,
resource_descs,
log_descs,
labels_dict,
is_supported):
all_logs = set()
for d in destinations:
if not _add_labels_for_a_monitored_resource(resource_descs,
d.monitoredResource,
labels_dict,
is_supported):
continue # skip bad monitored resources
for log in d.logs:
if _add_labels_for_a_log(log_descs, log, labels_dict, is_supported):
all_logs.add(log) # only add correctly configured logs
return all_logs
def _add_monitoring_destinations(destinations,
resource_descs,
metric_descs,
metrics_dict,
metric_is_supported,
labels_dict,
label_is_supported):
# pylint: disable=too-many-arguments
for d in destinations:
if not _add_labels_for_a_monitored_resource(resource_descs,
d.monitoredResource,
labels_dict,
label_is_supported):
continue # skip bad monitored resources
for metric_name in d.metrics:
metric_desc = _find_metric_descriptor(metric_descs, metric_name,
metric_is_supported)
if not metric_desc:
continue # skip unrecognized or unsupported metric
if not _add_labels_from_descriptors(metric_desc.labels, labels_dict,
label_is_supported):
continue # skip metrics with bad labels
metrics_dict[metric_name] = metric_desc
def _add_labels_from_descriptors(descs, labels_dict, is_supported):
# only add labels if there are no conflicts
for desc in descs:
existing = labels_dict.get(desc.key)
if existing and existing.valueType != desc.valueType:
logger.warn('halted label scan: conflicting label in %s', desc.key)
return False
# Update labels_dict
for desc in descs:
if is_supported(desc):
labels_dict[desc.key] = desc
return True
def _add_labels_for_a_log(logging_descs, log_name, labels_dict, is_supported):
for d in logging_descs:
if d.name == log_name:
_add_labels_from_descriptors(d.labels, labels_dict, is_supported)
return True
logger.warn('bad log label scan: log not found %s', log_name)
    return False
import sys, pygame, config, time
import sprites, audio
from heart import Heart
#The player class
class Player():
    def __init__(self): # Initialization of content
self.load_content()
    def load_content(self) -> None: # Loads once
        self.setPlayerPos(100, config.SCREEN_HEIGHT-80) # player position
        self.crouchReduce = 0 # default value to subtract from the player's y position when crouching
        self.gameoverX = 700 # y-axis position of the game-over sprite
        self.instructionsX = 1000 # y-axis position of the game-over instructions
self.speed = 10 # player speed
self.isJump = False # if player is jumping
self.moving = False # if player is moving
self.crouch = False # if player is crouching
self.dead = False # if player is dead
self.health = 5 # player health
self.maxHealth = self.health # player max health = to health
print("your health =",self.health) # print start health
self.oncollision_has_been_called = False # collision with enemy is false
#---------------Animation---------------#
self.anim(0,0,5) #sets the start frame and timer to 0, and animationFPS to default 5
self.player = [sprites.anim_player0, sprites.anim_player1, sprites.anim_player2,
sprites.anim_player3, sprites.anim_player4, sprites.anim_player5,
sprites.anim_player6, sprites.anim_player7, sprites.anim_player8,
sprites.anim_player9, sprites.anim_player10, sprites.anim_player11,
sprites.anim_player12] # all player sprites put into a list
self.heart = sprites.img_heart # load the heart sprite
self.dead_heart = sprites.img_dead_heart # load the dead heart sprite
def update(self, delta_time, zombies, demons, hearts, lightnings, hellhounds) -> None: # Updates every frame
self.handleInput() # calls the handleInput function
self.playerGravity() # calls the playerGravity function
self.constrainPlayer() # calls the constrainPlayer function
self.animUpdate(delta_time) # calls the animUpdate function
self.detectionZombie(zombies) # calls the detectionZombie function
self.detectionDemon(demons) # calls the detectionDemon function
self.detectionHeart(hearts) # calls the detectionHeart function
self.detectionLightning(lightnings) # calls the detectionLightning function
self.detectionHellhound(hellhounds) # calls the detectionHellhound function
#change speed of animation when moving and when not moving
        if self.moving:
            #animation speed while the player is walking
            self.animationFPS = 12
        elif not self.moving and not self.dead:
            #slower animation while the player is idle and alive
            self.animationFPS = 5
        else:
            #faster animation once the player dies
            self.animationFPS = 16
if self.dead: # if player is dead
self.moving = False # moving is false
#the gameover sprites move up on the screen
if self.gameoverX >= 100:
self.gameoverX -= 5
#the gameover instructions move up on the screen
if self.instructionsX >= 400:
self.instructionsX -=5
            #once the death animation reaches its last frame (12)
            if self.currentFrame == 12:
                #drift the player left to create an illusion
                self.x -= 2
    def draw(self, screen) -> None: # Draws every frame
#draws the player
config.screen.blit(self.player[self.currentFrame], (self.x, self.y))
self.heartsSprite() # calls the heartsSprite function
self.gameOver()# calls the gameOver function
def gameOver(self): # the gameover function
if self.dead: # if player is dead
            # display these sprites below
config.screen.blit(sprites.img_gameover, ((config.SCREEN_WIDTH-sprites.img_gameover.get_width())*0.5, self.gameoverX))
config.screen.blit(sprites.img_instructions, ((config.SCREEN_WIDTH-sprites.img_instructions.get_width())*0.5, self.instructionsX))
    def heartsSprite(self): # the heart sprite function
        self.dhx = 30 # the first x value of the first heart
        for i in range(self.maxHealth): # draw one dead heart per point of max health so the row does not shrink when health is lost
self.dhx += 45 # the distance between each heart
config.screen.blit(self.dead_heart, (self.dhx,30)) # displays the dead hearts by amount of health
self.hx = 30 # the first x value of the first heart
        for i in range(self.health): # loops once per point of current health
self.hx += 45 # the distance between each heart
config.screen.blit(self.heart, (self.hx,30)) # displays the hearts over the dead hearts by amount of health
    def detectionLightning(self, lightnings): # the lightning detection function
self.hitboxAdjustment = 20 # int value to create smaller hit boxes
self.hitboxAdjustment1 = 200 # int value to create smaller hit boxes
#player rectangle
playerrectangle = (self.x+15, self.y+self.crouchReduce, sprites.anim_player0.get_width()-self.hitboxAdjustment-10, sprites.anim_player0.get_height()-self.hitboxAdjustment)
        #check every lightning in lightnings
        for lightning in lightnings:
            #lightning rectangle
            lightningrectangle = (lightning.x+100, lightning.y, sprites.anim_lightning2.get_width()-self.hitboxAdjustment1, sprites.anim_lightning2.get_height())
            pygame.draw.rect(config.screen,(0,255,255), lightningrectangle) # draws the lightning hitbox
#checks if player and lightning is colliding
if (playerrectangle[0] < lightningrectangle[0] + lightningrectangle[2] and
playerrectangle[0] + playerrectangle[2] > lightningrectangle[0] and
playerrectangle[1] < lightningrectangle[1] + lightningrectangle[3] and
playerrectangle[1] + playerrectangle[3] > lightningrectangle[1]):
if not lightning.collidedLastFrame: # if lightning collidedLastFrame boolean is false
self.oncollision() # call the oncollision function
lightning.collidedLastFrame = True # set the collidedLastFrame boolean to true
else: #otherwise if player did not collide
lightning.collidedLastFrame = False # set the collidedLastFrame boolean to false
def detectionHellhound(self, hellhounds): #the hellhound detection function
self.hitboxAdjustment = 20 # int value to create smaller hit boxes
#player rectangle
playerrectangle = (self.x+15, self.y+self.crouchReduce, sprites.anim_player0.get_width()-self.hitboxAdjustment-10, sprites.anim_player0.get_height()-self.hitboxAdjustment)
#check every hellhound in hellhounds
for hellhound in hellhounds:
            #hellhound rectangle
hellhoundrectangle = (hellhound.x, hellhound.y, sprites.anim_hellhound0.get_width()-self.hitboxAdjustment, sprites.anim_hellhound0.get_height()-self.hitboxAdjustment)
pygame.draw.rect(config.screen,(0,0,255), hellhoundrectangle) # draws the hellhound hitbox
            #checks if the player and a hellhound are colliding
if (playerrectangle[0] < hellhoundrectangle[0] + hellhoundrectangle[2] and
playerrectangle[0] + playerrectangle[2] > hellhoundrectangle[0] and
playerrectangle[1] < hellhoundrectangle[1] + hellhoundrectangle[3] and
playerrectangle[1] + playerrectangle[3] > hellhoundrectangle[1]):
if not hellhound.collidedLastFrame: # if hellhound collidedLastFrame boolean is false
self.oncollision() # call the oncollision function
hellhound.collidedLastFrame = True # set the collidedLastFrame boolean to true
else: #otherwise if player did not collide
hellhound.collidedLastFrame = False # set the collidedLastFrame boolean to false
def detectionZombie(self, zombies): #the zombie detection function
self.hitboxAdjustment = 20 # int value to create smaller hit boxes
#player rectangle
playerrectangle = (self.x+15, self.y+self.crouchReduce, sprites.anim_player0.get_width()-self.hitboxAdjustment-10, sprites.anim_player0.get_height()-self.hitboxAdjustment)
#check every zombie in zombies
for zombie in zombies:
#zombie rectangle
zombierectangle = (zombie.x, zombie.y, sprites.anim_zombie0.get_width()-self.hitboxAdjustment, sprites.anim_zombie0.get_height()-self.hitboxAdjustment)
pygame.draw.rect(config.screen,(0,0,255), zombierectangle) # draws the zombie hitbox
            #checks if the player and a zombie are colliding
if (playerrectangle[0] < zombierectangle[0] + zombierectangle[2] and
playerrectangle[0] + playerrectangle[2] > zombierectangle[0] and
playerrectangle[1] < zombierectangle[1] + zombierectangle[3] and
playerrectangle[1] + playerrectangle[3] > zombierectangle[1]):
if not zombie.collidedLastFrame: # if zombie collidedLastFrame boolean is false
self.oncollision() # call the oncollision function
zombie.collidedLastFrame = True # set the collidedLastFrame boolean to true
else: #otherwise if player did not collide
zombie.collidedLastFrame = False # set the collidedLastFrame boolean to false
def detectionDemon(self, demons): #the demon detection function
self.hitboxAdjustment = 20 # int value to create smaller hit boxes
#player rectangle
playerrectangle = (self.x+15, self.y+self.crouchReduce, sprites.anim_player0.get_width()-self.hitboxAdjustment-10, sprites.anim_player0.get_height()-self.hitboxAdjustment)
# draw player hitbox
pygame.draw.rect(config.screen,(0,255,0), playerrectangle)
#check every demon in demons
for demon in demons:
#demon rectangle
demonrectangle = (demon.x, demon.y, sprites.anim_demon0.get_width()-self.hitboxAdjustment-40, sprites.anim_demon0.get_height()-self.hitboxAdjustment)
# draw hitbox
pygame.draw.rect(config.screen,(255,0,0), demonrectangle)
            #checks if the player and a demon are colliding
if (playerrectangle[0] < demonrectangle[0] + demonrectangle[2] and
playerrectangle[0] + playerrectangle[2] > demonrectangle[0] and
playerrectangle[1] < demonrectangle[1] + demonrectangle[3] and
playerrectangle[1] + playerrectangle[3] > demonrectangle[1]):
if not demon.collidedLastFrame: # if demon collidedLastFrame boolean is false
self.oncollision() # call the oncollision function
demon.collidedLastFrame = True # set the collidedLastFrame boolean to true
else: #otherwise if player did not collide
demon.collidedLastFrame = False # set the collidedLastFrame boolean to false
def detectionHeart(self, hearts): # the heart detection function
self.hitboxAdjustment = 20 # int value to create smaller hit boxes
#player rectangle
playerrectangle = (self.x+15, self.y+self.crouchReduce, sprites.anim_player0.get_width()-self.hitboxAdjustment-10, sprites.anim_player0.get_height()-self.hitboxAdjustment)
#check every heart in hearts
for heart in hearts:
#heart rectangle
heartrectangle = (heart.x, heart.y, sprites.anim_heart0.get_width()-self.hitboxAdjustment, sprites.anim_heart0.get_height()-self.hitboxAdjustment)
pygame.draw.rect(config.screen,(0,0,255), heartrectangle) # draw heart hitbox
if (playerrectangle[0] < heartrectangle[0] + heartrectangle[2] and
playerrectangle[0] + playerrectangle[2] > heartrectangle[0] and
playerrectangle[1] < heartrectangle[1] + heartrectangle[3] and
playerrectangle[1] + playerrectangle[3] > heartrectangle[1]):
if not heart.collidedLastFrame: # if heart collidedLastFrame boolean is false
self.oncollisionHeart() # call the oncollisionHeart function
heart.collidedLastFrame = True # set the collidedLastFrame boolean to true
heart.destroy = True # destroy heart if player collided
else: #otherwise if player did not collide
heart.collidedLastFrame = False # set the collidedLastFrame boolean to false
def oncollision(self): # if player collided function
print("Collision!!")
self.oncollision_has_been_called = True
if self.health > 0: # if health is greater than 0 it can decrease health by 1
self.health -= 1
self.showBlooad = True # show blood
print("your health =",self.health)
if config.sound_effect_on: # if sound is on in the settings
audio.play_effect(audio.effect_hurt) #play sound
if self.health <= 0: # if health is less than or = 0 then player is dead
print("you are dead")
self.dead = True
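    # Illustrative sketch (assumed helper, not in the original file): the five
    # detection* methods above repeat the same axis-aligned overlap test; a
    # shared helper built on pygame.Rect.colliderect would remove the
    # duplication. Note that pygame.Rect truncates float coordinates to ints.
    def rectsOverlap(self, rect_a, rect_b):
        # rect_a and rect_b are (x, y, width, height) tuples like the ones built above
        return pygame.Rect(rect_a).colliderect(pygame.Rect(rect_b))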
    def oncollisionHeart(self): # the heart collision function
# Copyright Contributors to the OpenCue Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for CueGUI tree widgets.
Provides extended QTreeWidget functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from builtins import map
from builtins import range
import time
from PySide2 import QtCore
from PySide2 import QtGui
from PySide2 import QtWidgets
import cuegui.AbstractWidgetItem
import cuegui.Constants
import cuegui.ItemDelegate
import cuegui.Logger
import cuegui.Utils
logger = cuegui.Logger.getLogger(__file__)
COLUMN_NAME = 0
COLUMN_WIDTH = 1
COLUMN_FUNCTION = 2
COLUMN_SORTBY = 3
COLUMN_DELEGATE = 4
COLUMN_TOOLTIP = 5
COLUMN_INFO_LENGTH = 6
DEFAULT_LAMBDA = lambda s: ""
DEFAULT_NAME = ""
DEFAULT_WIDTH = 0
class AbstractTreeWidget(QtWidgets.QTreeWidget):
"""Base class for CueGUI tree widgets.
Provides extended QTreeWidget functionality."""
itemDoubleClicked = QtCore.Signal(QtWidgets.QTreeWidgetItem, int)
itemSingleClicked = QtCore.Signal(QtWidgets.QTreeWidgetItem, int)
updated = QtCore.Signal()
def __init__(self, parent):
"""Standard method to display a list or tree using QTreeWidget
columnInfoByType is a dictionary of lists keyed to opencue.Constants.TYPE_*
Each value is a list of lists that each define a column.
[<column name>, <width>, <lambda function>, <function name for sorting>,
<column delegate class>]
Only supported on the primary column:
<column name>, <column delegate class>, <width>
@type parent: QWidget
@param parent: The widget to set as the parent"""
QtWidgets.QTreeWidget.__init__(self, parent)
self._items = {}
self._lastUpdate = 0
self._itemsLock = QtCore.QReadWriteLock()
self._timer = QtCore.QTimer(self)
self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.setUniformRowHeights(True)
self.setAutoScroll(False)
self.setFocusPolicy(QtCore.Qt.NoFocus)
self.setAlternatingRowColors(True)
self.setSortingEnabled(True)
self.header().setSectionsMovable(True)
self.header().setStretchLastSection(True)
self.sortByColumn(0, QtCore.Qt.AscendingOrder)
self.setItemDelegate(cuegui.ItemDelegate.ItemDelegate(self))
self.__setupColumns()
self.__setupColumnMenu()
# pylint: disable=no-member
self.itemClicked.connect(self.__itemSingleClickedEmitToApp)
self.itemDoubleClicked.connect(self.__itemDoubleClickedEmitToApp)
self._timer.timeout.connect(self.updateRequest)
QtGui.qApp.request_update.connect(self.updateRequest)
# pylint: enable=no-member
self.updateRequest()
self.setUpdateInterval(10)
def closeEvent(self, event):
if hasattr(self, '_timer'):
self._timer.stop()
del self._timer
if hasattr(self, '__ticksTimer'):
self.__ticksTimer.stop()
del self.__ticksTimer
event.accept()
# pylint: disable=attribute-defined-outside-init
def startColumnsForType(self, itemType):
"""Start column definitions for the given item type. The first call to
this defines the primary column type used to populate the column headers.
@type itemType: int
@param itemType: The id for the item type"""
# First call defines the primary type
if not hasattr(self, "columnPrimaryType"):
self.columnPrimaryType = itemType
self.__columnPrimaryType = itemType
self.__columnInfoByType = {}
self.__columnInfoByType[itemType] = []
self.__columnCurrent = itemType
# pylint: disable=redefined-builtin
def addColumn(self, name, width, id=0, default=True,
data=DEFAULT_LAMBDA, sort=None,
delegate=None, tip=""):
"""Define a new column for the current item type.
@type name: str
@param name: The name of the column.
@type width: int
@param width: The width of the column.
@type id: int
@param id: A unique id
@type default: bool
@param default: Will determine if the column is displayed by default.
@type data: callable
@param data: The callable to use when displaying.
@type sort: callable
@param sort: The callable to use when sorting.
@type delegate: delegate
@param delegate: The delegate that should draw the cells.
@type tip: str
@param tip: A tooltip for the column."""
assert isinstance(name, str), "Column name must be string"
assert isinstance(width, int), "Column width must be int"
assert hasattr(data, '__call__'), "Column data function must be callable"
assert isinstance(tip, str), "Column tooltip must be string"
columnsInfo = self.__columnInfoByType[self.__columnCurrent]
columnsInfo.append([name, width, data, sort, delegate, tip, default, id])
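    # Illustrative sketch (hypothetical subclass, not in this module): columns
    # are declared before the parent constructor runs __setupColumns(), e.g.
    #
    #   class JobTreeWidget(AbstractTreeWidget):
    #       def __init__(self, parent):
    #           self.startColumnsForType(cuegui.Constants.TYPE_JOB)  # TYPE_JOB assumed
    #           self.addColumn("Name", 300, id=1,
    #                          data=lambda job: job.data.name,
    #                          tip="The name of the job")
    #           AbstractTreeWidget.__init__(self, parent)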
def __setupColumns(self):
"""Setup the QTreeWidget based on the column information"""
primaryColumnInfo = self.__columnInfoByType[self.__columnPrimaryType]
self.setColumnCount(len(primaryColumnInfo))
columnNames = []
for col, columnInfo in enumerate(primaryColumnInfo):
# Set up column widths
self.setColumnWidth(col, primaryColumnInfo[col][COLUMN_WIDTH])
# Setup the column tooltips
if columnInfo[COLUMN_TOOLTIP]:
self.model().setHeaderData(col, QtCore.Qt.Horizontal,
columnInfo[COLUMN_TOOLTIP],
QtCore.Qt.ToolTipRole)
# Setup column delegates
if primaryColumnInfo[col][COLUMN_DELEGATE]:
self.setItemDelegateForColumn(col, primaryColumnInfo[col][COLUMN_DELEGATE](self))
# Setup column name list
if columnInfo[COLUMN_NAME].startswith("_"):
columnNames.append("")
else:
columnNames.append(columnInfo[COLUMN_NAME])
self.setHeaderLabels(columnNames)
def startTicksUpdate(self, updateInterval,
updateWhenMinimized=False,
maxUpdateInterval=None):
"""A method of updating the display on a one second timer to avoid
multiple update requests and reduce excess cuebot calls.
You will need to implement self.tick, You do not need to provide
locking or unhandled error logging.
You will need to implement tick.
self.ticksWithoutUpdate = number of seconds since the last update.
self.ticksLock = QMutex"""
self.updateInterval = updateInterval
self.__updateWhenMinimized = updateWhenMinimized
self.__maxUpdateInterval = maxUpdateInterval
# Stop the default update method
if hasattr(self, "_timer"):
self._timer.stop()
self.ticksLock = QtCore.QMutex()
self.__ticksTimer = QtCore.QTimer(self)
self.__ticksTimer.timeout.connect(self.__tick) # pylint: disable=no-member
self.__ticksTimer.start(1000)
self.ticksWithoutUpdate = 999
def tickNeedsUpdate(self):
"""Gets whether enough time has passed for contents to need an update."""
if self.ticksWithoutUpdate >= self.updateInterval:
if self.window().isMinimized():
if self.__maxUpdateInterval is not None and \
self.ticksWithoutUpdate >= self.__maxUpdateInterval:
# Sufficient maximum interval
return True
if not self.__updateWhenMinimized:
# Sufficient interval, except minimized
return False
# Sufficient interval, set to update when minimized
return True
# Sufficient interval, not minimized
return True
# Insufficient interval
return False
def __tick(self):
"""Provides locking and logging for the implementation of the tick
function"""
if not self.ticksLock.tryLock():
return
try:
# pylint: disable=broad-except
try:
self.tick()
except Exception as e:
list(map(logger.warning, cuegui.Utils.exceptionOutput(e)))
finally:
self.ticksLock.unlock()
def tick(self):
"""Determines whether an update is needed and initiates updating logic.
Must be defined by inheriting classes."""
raise NotImplementedError
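    # Illustrative sketch (hypothetical override, not in this module): a
    # typical tick() counts seconds and refreshes once tickNeedsUpdate()
    # agrees, e.g.
    #
    #   def tick(self):
    #       if self.tickNeedsUpdate():
    #           self.ticksWithoutUpdate = 0
    #           self._update()
    #       else:
    #           self.ticksWithoutUpdate += 1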
    def getColumnInfo(self, columnType=None):
"""Returns the list that defines the column.
@type columnType: Constants.TYPE_*
@param columnType: If given, the column information for that type will
be returned. Otherwise the primary column information
will be returned
@rtype: list
@return: The list that defines the column,
(see AbstractTreeWidget.__init__() documentation)"""
if columnType:
return self.__columnInfoByType[columnType]
return self.__columnInfoByType[self.__columnPrimaryType]
@staticmethod
def __itemSingleClickedEmitToApp(item, col):
"""When an item is single clicked on:
emits "single_click(PyQt_PyObject)" to the app
@type item: QTreeWidgetItem
@param item: The item single clicked on
@type col: int
@param col: Column number single clicked on"""
del col
# pylint: disable=no-member
QtGui.qApp.single_click.emit(item.rpcObject)
# pylint: enable=no-member
@staticmethod
def __itemDoubleClickedEmitToApp(item, col):
"""Handles when an item is double clicked on.
emits "double_click(PyQt_PyObject)" to the app
emits "view_object(PyQt_PyObject)" to the app
@type item: QTreeWidgetItem
@param item: The item double clicked on
@type col: int
@param col: Column number double clicked on"""
del col
# pylint: disable=no-member
QtGui.qApp.view_object.emit(item.rpcObject)
QtGui.qApp.double_click.emit(item.rpcObject)
# pylint: enable=no-member
def addObject(self, rpcObject):
"""Adds or updates an rpcObject in the list using the _createItem function
and object.proxy as the key. Used when user is adding an item but will
not want to wait for an update.
        @type rpcObject: opencue object
        @param rpcObject: Object that provides .proxy"""
self._itemsLock.lockForWrite()
try:
# If id already exists, update it
objectKey = cuegui.Utils.getObjectKey(rpcObject)
if objectKey in self._items:
self._items[objectKey].update(rpcObject)
# If id does not exist, create it
else:
self._items[objectKey] = self._createItem(rpcObject)
finally:
self._itemsLock.unlock()
def removeItem(self, item):
"""Removes an item from the TreeWidget
@param item: A tree widget item
@type item: AbstractTreeWidgetItem"""
self._itemsLock.lockForWrite()
try:
self._removeItem(item)
finally:
self._itemsLock.unlock()
def _removeItem(self, item):
"""Removes an item from the TreeWidget without locking
@type item: AbstractTreeWidgetItem or String
@param item: A tree widget item or the string with the id of the item"""
if item in self._items:
item = self._items[item]
elif not isinstance(item, cuegui.AbstractWidgetItem.AbstractWidgetItem):
# if the parent was already deleted, then this one was too
return
# If it has children, they must be deleted first
if item.childCount() > 0:
for child in item.takeChildren():
self._removeItem(child)
if item.isSelected():
item.setSelected(False)
if item.parent():
self.invisibleRootItem().removeChild(item)
self.takeTopLevelItem(self.indexOfTopLevelItem(item))
objectClass = item.rpcObject.__class__.__name__
objectId = item.rpcObject.id()
del self._items['{}.{}'.format(objectClass, objectId)]
def removeAllItems(self):
"""Removes all items from the tree."""
self._itemsLock.lockForWrite()
try:
self._items = {}
self.clear()
finally:
self._itemsLock.unlock()
def selectedObjects(self):
"""Provides a list of all objects from selected items
@return: A list of objects from selected items
@rtype: list<object>"""
return [item.rpcObject for item in self.selectedItems()]
def setUpdateInterval(self, seconds):
"""Changes the update interval
@param seconds: Update interval in seconds
@type seconds: int"""
self._timer.start(seconds * 1000)
def updateRequest(self):
"""Updates the items in the TreeWidget if sufficient time has passed
since last updated and the user has not scrolled recently if
self._limitUpdatesDuringScrollSetup() was called in the TreeWidget
object init"""
if time.time() - self._lastUpdate > cuegui.Constants.MINIMUM_UPDATE_INTERVAL:
if self.__limitUpdatesDuringScrollAllowUpdate():
self._update()
def _update(self):
"""Updates the items in the TreeWidget without checking when it was last
updated"""
self._lastUpdate = time.time()
if hasattr(QtGui.qApp, "threadpool"):
# pylint: disable=no-member
QtGui.qApp.threadpool.queue(
self._getUpdate, self._processUpdate, "getting data for %s" % self.__class__)
# pylint: enable=no-member
else:
logger.warning("threadpool not found, doing work in gui thread")
self._processUpdate(None, self._getUpdate())
def _processUpdate(self, work, rpcObjects):
import os
import itertools
from datetime import datetime
from collections import ChainMap
from math import isnan
import glob
import json
import locale
from shutil import copyfile
import pandas as pd
from pandas.api.types import is_numeric_dtype
from cowidev.utils import paths
from cowidev.utils.utils import pd_series_diff_values
from cowidev.utils.clean import clean_date
from cowidev.vax.cmd.utils import get_logger
from cowidev.vax.utils.checks import VACCINES_ACCEPTED
logger = get_logger()
class Bucket(object):
def __init__(self, **kwargs):
self._dict = kwargs
self.__dict__.update(kwargs)
class DatasetGenerator:
def __init__(self, inputs, outputs):
# Inputs
self.inputs = inputs
# Outputs
self.outputs = outputs
# Others
self.aggregates = self.build_aggregates()
self._countries_covered = None
@property
def column_names_int(self):
return [
"total_vaccinations",
"people_vaccinated",
"people_fully_vaccinated",
"total_boosters",
"daily_vaccinations_raw",
"daily_vaccinations",
"daily_vaccinations_per_million",
"new_vaccinations_smoothed",
"new_vaccinations_smoothed_per_million",
"new_vaccinations",
"new_people_vaccinated_smoothed",
]
def build_aggregates(self):
continent_countries = pd.read_csv(self.inputs.continent_countries, usecols=["Entity", "Unnamed: 3"])
eu_countries = pd.read_csv(self.inputs.eu_countries, usecols=["Country"], squeeze=True).tolist()
income_groups = pd.concat(
[
pd.read_csv(self.inputs.income_groups, usecols=["Country", "Income group"]),
pd.read_csv(self.inputs.income_groups_compl, usecols=["Country", "Income group"]),
],
ignore_index=True,
)
aggregates = {
"World": {
"excluded_locs": ["England", "Northern Ireland", "Scotland", "Wales"],
"included_locs": None,
},
"European Union": {
"excluded_locs": None,
"included_locs": eu_countries,
},
"World excl. China": {
"excluded_locs": ["China"],
"included_locs": None,
},
}
for continent in [
"Asia",
"Africa",
"Europe",
"North America",
"Oceania",
"South America",
]:
aggregates[continent] = {
"excluded_locs": None,
"included_locs": (
continent_countries.loc[continent_countries["Unnamed: 3"] == continent, "Entity"].tolist()
),
}
for group in income_groups["Income group"].unique():
aggregates[group] = {
"excluded_locs": None,
"included_locs": (income_groups.loc[income_groups["Income group"] == group, "Country"].tolist()),
}
return aggregates
def pipeline_automated(self, df: pd.DataFrame) -> pd.DataFrame:
"""Generate DataFrame for automated states."""
return df.sort_values(by=["automated", "location"], ascending=[False, True])[
["location", "automated"]
].reset_index(drop=True)
def pipeline_locations(
self, df_vax: pd.DataFrame, df_metadata: pd.DataFrame, df_iso: pd.DataFrame
) -> pd.DataFrame:
"""Generate DataFrame for locations."""
def _pretty_vaccine(vaccines):
return ", ".join(sorted(v.strip() for v in vaccines.split(",")))
df_vax = (
df_vax.sort_values(by=["location", "date"])
.drop_duplicates(subset=["location"], keep="last")
.rename(
columns={
"date": "last_observation_date",
"source_url": "source_website",
}
)
)
if len(df_metadata) != len(df_vax):
loc_miss = pd_series_diff_values(df_metadata.location, df_vax.location)
a = df_metadata[df_metadata.location.isin(loc_miss)]
b = df_vax[df_vax.location.isin(loc_miss)]
print("metadata\n", a)
print("data\n", b)
raise ValueError(f"Missmatch between vaccination data and metadata! Unknown location {loc_miss}.")
return (
df_vax.assign(vaccines=df_vax.vaccine.apply(_pretty_vaccine)) # Keep only last vaccine set
.merge(df_metadata, on="location")
.merge(df_iso, on="location")
)[
[
"location",
"iso_code",
"vaccines",
"last_observation_date",
"source_name",
"source_website",
]
]
def _get_aggregate(self, df, agg_name, included_locs, excluded_locs):
# Take rows that matter
agg = df[~df.location.isin(self.aggregates.keys())] # remove aggregated rows
if excluded_locs is not None:
agg = agg[~agg.location.isin(excluded_locs)]
elif included_locs is not None:
agg = agg[agg.location.isin(included_locs)]
# Get full location-date grid
agg = (
pd.DataFrame(
itertools.product(agg.location.unique(), agg.date.unique()),
columns=[agg.location.name, agg.date.name],
)
.merge(agg, on=["date", "location"], how="outer")
.sort_values(by=["location", "date"])
)
        # NaN handling: forward-fill; zero-fill when a metric is entirely NaN
cols = [
"total_vaccinations",
"people_vaccinated",
"people_fully_vaccinated",
"total_boosters",
]
grouper = agg.groupby("location")
for col in cols:
agg[col] = grouper[col].apply(lambda x: x.fillna(0) if x.isnull().all() else x.fillna(method="ffill"))
# Aggregate
agg = agg.groupby("date").sum().reset_index().assign(location=agg_name)
agg = agg[agg.date.dt.date < datetime.now().date()]
return agg
def pipe_aggregates(self, df: pd.DataFrame) -> pd.DataFrame:
logger.info(f"Building aggregate regions {list(self.aggregates.keys())}")
aggs = []
for agg_name, _ in self.aggregates.items():
aggs.append(
self._get_aggregate(
df=df,
agg_name=agg_name,
included_locs=self.aggregates[agg_name]["included_locs"],
excluded_locs=self.aggregates[agg_name]["excluded_locs"],
)
)
return pd.concat([df] + aggs, ignore_index=True)
def pipe_daily(self, df: pd.DataFrame) -> pd.DataFrame:
logger.info("Adding daily metrics")
df = df.sort_values(by=["location", "date"])
df = df.assign(new_vaccinations=df.groupby("location").total_vaccinations.diff())
df.loc[df.date.diff().dt.days > 1, "new_vaccinations"] = None
df = df.sort_values(["location", "date"])
return df
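    # Worked example (illustrative values): for one location with cumulative
    # totals 10 and 30 on consecutive days, new_vaccinations is 20. When the
    # next observation skips a day (date gap > 1), its diff is masked to None
    # instead of being attributed to a single day.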
def _add_smoothed(self, df: pd.DataFrame) -> pd.DataFrame:
# NEW VACCINATIONS
# Range where total_vaccinations is registered
dt_min = df.dropna(subset=["total_vaccinations"]).date.min()
dt_max = df.dropna(subset=["total_vaccinations"]).date.max()
df_nan = df[(df.date < dt_min) | (df.date > dt_max)]
# Add missing dates
df = df.merge(
pd.Series(pd.date_range(dt_min, dt_max), name="date"),
how="right",
).sort_values(by="date")
# Calculate and add smoothed vars
df["new_vaccinations_smoothed"] = (
df.total_vaccinations.interpolate(method="linear")
.diff()
.rolling(7, min_periods=1)
.mean()
.apply(lambda x: round(x) if not isnan(x) else x)
)
# Add missing dates
df = pd.concat([df, df_nan], ignore_index=True).sort_values("date")
df.loc[:, "location"] = df.location.dropna().iloc[0]
# PEOPLE_VACCINATED
# Range where people_vaccinated is registered
dt_min = df.dropna(subset=["people_vaccinated"]).date.min()
dt_max = df.dropna(subset=["people_vaccinated"]).date.max()
df_nan = df[(df.date < dt_min) | (df.date > dt_max)]
# Add missing dates
df = df.merge(
pd.Series(pd.date_range(dt_min, dt_max), name="date"),
how="right",
).sort_values(by="date")
# Calculate and add smoothed vars
df["new_people_vaccinated_smoothed"] = (
df.people_vaccinated.interpolate(method="linear")
.diff()
.rolling(7, min_periods=1)
.mean()
.apply(lambda x: round(x) if not isnan(x) else x)
)
# Add missing dates
df = pd.concat([df, df_nan], ignore_index=True).sort_values("date")
df.loc[:, "location"] = df.location.dropna().iloc[0]
return df
def pipe_smoothed(self, df: pd.DataFrame) -> pd.DataFrame:
logger.info("Adding smoothed variables")
return df.groupby("location").apply(self._add_smoothed).reset_index(drop=True)
def get_population(self, df_subnational: pd.DataFrame) -> pd.DataFrame:
# Build population dataframe
column_rename = {"entity": "location", "population": "population"}
pop = pd.read_csv(self.inputs.population, usecols=column_rename.keys()).rename(columns=column_rename)
pop = pd.concat([pop, df_subnational], ignore_index=True)
# The US population denominator is more complex to calculate, as the US CDC is pulling
# together data from multiple territories and federal agencies to build national figures.
# To ensure that we use the correct territorial boundaries in the denominator, we use the
# population figure provided by the CDC in its data.
pop.loc[pop.location == "United States", "population"] = 332008832
return pop
def pipe_capita(self, df: pd.DataFrame) -> pd.DataFrame:
logger.info("Adding per-capita variables")
# Get data
df_subnational = pd.read_csv(self.inputs.population_sub, usecols=["location", "population"])
pop = self.get_population(df_subnational)
df = df.merge(pop, on="location", validate="many_to_one", how="left")
if df.population.isna().any():
missing_locs = df[df.population.isna()].location.unique()
raise ValueError(f"Missing population data for {missing_locs}")
# Get covered countries
locations = df.location.unique()
ncountries = df_subnational.location.tolist() + list(self.aggregates.keys())
self._countries_covered = list(filter(lambda x: x not in ncountries, locations))
# Obtain per-capita metrics
df = df.assign(
total_vaccinations_per_hundred=(df.total_vaccinations * 100 / df.population).round(2),
people_vaccinated_per_hundred=(df.people_vaccinated * 100 / df.population).round(2),
people_fully_vaccinated_per_hundred=(df.people_fully_vaccinated * 100 / df.population).round(2),
total_boosters_per_hundred=(df.total_boosters * 100 / df.population).round(2),
new_vaccinations_smoothed_per_million=(df.new_vaccinations_smoothed * 1000000 / df.population).round(),
new_people_vaccinated_smoothed_per_hundred=(df.new_people_vaccinated_smoothed * 100 / df.population).round(
3
),
)
df.loc[:, "people_fully_vaccinated"] = df.people_fully_vaccinated.replace({0: pd.NA})
df.loc[df.people_fully_vaccinated.isnull(), "people_fully_vaccinated_per_hundred"] = pd.NA
df.loc[:, "total_boosters"] = df.total_boosters.replace({0: pd.NA})
df.loc[df.total_boosters.isnull(), "total_boosters_per_hundred"] = pd.NA
return df.drop(columns=["population"])
def pipe_vax_checks(self, df: pd.DataFrame) -> pd.DataFrame:
logger.info("Sanity checks")
# Config
skip_countries = ["Pitcairn"]
# Sanity checks
df_to_check = df[-df.location.isin(skip_countries)]
if not (df_to_check.total_vaccinations.dropna() >= 0).all():
raise ValueError("Negative values found! Check values in `total_vaccinations`.")
if not (df_to_check.new_vaccinations_smoothed.dropna() >= 0).all():
raise ValueError("Negative values found! Check values in `new_vaccinations_smoothed`.")
if not (df_to_check.new_vaccinations_smoothed_per_million.dropna() <= 120000).all():
raise ValueError(" Huge values found! Check values in `new_vaccinations_smoothed_per_million`.")
return df
def pipe_to_int(self, df: pd.DataFrame) -> pd.DataFrame:
logger.info("Converting INT columns to int")
# Ensure Int types
cols = df.columns
count_cols = [col for col in self.column_names_int if col in cols]
df[count_cols] = df[count_cols].astype("Int64").fillna(pd.NA)
return df
def pipeline_vaccinations(self, df: pd.DataFrame) -> pd.DataFrame:
return (
df[
[
"date",
"location",
"total_vaccinations",
"people_vaccinated",
"people_fully_vaccinated",
"total_boosters",
]
]
.pipe(self.pipe_aggregates)
.pipe(self.pipe_daily)
.pipe(self.pipe_smoothed)
.pipe(self.pipe_capita)
.pipe(self.pipe_vax_checks)
.pipe(self.pipe_to_int)
.sort_values(by=["location", "date"])
)
def pipe_vaccinations_csv(self, df: pd.DataFrame, df_iso: pd.DataFrame) -> pd.DataFrame:
return df.merge(df_iso, on="location").rename(
columns={
"new_vaccinations_smoothed": "daily_vaccinations",
"new_vaccinations_smoothed_per_million": "daily_vaccinations_per_million",
"new_vaccinations": "daily_vaccinations_raw",
"new_people_vaccinated_smoothed": "daily_people_vaccinated",
"new_people_vaccinated_smoothed_per_hundred": "daily_people_vaccinated_per_hundred",
}
)[
[
"location",
"iso_code",
"date",
"total_vaccinations",
"people_vaccinated",
"people_fully_vaccinated",
"total_boosters",
"daily_vaccinations_raw",
"daily_vaccinations",
"total_vaccinations_per_hundred",
"people_vaccinated_per_hundred",
"people_fully_vaccinated_per_hundred",
"total_boosters_per_hundred",
"daily_vaccinations_per_million",
"daily_people_vaccinated",
"daily_people_vaccinated_per_hundred",
]
]
def pipe_vaccinations_json(self, df: pd.DataFrame) -> list:
location_iso_codes = df[["location", "iso_code"]].drop_duplicates().values.tolist()
metrics = [column for column in df.columns if column not in {"location", "iso_code"}]
df = df.assign(date=df.date.apply(clean_date))
return [
{
"country": location,
"iso_code": iso_code,
"data": [
{**x[i]}
for i, x in df.loc[(df.location == location) & (df.iso_code == iso_code), metrics]
.stack()
.groupby(level=0)
],
}
for location, iso_code in location_iso_codes
]
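    # Note on the list comprehension above (illustrative): stacking the metric
    # columns yields a Series indexed by (row, metric); grouping on level 0
    # gives one Series per row, and dict-unpacking it ({**x[i]}) turns each
    # row into a {metric: value} record, with NaN metrics dropped by stack().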
def pipe_manufacturer_select_cols(self, df: pd.DataFrame) -> pd.DataFrame:
return df[
[
"location",
"date",
"vaccine",
"total_vaccinations",
]
].sort_values(["location", "date", "vaccine"])
def pipe_manufacturer_add_eu(self, df: pd.DataFrame) -> pd.DataFrame:
eu_countries = pd.read_csv(self.inputs.eu_countries, usecols=["Country"], squeeze=True).tolist()
eu_manufacturer = (
df[df.location.isin(eu_countries)]
.pivot(index=["location", "vaccine"], columns="date", values="total_vaccinations")
.reset_index()
.melt(id_vars=["location", "vaccine"], var_name="date", value_name="total_vaccinations")
.sort_values("date")
)
eu_manufacturer["total_vaccinations"] = (
eu_manufacturer.groupby(["location", "vaccine"]).ffill().total_vaccinations
)
eu_manufacturer = (
eu_manufacturer[eu_manufacturer.date.astype(str) >= "2020-12-27"]
.groupby(["date", "vaccine"], as_index=False)
.sum()
.assign(location="European Union")
)
return pd.concat([df, eu_manufacturer])
def pipe_manufacturer_filter_dates(self, df: pd.DataFrame) -> pd.DataFrame:
return df[df.date.astype(str) >= "2020-12-01"]
def pipe_manufacturer_checks(self, df: pd.DataFrame) -> pd.DataFrame:
vaccines_wrong = set(df.vaccine).difference(VACCINES_ACCEPTED)
if vaccines_wrong:
raise ValueError(f"Invalid vaccines found in manufacturer file! {vaccines_wrong}")
return df
def pipeline_manufacturer(self, df: pd.DataFrame) -> pd.DataFrame:
return (
df.pipe(self.pipe_manufacturer_select_cols)
.pipe(self.pipe_manufacturer_add_eu)
.pipe(self.pipe_manufacturer_filter_dates)
.pipe(self.pipe_manufacturer_checks)
.pipe(self.pipe_to_int)
)
def pipe_age_checks(self, df: pd.DataFrame) -> pd.DataFrame:
if df[["location", "date", "age_group_min"]].isnull().sum().sum() != 0:
raise ValueError(
"Unexpected NaN values found in one (or several) fields from `location`, `date`, `age_group_min`"
)
if not (
is_numeric_dtype(df.people_vaccinated_per_hundred)
and is_numeric_dtype(df.people_fully_vaccinated_per_hundred)
and is_numeric_dtype(df.people_with_booster_per_hundred)
):
raise TypeError("Metrics should be numeric! E.g., 50.23")
return df
def pipe_metrics_format(self, df: pd.DataFrame) -> pd.DataFrame:
cols_metrics = [
"people_vaccinated_per_hundred",
"people_fully_vaccinated_per_hundred",
"people_with_booster_per_hundred",
]
df[cols_metrics] = df[cols_metrics].round(2)
return df
def pipe_age_group(self, df: pd.DataFrame) -> pd.DataFrame:
# Get age group
age_min = df.age_group_min.astype(str)
age_max = df.age_group_max.astype("Int64").apply(lambda x: str(x) if not pd.isna(x) else "+")
age_group = (age_min + "-" + age_max).replace(to_replace=r"-\+", value="+", regex=True)
return df.assign(age_group=age_group)
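    # Worked example (illustrative values): age_group_min=18, age_group_max=24
    # gives "18-24"; age_group_min=80 with a missing age_group_max first becomes
    # "80-+", which the regex replace then rewrites to "80+".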
def pipe_age_output(self, df: pd.DataFrame) -> pd.DataFrame:
return df.dropna(
subset=[
"people_vaccinated_per_hundred",
"people_fully_vaccinated_per_hundred",
"people_with_booster_per_hundred",
],
import numpy as np
def freq_gen(high_freq, low_freq, decades=7):
'''
Function that generates the frequency range used to investigate the
impedance response of an electrical circuit Frequency Generator with
logspaced freqencies
Parameters
----------
high_freq : single value (int or float)
initial frequency value (high frequency domain) [Hz]
    low_freq : single value (int or float)
        final frequency value (low frequency domain) [Hz]
    decades : integer
        number of frequency points per decade used to build the range.
        Default value is set to be 7 [-]
Returns
----------
[0] = frequency range [Hz]
[1] = Angular frequency range [1/s]
'''
f_decades = np.log10(high_freq) - np.log10(low_freq)
    f_range = np.logspace(np.log10(high_freq), np.log10(low_freq),
                          int(np.around(decades*f_decades)), endpoint=True)
w_range = 2 * np.pi * f_range
return f_range, w_range
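# Illustrative usage sketch (not part of the original module): sweep from
# 1 MHz down to 0.1 Hz (seven decades, 7 points per decade by default) and
# simulate a parallel RC response. The component values are arbitrary.
def _example_rc_sweep():
    f_range, w_range = freq_gen(high_freq=1e6, low_freq=0.1)
    return cir_RC_parallel(w_range, resistance=100, capacitance=1e-6)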
def cir_RC_parallel(angular_freq, resistance='none', capacitance='none',
peak_frequency='none'):
'''
Function that simulates the impedance response of a resistor and a
capacitor in a parallel configuration.
String representation for this circuit: -(RC)-
Parameters
----------
angular_freq : array-like
Angular frequency [1/s]
resistance : single value (int or float)
Solution resistance [ohm]
capacitance : single value (int or float)
Electrode capacitance [F]
peak_frequency : single value (int or float)
Peak frequency of RC circuit [Hz]
Returns
---------
Z_complex : array-like
impedance response of the circuit under investigation [ohm]
'''
circuit = '-(RC)-'
if resistance == 'none':
resistance = (1/(capacitance*(2*np.pi*peak_frequency)))
elif capacitance == 'none':
capacitance = (1/(resistance*(2*np.pi*peak_frequency)))
# compute the impedance response as a complex array
Z_complex = (resistance/(1+resistance*capacitance*(angular_freq*1j)))
return Z_complex
def cir_RC_series(angular_freq, resistance='none', capacitance='none',
peak_frequency='none'):
'''
Function that simulates the impedance response of a resistor and a
capacitor in a series configuration.
This circuit configuration is used to simulate the response of an ideally
polarizable electrode, also known as a blocking electrode.
String representation for this circuit: -R-C-
Parameters
----------
angular_freq : array-like
Angular frequency [1/s]
resistance : single value (int or float)
Solution resistance [ohm]
capacitance : single value (int or float)
Capacitance of an electrode surface [F]
peak_frequency : single value (int or float)
Peak frequency of RC circuit [Hz]
Returns
---------
Z_complex : array-like
impedance response of the circuit under investigation [ohm]
'''
circuit = '-R-C-'
    if (resistance, capacitance, peak_frequency) == ('none', 'none', 'none'):
raise AssertionError('No circuit element value was provided. Cannot\
compute the impedance response')
    elif (resistance, capacitance) == ('none', 'none'):
raise AssertionError('Not enough circuit element values were provided.\
Cannot compute the impedance response')
elif resistance == 'none':
resistance = (1/(capacitance*(2*np.pi*peak_frequency)))
elif capacitance == 'none':
capacitance = (1/(resistance*(2*np.pi*peak_frequency)))
# compute the impedance response as a complex array
Z_complex = resistance + 1/(capacitance*(angular_freq*1j))
return Z_complex
def cir_RQ_parallel(angular_freq, resistance='none',
constant_phase_element='none', alpha='none',
peak_frequency='none'):
'''
Function that simulates the impedance response of a resistor and a
constant phase element in a parallel configuration.
String representation for this circuit: -(RQ)-
Parameters
----------
angular_freq : array-like
Angular frequency [1/s]
resistance : single value (int or float)
Solution resistance [Ohm]
constant_phase_element : single value (int or float)
Constant phase angle [s^(alpha-1)/ohm]
alpha : single value -float
Exponent of the constant phase element. Should be a value between
0 and 1 [-]
peak_frequency : single value (int or float)
Peak frequency of RC circuit [Hz]
Returns
---------
Z_complex : array-like
impedance response of the circuit under investigation [Ohm]
'''
circuit = '-(RQ)-'
    if (resistance, constant_phase_element, alpha, peak_frequency) == ('none', 'none', 'none', 'none'):
raise AssertionError('No circuit element value was provided. Cannot\
compute the impedance response')
    elif (resistance, constant_phase_element, alpha) == ('none', 'none', 'none'):
raise AssertionError('Not enough circuit element values were provided.\
Cannot compute the impedance response')
elif resistance == 'none':
resistance = (1/(constant_phase_element*(2*np.pi*peak_frequency
) ** alpha))
elif constant_phase_element == 'none':
constant_phase_element = (1/(resistance*(2*np.pi*peak_frequency
) ** alpha))
elif alpha == 'none':
alpha = np.log(constant_phase_element *
resistance)/np.log(1/(2*np.pi * peak_frequency))
Z_complex = (resistance/(1+resistance*constant_phase_element*(
angular_freq*1j)**alpha))
return Z_complex
def cir_RQ_series(angular_freq, resistance='none',
constant_phase_element='none', alpha='none',
peak_frequency='none'):
'''
Function that simulates the impedance response of a resistor and a
constant phase element in a series configuration.
This circuit configuration is used to simulate the response of a
blocking electrode with distribution of reactivity.
String representation for this circuit: -R-Q-
Parameters
----------
angular_freq : array-like
Angular frequency [1/s]
resistance : single value (int or float)
Solution resistance [ohm]
constant_phase_element : single value (int or float)
Constant phase angle [s^(alpha-1)/ohm]
alpha : single value -float
Exponent of the constant phase element. Should be a value between
0 and 1 [-]
peak_frequency : single value (int or float)
Peak frequency of RC circuit [Hz]
Returns
---------
Z_complex : array-like
        impedance response of the circuit under investigation [ohm]
'''
circuit = '-R-Q-'
    if (resistance, constant_phase_element, alpha,
            peak_frequency) == ('none', 'none', 'none', 'none'):
        raise AssertionError('No circuit element value was provided. Cannot\
 compute the impedance response')
    elif (resistance, constant_phase_element, alpha) == ('none', 'none', 'none'):
        raise AssertionError('Not enough circuit element values were provided.\
 Cannot compute the impedance response')
elif resistance == 'none':
resistance = (1/(constant_phase_element*(2*np.pi*peak_frequency) **
alpha))
elif constant_phase_element == 'none':
constant_phase_element = (1/(resistance*(2*np.pi*peak_frequency) **
alpha))
elif alpha == 'none':
alpha = np.log(constant_phase_element *
resistance)/np.log(1/(2*np.pi * peak_frequency))
# compute the impedance response as a complex array
Z_complex = resistance + 1/(constant_phase_element*(
angular_freq*1j)**alpha)
return Z_complex
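# Illustrative sketch (values are assumptions): supplying peak_frequency lets
# a missing element be back-computed, e.g. with R and alpha given the branch
# above sets Q = 1/(R*(2*pi*f_peak)**alpha).
#   w = 2*np.pi*np.logspace(5, -2, num=50)
#   Z = cir_RQ_series(w, resistance=250, alpha=0.9, peak_frequency=1000)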
def cir_RsRC(angular_freq, solution_resistance,
parallel_resistance='none', capacitance='none',
peak_frequency='none'):
    '''
Function that simulates the impedance response of a solution resistor in
series with a resistor in parallel with a capacitor.
This circuit configuration is used to simulate the response of an ideally
polarizable electrode, also known as a blocking electrode.
String representation for this circuit: -Rs-(RC)-
Parameters
----------
angular_freq : array-like
Angular frequency [1/s]
solution_resistance : single value (int or float)
Solution resistance [ohm]
parallel_resistance : single value (int or float)
resistance of the element in parallel with
the capacitor [ohm]
capacitance : single value (int or float)
Capacitance of an electrode surface [F]
peak_frequency : single value (int or float)
Peak frequency of the parallel RC circuit [Hz]
Returns
---------
Z_complex : array-like
impedance response of the circuit under investigation [Ohm]
'''
circuit = '-Rs-(RC)-'
# compute the impedance response as a complex array
    if (parallel_resistance, capacitance, peak_frequency) == ('none', 'none', 'none'):
        raise AssertionError('No circuit element value was provided. Cannot\
 compute the impedance response')
    elif (parallel_resistance, capacitance) == ('none', 'none'):
        raise AssertionError('Not enough circuit element values were provided.\
 Cannot compute the impedance response')
elif parallel_resistance == 'none':
parallel_resistance = (1/(capacitance*(2*np.pi*peak_frequency)))
elif capacitance == 'none':
capacitance = (1/(parallel_resistance*(2*np.pi*peak_frequency)))
Z_parallel = (parallel_resistance/(1 + parallel_resistance *
capacitance * (angular_freq*1j)))
Z_complex = solution_resistance + Z_parallel
return Z_complex
def cir_RsRQRQ(angular_freq, solution_resistance='none',
parallel_resistance_1='none', constant_phase_element_1='none',
alpha_1='none', parallel_resistance_2='none',
constant_phase_element_2='none', alpha_2='none',
peak_frequency_1='none', peak_frequency_2='none'):
'''
Function that simulates the impedance response of a solution resistor in
series with two sets of a resistor in parallel with a constant phase
elements.
String representation for this circuit: -Rs-(RQ)-(RQ)-
Parameters
----------
angular_freq : array-like
Angular frequency [1/s]
solution_resistance : single value (int or float)
Solution resistance [ohm]
parallel_resistance_1 : single value (int or float)
first combination of resistor in parallel with
constant phase element [ohm]
constant_phase_element_1 : single value (int or float)
        First constant phase angle [s^(alpha-1)/ohm]
alpha_1 : single value -float
Exponent of the first constant phase element.
Should be a value between 0 and 1 [-]
parallel_resistance_2 : single value (int or float)
Second combination of resistor in parallel with
constant phase element [ohm]
constant_phase_element_2 : single value (int or float)
        Second constant phase angle [s^(alpha-1)/ohm]
alpha_2 : single value -float
Exponent of the second constant phase element.
Should be a value between 0 and 1 [-]
peak_frequency_1 : single value (int or float)
Peak frequency of the first parallel RQ circuit [Hz]
peak_frequency_2 : single value (int or float)
Peak frequency of the second parallel RQ circuit [Hz]
Returns
---------
Z_complex : array-like
impedance response of the circuit under investigation [Ohm]
'''
circuit = '-Rs-(RQ)-(RQ)-'
    if (parallel_resistance_1, constant_phase_element_1, peak_frequency_1,
            parallel_resistance_2, constant_phase_element_2,
            peak_frequency_2) == ('none',) * 6:
        raise AssertionError('No circuit element value was provided. Cannot\
 compute the impedance response')
    elif (parallel_resistance_1, constant_phase_element_1,
            parallel_resistance_2, constant_phase_element_2) == ('none',) * 4:
        raise AssertionError('Not enough circuit element values were provided.\
 Cannot compute the impedance response')
if parallel_resistance_1 == 'none':
parallel_resistance_1 = (1/(constant_phase_element_1 *
(2*np.pi*peak_frequency_1)**alpha_1))
elif constant_phase_element_1 == 'none':
constant_phase_element_1 = (1/(parallel_resistance_1 *
(2*np.pi*peak_frequency_1)**alpha_1))
if parallel_resistance_2 == 'none':
        parallel_resistance_2 = (1/(constant_phase_element_2 *
                                 (2*np.pi*peak_frequency_2)**alpha_2))
    elif constant_phase_element_2 == 'none':
        constant_phase_element_2 = (1/(parallel_resistance_2 *
                                    (2*np.pi*peak_frequency_2)**alpha_2))
    Z_parallel_1 = (parallel_resistance_1/(1 + parallel_resistance_1 *
                    constant_phase_element_1*(angular_freq*1j)**alpha_1))
    Z_parallel_2 = (parallel_resistance_2/(1 + parallel_resistance_2 *
                    constant_phase_element_2*(angular_freq*1j)**alpha_2))
    Z_complex = solution_resistance + Z_parallel_1 + Z_parallel_2
    return Z_complex
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""
poi_qac_personalized.py: dataset reader for personalized POI query auto-completion
"""
import os
import sys
import six
import re
import time
import numpy as np
import random
import datetime
import paddle.fluid as fluid
from datasets.base_dataset import BaseDataset
from utils.common_lib import convert_to_unicode
if six.PY2:
reload(sys)
sys.setdefaultencoding('utf-8')
base_rule = re.compile("[\1\2]")  # octal escapes: matches the \x01/\x02 field separators
class PoiQacPersonalized(BaseDataset):
"""
PoiQacPersonalized dataset
"""
def __init__(self, flags):
super(PoiQacPersonalized, self).__init__(flags)
self.inited_dict = False
    def parse_context(self, inputs):
        """
        Provide the input context.
        set inputs_kv: please set each key to the same name as layer.data.name
        notice:
        (1) If a user-defined "inputs key" differs from layer.data.name,
            the framework will rewrite the "inputs key" with layer.data.name
        (2) The param "inputs" is passed to the user-defined nets class
            through the nets class interface function: net(self, FLAGS, inputs)
        """
inputs['prefix_letter_id'] = fluid.layers.data(name="prefix_letter_id", shape=[1],
dtype="int64", lod_level=1)
if self._flags.prefix_word_id:
inputs['prefix_word_id'] = fluid.layers.data(name="prefix_word_id", shape=[1],
dtype="int64", lod_level=1)
if self._flags.use_geohash:
inputs['prefix_loc_geoid'] = fluid.layers.data(name="prefix_loc_geoid", shape=[40],
dtype="int64", lod_level=0)
inputs['pos_name_letter_id'] = fluid.layers.data(name="pos_name_letter_id", shape=[1],
dtype="int64", lod_level=1)
inputs['pos_addr_letter_id'] = fluid.layers.data(name="pos_addr_letter_id", shape=[1],
dtype="int64", lod_level=1)
if self._flags.poi_word_id:
inputs['pos_name_word_id'] = fluid.layers.data(name="pos_name_word_id", shape=[1],
dtype="int64", lod_level=1)
inputs['pos_addr_word_id'] = fluid.layers.data(name="pos_addr_word_id", shape=[1],
dtype="int64", lod_level=1)
if self._flags.use_geohash:
inputs['pos_loc_geoid'] = fluid.layers.data(name="pos_loc_geoid", shape=[40],
dtype="int64", lod_level=0)
if self.is_training:
inputs['neg_name_letter_id'] = fluid.layers.data(name="neg_name_letter_id", shape=[1],
dtype="int64", lod_level=1)
inputs['neg_addr_letter_id'] = fluid.layers.data(name="neg_addr_letter_id", shape=[1],
dtype="int64", lod_level=1)
if self._flags.poi_word_id:
inputs['neg_name_word_id'] = fluid.layers.data(name="neg_name_word_id", shape=[1],
dtype="int64", lod_level=1)
inputs['neg_addr_word_id'] = fluid.layers.data(name="neg_addr_word_id", shape=[1],
dtype="int64", lod_level=1)
if self._flags.use_geohash:
inputs['neg_loc_geoid'] = fluid.layers.data(name="neg_loc_geoid", shape=[40],
dtype="int64", lod_level=0)
else:
#for predict label
inputs['label'] = fluid.layers.data(name="label", shape=[1],
dtype="int64", lod_level=0)
inputs['qid'] = fluid.layers.data(name="qid", shape=[1],
dtype="int64", lod_level=0)
context = {"inputs": inputs}
#set debug list, print info during training
#context["debug_list"] = [key for key in inputs]
return context
def _init_dict(self):
"""
init dict
"""
if self.inited_dict:
return
if self._flags.platform in ('local-gpu', 'pserver-gpu', 'slurm'):
gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
self.place = fluid.CUDAPlace(gpu_id)
else:
self.place = fluid.CPUPlace()
self.term_dict = {}
if self._flags.qac_dict_path is not None:
with open(self._flags.qac_dict_path, 'r') as f:
for line in f:
term, term_id = line.strip('\r\n').split('\t')
term = convert_to_unicode(term)
self.term_dict[term] = int(term_id)
self.inited_dict = True
sys.stderr.write("loaded term dict:%s\n" % (len(self.term_dict)))
def _pad_batch_data(self, insts, pad_idx, return_max_len=True, return_num_token=False):
"""
Pad the instances to the max sequence length in batch, and generate the
corresponding position data and attention bias.
"""
return_list = []
max_len = max(len(inst) for inst in insts)
# Any token included in dict can be used to pad, since the paddings' loss
# will be masked out by weights and make no effect on parameter gradients.
inst_data = np.array(
[inst + [pad_idx] * (max_len - len(inst)) for inst in insts])
return_list += [inst_data.astype("int64").reshape([-1, 1])]
if return_max_len:
return_list += [max_len]
if return_num_token:
num_token = 0
for inst in insts:
num_token += len(inst)
return_list += [num_token]
return return_list if len(return_list) > 1 else return_list[0]
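        # e.g. insts=[[3, 4], [5]] with pad_idx=0 pads to [[3, 4], [5, 0]] and
        # returns the int64 column vector [[3], [4], [5], [0]] plus max_len=2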
def _get_ids(self, seg_info):
if len(seg_info) < 1:
return [0], [0]
bt = seg_info.split('\3')
if len(self.term_dict) < 1:
            letter_ids = list(map(int, bt[0].split()))[:self._flags.max_seq_len]
            word_ids = list(map(int, bt[1].split()))[:self._flags.max_seq_len]
return letter_ids, word_ids
rq = convert_to_unicode("".join(bt))
bl = [t for t in rq]
letter_ids = []
for t in bl:
letter_ids.append(self.term_dict.get(t.lower(), 1))
if len(letter_ids) >= self._flags.max_seq_len:
break
word_ids = []
for t in bt:
t = convert_to_unicode(t)
word_ids.append(self.term_dict.get(t.lower(), 1))
if len(word_ids) >= self._flags.max_seq_len:
break
return letter_ids, word_ids
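    # poi_str layout, as inferred from the parsing below: records separated by
    # '\1'; within a record, '\2' separates the name, addr and geohash fields;
    # inside a field, '\3' separates pre-tokenized letter ids from word ids
    # (see _get_ids above).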
def _get_poi_ids(self, poi_str, max_num=0):
if len(poi_str) < 1:
return []
ids = []
all_p = poi_str.split('\1')
pidx = range(0, len(all_p))
if max_num > 0 and len(all_p) > max_num:
            # neg sample: the last 10 candidates are negative samples (not displayed)
neg_s_idx = len(all_p) - 10
pidx = [1, 2] + list(random.sample(pidx[3:neg_s_idx], max_num - 13)) + list(pidx[neg_s_idx:])
bids = set()
for x in pidx:
poi_seg = all_p[x].split('\2')
#raw_text: name, addr, xy
bid = poi_seg[0]
name_letter_id, name_word_id = self._get_ids(poi_seg[0])
addr_letter_id, addr_word_id = self._get_ids(poi_seg[1])
ghid = list(map(int, poi_seg[2].split(',')))
if not self.is_training and name_letter_id == [0]:
continue # empty name
if bid in bids:
continue
bids.add(bid)
ids.append([name_letter_id, name_word_id, addr_letter_id, addr_word_id, ghid])
return ids
    def deal_timestamp(self, timestamp):
        """
        Map a unix timestamp to one of 14 day-part ids:
        weekday (0-6) * 2, plus 1 for night-time (before 06:00 or after 18:00).
        """
        day_time_dt = datetime.datetime.fromtimestamp(timestamp)
        day = day_time_dt.strftime("%w")
        hour_min = day_time_dt.strftime("%H.%M")  # avoid shadowing the time module
        day_id = int(day) * 2
        if 6 < float(hour_min) < 18:
            return day_id
        else:
            return day_id + 1
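        # example: a Wednesday 14:30 timestamp -> day '3', day_id 6, daytime,
        # so this returns 6; the same day at 22:00 returns 7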
def parse_batch(self, data_gen):
"""
reader_batch must be true: only for train & loss_func is log_exp, other use parse_oneline
pos : neg = 1 : N
"""
def _get_lod(k):
return fluid.create_lod_tensor(np.array(batch_data[k][0]).reshape([-1, 1]),
[batch_data[k][1]], self.place)
batch_data = {}
keys = None
last_gh = None
task_data = None
process_batch = False
for gh, line in data_gen:
# print(gh)
# print(last_gh)
            if last_gh is None:
last_gh = gh
task_data = [line]
elif last_gh != gh:
last_gh = gh
process_batch = True
else:
task_data.append(line)
if process_batch:
# print(len(task_data))
gen_data = []
for task_line in task_data:
# print(task_line)
for s in self.parse_oneline(task_line):
for k, v in s:
if k not in batch_data:
batch_data[k] = [[], []]
if not isinstance(v[0], list):
v = [v] #pos 1 to N
for j in v:
batch_data[k][0].extend(j)
batch_data[k][1].append(len(j))
if keys is None:
keys = [k for k, _ in s]
if len(batch_data[keys[0]][1]) == self._flags.train_batch_size:
# print(keys)
gen_data.append([(k, _get_lod(k)) for k in keys])
batch_data = {}
# if not self._flags.drop_last_batch and len(batch_data) != 0:
# gen_data.append([(k, _get_lod(k)) for k in keys])
# print(gen_data)
task_data = [line]
process_batch = False
if len(gen_data):
# print(len(gen_data))
yield gen_data
# if not self._flags.drop_last_batch and len(batch_data) != 0:
# yield [(k, _get_lod(k)) for k in keys]
def parse_oneline(self, line):
"""
datareader interface
"""
self._init_dict()
qid, timestamp, gh, prefix, pos_poi, neg_poi = line.strip("\r\n").split("\t")
# day_id = self.deal_timestamp(float(timestamp))
# day_input = [0] * 14
# day_input[day_id] = 1
logid = int(qid.split('_')[1])
#step2
prefix_loc_geoid = list(map(int, gh.split(',')))
prefix_letter_id, prefix_word_id = self._get_ids(prefix)
prefix_input = [("prefix_letter_id", prefix_letter_id)]
if self._flags.prefix_word_id:
prefix_input.append(("prefix_word_id", prefix_word_id))
if self._flags.use_geohash:
prefix_input.append(("prefix_loc_geoid", prefix_loc_geoid))
#step3
pos_ids = self._get_poi_ids(pos_poi)
pos_num = len(pos_ids)
max_num = 0
if self.is_training:
max_num = max(20, self._flags.neg_sample_num) #last 10 is neg sample
neg_ids = self._get_poi_ids(neg_poi, max_num=max_num)
#if not train, add all pois
if not self.is_training:
pos_ids.extend(neg_ids[:-10] if len(neg_ids) > 10 else neg_ids)
if len(pos_ids) < 1:
pos_ids.append([[0], [0], [0], [0], [0] * 40, [0]])
#step4
idx = 0
for pos_id in pos_ids:
pos_input = [("pos_name_letter_id", pos_id[0]), \
("pos_addr_letter_id", pos_id[2])]
if self._flags.poi_word_id:
pos_input.append(("pos_name_word_id", pos_id[1]))
pos_input.append(("pos_addr_word_id", pos_id[3]))
if self._flags.use_geohash:
pos_input.append(("pos_loc_geoid", pos_id[4]))
if self.is_training:
if len(neg_ids) > self._flags.neg_sample_num:
#Noise Contrastive Estimation
#if self._flags.neg_sample_num > 3:
# nids_sample = neg_ids[:3]
nids_sample = random.sample(neg_ids, self._flags.neg_sample_num)
else:
nids_sample = neg_ids
if self._flags.reader_batch:
if len(nids_sample) != self._flags.neg_sample_num:
continue
neg_batch = [[], [], [], [], []]
for neg_id in nids_sample:
for i in range(len(neg_batch)):
neg_batch[i].append(neg_id[i])
neg_input = [("neg_name_letter_id", neg_batch[0]), \
("neg_addr_letter_id", neg_batch[2])]
if self._flags.poi_word_id:
neg_input.append(("neg_name_word_id", neg_batch[1]))
neg_input.append(("neg_addr_word_id", neg_batch[3]))
if self._flags.use_geohash:
neg_input.append(("neg_loc_geoid", neg_batch[4]))
yield prefix_input + pos_input + neg_input
else:
for neg_id in nids_sample:
neg_input = [("neg_name_letter_id", neg_id[0]), \
("neg_addr_letter_id", neg_id[2])]
if self._flags.poi_word_id:
neg_input.append(("neg_name_word_id", neg_id[1]))
neg_input.append(("neg_addr_word_id", neg_id[3]))
if self._flags.use_geohash:
neg_input.append(("neg_loc_geoid", neg_id[4]))
yield prefix_input + pos_input + neg_input
else:
label = int(idx < pos_num)
yield prefix_input + pos_input + [("label", [label]), ("qid", [logid])]
idx += 1
# if __name__ == '__main__':
# from utils import flags
# from utils.load_conf_file import LoadConfFile
# FLAGS = flags.FLAGS
# flags.DEFINE_custom("conf_file", "./conf/test/test.conf",
# #"conf file", action=LoadConfFile, sec_name="Train")
# "conf file", action=LoadConfFile, sec_name="Evaluate")
# sys.stderr.write('----------- Configuration Arguments -----------\n')
# for arg, value in sorted(flags.get_flags_dict().items()):
# sys.stderr.write('%s: %s\n' % (arg, value))
# sys.stderr.write('------------------------------------------------\n')
# dataset_instance = PoiQacPersonalized(FLAGS)
# def _dump_vec(data, name):
# print("%s\t%s" % (name, " ".join(map(str, np.array(data)))))
# def _data_generator():
# """
# stdin sample generator: read from stdin
# """
# for line in sys.stdin:
# if not line.strip():
# continue
# yield line
# if FLAGS.reader_batch:
# for sample in dataset_instance.parse_batch(_data_generator):
# _dump_vec(sample[0][1], 'prefix_letter_id')
# _dump_vec(sample[1][1], 'prefix_loc_geoid')
# _dump_vec(sample[2][1], 'pos_name_letter_id')
# _dump_vec(sample[3][1], 'pos_addr_letter_id')
# _dump_vec(sample[6][1], 'pos_loc_geoid')
# _dump_vec(sample[7][1], 'neg_name_letter_id or label')
# else:
# for line in sys.stdin:
# for sample in dataset_instance.parse_oneline(line):
<filename>src/pickle_parser.py
'''
#######################################################################################################
#######################################################################################################
Copyright 2018 Northeastern University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Goal: read the parsed original traffic record (i.e., _client.pickle and _server.pickle)
from pcap_folder/original, then create new pickles with specified changes in pcap_folder
Usage:
    python pickle_parser.py --pcap_folder=[] --Side --PNum --Action --Specification
Mandatory arguments:
    pcap_folder: This is the folder containing the pcap file and client_ip.txt;
        it is also where the replay pcap (pickle) files live
Side: Client or Server modification
PNum: the packet that need to be modified
    Action: what modification needs to be made:
Delete : Delete the specified packet from the trace
Random : Randomize the whole packet and store the randomized packet into /random/randomClient.pickle and randomServer.pickle
        XOR : Invert every bit in this packet and store the inverted packet into /random/randomClient.pickle and randomServer.pickle
Prepend: Prepend random packets in front of the original packets
ReplaceW : Replace multiple region with given strings specified by Specification
ReplaceR : Replace multiple region with random bytes(random pickles) specified by Specification
        TTLP : For TTL probe, where the server is expecting original packets but receives random packets (original packets have limited TTL)
    Specification: specifies how to make the modification on the trace
        When used with ReplaceW, it should be {(x,y): 'something', ...}, which replaces the payload bytes x to y with 'something'
        When used with ReplaceR, it should be {(x1,y1), (x2,y2), ...}, which replaces the payload bytes x1 to y1 with
        the random payload bytes x1 to y1 (loaded from the random pickle), and so on
#######################################################################################################
#######################################################################################################
'''
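# Example invocation (illustrative only; the folder name and byte ranges are
# assumptions):
#   python src/pickle_parser.py --pcap_folder=/data/some_replay --Side=Server \
#       --PNum=2 --Action=ReplaceW --Specification="{(0, 4): 'HTTP'}"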
import sys, os, pickle, copy, mimetools, StringIO, email, re, random, string
import python_lib
from python_lib import *
def MultiReplace(payload, regions, rpayload):
    # When rpayload is '', replace each region with the string stored in
    # regions, e.g. regions = {(1, 2): 'haha'}
if rpayload == '':
for region in regions:
L = region[0]
R = region[1]
payload = Replace(payload, L, R, regions[region])
else:
for region in regions:
L = region[0]
R = region[1]
payload = Replace(payload, L, R, rpayload[L:R])
return payload
def Replace(payload, L, R, replaceS):
# replace the bytes from L to R to replaceS
payload = payload.decode('hex')
plen = len(payload)
if R > plen or L < 0 :
        print '\n\t\t ***Attention*** Payload length is', plen, 'but L bound is', L, 'R bound is', R,\
            'Returning original payload'
else:
LeftPad = payload[: L]
RightPad = payload[R :]
payload = LeftPad + replaceS + RightPad
payload = payload.encode('hex')
return payload
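# Worked example: Replace('48454c4c4f', 1, 3, 'xy') decodes to 'HELLO',
# splices bytes [1, 3) to give 'HxyLO', and re-encodes to '4878794c4f'.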
def to_list(chain, offset):
return [chain[i:i+offset] for i in range(0, len(chain), offset)]
# Bit hex string operations
def bin2str(chain):
return ''.join((chr(int(chain[i:i+8], 2)) for i in range(0, len(chain), 8)))
def bin2hex(chain):
return ''.join((hex(int(chain[i:i+8], 2))[2:] for i in range(0, len(chain), 8)))
def str2bin(chain):
return ''.join((bin(ord(c))[2:].zfill(8) for c in chain))
def str2hex(chain):
return ''.join((hex(ord(c))[2:] for c in chain))
def hex2bin(chain):
return ''.join((bin(int(chain[i:i+2], 16))[2:].zfill(8) for i in range(0, len(chain), 2)))
def hex2str(chain):
return ''.join((chr(int(chain[i:i+2], 16)) for i in range(0, len(chain), 2)))
def XorPayload(payload):
payload = payload.decode('hex')
bpayload = str2bin(payload)
newb = ''
for char in bpayload:
if char == '0':
newb += '1'
else:
newb += '0'
newpayload = bin2str(newb).encode('hex')
return newpayload
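# e.g. XorPayload('ff00') flips every bit: '1111111100000000' becomes
# '0000000011111111', i.e. the hex string '00ff'.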
# Randomize the whole payload in this packet
def Randomize(payload):
# randomize the whole payload except the bytes from L to R
payload = payload.decode('hex')
plen = len(payload)
payload = ''.join(chr(random.getrandbits(8)) for x in range(plen))
payload = payload.encode('hex')
return payload
def RandomLoad(pcapDir, side, PacketNum, Protocol, csp):
# Client Side
if side == 'Client':
clientQ, udpClientPorts, tcpCSPs, replayName = \
pickle.load(open(pcapDir +'/random/randomClient.pickle','r'))
rpayload = clientQ[PacketNum-1].payload
# Server Side
else:
serverQ, tmpLUT, tmpgetLUT, udpServers, tcpServerPorts, replayName = \
pickle.load(open(pcapDir+'/random/randomServer.pickle','r'))
if Protocol == 'udp':
rpayload = serverQ[Protocol][csp][PacketNum-1].payload
else:
rpayload = serverQ[Protocol][csp][PacketNum-1].response_list[0].payload
return rpayload
def RandomDump(pcapDir, clientQ, udpClientPorts, tcpCSPs, replay_name, serverQ, LUT, getLUT, udpServers, tcpServerPorts):
if not os.path.isdir(pcapDir+'/random'):
os.makedirs(pcapDir+'/random')
pickle.dump((clientQ, udpClientPorts, list(tcpCSPs), replay_name),
open((pcapDir+'/random/randomClient.pickle'), "w" ), 2)
pickle.dump((serverQ, LUT, getLUT, udpServers, tcpServerPorts, replay_name),
open((pcapDir+'/random/randomServer.pickle'), "w" ), 2)
# Client Modification:
# One thing to keep in mind is that the LUT is based on the payload in Client packet
# Thus the server LUT needs to be modified along with Client packet
def CModify(PcapDir, clientQ, LUT, getLUT, Prot, PNum, Action, Specification):
    # The hash sample size is fixed at 400 bytes
toHash = clientQ[PNum - 1].payload.decode('hex')[:400]
theHash = hash(toHash)
# Load the original value from LUT
(replay_name, csp) = LUT[Prot][theHash]
# Remove this entry in the hash table
LUT[Prot].pop(theHash, None)
if 'Random' == Action:
clientQ[PNum-1].payload = Randomize(clientQ[PNum-1].payload)
elif 'XOR' == Action:
clientQ[PNum-1].payload = XorPayload(clientQ[PNum-1].payload)
elif 'Delete' == Action:
# print '\n\t Client Q Before deleting ::',clientQ
clientQ.pop(PNum-1)
# print '\n\t Client Q after deleting ::',clientQ
elif 'Prepend' == Action:
preNum = Specification[0]
preLen = Specification[1]
random.seed(Action)
rstring = ''.join(random.choice(string.ascii_letters + string.digits) for x in range(preLen))
for i in xrange(preNum):
preQ = RequestSet(rstring.encode('hex'), clientQ[0].c_s_pair, None, clientQ[0].timestamp)
clientQ.insert(0, preQ)
# print '\n\t Client Q after prepending ::',TMPclientQ
elif 'ReplaceW' == Action:
regions = Specification
clientQ[PNum-1].payload = MultiReplace(clientQ[PNum-1].payload, regions, '')
# print '\n\t After ReplaceW ::',TMPclientQ[MPacketNum-1].payload.decode('hex')
elif 'ReplaceR' == Action:
regions = Specification
rpayload = RandomLoad(PcapDir, 'Client', PNum, Prot, csp)
rpayload = rpayload.decode('hex')
clientQ[PNum-1].payload = MultiReplace(clientQ[PNum-1].payload, regions, rpayload)
print '\r\n RRR', clientQ[PNum-1].payload.decode('hex')
elif 'TTLP' == Action:
# Fixed random seed, since the server need to know what to expect
random.seed('TTLP')
for i in xrange(PNum):
Qlen = len(clientQ[i].payload.decode('hex'))
rstring = ''.join(random.choice(string.ascii_letters + string.digits) for x in range(Qlen))
clientQ[i].payload = rstring.encode('hex')
else:
print '\n\t Unrecognized Action,', Action, ' No ACTION taken HERE in CModify'
# Restore the entry into LUT with new payload
toHash = clientQ[PNum - 1].payload.decode('hex')[:400]
theHash = hash(toHash)
LUT[Prot][theHash] = (replay_name, csp)
return clientQ, LUT, getLUT
# Server Modification:
def SModify(PcapDir, serverQ, PNum, Prot, csp, Action, Specification):
# UDP server changes
if Prot == 'udp':
# print '\n\t Server packet', MPacketNum, ' Before ::',serverQ[MProtocol][csp][MPacketNum-1].payload.decode('hex')
if 'Random' == Action:
serverQ[Prot][csp][PNum-1].payload = \
Randomize(serverQ[Prot][csp][PNum-1].payload)
elif 'XOR' == Action:
serverQ[Prot][csp][PNum-1].payload = \
XorPayload(serverQ[Prot][csp][PNum-1].payload)
elif 'Delete' == Action:
# print '\n\t Server Q Before deleting ::',serverQ[MProtocol][csp]
serverQ[Prot][csp].pop(PNum-1)
# print '\n\t Server Q after deleting ::',serverQ[MProtocol][csp]
elif 'ReplaceW' == Action:
regions = Specification
serverQ[Prot][csp][PNum-1].payload = \
MultiReplace(serverQ[Prot][csp][PNum-1].payload, regions, '')
# print '\n\t After ReplaceW ::',serverQ[MProtocol][csp][MPacketNum-1].payload.decode('hex')
elif 'ReplaceR' == Action:
regions = Specification
rpayload = RandomLoad(PcapDir, 'Server', PNum, 'udp', csp)
rpayload = rpayload.decode('hex')
serverQ[Prot][csp][PNum-1].payload = \
MultiReplace(serverQ[Prot][csp][PNum-1].payload,regions, rpayload)
else:
print '\n\t Unrecognized Action,', Action, ' No ACTION taken HERE in SModify UDP'
#TCP server changes
else:
if 'Random' == Action:
serverQ[Prot][csp][PNum-1].response_list[0].payload = \
Randomize(serverQ[Prot][csp][PNum-1].response_list[0].payload)
elif 'XOR' == Action:
serverQ[Prot][csp][PNum-1].response_list[0].payload = \
XorPayload(serverQ[Prot][csp][PNum-1].response_list[0].payload)
elif 'Delete' == Action:
# print '\n\t Server Q Before deleting ::',serverQ[MProtocol][csp]
serverQ[Prot][csp].pop(PNum-1)
# print '\n\t Server Q after deleting ::',serverQ[MProtocol][csp]
elif 'ReplaceW' == Action:
regions = Specification
serverQ[Prot][csp][PNum-1].response_list[0].payload = \
MultiReplace(serverQ[Prot][csp][PNum-1].response_list[0].payload, regions, '')
# print '\n\t After ReplaceW ::',serverQ[MProtocol][csp][MPacketNum-1].response_list[0].payload.decode('hex')
elif 'ReplaceR' == Action:
regions = Specification
rpayload = RandomLoad(PcapDir, 'Server', PNum, 'tcp', csp)
rpayload = rpayload.decode('hex')
serverQ[Prot][csp][PNum-1].response_list[0].payload = \
MultiReplace(serverQ[Prot][csp][PNum-1].response_list[0].payload, regions, rpayload)
print '\r\n RRR', serverQ[Prot][csp][PNum-1].response_list[0].payload.decode('hex')
else:
print '\n\t Unrecognized Action,', Action, ' No ACTION taken HERE in SModify TCP'
return serverQ
# Modify the Qs and LUT as specified
def Modification(PcapDir, clientQ, serverQ, LUT, getLUT, Prot, csp, Side, PNum, Action, Specification):
if Side == 'Client':
# Client modification
clientQ, LUT, getLUT = CModify(PcapDir, clientQ, LUT, getLUT, Prot, PNum, Action, Specification)
elif Side == 'Server':
# Server modification
serverQ = SModify(PcapDir, serverQ, PNum, Prot, csp, Action, Specification)
return clientQ, serverQ, LUT, getLUT
def run(PcapDir, Side, PNum, Action, Specification):
'''##########################################################'''
OriginalDir = PcapDir + '/Original'
clientPickle = ''
serverPickle = ''
# First load the original pickles from pcap_folder/original directory
for file in os.listdir(OriginalDir):
if file.endswith('client_all.pickle'):
clientPickle = os.path.abspath(OriginalDir) + '/' + file
elif file.endswith('server_all.pickle'):
serverPickle = os.path.abspath(OriginalDir) + '/' + file
serverQ, LUT, getLUT, udpServers, tcpServerPorts, replayName = \
pickle.load(open(serverPickle, 'r'))
clientQ, udpClientPorts, tcpCSPs, replayName = pickle.load(open(clientPickle, 'r'))
    # There should only be one client pickle and one server pickle in the folder
#!/usr/bin/env python3
"""Main script for gaze direction inference from webcam feed."""
import argparse
import os
import queue
import threading
import time
from gazedb import GazeDB
import coloredlogs
import cv2 as cv
import numpy as np
import tensorflow as tf
from tensorflow.python.client import device_lib
import keras
from keras import backend as K
from datasources import Video, Webcam
from models import ELG
import util.gaze
if __name__ == '__main__':
    # Initialise the gaze database object
database = GazeDB()
# Set global log level
parser = argparse.ArgumentParser(description='Demonstration of landmarks localization.')
parser.add_argument('-v', type=str, help='logging level', default='info',
choices=['debug', 'info', 'warning', 'error', 'critical'])
parser.add_argument('--from_video', type=str, help='Use this video path instead of webcam')
parser.add_argument('--record_video', type=str, help='Output path of video of demonstration.')
parser.add_argument('--fullscreen', action='store_true')
parser.add_argument('--headless', action='store_true')
parser.add_argument('--fps', type=int, default=60, help='Desired sampling rate of webcam')
parser.add_argument('--camera_id', type=int, default=0, help='ID of webcam to use')
args = parser.parse_args()
coloredlogs.install(
datefmt='%d/%m %H:%M',
fmt='%(asctime)s %(levelname)s %(message)s',
level=args.v.upper(),
)
# Check if GPU is available
# from tensorflow.python.client import device_lib
session_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
gpu_available = False
    try:
        # K's _get_available_gpus() returns plain device-name strings (which
        # have no device_type), so query the device protos via device_lib
        gpus = [d for d in device_lib.list_local_devices()
                if d.device_type == 'GPU']
        print("\t\t GPUS: [{}]".format(gpus))
        gpu_available = len(gpus) > 0
    except Exception:
        pass
# Initialize Tensorflow session
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Session(config=session_config) as session:
# Declare some parameters
batch_size = 2
# Define webcam stream data source
# Change data_format='NHWC' if not using CUDA
if args.from_video:
assert os.path.isfile(args.from_video)
data_source = Video(args.from_video,
tensorflow_session=session, batch_size=batch_size,
data_format='NCHW' if gpu_available else 'NHWC',
eye_image_shape=(108, 180))
else:
data_source = Webcam(tensorflow_session=session, batch_size=batch_size,
camera_id=args.camera_id, fps=args.fps,
data_format='NCHW' if gpu_available else 'NHWC',
eye_image_shape=(36, 60))
# Define model
if args.from_video:
model = ELG(
session, train_data={'videostream': data_source},
first_layer_stride=3,
num_modules=3,
num_feature_maps=64,
learning_schedule=[
{
'loss_terms_to_optimize': {'dummy': ['hourglass', 'radius']},
},
],
)
else:
model = ELG(
session, train_data={'videostream': data_source},
first_layer_stride=1,
num_modules=2,
num_feature_maps=32,
learning_schedule=[
{
'loss_terms_to_optimize': {'dummy': ['hourglass', 'radius']},
},
],
)
# Record output frames to file if requested
if args.record_video:
video_out = None
video_out_queue = queue.Queue()
video_out_should_stop = False
video_out_done = threading.Condition()
video_recorder = cv.VideoWriter(
args.record_video, cv.VideoWriter_fourcc(*'XVID'),
20, (1280, 720),
)
def _record_frame():
global video_out
last_frame_time = None
out_fps = 60
out_frame_interval = 1.0 / out_fps
while not video_out_should_stop:
frame_index = video_out_queue.get()
if frame_index is None:
break
# assert frame_index in data_source._frames
frame = data_source._frames[frame_index]['bgr']
h, w, _ = frame.shape
if video_out is None:
video_out = cv.VideoWriter(
args.record_video, cv.VideoWriter_fourcc(*'XVID'),
60, (w, h),
)
now_time = time.time()
if last_frame_time is not None:
time_diff = now_time - last_frame_time
while time_diff > 0.0:
# video_out.write(frame)
time_diff -= out_frame_interval
last_frame_time = now_time
video_out.release()
with video_out_done:
video_out_done.notify_all()
# record_thread = threading.Thread(target=_record_frame, name='record')
# record_thread.daemon = True
# record_thread.start()
# Begin visualization thread
inferred_stuff_queue = queue.Queue()
def _visualize_output():
last_frame_index = 0
last_frame_time = time.time()
fps_history = []
all_gaze_histories = []
if args.fullscreen:
cv.namedWindow('vis', cv.WND_PROP_FULLSCREEN)
cv.setWindowProperty('vis', cv.WND_PROP_FULLSCREEN, cv.WINDOW_FULLSCREEN)
while True:
# If no output to visualize, show unannotated frame
if inferred_stuff_queue.empty():
next_frame_index = last_frame_index + 1
if next_frame_index in data_source._frames:
next_frame = data_source._frames[next_frame_index]
if 'faces' in next_frame and len(next_frame['faces']) == 0:
if not args.headless:
resized_img = cv.resize(next_frame['bgr'], (1280, 720))
cv.imshow('vis', resized_img)
                                if args.record_video: video_recorder.write(resized_img)  # recorder only exists when recording
# cv.imshow('vis', flipped_bgr)
if args.record_video:
video_out_queue.put_nowait(next_frame_index)
last_frame_index = next_frame_index
if cv.waitKey(1) & 0xFF == ord('q'):
return
continue
# Get output from neural network and visualize
output = inferred_stuff_queue.get()
bgr = None
line_lengths = []
look_flag = False
for j in range(batch_size):
print("Batch Size, J: ", batch_size, j)
frame_index = output['frame_index'][j]
if frame_index not in data_source._frames:
continue
frame = data_source._frames[frame_index]
# Decide which landmarks are usable
heatmaps_amax = np.amax(output['heatmaps'][j, :].reshape(-1, 18), axis=0)
can_use_eye = np.all(heatmaps_amax > 0.7)
can_use_eyelid = np.all(heatmaps_amax[0:8] > 0.75)
can_use_iris = np.all(heatmaps_amax[8:16] > 0.8)
start_time = time.time()
eye_index = output['eye_index'][j]
bgr = frame['bgr']
eye = frame['eyes'][eye_index]
eye_image = eye['image']
eye_side = eye['side']
eye_landmarks = output['landmarks'][j, :]
eye_radius = output['radius'][j][0]
if eye_side == 'left':
eye_landmarks[:, 0] = eye_image.shape[1] - eye_landmarks[:, 0]
eye_image = np.fliplr(eye_image)
# Embed eye image and annotate for picture-in-picture
eye_upscale = 2
eye_image_raw = cv.cvtColor(cv.equalizeHist(eye_image), cv.COLOR_GRAY2BGR)
eye_image_raw = cv.resize(eye_image_raw, (0, 0), fx=eye_upscale, fy=eye_upscale)
eye_image_annotated = np.copy(eye_image_raw)
if can_use_eyelid:
cv.polylines(
eye_image_annotated,
[np.round(eye_upscale*eye_landmarks[0:8]).astype(np.int32)
.reshape(-1, 1, 2)],
isClosed=True, color=(255, 255, 0), thickness=1, lineType=cv.LINE_AA,
)
if can_use_iris:
cv.polylines(
eye_image_annotated,
[np.round(eye_upscale*eye_landmarks[8:16]).astype(np.int32)
.reshape(-1, 1, 2)],
isClosed=True, color=(0, 255, 255), thickness=1, lineType=cv.LINE_AA,
)
cv.drawMarker(
eye_image_annotated,
tuple(np.round(eye_upscale*eye_landmarks[16, :]).astype(np.int32)),
color=(0, 255, 255), markerType=cv.MARKER_CROSS, markerSize=4,
thickness=1, line_type=cv.LINE_AA,
)
face_index = int(eye_index / 2)
eh, ew, _ = eye_image_raw.shape
v0 = face_index * 2 * eh
v1 = v0 + eh
v2 = v1 + eh
u0 = 0 if eye_side == 'left' else ew
u1 = u0 + ew
bgr[v0:v1, u0:u1] = eye_image_raw
bgr[v1:v2, u0:u1] = eye_image_annotated
# Visualize preprocessing results
frame_landmarks = (frame['smoothed_landmarks']
if 'smoothed_landmarks' in frame
else frame['landmarks'])
for f, face in enumerate(frame['faces']):
# for landmark in frame_landmarks[f][:-1]:
# cv.drawMarker(bgr, tuple(np.round(landmark).astype(np.int32)),
# color=(0, 0, 255), markerType=cv.MARKER_STAR,
# markerSize=2, thickness=1, line_type=cv.LINE_AA)
cv.rectangle(
bgr, tuple(np.round(face[:2]).astype(np.int32)),
tuple(np.round(np.add(face[:2], face[2:])).astype(np.int32)),
color=(0, 255, 255), thickness=1, lineType=cv.LINE_AA,
)
# Transform predictions
eye_landmarks = np.concatenate([eye_landmarks,
[[eye_landmarks[-1, 0] + eye_radius,
eye_landmarks[-1, 1]]]])
eye_landmarks = np.asmatrix(np.pad(eye_landmarks, ((0, 0), (0, 1)),
'constant', constant_values=1.0))
eye_landmarks = (eye_landmarks *
eye['inv_landmarks_transform_mat'].T)[:, :2]
eye_landmarks = np.asarray(eye_landmarks)
eyelid_landmarks = eye_landmarks[0:8, :]
iris_landmarks = eye_landmarks[8:16, :]
iris_centre = eye_landmarks[16, :]
eyeball_centre = eye_landmarks[17, :]
eyeball_radius = np.linalg.norm(eye_landmarks[18, :] -
eye_landmarks[17, :])
# Smooth and visualize gaze direction
num_total_eyes_in_frame = len(frame['eyes'])
if len(all_gaze_histories) != num_total_eyes_in_frame:
all_gaze_histories = [list() for _ in range(num_total_eyes_in_frame)]
gaze_history = all_gaze_histories[eye_index]
if can_use_eye:
# Visualize landmarks
# cv.drawMarker( # Eyeball centre
# bgr, tuple(np.round(eyeball_centre).astype(np.int32)),
# color=(0, 255, 0), markerType=cv.MARKER_CROSS, markerSize=4,
# thickness=1, line_type=cv.LINE_AA,
# )
# cv.circle( # Eyeball outline
# bgr, tuple(np.round(eyeball_centre).astype(np.int32)),
# int(np.round(eyeball_radius)), color=(0, 255, 0),
# thickness=1, lineType=cv.LINE_AA,
# )
# Draw "gaze"
# from models.elg import estimate_gaze_from_landmarks
# current_gaze = estimate_gaze_from_landmarks(
# iris_landmarks, iris_centre, eyeball_centre, eyeball_radius)
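                        # Gaze angles from the iris-centre offset on the
                        # eyeball sphere (comments added for clarity):
                        #   theta = -asin((i_y0 - e_y0) / r)                # pitch
                        #   phi   =  asin((i_x0 - e_x0) / (-r*cos(theta)))  # yaw
                        # np.clip keeps the arcsin arguments inside [-1, 1]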
i_x0, i_y0 = iris_centre
e_x0, e_y0 = eyeball_centre
theta = -np.arcsin(np.clip((i_y0 - e_y0) / eyeball_radius, -1.0, 1.0))
phi = np.arcsin(np.clip((i_x0 - e_x0) / (eyeball_radius * -np.cos(theta)),
-1.0, 1.0))
current_gaze = np.array([theta, phi])
gaze_history.append(current_gaze)
gaze_history_max_len = 10
if len(gaze_history) > gaze_history_max_len:
gaze_history = gaze_history[-gaze_history_max_len:]
bgr, line_length = util.gaze.draw_gaze(bgr, iris_centre, np.mean(gaze_history, axis=0),
length=120.0, thickness=1)
line_lengths.append(line_length)
else:
gaze_history.clear()
if can_use_eyelid:
cv.polylines(
bgr, [np.round(eyelid_landmarks).astype(np.int32).reshape(-1, 1, 2)],
isClosed=True, color=(255, 255, 0), thickness=1, lineType=cv.LINE_AA,
)
if can_use_iris:
cv.polylines(
bgr, [np.round(iris_landmarks).astype(np.int32).reshape(-1, 1, 2)],
isClosed=True, color=(0, 255, 255), thickness=1, lineType=cv.LINE_AA,
)
cv.drawMarker(
bgr, tuple(np.round(iris_centre).astype(np.int32)),
color=(0, 255, 255), markerType=cv.MARKER_CROSS, markerSize=4,
thickness=1, line_type=cv.LINE_AA,
)
dtime = 1e3*(time.time() - start_time)
if 'visualization' not in frame['time']:
frame['time']['visualization'] = dtime
else:
frame['time']['visualization'] += dtime
def _dtime(before_id, after_id):
return int(1e3 * (frame['time'][after_id] - frame['time'][before_id]))
def _dstr(title, before_id, after_id):
return '%s: %dms' % (title, _dtime(before_id, after_id))
if eye_index == len(frame['eyes']) - 1:
# Calculate timings
frame['time']['after_visualization'] = time.time()
fps = int(np.round(1.0 / (time.time() - last_frame_time)))
fps_history.append(fps)
if len(fps_history) > 60:
fps_history = fps_history[-60:]
fps_str = '%d FPS' % np.mean(fps_history)
last_frame_time = time.time()
fh, fw, _ = bgr.shape
cv.putText(bgr, fps_str, org=(fw - 110, fh - 20),
fontFace=cv.FONT_HERSHEY_DUPLEX, fontScale=0.8,
color=(0, 0, 0), thickness=1, lineType=cv.LINE_AA)
cv.putText(bgr, fps_str, org=(fw - 111, fh - 21),
fontFace=cv.FONT_HERSHEY_DUPLEX, fontScale=0.79,
color=(255, 255, 255), thickness=1, lineType=cv.LINE_AA)
if j == 1:
print("\n\n\t\t Line Lengths: ", line_lengths)
print("\t\t Frame Index: ", frame['frame_index'])
# print("\n\n\t\t Face: ", (np.round(face[2] + 5).astype(np.int32), np.round(face[3] - 10).astype(np.int32)))
for line_length in line_lengths:
if line_length < 40:
look_flag = True
if look_flag and line_lengths:
text_look = "Looking"
print("\t LOOKING")
rgb_image = cv.cvtColor(frame['bgr'], cv.COLOR_BGR2RGB)
database.MarkingProcess(img = rgb_image, bboxs = frame['faces'], lookingflag = look_flag, frameindex = frame['frame_index'])
else:
text_look = "Not Looking"
print("\t Not Looking")
rgb_image = cv.cvtColor(frame['bgr'], cv.COLOR_BGR2RGB)
database.MarkingProcess(img = rgb_image, bboxs = frame['faces'], lookingflag = look_flag, frameindex = frame['frame_index'])
cv.putText(bgr, text_look, (np.round(face[0] + 5).astype(np.int32), np.round(face[1] - 10).astype(np.int32)),
fontFace=cv.FONT_HERSHEY_DUPLEX, fontScale=0.8,
color=(0, 0, 255), thickness=1, lineType=cv.LINE_AA)
if not args.headless:
resized_img = cv.resize(bgr, (1280, 720))
cv.imshow('vis', resized_img)
                            if args.record_video: video_recorder.write(resized_img)  # recorder only exists when recording
                            # cv.imshow('vis', bgr)
last_frame_index = frame_index
# Record frame?
if args.record_video:
video_out_queue.put_nowait(frame_index)
# Quit?
if cv.waitKey(1) & 0xFF == ord('q'):
return
# Print timings
if frame_index % 60 == 0:
latency = _dtime('before_frame_read', 'after_visualization')
processing = _dtime('after_frame_read', 'after_visualization')
timing_string = ', '.join([
_dstr('read', 'before_frame_read', 'after_frame_read'),
_dstr('preproc', 'after_frame_read', 'after_preprocessing'),
'infer: %dms' % int(frame['time']['inference']),
'vis: %dms' % int(frame['time']['visualization']),
'proc: %dms' % processing,
'latency: %dms' % latency,
])
print('%08d [%s] %s' % (frame_index, fps_str, timing_string))
<reponame>AusDTO/observatory-app
# Python 3.6.4 script to ingest the accounts data stream from BigQuery into
# the DTA cloud.gov RDS
# schedule intervals for the datasets are set on the DAG below
from __future__ import print_function
import datetime
import os
# import tablib
import pathlib
from airflow import models
from airflow.models import Variable
from airflow.operators import python_operator
from airflow.contrib.operators import bigquery_operator
from airflow.contrib.operators import bigquery_get_data
import logging
import json
import requests
import re
import six
from requests.exceptions import HTTPError
import dobs_signin
import dobs_constants
import dobs_data_ops
# token = signin.ACCESS_TOKEN
# header_token = {'Authorization': 'Bearer ' + token}
default_dag_args = {
# The start_date describes when a DAG is valid / can be run. Set this to a
# fixed point in time rather than dynamically, since it is evaluated every
# time a DAG is parsed. See:
# https://airflow.apache.org/faq.html#what-s-the-deal-with-start-date
'start_date': datetime.datetime(2020, 9, 20),
'retries': 0,
'retry_delay': datetime.timedelta(minutes=5)
}
with models.DAG(
'bigquery_output_update_prototype',
# schedule_interval=datetime.timedelta(days=1),
schedule_interval='0 20 * * *',
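        # the cron expression '0 20 * * *' runs the DAG daily at 20:00 in
        # Airflow's default timezone (typically UTC unless overridden)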
catchup=False,
on_failure_callback = None,
default_args=default_dag_args) as dag:
project_id = models.Variable.get('GCP_PROJECT', 'dta-ga-bigquery')
# BSQL script to pull the data from BigQuery
bigquery_data_type1 = bigquery_operator.BigQueryOperator(
task_id='bigquery_data_type1',
priority='BATCH',
bql=pathlib.Path(dobs_constants.DAGS_DIR + "/bq_observatory_service/bq_sql_exec_basics_weekly.sql").read_text(), use_legacy_sql=False)
bigquery_data_type2 = bigquery_operator.BigQueryOperator(
task_id='bigquery_data_type2',
priority='BATCH',
bql=pathlib.Path(dobs_constants.DAGS_DIR + "/bq_observatory_service/bq_sql_exec_basics_daily.sql").read_text(), use_legacy_sql=False)
bigquery_data_type3 = bigquery_operator.BigQueryOperator(
task_id='bigquery_data_type3',
priority='BATCH',
bql=pathlib.Path(dobs_constants.DAGS_DIR + "/bq_observatory_service/bq_sql_exec_basics_hourly.sql").read_text(), use_legacy_sql=False)
# BigQuery data fetch
# bigquery_fetch_type1_1 = bigquery_get_data.BigQueryGetDataOperator(
# task_id='bigquery_fetch_type1_1',
# dataset_id= dobs_constants.DATASET_EXEC_BASICS,
# table_id= dobs_constants.TABLE_EXEC_TYPE1_1
# )
bigquery_fetch_type1_1_toppages = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type1_1_toppages',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE1_1_TP
)
bigquery_fetch_type1_1_topgrowth = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type1_1_topgrowth',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE1_1_TG
)
# bigquery_fetch_type1_2 = bigquery_get_data.BigQueryGetDataOperator(
# task_id='bigquery_fetch_type1_2',
# dataset_id= dobs_constants.DATASET_EXEC_BASICS,
# table_id= dobs_constants.TABLE_EXEC_TYPE1_2
# )
# bigquery_fetch_type1_3 = bigquery_get_data.BigQueryGetDataOperator(
# task_id='bigquery_fetch_type1_3',
# dataset_id= dobs_constants.DATASET_EXEC_BASICS,
# table_id= dobs_constants.TABLE_EXEC_TYPE1_3
# )
bigquery_fetch_type1_3_toppages = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type1_3_toppages',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE1_3_TP
)
bigquery_fetch_type1_3_topgrowth = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type1_3_topgrowth',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE1_3_TG
)
# bigquery_fetch_type1_4 = bigquery_get_data.BigQueryGetDataOperator(
# task_id='bigquery_fetch_type1_4',
# dataset_id= dobs_constants.DATASET_EXEC_BASICS,
# table_id= dobs_constants.TABLE_EXEC_TYPE1_4
# )
bigquery_fetch_type1_4_toppages = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type1_4_toppages',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE1_4_TP
)
bigquery_fetch_type1_4_topgrowth = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type1_4_topgrowth',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE1_4_TG
)
# bigquery_fetch_type1_5 = bigquery_get_data.BigQueryGetDataOperator(
# task_id='bigquery_fetch_type1_5',
# dataset_id= dobs_constants.DATASET_EXEC_BASICS,
# table_id= dobs_constants.TABLE_EXEC_TYPE1_5
# )
bigquery_fetch_type1_5_toppages = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type1_5_toppages',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE1_5_TP
)
bigquery_fetch_type1_5_topgrowth = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type1_5_topgrowth',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE1_5_TG
)
# Type Daily BigQuery Tables Fetch
bigquery_fetch_type2_1 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_1',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_1
)
# Type Daily Top Pages
bigquery_fetch_type2_1_tpgs_day1 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_1_tpgs_day1',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_1_TP_DAY1
)
bigquery_fetch_type2_1_tpgs_day2 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_1_tpgs_day2',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id=dobs_constants.TABLE_EXEC_TYPE2_1_TP_DAY2
)
bigquery_fetch_type2_1_tpgs_day3 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_1_tpgs_day3',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_1_TP_DAY3
)
bigquery_fetch_type2_1_tpgs_day4 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_1_tpgs_day4',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_1_TP_DAY4
)
bigquery_fetch_type2_1_tpgs_day5 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_1_tpgs_day5',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_1_TP_DAY5
)
bigquery_fetch_type2_1_tpgs_day6 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_1_tpgs_day6',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_1_TP_DAY6
)
bigquery_fetch_type2_1_tpgs_day7 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_1_tpgs_day7',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_1_TP_DAY7
)
# Type Daily Top Growth Pages
bigquery_fetch_type2_1_tgw_day1 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_1_tgw_day1',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_1_TG_DAY1
)
bigquery_fetch_type2_1_tgw_day2 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_1_tgw_day2',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id=dobs_constants.TABLE_EXEC_TYPE2_1_TG_DAY2
)
bigquery_fetch_type2_1_tgw_day3 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_1_tgw_day3',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_1_TG_DAY3
)
bigquery_fetch_type2_1_tgw_day4 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_1_tgw_day4',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_1_TG_DAY4
)
bigquery_fetch_type2_1_tgw_day5 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_1_tgw_day5',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_1_TG_DAY5
)
bigquery_fetch_type2_1_tgw_day6 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_1_tgw_day6',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_1_TG_DAY6
)
bigquery_fetch_type2_1_tgw_day7 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_1_tgw_day7',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_1_TG_DAY7
)
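    # The seven per-day fetch tasks above follow one naming pattern; a sketch
    # like the following (not in the original DAG, assuming the TABLE_EXEC_*
    # constants shown) would generate them without repetition:
    #   for day in range(1, 8):
    #       bigquery_get_data.BigQueryGetDataOperator(
    #           task_id='bigquery_fetch_type2_1_tgw_day%d' % day,
    #           dataset_id=dobs_constants.DATASET_EXEC_BASICS,
    #           table_id=getattr(dobs_constants, 'TABLE_EXEC_TYPE2_1_TG_DAY%d' % day))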
# bigquery_fetch_type2_2 = bigquery_get_data.BigQueryGetDataOperator(
# task_id='bigquery_fetch_type2_2',
# dataset_id= dobs_constants.DATASET_EXEC_BASICS,
# table_id= dobs_constants.TABLE_EXEC_TYPE2_2
# )
bigquery_fetch_type2_3 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_3',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_3
)
bigquery_fetch_type2_3_tpgs_day1 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_3_tpgs_day1',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_3_TP_DAY1
)
bigquery_fetch_type2_3_tpgs_day2 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_3_tpgs_day2',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_3_TP_DAY2
)
bigquery_fetch_type2_3_tpgs_day3 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_3_tpgs_day3',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_3_TP_DAY3
)
bigquery_fetch_type2_3_tpgs_day4 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_3_tpgs_day4',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_3_TP_DAY4
)
bigquery_fetch_type2_3_tpgs_day5 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_3_tpgs_day5',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_3_TP_DAY5
)
bigquery_fetch_type2_3_tpgs_day6 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_3_tpgs_day6',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_3_TP_DAY6
)
bigquery_fetch_type2_3_tpgs_day7 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_3_tpgs_day7',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_3_TP_DAY7
)
bigquery_fetch_type2_3_tgw_day1 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_3_tgw_day1',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_3_TG_DAY1
)
bigquery_fetch_type2_3_tgw_day2 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_3_tgw_day2',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_3_TG_DAY2
)
bigquery_fetch_type2_3_tgw_day3 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_3_tgw_day3',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_3_TG_DAY3
)
bigquery_fetch_type2_3_tgw_day4 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_3_tgw_day4',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_3_TG_DAY4
)
bigquery_fetch_type2_3_tgw_day5 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_3_tgw_day5',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_3_TG_DAY5
)
bigquery_fetch_type2_3_tgw_day6 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_3_tgw_day6',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_3_TG_DAY6
)
bigquery_fetch_type2_3_tgw_day7 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_3_tgw_day7',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_3_TG_DAY7
)
bigquery_fetch_type2_4 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_4',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_4
)
bigquery_fetch_type2_4_tpgs_day1 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_4_tpgs_day1',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_4_TP_DAY1
)
bigquery_fetch_type2_4_tpgs_day2 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_4_tpgs_day2',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_4_TP_DAY2
)
bigquery_fetch_type2_4_tpgs_day3 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_4_tpgs_day3',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_4_TP_DAY3
)
bigquery_fetch_type2_4_tpgs_day4 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_4_tpgs_day4',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_4_TP_DAY4
)
bigquery_fetch_type2_4_tpgs_day5 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_4_tpgs_day5',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_4_TP_DAY5
)
bigquery_fetch_type2_4_tpgs_day6 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_4_tpgs_day6',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_4_TP_DAY6
)
bigquery_fetch_type2_4_tpgs_day7 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_4_tpgs_day7',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_4_TP_DAY7
)
bigquery_fetch_type2_4_tgw_day1 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_4_tgw_day1',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_4_TG_DAY1
)
bigquery_fetch_type2_4_tgw_day2 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_4_tgw_day2',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_4_TG_DAY2
)
bigquery_fetch_type2_4_tgw_day3 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_4_tgw_day3',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_4_TG_DAY3
)
bigquery_fetch_type2_4_tgw_day4 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_4_tgw_day4',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_4_TG_DAY4
)
bigquery_fetch_type2_4_tgw_day5 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_4_tgw_day5',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_4_TG_DAY5
)
bigquery_fetch_type2_4_tgw_day6 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_4_tgw_day6',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_4_TG_DAY6
)
bigquery_fetch_type2_4_tgw_day7 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_4_tgw_day7',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_4_TG_DAY7
)
bigquery_fetch_type2_5 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_5',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_5
)
bigquery_fetch_type2_5_tpgs_day1 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_5_tpgs_day1',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_5_TP_DAY1
)
bigquery_fetch_type2_5_tpgs_day2 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_5_tpgs_day2',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_5_TP_DAY2
)
bigquery_fetch_type2_5_tpgs_day3 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_5_tpgs_day3',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_5_TP_DAY3
)
bigquery_fetch_type2_5_tpgs_day4 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_5_tpgs_day4',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_5_TP_DAY4
)
bigquery_fetch_type2_5_tpgs_day5 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_5_tpgs_day5',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_5_TP_DAY5
)
bigquery_fetch_type2_5_tpgs_day6 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_5_tpgs_day6',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_5_TP_DAY6
)
bigquery_fetch_type2_5_tpgs_day7 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_5_tpgs_day7',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_5_TP_DAY7
)
bigquery_fetch_type2_5_tgw_day1 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_5_tgw_day1',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_5_TG_DAY1
)
bigquery_fetch_type2_5_tgw_day2 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_5_tgw_day2',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_5_TG_DAY2
)
bigquery_fetch_type2_5_tgw_day3 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_5_tgw_day3',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_5_TG_DAY3
)
bigquery_fetch_type2_5_tgw_day4 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_5_tgw_day4',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_5_TG_DAY4
)
bigquery_fetch_type2_5_tgw_day5 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_5_tgw_day5',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_5_TG_DAY5
)
bigquery_fetch_type2_5_tgw_day6 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_5_tgw_day6',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_5_TG_DAY6
)
bigquery_fetch_type2_5_tgw_day7 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type2_5_tgw_day7',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE2_5_TG_DAY7
)
bigquery_fetch_type3_1 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type3_1',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE3_1
)
# bigquery_fetch_type3_2 = bigquery_get_data.BigQueryGetDataOperator(
# task_id='bigquery_fetch_type3_2',
# dataset_id= dobs_constants.DATASET_EXEC_BASICS,
# table_id= dobs_constants.TABLE_EXEC_TYPE3_2
# )
bigquery_fetch_type3_3 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type3_3',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE3_3
)
bigquery_fetch_type3_4 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type3_4',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE3_4
)
bigquery_fetch_type3_5 = bigquery_get_data.BigQueryGetDataOperator(
task_id='bigquery_fetch_type3_5',
dataset_id= dobs_constants.DATASET_EXEC_BASICS,
table_id= dobs_constants.TABLE_EXEC_TYPE3_5
)
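# The fetch tasks above are fully repetitive; a table-driven loop is a common
# way to generate such task families. The sketch below is illustrative only
# (the getattr-based lookup assumes dobs_constants exposes the table names as
# module attributes), not part of the original DAG:
#
#   fetch_tasks = {}
#   for const_name in ("TYPE2_4", "TYPE2_4_TP_DAY1", "TYPE2_4_TG_DAY1"):
#       task_name = "bigquery_fetch_%s" % const_name.lower()
#       fetch_tasks[task_name] = bigquery_get_data.BigQueryGetDataOperator(
#           task_id=task_name,
#           dataset_id=dobs_constants.DATASET_EXEC_BASICS,
#           table_id=getattr(dobs_constants, "TABLE_EXEC_" + const_name),
#       )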
# Prepare analytics data for delivery into RDS
# Weekly data output transform function
def prepare_outtype1(data_tp, data_tg):
    output = []
    top10g = []
    top10p = []
    for datum in data_tp:
        property_id, hostname, users, newUsers, returningUsers, pageviews, time_on_page, bounce_rate, sessions, aveSession, pagesPerSession, aveSessionDuration, pageviews_tp, pagetitle_tp, pageurl_tp, trend_percent, top_rank_tp, week_start, week_end = datum
        top10p.append({"pageUrl": pageurl_tp, "pageTitle": pagetitle_tp, "pageViews": pageviews_tp, "rank": top_rank_tp, "percentage": trend_percent})
    for datum in data_tg:
        property_id, hostname, users, newUsers, returningUsers, pageviews, time_on_page, bounce_rate, sessions, aveSession, pagesPerSession, aveSessionDuration, pageviews_tg, pagetitle_tg, pageurl_tg, growth_percent, top_rank_tg, week_start, week_end = datum
        top10g.append({"pageUrl": pageurl_tg, "pageTitle": pagetitle_tg, "pageViews": pageviews_tg, "rank": top_rank_tg, "percentage": growth_percent})
    # The summary fields below keep the values unpacked from the last row of
    # data_tg, which carries the week-level aggregates alongside the per-page
    # columns.
    output.append({"dateEnding": week_end, "users": users, "pageViews": pageviews, "timeOnPage": time_on_page, "bounceRate": bounce_rate, "sessions": sessions, "aveSessionsPerUser": aveSession, "pagesPerSession": pagesPerSession, "aveSessionDuration": aveSessionDuration, "newUsers": newUsers, "returningUsers": returningUsers, "topTenGrowth": top10g, "topTenPageViews": top10p})
    jdata = {
        "output": output
    }
    logging.info(jdata)
    return jdata, datum[0]
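# Illustrative call (the row values are hypothetical placeholders; real rows
# come from the BigQuery fetch tasks above and must match the 19-column
# unpacking in prepare_outtype1):
#
#   sample_tp = [("prop-1", "example.com", 100, 40, 60, 500, 30.0, 0.4, 200,
#                 1.5, 2.5, 45.0, 120, "Home", "/", 12.5, 1,
#                 "2021-01-04", "2021-01-10")]
#   sample_tg = [("prop-1", "example.com", 100, 40, 60, 500, 30.0, 0.4, 200,
#                 1.5, 2.5, 45.0, 80, "Blog", "/blog", 33.3, 1,
#                 "2021-01-04", "2021-01-10")]
#   jdata, property_id = prepare_outtype1(sample_tp, sample_tg)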
# Daily data output transform function
def prepare_outtype2(data_, data_tp1, data_tg1, data_tp2, data_tg2, data_tp3, data_tg3, data_tp4, data_tg4, data_tp5, data_tg5, data_tp6, data_tg6, data_tp7, data_tg7):
output = []
top10g1 = []
top10p1 = []
top10g2 = []
top10p2 = []
top10g3 = []
top10p3 = []
top10g4 = []
top10p4 = []
top10g5 = []
top10p5 = []
top10g6 = []
top10p6 = []
top10g7 = []
top10p7 = []
for datum_p1 in data_tp1:
property_id, reg_domain, pageviews_tp, pagetitle_tp, pageurl_tp, trend_percent, top_rank_tp, visit_date = datum_p1
top10p1.append({"pageUrl": pageurl_tp, "pageTitle": pagetitle_tp, "pageViews": pageviews_tp, "rank": top_rank_tp, "percentage": trend_percent})
for datum_g1 in data_tg1:
property_id, reg_domain, pageviews_tg, pagetitle_tg, pageurl_tg, growth_percent, top_rank_tg, visit_date = datum_g1
top10g1.append({"pageUrl": pageurl_tg, "pageTitle": pagetitle_tg, "pageViews": pageviews_tg, "rank": top_rank_tg, "percentage": growth_percent})
visitdate_1 = datum_g1[7]
for datum_p2 in data_tp2:
property_id, reg_domain, pageviews_tp, pagetitle_tp, pageurl_tp, trend_percent, top_rank_tp, visit_date = datum_p2
top10p2.append({"pageUrl": pageurl_tp, "pageTitle": pagetitle_tp, "pageViews": pageviews_tp, "rank": top_rank_tp, "percentage": trend_percent})
for datum_g2 in data_tg2:
property_id, reg_domain, pageviews_tg, pagetitle_tg, pageurl_tg, growth_percent, top_rank_tg, visit_date = datum_g2
top10g2.append({"pageUrl": pageurl_tg, "pageTitle": pagetitle_tg, "pageViews": pageviews_tg, "rank": top_rank_tg, "percentage": growth_percent})
visitdate_2 = datum_g2[7]
for datum_p3 in data_tp3:
property_id, reg_domain, pageviews_tp, pagetitle_tp, pageurl_tp, trend_percent, top_rank_tp, visit_date = datum_p3
top10p3.append({"pageUrl": pageurl_tp, "pageTitle": pagetitle_tp, "pageViews": | |
<gh_stars>0
import itertools
import subprocess
import django
from django.core.signals import request_started
from django.core.wsgi import get_wsgi_application
from django.db import close_old_connections
from django.test import modify_settings
from django.test import override_settings
from django.test.client import RequestFactory
from django.utils.functional import SimpleLazyObject
from django.views.generic import TemplateView
import mock
import pytest
from six import ensure_text
from ddtrace import config
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.constants import ERROR_MSG
from ddtrace.constants import ERROR_STACK
from ddtrace.constants import ERROR_TYPE
from ddtrace.constants import SAMPLING_PRIORITY_KEY
from ddtrace.constants import USER_KEEP
from ddtrace.contrib.django.patch import instrument_view
from ddtrace.contrib.django.utils import get_request_uri
from ddtrace.ext import http
from ddtrace.internal.compat import PY2
from ddtrace.internal.compat import binary_type
from ddtrace.internal.compat import string_type
from ddtrace.propagation.http import HTTP_HEADER_PARENT_ID
from ddtrace.propagation.http import HTTP_HEADER_SAMPLING_PRIORITY
from ddtrace.propagation.http import HTTP_HEADER_TRACE_ID
from ddtrace.propagation.utils import get_wsgi_header
from ddtrace.vendor import wrapt
from tests.opentracer.utils import init_tracer
from tests.utils import assert_dict_issuperset
from tests.utils import override_config
from tests.utils import override_env
from tests.utils import override_global_config
from tests.utils import override_http_config
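# Tests for the ddtrace Django integration: they assert the structure of the
# `django.request` root span, the `django.middleware` child spans, 404
# handling, error propagation, and HTTP header tagging across Django versions.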
@pytest.mark.skipif(django.VERSION < (2, 0, 0), reason="")
def test_django_v2XX_request_root_span(client, test_spans):
"""
When making a request to a Django app
We properly create the `django.request` root span
"""
resp = client.get("/")
assert resp.status_code == 200
assert resp.content == b"Hello, test app."
spans = test_spans.get_spans()
# Assert the correct number of traces and spans
assert len(spans) == 26
# Assert the structure of the root `django.request` span
root = test_spans.get_root_span()
if django.VERSION >= (2, 2, 0):
resource = "GET ^$"
else:
resource = "GET tests.contrib.django.views.index"
meta = {
"django.request.class": "django.core.handlers.wsgi.WSGIRequest",
"django.response.class": "django.http.response.HttpResponse",
"django.user.is_authenticated": "False",
"django.view": "tests.contrib.django.views.index",
"http.method": "GET",
"http.status_code": "200",
"http.url": "http://testserver/",
}
if django.VERSION >= (2, 2, 0):
meta["http.route"] = "^$"
assert http.QUERY_STRING not in root.meta
root.assert_matches(
name="django.request",
service="django",
resource=resource,
parent_id=None,
span_type="web",
error=0,
meta=meta,
)
@pytest.mark.skipif(django.VERSION < (2, 0, 0), reason="")
def test_django_v2XX_alter_root_resource(client, test_spans):
"""
When making a request to a Django app
We properly create the `django.request` root span
"""
resp = client.get("/alter-resource/")
assert resp.status_code == 200
assert resp.content == b""
spans = test_spans.get_spans()
# Assert the correct number of traces and spans
assert len(spans) == 26
# Assert the structure of the root `django.request` span
root = test_spans.get_root_span()
meta = {
"django.request.class": "django.core.handlers.wsgi.WSGIRequest",
"django.response.class": "django.http.response.HttpResponse",
"django.user.is_authenticated": "False",
"django.view": "tests.contrib.django.views.alter_resource",
"http.method": "GET",
"http.status_code": "200",
"http.url": "http://testserver/alter-resource/",
}
if django.VERSION >= (2, 2, 0):
meta["http.route"] = "^alter-resource/$"
assert http.QUERY_STRING not in root.meta
root.assert_matches(
name="django.request",
service="django",
resource="custom django.request resource",
parent_id=None,
span_type="web",
error=0,
meta=meta,
)
@pytest.mark.skipif(django.VERSION >= (2, 0, 0), reason="")
def test_v1XX_middleware(client, test_spans):
resp = client.get("/")
assert resp.status_code == 200
assert resp.content == b"Hello, test app."
# Assert the correct number of traces and spans
if django.VERSION < (1, 11, 0):
test_spans.assert_span_count(15)
else:
test_spans.assert_span_count(16)
# Get all the `django.middleware` spans in this trace
middleware_spans = list(test_spans.filter_spans(name="django.middleware"))
if django.VERSION < (1, 11, 0):
assert len(middleware_spans) == 13
else:
assert len(middleware_spans) == 14
root = test_spans.get_root_span()
root.assert_matches(name="django.request")
# Assert common span structure
for span in middleware_spans:
span.assert_matches(
name="django.middleware",
service="django",
error=0,
span_type=None,
parent_id=root.span_id, # They are all children of the root django.request
)
# DEV: Order matters here, we want all `process_request` before `process_view`, before `process_response`
expected_resources = [
"django.contrib.sessions.middleware.SessionMiddleware.process_request",
"django.middleware.common.CommonMiddleware.process_request",
"django.middleware.csrf.CsrfViewMiddleware.process_request", # Not in < 1.11.0
"django.contrib.auth.middleware.AuthenticationMiddleware.process_request",
"django.contrib.auth.middleware.SessionAuthenticationMiddleware.process_request",
"django.contrib.messages.middleware.MessageMiddleware.process_request",
"django.middleware.security.SecurityMiddleware.process_request",
"django.middleware.csrf.CsrfViewMiddleware.process_view",
"django.middleware.security.SecurityMiddleware.process_response",
"django.middleware.clickjacking.XFrameOptionsMiddleware.process_response",
"django.contrib.messages.middleware.MessageMiddleware.process_response",
"django.middleware.csrf.CsrfViewMiddleware.process_response",
"django.middleware.common.CommonMiddleware.process_response",
"django.contrib.sessions.middleware.SessionMiddleware.process_response",
]
if django.VERSION < (1, 11, 0):
expected_resources.remove("django.middleware.csrf.CsrfViewMiddleware.process_request")
middleware_spans = sorted(middleware_spans, key=lambda s: s.start)
span_resources = [s.resource for s in middleware_spans]
assert span_resources == expected_resources
def test_disallowed_host(client, test_spans):
with override_settings(ALLOWED_HOSTS="not-testserver"):
resp = client.get("/")
assert resp.status_code == 400
assert b"Bad Request (400)" in resp.content
root_span = test_spans.get_root_span()
assert root_span.get_tag("http.status_code") == "400"
assert root_span.get_tag("http.url") == "http://testserver/"
def test_http_header_tracing_disabled(client, test_spans):
headers = {
get_wsgi_header("my-header"): "my_value",
}
resp = client.get("/", **headers)
assert resp.status_code == 200
assert resp.content == b"Hello, test app."
root = test_spans.get_root_span()
assert root.get_tag("http.request.headers.my-header") is None
assert root.get_tag("http.response.headers.my-response-header") is None
def test_http_header_tracing_enabled(client, test_spans):
with override_config("django", {}):
config.django.http.trace_headers(["my-header", "my-response-header"])
headers = {
get_wsgi_header("my-header"): "my_value",
}
resp = client.get("/", **headers)
assert resp.status_code == 200
assert resp.content == b"Hello, test app."
root = test_spans.get_root_span()
assert root.get_tag("http.request.headers.my-header") == "my_value"
assert root.get_tag("http.response.headers.my-response-header") == "my_response_value"
"""
Middleware tests
"""
@pytest.mark.skipif(django.VERSION < (2, 0, 0), reason="")
def test_v2XX_middleware(client, test_spans):
"""
When making a request to a Django app
We properly create the `django.middleware` spans
"""
resp = client.get("/")
assert resp.status_code == 200
assert resp.content == b"Hello, test app."
# Assert the correct number of traces and spans
test_spans.assert_span_count(26)
# Get all the `django.middleware` spans in this trace
middleware_spans = list(test_spans.filter_spans(name="django.middleware"))
assert len(middleware_spans) == 24
# Assert common span structure
for span in middleware_spans:
span.assert_matches(
name="django.middleware",
service="django",
error=0,
span_type=None,
)
span_resources = {
"django.contrib.auth.middleware.AuthenticationMiddleware.__call__",
"django.contrib.auth.middleware.AuthenticationMiddleware.process_request",
"django.contrib.messages.middleware.MessageMiddleware.__call__",
"django.contrib.messages.middleware.MessageMiddleware.process_request",
"django.contrib.messages.middleware.MessageMiddleware.process_response",
"django.contrib.sessions.middleware.SessionMiddleware.__call__",
"django.contrib.sessions.middleware.SessionMiddleware.process_request",
"django.contrib.sessions.middleware.SessionMiddleware.process_response",
"django.middleware.clickjacking.XFrameOptionsMiddleware.__call__",
"django.middleware.clickjacking.XFrameOptionsMiddleware.process_response",
"django.middleware.common.CommonMiddleware.__call__",
"django.middleware.common.CommonMiddleware.process_request",
"django.middleware.common.CommonMiddleware.process_response",
"django.middleware.csrf.CsrfViewMiddleware.__call__",
"django.middleware.csrf.CsrfViewMiddleware.process_request",
"django.middleware.csrf.CsrfViewMiddleware.process_response",
"django.middleware.csrf.CsrfViewMiddleware.process_view",
"django.middleware.security.SecurityMiddleware.__call__",
"django.middleware.security.SecurityMiddleware.process_request",
"django.middleware.security.SecurityMiddleware.process_response",
"tests.contrib.django.middleware.ClsMiddleware.__call__",
"tests.contrib.django.middleware.fn_middleware",
"tests.contrib.django.middleware.EverythingMiddleware.__call__",
"tests.contrib.django.middleware.EverythingMiddleware.process_view",
}
assert set([s.resource for s in middleware_spans]) == span_resources
# Get middleware spans in reverse order of start time
middleware_spans = sorted(middleware_spans, key=lambda s: s.start, reverse=True)
# Assert the first middleware span's parent is the root span (django.request)
root_span = test_spans.get_root_span()
assert root_span.name == "django.request"
first_middleware = middleware_spans[-1]
assert first_middleware.parent_id == root_span.span_id
def test_django_request_not_found(client, test_spans):
"""
When making a request to a Django app
When the endpoint doesn't exist
We create a 404 span
"""
resp = client.get("/unknown/endpoint")
assert resp.status_code == 404
if django.VERSION >= (3, 0, 0):
content = (
b'\n<!doctype html>\n<html lang="en">\n<head>\n <title>Not Found</title>\n'
b"</head>\n<body>\n <h1>Not Found</h1><p>The requested resource was not found "
b"on this server.</p>\n</body>\n</html>\n"
)
elif django.VERSION >= (1, 11, 0):
content = b"<h1>Not Found</h1><p>The requested resource was not found on this server.</p>"
else:
content = b"<h1>Not Found</h1><p>The requested URL /unknown/endpoint was not found on this server.</p>"
assert resp.content == content
# Assert the correct number of traces and spans
if django.VERSION >= (2, 0, 0):
span_count = 27
elif django.VERSION >= (1, 11, 0):
span_count = 18
else:
span_count = 16
test_spans.assert_span_count(span_count)
# Assert the structure of the root `django.request` span
root = test_spans.get_root_span()
root.assert_matches(
name="django.request",
service="django",
resource="GET 404",
parent_id=None,
span_type="web",
error=0,
meta={
"django.request.class": "django.core.handlers.wsgi.WSGIRequest",
"django.response.class": "django.http.response.HttpResponseNotFound",
"http.method": "GET",
"http.status_code": "404",
"http.url": "http://testserver/unknown/endpoint",
},
)
# Assert template render
render_spans = list(test_spans.filter_spans(name="django.template.render"))
assert len(render_spans) == 1
render_span = render_spans[0]
render_span.assert_matches(
name="django.template.render",
resource="django.template.base.Template.render",
meta={
"django.template.engine.class": "django.template.engine.Engine",
},
)
def test_middleware_trace_error_500(client, test_spans):
# ensures exceptions generated by views are traced
with modify_settings(
**(
dict(MIDDLEWARE={"append": "tests.contrib.django.middleware.CatchExceptionMiddleware"})
if django.VERSION >= (2, 0, 0)
else dict(MIDDLEWARE_CLASSES={"append": "tests.contrib.django.middleware.CatchExceptionMiddleware"})
)
):
assert client.get("/error-500/").status_code == 500
error_spans = list(test_spans.filter_spans(error=1))
# There should be 3 spans flagged as errors
# 1. The view span which wraps the original error
# 2. The root span which should just be flagged as an error (no exception info)
# 3. The middleware span that catches the exception and converts it to a 500
assert len(error_spans) == 3
# Test the root span
span = test_spans.get_root_span()
assert span.error == 1
assert span.get_tag("http.status_code") == "500"
assert span.get_tag(http.URL) == "http://testserver/error-500/"
if django.VERSION >= (2, 2, 0):
assert span.resource == "GET ^error-500/$"
else:
assert span.resource == "GET tests.contrib.django.views.error_500"
assert span.get_tag(ERROR_MSG) is None
assert span.get_tag(ERROR_TYPE) is None
assert span.get_tag(ERROR_STACK) is None
# Test the view span (where the exception is generated)
view_span = list(test_spans.filter_spans(name="django.view"))
assert len(view_span) == 1
view_span = view_span[0]
assert view_span.error == 1
# Make sure the message is somewhere in the stack trace
assert "Error 500" in view_span.get_tag(ERROR_STACK)
# Test the catch exception middleware
res = "tests.contrib.django.middleware.CatchExceptionMiddleware.process_exception"
mw_span = list(test_spans.filter_spans(resource=res))[0]
assert mw_span.error == 1
    # Make sure the message is somewhere in the middleware span's stack trace
    assert "Error 500" in mw_span.get_tag(ERROR_STACK)
assert mw_span.get_tag(ERROR_MSG) is not None
assert mw_span.get_tag(ERROR_TYPE) is not None
assert mw_span.get_tag(ERROR_STACK) is not None
def test_middleware_handled_view_exception_success(client, test_spans):
"""
When an exception is raised in a view and then handled
Only the culprit span contains error properties
"""
with modify_settings(
**(
dict(MIDDLEWARE={"append": "tests.contrib.django.middleware.HandleErrorMiddlewareSuccess"})
if django.VERSION >= (2, 0, 0)
else dict(MIDDLEWARE_CLASSES={"append": "tests.contrib.django.middleware.HandleErrorMiddlewareSuccess"})
)
):
assert client.get("/error-500/").status_code == 200
error_spans = list(test_spans.filter_spans(error=1))
# There should be 1 span flagged as erroneous:
# - The view span which wraps the original error
assert len(error_spans) == 1
# Test the root span
root_span = test_spans.get_root_span()
assert root_span.error == 0
assert root_span.get_tag(ERROR_STACK) is None
assert root_span.get_tag(ERROR_MSG) is None
assert root_span.get_tag(ERROR_TYPE) is None
# Test the view span (where the exception is generated)
view_span = list(test_spans.filter_spans(name="django.view"))
assert len(view_span) == 1
view_span = view_span[0]
assert view_span.error == 1
# Make sure the message is somewhere in the stack trace
assert "Error 500" in view_span.get_tag("error.stack")
@pytest.mark.skipif(django.VERSION < (1, 10, 0), | |
<filename>sdk/python/pulumi_azure_native/insights/alert_rule.py<gh_stars>0
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['AlertRuleArgs', 'AlertRule']
@pulumi.input_type
class AlertRuleArgs:
def __init__(__self__, *,
condition: pulumi.Input[Union['LocationThresholdRuleConditionArgs', 'ManagementEventRuleConditionArgs', 'ThresholdRuleConditionArgs']],
is_enabled: pulumi.Input[bool],
name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
action: Optional[pulumi.Input[Union['RuleEmailActionArgs', 'RuleWebhookActionArgs']]] = None,
actions: Optional[pulumi.Input[Sequence[pulumi.Input[Union['RuleEmailActionArgs', 'RuleWebhookActionArgs']]]]] = None,
description: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
rule_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a AlertRule resource.
:param pulumi.Input[Union['LocationThresholdRuleConditionArgs', 'ManagementEventRuleConditionArgs', 'ThresholdRuleConditionArgs']] condition: the condition that results in the alert rule being activated.
:param pulumi.Input[bool] is_enabled: the flag that indicates whether the alert rule is enabled.
:param pulumi.Input[str] name: the name of the alert rule.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Union['RuleEmailActionArgs', 'RuleWebhookActionArgs']] action: action that is performed when the alert rule becomes active, and when an alert condition is resolved.
:param pulumi.Input[Sequence[pulumi.Input[Union['RuleEmailActionArgs', 'RuleWebhookActionArgs']]]] actions: the array of actions that are performed when the alert rule becomes active, and when an alert condition is resolved.
:param pulumi.Input[str] description: the description of the alert rule that will be included in the alert email.
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[str] provisioning_state: the provisioning state.
:param pulumi.Input[str] rule_name: The name of the rule.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
pulumi.set(__self__, "condition", condition)
pulumi.set(__self__, "is_enabled", is_enabled)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if action is not None:
pulumi.set(__self__, "action", action)
if actions is not None:
pulumi.set(__self__, "actions", actions)
if description is not None:
pulumi.set(__self__, "description", description)
if location is not None:
pulumi.set(__self__, "location", location)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if rule_name is not None:
pulumi.set(__self__, "rule_name", rule_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def condition(self) -> pulumi.Input[Union['LocationThresholdRuleConditionArgs', 'ManagementEventRuleConditionArgs', 'ThresholdRuleConditionArgs']]:
"""
the condition that results in the alert rule being activated.
"""
return pulumi.get(self, "condition")
@condition.setter
def condition(self, value: pulumi.Input[Union['LocationThresholdRuleConditionArgs', 'ManagementEventRuleConditionArgs', 'ThresholdRuleConditionArgs']]):
pulumi.set(self, "condition", value)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> pulumi.Input[bool]:
"""
the flag that indicates whether the alert rule is enabled.
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
the name of the alert rule.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def action(self) -> Optional[pulumi.Input[Union['RuleEmailActionArgs', 'RuleWebhookActionArgs']]]:
"""
action that is performed when the alert rule becomes active, and when an alert condition is resolved.
"""
return pulumi.get(self, "action")
@action.setter
def action(self, value: Optional[pulumi.Input[Union['RuleEmailActionArgs', 'RuleWebhookActionArgs']]]):
pulumi.set(self, "action", value)
@property
@pulumi.getter
def actions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Union['RuleEmailActionArgs', 'RuleWebhookActionArgs']]]]]:
"""
the array of actions that are performed when the alert rule becomes active, and when an alert condition is resolved.
"""
return pulumi.get(self, "actions")
@actions.setter
def actions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[Union['RuleEmailActionArgs', 'RuleWebhookActionArgs']]]]]):
pulumi.set(self, "actions", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
the description of the alert rule that will be included in the alert email.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[str]]:
"""
the provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "provisioning_state", value)
@property
@pulumi.getter(name="ruleName")
def rule_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the rule.
"""
return pulumi.get(self, "rule_name")
@rule_name.setter
def rule_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "rule_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class AlertRule(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
action: Optional[pulumi.Input[Union[pulumi.InputType['RuleEmailActionArgs'], pulumi.InputType['RuleWebhookActionArgs']]]] = None,
actions: Optional[pulumi.Input[Sequence[pulumi.Input[Union[pulumi.InputType['RuleEmailActionArgs'], pulumi.InputType['RuleWebhookActionArgs']]]]]] = None,
condition: Optional[pulumi.Input[Union[pulumi.InputType['LocationThresholdRuleConditionArgs'], pulumi.InputType['ManagementEventRuleConditionArgs'], pulumi.InputType['ThresholdRuleConditionArgs']]]] = None,
description: Optional[pulumi.Input[str]] = None,
is_enabled: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
rule_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
The alert rule resource.
API Version: 2016-03-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Union[pulumi.InputType['RuleEmailActionArgs'], pulumi.InputType['RuleWebhookActionArgs']]] action: action that is performed when the alert rule becomes active, and when an alert condition is resolved.
:param pulumi.Input[Sequence[pulumi.Input[Union[pulumi.InputType['RuleEmailActionArgs'], pulumi.InputType['RuleWebhookActionArgs']]]]] actions: the array of actions that are performed when the alert rule becomes active, and when an alert condition is resolved.
:param pulumi.Input[Union[pulumi.InputType['LocationThresholdRuleConditionArgs'], pulumi.InputType['ManagementEventRuleConditionArgs'], pulumi.InputType['ThresholdRuleConditionArgs']]] condition: the condition that results in the alert rule being activated.
:param pulumi.Input[str] description: the description of the alert rule that will be included in the alert email.
:param pulumi.Input[bool] is_enabled: the flag that indicates whether the alert rule is enabled.
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[str] name: the name of the alert rule.
:param pulumi.Input[str] provisioning_state: the provisioning state.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] rule_name: The name of the rule.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: AlertRuleArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The alert rule resource.
API Version: 2016-03-01.
:param str resource_name: The name of the resource.
:param AlertRuleArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(AlertRuleArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
action: Optional[pulumi.Input[Union[pulumi.InputType['RuleEmailActionArgs'], pulumi.InputType['RuleWebhookActionArgs']]]] = None,
actions: Optional[pulumi.Input[Sequence[pulumi.Input[Union[pulumi.InputType['RuleEmailActionArgs'], pulumi.InputType['RuleWebhookActionArgs']]]]]] = None,
condition: Optional[pulumi.Input[Union[pulumi.InputType['LocationThresholdRuleConditionArgs'], pulumi.InputType['ManagementEventRuleConditionArgs'], pulumi.InputType['ThresholdRuleConditionArgs']]]] = None,
description: Optional[pulumi.Input[str]] = None,
is_enabled: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
rule_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = AlertRuleArgs.__new__(AlertRuleArgs)
__props__.__dict__["action"] = action
__props__.__dict__["actions"] = actions
if condition is None and not opts.urn:
raise TypeError("Missing required property 'condition'")
__props__.__dict__["condition"] = condition
__props__.__dict__["description"] = description
if is_enabled is None and not opts.urn:
raise TypeError("Missing required property 'is_enabled'")
__props__.__dict__["is_enabled"] = is_enabled
__props__.__dict__["location"] = location
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__.__dict__["name"] = name
__props__.__dict__["provisioning_state"] = provisioning_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["rule_name"] = rule_name
__props__.__dict__["tags"] = tags
__props__.__dict__["last_updated_time"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:insights:AlertRule"), pulumi.Alias(type_="azure-native:insights/v20140401:AlertRule"), pulumi.Alias(type_="azure-nextgen:insights/v20140401:AlertRule"), pulumi.Alias(type_="azure-native:insights/v20160301:AlertRule"), pulumi.Alias(type_="azure-nextgen:insights/v20160301:AlertRule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(AlertRule, __self__).__init__(
'azure-native:insights:AlertRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'AlertRule':
"""
Get an existing AlertRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = AlertRuleArgs.__new__(AlertRuleArgs)
__props__.__dict__["action"] = None
__props__.__dict__["actions"] = None
__props__.__dict__["condition"] = None
__props__.__dict__["description"] = None
__props__.__dict__["is_enabled"] = None
__props__.__dict__["last_updated_time"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return AlertRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def action(self) -> pulumi.Output[Optional[Any]]:
"""
action that is performed when the alert rule becomes active, and when an alert condition is resolved.
"""
return pulumi.get(self, "action")
@property
@pulumi.getter
def actions(self) -> pulumi.Output[Optional[Sequence[Any]]]:
"""
the | |
<reponame>mannuan/beryllium
# -*- coding:utf-8 -*-
import warnings
from selenium import webdriver
from selenium.webdriver.remote.webelement import WebElement
from pyvirtualdisplay import Display
import random
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import traceback
import inspect
from selenium.webdriver.common.keys import Keys
import time
from beryllium.logger import get_logger
from beryllium.page import Page, PageFunc, NextPageCssSelectorSetup, NextPageLinkTextSetup
from beryllium.listcssselector import ListCssSelector
from beryllium.field import Field, FieldName, FieldType, FieldList
from beryllium.mongodb import Mongodb
import re
import json
import sys
from selenium.webdriver.support.ui import Select
from pymongo.collection import Collection
import os
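# Illustrative usage of the Driver wrapper defined below (the log file name
# and URL are hypothetical; a chromedriver binary is assumed to be on PATH):
#
#   d = Driver(log_file_name="20210101", is_headless=True)
#   d.driver.get("https://example.com")
#   d.vertical_scroll_to()
#   d.until_scroll_to_center_click_by_css_selector("a.next")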
class Driver(object):
desktop_user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) " \
"Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393"
mobile_user_agent = "Mozilla/5.0 (iPhone; CPU iPhone OS 9_2 like Mac OS X) AppleWebKit/601.1.46 " \
"(KHTML, like Gecko) Version/9.0 Mobile/13C75 Safari/601.1"
curr_user_agent = desktop_user_agent
scroll_to_center_js_script = "window.scrollBy(arguments[0].getClientRects()[0].x + arguments[0].clientWidth / 2" \
" - window.innerWidth / 2, arguments[0].getClientRects()[0].y" \
" + arguments[0].clientHeight / 2 - window.innerHeight / 2)"
    # Titles that identify a verification (captcha) page; "验证" means "verify"
verify_page_title_list = ["验证"]
def __init__(self, log_file_name="00000000", is_mobile=False, is_virtual_display=False, is_headless=False,
is_load_images=False, is_proxy=False, initial_proxy_ip="127.0.0.1"):
"""
:param log_file_name:日志文件名称
:param is_mobile:是否是移动端
:param is_virtual_display:是否隐藏窗口
:param is_headless:是否隐藏窗口
:param is_load_images:是否加载图片
:param is_proxy:是否代理
:param initial_proxy_ip:初始的代理ip
"""
self.logger = get_logger(log_file_name)
self.is_mobile = is_mobile
self.is_virtual_display = is_virtual_display
self.is_headless = is_headless
self.is_load_images = is_load_images
self.driver = self.get_driver()
self.data_key = {}
self.is_proxy = is_proxy
        self.initial_proxy_ip = initial_proxy_ip  # the initial proxy IP set by the user
        self.pre_proxy_ip = initial_proxy_ip  # the previous proxy IP
def __del__(self):
"""
析构函数
:return:
"""
try:
self.driver.quit()
except Exception:
pass
@staticmethod
def get_curr_ip():
"""
获取当前ip
:return:
"""
return re.findall(r"[\d]{1,3}.[\d]{1,3}.[\d]{1,3}.[\d]{1,3}",
os.popen("tsocks wget -q -O - http://pv.sohu.com/cityjson | awk '{print $5}'").read())[0]
def get_curr_proxy_ip(self):
"""
获得当前代理ip
:return:
"""
while True:
try:
proxy_ip = re.findall(r"[\d]{1,3}.[\d]{1,3}.[\d]{1,3}.[\d]{1,3}", os.popen(
"tsocks wget -q -O - http://pv.sohu.com/cityjson | awk '{print $5}'").read())[0]
return proxy_ip
except Exception as e:
self.error_log(e=e)
self.info_log(data="获取proxy_ip出错!!!")
self.debug_log(data="暂停2秒....")
time.sleep(2)
def is_verify_page(self):
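        """
        Check whether the current (last) window shows a verification page,
        judged by the title keywords in verify_page_title_list.
        """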
self.switch_window_by_index(index=-1)
return True in [i in self.driver.title for i in self.verify_page_title_list]
def get_options(self):
options = webdriver.ChromeOptions()
if self.is_mobile:
options.add_argument(
"user-agent=%s" % self.mobile_user_agent)
self.curr_user_agent = self.mobile_user_agent
# mobile_emulation = {
# "deviceMetrics": {"width": 360, "height": 640, "pixelRatio": 3.0},
# "userAgent": self.mobile_user_agent}
# mobile_emulation = {"deviceName": "Galaxy S5"}
# options.add_experimental_option('mobileEmulation', mobile_emulation)
else:
options.add_argument(
"user-agent=%s" % self.desktop_user_agent)
self.curr_user_agent = self.desktop_user_agent
options.add_argument("lang=zh_CN.UTF-8")
if self.is_virtual_display:
self.logger.debug("virtual display is running")
display = Display(visible=0, size=(1440, 900))
display.start()
if self.is_virtual_display is False and self.is_headless is True:
self.logger.debug("headless is running")
options.add_argument("--headless")
if not self.is_load_images:
self.logger.debug("load images is false")
options.add_argument("--load-images=false") # 不加载图片
# 1允许所有图片;2阻止所有图片;3阻止第三方服务器图片
prefs = {
"profile.default_content_setting.images": 2,
}
options.add_experimental_option("prefs", prefs)
# options.add_argument("--disable-images")
# prefs = {
# 'profile.default_content_setting.notifications': 2,
# 'profile.default_content_setting.geolocation': 2,
# }
# options.add_experimental_option('prefs', prefs)
# options.add_argument('start-maximized')
options.add_argument("disable-infobars") # 隐藏自动化软件测试的提示
# options.add_argument("--disable-notifications")
        # options.add_argument('--disk-cache=true')  # enable the page cache to speed up loading
# options.add_argument('auto-open-devtools-for-tabs')
return options
def get_driver(self):
"""
获得驱动
:return:
"""
driver = webdriver.Chrome(chrome_options=self.get_options())
driver.set_page_load_timeout(30)
return driver
@staticmethod
def __get_running_func__(level=2):
"""
:param level:
:return:
"""
inspect_stack = inspect.stack()
if len(inspect_stack) < 2:
pass
elif len(inspect_stack) < level:
inspect_stack = inspect_stack[:-2][::-1]
elif len(inspect_stack) > level:
inspect_stack = inspect.stack()[-level - 2:-2][::-1]
return " - ".join("%s.%s.[%s]" % (i[1].split("/")[-1].split(".")[0], i[3], i[2]) for i in inspect_stack)
def error_log(self, e: Exception, name="", istraceback=True, level=2):
"""
:param name:
:param e:
:param istraceback:
:param level:
:return:
"""
traceback_e = ""
if istraceback:
traceback_e = traceback.format_exc()
self.logger.error("%s %s: %s\n%s" % (self.__get_running_func__(level=level), name, traceback_e, str(e)))
def warning_log(self, e: Exception, name="", level=2):
"""
:param name:
:param e:
:param level:
:return:
"""
self.logger.warning("%s %s: %s" % (self.__get_running_func__(level=level), name, str(e)))
def info_log(self, data: str, name="", level=2):
"""
:param name:
:param data:
:param level:
:return:
"""
self.logger.info("%s %s: %s" % (self.__get_running_func__(level=level), name, data))
def debug_log(self, data: str, name="", level=2):
"""
:param name:
:param data:
:param level:
:return:
"""
self.logger.debug("%s %s: %s" % (self.__get_running_func__(level=level), name, data))
def new_window(self, url: str):
"""
新建一个标签页
:param url:
:return:
"""
self.driver.execute_script("window.open(\"{}\");".format(url))
def random_vertical_scroll_to(self, min_offset=1000, max_offset=5000):
"""
随机下拉滚动加载
:param min_offset:
:param max_offset:
:return:
"""
self.driver.execute_script(
"window.scrollTo(0, document.body.scrollHeight + %s)" % random.randint(min_offset, max_offset))
def vertical_scroll_to(self, offset=0):
"""
下拉滚动加载, offset为0默认把页面下拉到最下端
:param offset:
:return:
"""
self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight + %s)" % offset)
def until_scroll_to_center_by_css_selector(self, css_selector: str, ele=None, timeout=10):
"""
        Scroll the matched element to the center of the viewport.
:param css_selector:
:param ele:
:param timeout:
:return:
"""
if not ele:
ele = self.driver
try:
self.driver.execute_script(
self.scroll_to_center_js_script, self.until_presence_of_element_located_by_css_selector(
ele=ele, css_selector=css_selector, timeout=timeout))
except Exception:
pass
def until_scroll_to_center_by_partial_link_text(self, link_text: str, ele=None, timeout=10):
"""
        Scroll the matched element to the center of the viewport.
        :param link_text: link text to match
        :param ele: WebElement object
        :param timeout: timeout in seconds
:return:
"""
if not ele:
ele = self.driver
try:
self.driver.execute_script(
self.scroll_to_center_js_script, self.until_presence_of_element_located_by_partial_link_text(
ele=ele, link_text=link_text, timeout=timeout))
except Exception:
pass
def until_scroll_to_center_by_link_text(self, link_text: str, ele=None, timeout=10):
"""
        Scroll the matched element to the center of the viewport.
        :param link_text: link text to match
        :param ele: WebElement object
        :param timeout: timeout in seconds
:return:
"""
if not ele:
ele = self.driver
try:
self.driver.execute_script(
self.scroll_to_center_js_script, self.until_presence_of_element_located_by_link_text(
ele=ele, link_text=link_text, timeout=timeout))
except Exception:
pass
def scroll_to_center(self, ele: WebElement):
"""
        Scroll the given element to the center of the viewport.
:param ele:
:return:
"""
try:
self.driver.execute_script(self.scroll_to_center_js_script, ele)
except Exception:
pass
def vertical_scroll_by(self, offset=100):
"""
        Scroll the page down, by 100 pixels by default.
:param offset:
:return:
"""
self.driver.execute_script("window.scrollBy(0,%s)" % offset)
def scroll_into_view(self, ele: WebElement):
"""
        Scroll the page to the element's position.
Deprecated use scroll_element_to_center
:param ele:
:return:
"""
warnings.warn("use scroll_element_to_center instead", DeprecationWarning)
self.driver.execute_script("arguments[0].scrollIntoView(false);", ele)
def focus_on_element(self, ele: WebElement):
"""
        Scroll the page to the element's position.
Deprecated use scroll_element_to_center
:param ele:
:return:
"""
warnings.warn("use scroll_element_to_center instead", DeprecationWarning)
self.driver.execute_script("arguments[0].focus();", ele)
def focus_on_element_by_css_selector(self, css_selector: str):
"""
        Scroll the page to the position of the element matched by css_selector.
Deprecated until_scroll_element_to_center_by_css_selector
:param css_selector:
:return:
"""
warnings.warn("use until_scroll_element_to_center_by_css_selector instead", DeprecationWarning)
ele = self.until_presence_of_element_located_by_css_selector(css_selector=css_selector)
self.driver.execute_script("arguments[0].focus();", ele)
def focus_on_element_by_partial_link_text(self, link_text: str):
"""
        Scroll the page to the element matched by partial link text.
Deprecated until_scroll_element_to_center_by_partial_link_text
:param link_text:
:return:
"""
warnings.warn("use until_scroll_element_to_center_by_partial_link_text instead", DeprecationWarning)
ele = self.until_presence_of_element_located_by_partial_link_text(link_text=link_text)
self.driver.execute_script("arguments[0].focus();", ele)
def until_scroll_into_view_by_css_selector(self, css_selector: str, ele=None):
"""
        Scroll the page to the element matched by css_selector.
Deprecated until_scroll_element_to_center_by_css_selector
:param ele:WebElement
:param css_selector:
:return:
"""
warnings.warn("use until_scroll_element_to_center_by_css_selector instead", DeprecationWarning)
if not ele:
ele = self.driver
ele = self.until_presence_of_element_located_by_css_selector(ele=ele, css_selector=css_selector)
self.driver.execute_script("arguments[0].scrollIntoView(false);", ele)
def until_scroll_into_view_by_partial_link_text(self, link_text: str, ele=None):
"""
        Scroll the page to the element matched by partial link text.
Deprecated until_scroll_element_to_center_by_partial_link_text
:param ele:WebElement
:param link_text:
:return:
"""
warnings.warn("use until_scroll_element_to_center_by_partial_link_text instead", DeprecationWarning)
if not ele:
ele = self.driver
ele = self.until_presence_of_element_located_by_partial_link_text(ele=ele, link_text=link_text)
self.driver.execute_script("arguments[0].scrollIntoView(false);", ele)
def until_scroll_into_view_by_link_text(self, link_text: str, ele=None):
"""
        Scroll the page to the element matched by link text.
Deprecated until_scroll_element_to_center_by_link_text
:param ele:
:param link_text:
:return:
"""
warnings.warn("use until_scroll_element_to_center_by_link_text instead", DeprecationWarning)
if not ele:
ele = self.driver
ele = self.until_presence_of_element_located_by_link_text(ele=ele, link_text=link_text)
self.driver.execute_script("arguments[0].scrollIntoView(false);", ele)
def until_move_to_element_by_css_selector(self, css_selector: str, ele=None, timeout=10):
"""
        Move the pointer to the element matched by css_selector.
Deprecated until_scroll_element_to_center_by_css_selector
:param ele:WebElement
:param css_selector:
:param timeout:
:return:
"""
if not ele:
ele = self.driver
ActionChains(self.driver).move_to_element(
self.until_presence_of_element_located_by_css_selector(
ele=ele, css_selector=css_selector, timeout=timeout)).perform()
def until_move_to_element_by_partial_link_text(self, link_text: str, ele=None, timeout=10):
"""
        Move the pointer to the element matched by partial link text.
Deprecated until_scroll_element_to_center_by_partial_link_text
:param ele:WebElement
:param link_text:
:param timeout:
:return:
"""
warnings.warn("use until_scroll_element_to_center_by_partial_link_text instead", DeprecationWarning)
if not ele:
ele = self.driver
ActionChains(self.driver).move_to_element(
self.until_presence_of_element_located_by_partial_link_text(
ele=ele, link_text=link_text, timeout=timeout)).perform()
def until_move_to_element_by_link_text(self, link_text: str, ele=None, timeout=10):
"""
        Move the pointer to the element matched by link text.
Deprecated until_scroll_element_to_center_by_link_text
:param ele:WebElement
:param link_text:
:param timeout:
:return:
"""
warnings.warn("use until_scroll_element_to_center_by_link_text instead", DeprecationWarning)
if not ele:
ele = self.driver
ActionChains(self.driver).move_to_element(
self.until_presence_of_element_located_by_link_text(
ele=ele, link_text=link_text, timeout=timeout)).perform()
def move_to_element(self, ele=None, x_offset=0, y_offset=0):
"""
        Move the pointer to the element's position.
        Deprecated: use scroll_element_to_center; on mobile, however, this method is encouraged.
        This method is kept because it is still used on the mobile side.
:param ele:WebElement
:param x_offset:
:param y_offset:
:return:
"""
# warnings.warn("use scroll_element_to_center instead while you are in desktop", DeprecationWarning)
if not ele:
raise ValueError
ActionChains(self.driver).move_to_element(ele).move_by_offset(xoffset=x_offset, yoffset=y_offset).perform()
def until_scroll_to_center_click_by_css_selector(self, css_selector: str, ele=None, timeout=10):
"""
        Scroll the element matched by css_selector to the center of the page, then click it.
        :param ele: WebElement
        :param css_selector:
        :param timeout: timeout in seconds
:return:
"""
if not ele:
ele = self.driver
self.until_scroll_to_center_presence_of_element_located_by_css_selector(
ele=ele, css_selector=css_selector, timeout=timeout).click()
def until_scroll_to_center_click_by_partial_link_text(self, link_text: str, ele=None, timeout=10):
"""
        Scroll the element matched by partial link text to the center of the page, then click it.
:param ele:WebElement
:param timeout:
:param link_text:
:return:
"""
if not ele:
ele = self.driver
self.until_scroll_to_center_presence_of_element_located_by_partial_link_text(
ele=ele, link_text=link_text, timeout=timeout).click()
def until_scroll_to_center_click_by_link_text(self, link_text: str, ele=None, timeout=10):
"""
        Scroll the element matched by link text to the center of the page, then click it.
:param ele:WebElement
:param timeout:
:param link_text:
:return:
"""
if not ele:
ele = self.driver
self.until_scroll_to_center_presence_of_element_located_by_link_text(
ele=ele, link_text=link_text, timeout=timeout).click()
def until_scroll_to_center_click_by_first_css_selector(self, css_selector: str, ele=None, timeout=10):
"""
        Scroll the first element matched by css_selector to the center of the page, then click it.
:param ele:WebElement
:param timeout:
:param css_selector:
:return:
"""
if not ele:
ele = self.driver
ele = self.until_presence_of_all_elements_located_by_css_selector(
ele=ele, css_selector=css_selector, timeout=timeout)[0]
self.scroll_to_center(ele=ele)
ele.click()
def until_scroll_to_center_click_by_first_partial_link_text(self, link_text: str, ele=None, timeout=10):
"""
        Scroll the first element matched by partial link text to the center of the page, then click it.
:param ele:WebElement
:param timeout:
:param link_text:
:return:
"""
if not ele:
ele = self.driver
ele = self.until_presence_of_all_elements_located_by_partial_link_text(
ele=ele, link_text=link_text, timeout=timeout)[0]
self.scroll_to_center(ele=ele)
ele.click()
def until_scroll_to_center_click_by_first_link_text(self, link_text: str, ele=None, timeout=10):
"""
        Scroll the first element matched by link text to the center of the page, then click it.
:param ele:WebElement
:param timeout:
:param link_text:
:return:
"""
if not ele:
ele = self.driver
ele = self.until_presence_of_all_elements_located_by_link_text(ele=ele, link_text=link_text, timeout=timeout)[0]
self.scroll_to_center(ele=ele)
ele.click()
def until_scroll_to_center_click(self, ele: WebElement):
"""
        Scroll the element to the center of the page, then click it.
:param ele:WebElement
:return:
"""
        self.scroll_to_center(ele=ele)  # center the element
ele.click()
def until_scroll_to_center_send_enter_by_css_selector(self, css_selector: str, ele=None, timeout=10):
"""
        Scroll the element matched by css_selector to the center of the page, then press ENTER.
        :param ele: WebElement object
:param css_selector:
:param timeout:
:return:
"""
if not ele:
ele = self.driver
self.until_scroll_to_center_by_css_selector(ele=ele, css_selector=css_selector, timeout=timeout)
self.until_presence_of_element_located_by_css_selector(
ele=ele, css_selector=css_selector, timeout=timeout).send_keys(Keys.ENTER)
def until_send_enter_by_css_selector(self, css_selector: str, ele=None, timeout=10):
"""
        Send an ENTER key to the element matched by css_selector (without scrolling).
:param ele:WebElement
:param css_selector:
:param timeout:
:return:
"""
if not ele:
ele = self.driver
self.until_presence_of_element_located_by_css_selector(
ele=ele, css_selector=css_selector, timeout=timeout).send_keys(Keys.ENTER)
def until_scroll_to_center_send_enter_by_link_text(self, link_text: str, ele=None, timeout=10):
"""
| |
<filename>src/chaospy/distributions/baseclass.py
"""
Constructing custom probability distributions is done in one of two ways:
subclassing :class:`~chaospy.distributions.Dist` or calling
:func:`~chaospy.distributions.construct`. They work about the same, except that
in one case methods are defined, while in the other, functions.
Import the construction function::
>>> from chaospy.distributions import construct, Dist
A simple example for constructing a simple uniform distribution::
>>> def cdf(self, x, lo, up):
... return (x-lo)/(up-lo)
>>> def bnd(self, lo, up):
... return lo, up
>>> Uniform = construct(cdf=cdf, bnd=bnd)
>>> dist = Uniform(lo=-3, up=3)
>>> print(dist.fwd([-3, 0, 3]))
[0. 0.5 1. ]
Here ``cdf`` is the dependent cumulative distribution function as defined in
equation , ``bnd`` is a function returning the lower and upper bounds, and
``lo`` and ``up`` are distribution parameters. They take either other
components or, as illustrated, constants.
In addition to ``cdf`` and ``bnd`` there are a few optional arguments. For
example a fully featured uniform random variable is defined as follows::
>>> def pdf(self, x, lo, up):
... return 1./(up-lo)
>>> def ppf(self, q, lo, up):
... return q*(up-lo) + lo
>>> Uniform = construct(
... cdf=cdf, bnd=bnd, pdf=pdf, ppf=ppf)
>>> dist = Uniform(lo=-3, up=3)
Here ``pdf`` is the probability density function and ``ppf`` is the percent
point function (the CDF inverse). These methods provide functionality needed
for probabilistic collocation. If they are not provided during construction,
they are estimated as far as possible.
Equivalently, the same distribution can be constructed by subclassing
:class:`~chaospy.distributions.Dist`::
>>> class Uniform(Dist):
... def __init__(self, lo=0, up=1):
... Dist.__init__(self, lo=lo, up=up)
... def _cdf(self, x, lo, up):
... return (x-lo)/(up-lo)
... def _bnd(self, lo, up):
... return lo, up
... def _pdf(self, x, lo, up):
... return 1./(up-lo)
... def _ppf(self, q, lo, up):
... return q*(up-lo) + lo
... def _str(self, lo, up):
... return "u(%s%s)" % (lo, up)
>>> dist = Uniform(-3, 3)
>>> print(dist.fwd([-3, 0, 3])) # Forward Rosenblatt transformation
[0. 0.5 1. ]
"""
import types
import numpy as np
class Dist(object):
"""
The distribution backend class.
Subclass this module to construct a custom distribution.
    When subclassing Dist directly, two methods must be provided:
* Cumulative distribution function (CDF): ``_cdf(self, x, **prm)``.
* Upper and lower bounds ``_bnd(self, **prm)``.
The following can be provided:
* Probability density function: ``_pdf(self, x, **prm)``.
* CDF inverse: ``_ppf(self, q, **prm)``.
* Statistical moment generator: ``_mom(self, k, **prm)``.
* TTR coefficients generator: ``_ttr(self, k, **prm)``.
* Pretty print of distribution: ``_str(self, **prm)``.
Alternatively, use the construction function
:func:`~chaospy.distributions.construct`.
"""
__array_priority__ = 9000
def __init__(self, **prm):
"""
Args:
_length (int) : Length of the distribution.
_advance (bool) : If True, activate advanced mode.
**prm (array_like) : Other optional parameters. Will be assumed when
calling any subfunctions.
"""
from . import graph
for key, val in prm.items():
if not isinstance(val, Dist):
prm[key] = np.array(val)
self.length = int(prm.pop("_length", 1))
self.advance = prm.pop("_advance", False)
self.prm = prm.copy()
self.graph = graph.Graph(self)
self.dependencies = self.graph.run(self.length, "dep")[0]
def range(self, x=None, retall=False, verbose=False):
"""
Generate the upper and lower bounds of a distribution.
Args:
x (array_like, optional) : The bounds might vary over the sample
space. By providing x you can specify where in the space
the bound should be taken. If omitted, a (pseudo-)random
sample is used.
Returns:
(np.ndarray) : The lower (out[0]) and upper (out[1]) bound where
out.shape=(2,)+x.shape
"""
dim = len(self)
if x is None:
from . import approx
x = approx.find_interior_point(self)
else:
x = np.array(x)
shape = x.shape
size = int(x.size/dim)
x = x.reshape(dim, size)
out, graph = self.graph.run(x, "range")
out = out.reshape((2,)+shape)
if verbose>1:
print(graph)
if retall:
return out, graph
return out
def fwd(self, x):
"""
Forward Rosenblatt transformation.
Args:
x (array_like) : Location for the distribution function. x.shape
must be compatible with distribution shape.
Returns:
(ndarray) : Evaluated distribution function values, where
out.shape==x.shape.
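A minimal usage sketch, assuming the univariate ``Uniform(lo=-3, up=3)``
constructed in the module docstring (hypothetical doctest; output repr
may vary with the numpy version)::
>>> dist.fwd([-3, 0, 3])  # doctest: +SKIP
array([0. , 0.5, 1. ])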
"""
from . import rosenblatt
return rosenblatt.fwd(self, x)
def cdf(self, x):
"""
Cumulative distribution function.
Note that chaospy only supports cumulative distribution functions in
one dimension.
Args:
x (array_like) : Location for the distribution function. x.shape
must be compatible with distribution shape.
Returns:
(ndarray) : Evaluated distribution function values, where
out.shape==x.shape.
Raises:
(NotImplementedError) : for distributions with dependent
components.
"""
if self.dependent():
raise NotImplementedError("""\
Cumulative distribution function is only available for stochastically \
independent variables""")
from . import rosenblatt
out = rosenblatt.fwd(self, x)
if len(self) > 1:
out = np.prod(out, 0)
return out
def inv(self, q, maxiter=100, tol=1e-5, verbose=False, **kws):
"""
Inverse Rosenblatt transformation.
Args:
q (array_like) : Probabilities to be inverted. If any values are
outside [0, 1], an error will be raised. q.shape must be
compatible with distribution shape.
Kwargs:
maxiter (int) : Maximum number of iterations
tol (float) : Tolerance level
Returns:
(ndarray) : Inverted probability values where out.shape==q.shape.
"""
from . import rosenblatt
return rosenblatt.inv(self, q, maxiter, tol, **kws)
def pdf(self, x, step=1e-7, verbose=0):
"""
Probability density function.
Args:
x (array_like) : Location for the density function. x.shape must
be compatible with distribution shape.
step (float, array_like) : The step length used when numerical
approximation is required. If an array is provided,
elements are used along each axis.
Returns:
(ndarray) : Evaluated density function values. Shapes are related
through the identity x.shape=dist.shape+out.shape
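A small sketch for the uniform distribution on [-3, 3] from the module
docstring, whose density is the constant 1/6 inside the bounds
(hypothetical doctest; output repr may vary)::
>>> dist.pdf([-2, 0, 2])  # doctest: +SKIP
array([0.16666667, 0.16666667, 0.16666667])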
"""
dim = len(self)
x = np.array(x)
shape = x.shape
size = int(x.size/dim)
x = x.reshape(dim, size)
out = np.zeros((dim, size))
(lo, up), graph = self.graph.run(x, "range")
valids = np.prod((x.T >= lo.T)*(x.T <= up.T), 1, dtype=bool)
x[:, ~valids] = (.5*(up+lo))[:, ~valids]
try:
tmp, graph = self.graph.run(x, "pdf", eps=step)
out[:, valids] = tmp[:, valids]
except NotImplementedError:
from . import approx
tmp, graph = approx.pdf_full(self, x, step, retall=True)
out[:, valids] = tmp[:, valids]
if verbose:
print("approx %s.pdf")
except IndexError:
pass
if verbose>1:
print(self.graph)
out = out.reshape(shape)
if dim>1:
out = np.prod(out, 0)
return out
def sample(self, size=(), rule="R", antithetic=None,
verbose=False, **kws):
"""
Create pseudo-random generated samples.
Args:
size (int,array_like):
The size of the samples to generate.
rule (str):
Alternative sampling techniques. See
:func:`~chaospy.distributions.sampler.generator.generate_samples`.
antithetic (bool, array_like):
If provided, will be used to setup antithetic variables. If
array, defines the axes to mirror.
Returns:
(ndarray) : Random samples with shape (len(self),)+size for
multivariate distributions, otherwise size.
"""
size_ = np.prod(size, dtype=int)
dim = len(self)
if dim > 1:
if isinstance(size, (tuple,list,np.ndarray)):
shape = (dim,) + tuple(size)
else:
shape = (dim, size)
else:
shape = size
from . import sampler
out = sampler.generator.generate_samples(
order=size_, domain=self, rule=rule, antithetic=antithetic)
try:
out = out.reshape(shape)
except ValueError:
if len(self) == 1:
out = out.flatten()
else:
out = out.reshape(dim, int(out.size/dim))
return out
def mom(self, K, **kws):
"""
Raw statistical moments.
Creates non-centralized raw moments from the random variable. If
analytical options can not be utilized, Monte Carlo integration
will be used.
Args:
K (array_like) : Index of the raw moments. K.shape must be
compatible with distribution shape.
rule (str) : Sampling scheme used if the moment has to be
estimated by Monte Carlo integration, i.e. if the
analytical method fails.
composit (int, array_like, optional) : If provided, composite
quadrature will be used. Ignored if gaussian=True. If an
int is provided, it determines the number of even domain
splits. If an array of ints, the number of even domain
splits along each axis. If an array of arrays/floats, the
locations of the splits.
antithetic (array_like, optional) : List of bools. Represents the
axes to mirror using antithetic variates during MCI.
Returns:
(ndarray) : Raw moment values, where out.shape==K.shape for
univariate distributions and out.shape==K.shape[1:] otherwise.
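A small sketch for the uniform distribution on [-3, 3] from the module
docstring, where the raw moments are E[X^0]=1, E[X^1]=0 and E[X^2]=3
(hypothetical doctest; output repr may vary)::
>>> dist.mom([0, 1, 2])  # doctest: +SKIP
array([1., 0., 3.])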
"""
K = np.array(K, dtype=int)
shape = K.shape
dim = len(self)
if dim > 1:
shape = shape[1:]
size = int(K.size/dim)
K = K.reshape(dim, size)
try:
out, _ = self.graph.run(K, "mom", **kws)
except NotImplementedError:
from . import approx
out = approx.mom(self, K, **kws)
return out.reshape(shape)
def ttr(self, k, acc=10**3, verbose=1):
"""
Three-term recurrence relation coefficient generator.
Args:
k (array_like, int) : The order of the coefficients.
acc (int) : Accuracy of discretized Stieltjes if analytical
methods are unavailable.
Returns:
(ndarray) : Recurrence coefficients, where out[0] is the first
(A) and out[1] the second (B) coefficient, with
`out.shape==(2,)+k.shape`.
"""
k = np.array(k, dtype=int)
dim = len(self)
shape = k.shape
shape = (2,) + shape
size = int(k.size/dim)
k = k.reshape(dim, size)
out, graph = self.graph.run(k, "ttr")
return out.reshape(shape)
<reponame>deepankur797/random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0)
import seaborn as sns
from scipy import stats
from scipy.stats import norm
train = pd.read_csv("D:\\cygwin\\COLLEGE STUFF(A)\\8th sem\\major 2\\House Pricing\\train.csv")
test = pd.read_csv("D:\\cygwin\\COLLEGE STUFF(A)\\8th sem\\major 2\\House Pricing\\test.csv")
train.head()
print ('The train data has {0} rows and {1} columns'.format(train.shape[0],train.shape[1]))
print ('----------------------------')
print ('The test data has {0} rows and {1} columns'.format(test.shape[0],test.shape[1]))
#train.info()
#check missing values
train.columns[train.isnull().any()]
miss = train.isnull().sum()/len(train)
miss = miss[miss > 0]
miss.sort_values(inplace=True)
miss
#visualising missing values
miss = miss.to_frame()
miss.columns = ['count']
miss.index.names = ['Name']
miss['Name'] = miss.index
#plot the missing value count
sns.set(style="whitegrid", color_codes=True)
sns.barplot(x = 'Name', y = 'count', data=miss)
plt.xticks(rotation = 90)
plt.show()
#SalePrice
sns.distplot(train['SalePrice'])
plt.show()
#skewness
print("The skewness of SalePrice is {}".format(train['SalePrice'].skew()))
#now transforming the target variable
target = np.log(train['SalePrice'])
print('Skewness is', target.skew())
sns.distplot(target)
plt.show()
#separate variables into new data frames
numeric_data = train.select_dtypes(include=[np.number])
cat_data = train.select_dtypes(exclude=[np.number])
print("There are {} numeric and {} categorical columns in train data".format(numeric_data.shape[1],cat_data.shape[1]))
del numeric_data['Id']
print("There are {} numeric and {} categorical columns in train data".format(numeric_data.shape[1],cat_data.shape[1]))
#correlation plot
corr = numeric_data.corr()
sns.heatmap(corr)
plt.show()
print (corr['SalePrice'].sort_values(ascending=False)[:15], '\n') #top 15 values
print ('----------------------')
print (corr['SalePrice'].sort_values(ascending=False)[-5:]) #last 5 values
train['OverallQual'].unique()
# -> array([ 7,  6,  8,  5,  9,  4, 10,  3,  1,  2])
#let's check the mean price per quality and plot it.
pivot = train.pivot_table(index='OverallQual', values='SalePrice', aggfunc=np.median)
pivot = pivot.sort_values(by='SalePrice')
#print(pivot)
pivot.plot(kind='bar', color='red')
plt.show()
#GrLivArea variable
sns.jointplot(x=train['GrLivArea'], y=train['SalePrice'])
plt.show()
cat_data.describe()
sp_pivot = train.pivot_table(index='SaleCondition', values='SalePrice', aggfunc=np.median)
sp_pivot
sp_pivot.plot(kind='bar',color='red')
plt.show()
cat = [f for f in train.columns if train.dtypes[f] == 'object']
def anova(frame):
anv = pd.DataFrame()
anv['features'] = cat
pvals = []
for c in cat:
samples = []
for cls in frame[c].unique():
s = frame[frame[c] == cls]['SalePrice'].values
samples.append(s)
pval = stats.f_oneway(*samples)[1]
pvals.append(pval)
anv['pval'] = pvals
return anv.sort_values('pval')
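# The anova helper above ranks categorical features by one-way ANOVA: for each
# level of a feature it collects the SalePrice samples and tests whether the
# level means differ. A tiny self-contained illustration of scipy's f_oneway
# on synthetic numbers (not from this dataset):
#
#     from scipy import stats
#     level_a = [100, 110, 105]   # prices observed for level "A"
#     level_b = [200, 210, 205]   # prices observed for level "B"
#     f_stat, p_val = stats.f_oneway(level_a, level_b)
#     # a tiny p_val means the level means differ -> large "disparity" below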
cat_data['SalePrice'] = train.SalePrice.values
k = anova(cat_data)
k['disparity'] = np.log(1./k['pval'].values)
sns.barplot(data=k, x = 'features', y='disparity')
plt.xticks(rotation=90)
plt.show()
#create numeric plots
num = [f for f in train.columns if train.dtypes[f] != 'object']
num.remove('Id')
nd = pd.melt(train, value_vars = num)
n1 = sns.FacetGrid (nd, col='variable', col_wrap=4, sharex=False, sharey = False)
n1 = n1.map(sns.distplot, 'value')
plt.show()
def boxplot(x, y, **kwargs):
sns.boxplot(x=x, y=y)
x = plt.xticks(rotation=90)
cat = [f for f in train.columns if train.dtypes[f] == 'object']
p = pd.melt(train, id_vars='SalePrice', value_vars=cat)
g = sns.FacetGrid(p, col='variable', col_wrap=2, sharex=False, sharey=False, size=5)
g = g.map(boxplot, 'value', 'SalePrice')
plt.show()
#5 data pre-processing
#removing outliers
train.drop(train[train['GrLivArea'] > 4000].index, inplace=True)
print(train.shape)  # removed 4 rows
#imputing using mode
test.loc[666, 'GarageQual'] = "TA" #stats.mode(test['GarageQual']).mode
test.loc[666, 'GarageCond'] = "TA" #stats.mode(test['GarageCond']).mode
test.loc[666, 'GarageFinish'] = "Unf" #stats.mode(test['GarageFinish']).mode
test.loc[666, 'GarageYrBlt'] = "1980" #np.nanmedian(test['GarageYrBlt'])
#mark as missing
test.loc[1116, 'GarageType'] = np.nan
#importing function
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
def factorize(data, var, fill_na = None):
if fill_na is not None:
data[var].fillna(fill_na, inplace=True)
le.fit(data[var])
data[var] = le.transform(data[var])
return data
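# Usage sketch for the factorize helper on a toy frame (hypothetical data):
#
#     df = pd.DataFrame({'Street': ['Pave', 'Grvl', None]})
#     factorize(df, 'Street', fill_na='Pave')  # NaN -> 'Pave', then label-encode
#     # df['Street'] becomes [1, 0, 1], since classes_ == ['Grvl', 'Pave']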
#combine the data set
alldata = train.append(test)
print(alldata.shape)
#impute lotfrontage by median of neighborhood
lot_frontage_by_neighborhood = train['LotFrontage'].groupby(train['Neighborhood'])
for key, group in lot_frontage_by_neighborhood:
idx = (alldata['Neighborhood'] == key) & (alldata['LotFrontage'].isnull())
alldata.loc[idx, 'LotFrontage'] = group.median()
#imputing missing values
alldata["MasVnrArea"].fillna(0, inplace=True)
alldata["BsmtFinSF1"].fillna(0, inplace=True)
alldata["BsmtFinSF2"].fillna(0, inplace=True)
alldata["BsmtUnfSF"].fillna(0, inplace=True)
alldata["TotalBsmtSF"].fillna(0, inplace=True)
alldata["GarageArea"].fillna(0, inplace=True)
alldata["BsmtFullBath"].fillna(0, inplace=True)
alldata["BsmtHalfBath"].fillna(0, inplace=True)
alldata["GarageCars"].fillna(0, inplace=True)
alldata["GarageYrBlt"].fillna(0.0, inplace=True)
alldata["PoolArea"].fillna(0, inplace=True)
qual_dict = {np.nan: 0, "Po": 1, "Fa": 2, "TA": 3, "Gd": 4, "Ex": 5}
name = np.array(['ExterQual','PoolQC' ,'ExterCond','BsmtQual','BsmtCond','HeatingQC','KitchenQual','FireplaceQu', 'GarageQual','GarageCond'])
for i in name:
alldata[i] = alldata[i].map(qual_dict).astype(int)
alldata["BsmtExposure"] = alldata["BsmtExposure"].map({np.nan: 0, "No": 1, "Mn": 2, "Av": 3, "Gd": 4}).astype(int)
bsmt_fin_dict = {np.nan: 0, "Unf": 1, "LwQ": 2, "Rec": 3, "BLQ": 4, "ALQ": 5, "GLQ": 6}
alldata["BsmtFinType1"] = alldata["BsmtFinType1"].map(bsmt_fin_dict).astype(int)
alldata["BsmtFinType2"] = alldata["BsmtFinType2"].map(bsmt_fin_dict).astype(int)
alldata["Functional"] = alldata["Functional"].map({np.nan: 0, "Sal": 1, "Sev": 2, "Maj2": 3, "Maj1": 4, "Mod": 5, "Min2": 6, "Min1": 7, "Typ": 8}).astype(int)
alldata["GarageFinish"] = alldata["GarageFinish"].map({np.nan: 0, "Unf": 1, "RFn": 2, "Fin": 3}).astype(int)
alldata["Fence"] = alldata["Fence"].map({np.nan: 0, "MnWw": 1, "GdWo": 2, "MnPrv": 3, "GdPrv": 4}).astype(int)
#encoding data
alldata["CentralAir"] = (alldata["CentralAir"] == "Y") * 1.0
varst = np.array(['MSSubClass','LotConfig','Neighborhood','Condition1','BldgType','HouseStyle','RoofStyle','Foundation','SaleCondition'])
for x in varst:
factorize(alldata, x)
#encode variables and impute missing values
alldata = factorize(alldata, "MSZoning", "RL")
alldata = factorize(alldata, "Exterior1st", "Other")
alldata = factorize(alldata, "Exterior2nd", "Other")
alldata = factorize(alldata, "MasVnrType", "None")
alldata = factorize(alldata, "SaleType", "Oth")
#creating new variable (1 or 0) based on irregular count levels
#The level with highest count is kept as 1 and rest as 0
alldata["IsRegularLotShape"] = (alldata["LotShape"] == "Reg") * 1
alldata["IsLandLevel"] = (alldata["LandContour"] == "Lvl") * 1
alldata["IsLandSlopeGentle"] = (alldata["LandSlope"] == "Gtl") * 1
alldata["IsElectricalSBrkr"] = (alldata["Electrical"] == "SBrkr") * 1
alldata["IsGarageDetached"] = (alldata["GarageType"] == "Detchd") * 1
alldata["IsPavedDrive"] = (alldata["PavedDrive"] == "Y") * 1
alldata["HasShed"] = (alldata["MiscFeature"] == "Shed") * 1
alldata["Remodeled"] = (alldata["YearRemodAdd"] != alldata["YearBuilt"]) * 1
# Did the remodeling happen during the sale year?
alldata["RecentRemodel"] = (alldata["YearRemodAdd"] == alldata["YrSold"]) * 1
# Was this house sold in the year it was built?
alldata["VeryNewHouse"] = (alldata["YearBuilt"] == alldata["YrSold"]) * 1
alldata["Has2ndFloor"] = (alldata["2ndFlrSF"] == 0) * 1
alldata["HasMasVnr"] = (alldata["MasVnrArea"] == 0) * 1
alldata["HasWoodDeck"] = (alldata["WoodDeckSF"] == 0) * 1
alldata["HasOpenPorch"] = (alldata["OpenPorchSF"] == 0) * 1
alldata["HasEnclosedPorch"] = (alldata["EnclosedPorch"] == 0) * 1
alldata["Has3SsnPorch"] = (alldata["3SsnPorch"] == 0) * 1
alldata["HasScreenPorch"] = (alldata["ScreenPorch"] == 0) * 1
#setting levels with high count as 1 and the rest as 0
#you can check for them using the value_counts function
alldata["HighSeason"] = alldata["MoSold"].replace({1: 0, 2: 0, 3: 0, 4: 1, 5: 1, 6: 1, 7: 1, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0})
alldata["NewerDwelling"] = alldata["MSSubClass"].replace({20: 1, 30: 0, 40: 0, 45: 0,50: 0, 60: 1, 70: 0, 75: 0, 80: 0, 85: 0,90: 0, 120: 1, 150: 0, 160: 0, 180: 0, 190: 0})
print(alldata.shape)
#create alldata2
alldata2 = train.append(test)
alldata["SaleCondition_PriceDown"] = alldata2.SaleCondition.replace({'Abnorml': 1, 'Alloca': 1, 'AdjLand': 1, 'Family': 1, 'Normal': 0, 'Partial': 0})
# house completed before sale or not
alldata["BoughtOffPlan"] = alldata2.SaleCondition.replace({"Abnorml" : 0, "Alloca" : 0, "AdjLand" : 0, "Family" : 0, "Normal" : 0, "Partial" : 1})
alldata["BadHeating"] = alldata2.HeatingQC.replace({'Ex': 0, 'Gd': 0, 'TA': 0, 'Fa': 1, 'Po': 1})
#calculating total area using all area columns
area_cols = ['LotFrontage', 'LotArea', 'MasVnrArea', 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF','TotalBsmtSF', '1stFlrSF', '2ndFlrSF', 'GrLivArea', 'GarageArea', 'WoodDeckSF','OpenPorchSF', 'EnclosedPorch', '3SsnPorch', 'ScreenPorch', 'LowQualFinSF', 'PoolArea' ]
alldata["TotalArea"] = alldata[area_cols].sum(axis=1)
alldata["TotalArea1st2nd"] = alldata["1stFlrSF"] + alldata["2ndFlrSF"]
alldata["Age"] = 2010 - alldata["YearBuilt"]
alldata["TimeSinceSold"] = 2010 - alldata["YrSold"]
alldata["SeasonSold"] = alldata["MoSold"].map({12:0, 1:0, 2:0, 3:1, 4:1, 5:1, 6:2, 7:2, 8:2, 9:3, 10:3, 11:3}).astype(int)
alldata["YearsSinceRemodel"] = alldata["YrSold"] - alldata["YearRemodAdd"]
# Simplifications of existing features into bad/average/good based on counts
alldata["SimplOverallQual"] = alldata.OverallQual.replace({1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2, 6 : 2, 7 : 3, 8 : 3, 9 : 3, 10 : 3})
alldata["SimplOverallCond"] = alldata.OverallCond.replace({1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2, 6 : 2, 7 : 3, 8 : 3, 9 : 3, 10 : 3})
alldata["SimplPoolQC"] = alldata.PoolQC.replace({1 : 1, 2 : 1, 3 : 2, 4 : 2})
alldata["SimplGarageCond"] = alldata.GarageCond.replace({1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2})
alldata["SimplGarageQual"] = alldata.GarageQual.replace({1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2})
alldata["SimplFireplaceQu"] = alldata.FireplaceQu.replace({1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2})
alldata["SimplFireplaceQu"] = alldata.FireplaceQu.replace({1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2})
alldata["SimplFunctional"] = alldata.Functional.replace({1 : 1, 2 : 1, 3 : 2, 4 : 2, 5 : 3, 6 : 3, 7 : 3, 8 : 4})
alldata["SimplKitchenQual"] = alldata.KitchenQual.replace({1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2})
alldata["SimplHeatingQC"] = alldata.HeatingQC.replace({1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2})
alldata["SimplBsmtFinType1"] = alldata.BsmtFinType1.replace({1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2, 6 : 2})
alldata["SimplBsmtFinType2"] = alldata.BsmtFinType2.replace({1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2, 6 : 2})
alldata["SimplBsmtCond"] = alldata.BsmtCond.replace({1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2})
alldata["SimplBsmtQual"] = alldata.BsmtQual.replace({1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2})
alldata["SimplExterCond"] = alldata.ExterCond.replace({1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2})
alldata["SimplExterQual"] = alldata.ExterQual.replace({1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2})
#grouping neighborhood variable based on this plot
train['SalePrice'].groupby(train['Neighborhood']).median().sort_values().plot(kind='bar')
plt.show()
neighborhood_map = {"MeadowV" : 0, "IDOTRR" : 1, "BrDale" : 1, "OldTown" : 1, "Edwards" : 1, "BrkSide" : 1,"Sawyer" : 1, "Blueste" : 1, "SWISU" : 2, "NAmes" : 2, "NPkVill" : 2, "Mitchel" : 2, "SawyerW" : 2, "Gilbert" : 2, "NWAmes" : 2, "Blmngtn" : 2, "CollgCr" : 2, "ClearCr" : 3, "Crawfor" : 3, "Veenker" : 3, "Somerst" : 3, "Timber" : 3, "StoneBr" : 4, "NoRidge" : 4, "NridgHt" : 4}
alldata['NeighborhoodBin'] = alldata2['Neighborhood'].map(neighborhood_map)
alldata.loc[alldata2.Neighborhood == 'NridgHt', "Neighborhood_Good"] = 1
alldata.loc[alldata2.Neighborhood == 'Crawfor', "Neighborhood_Good"] = 1
alldata.loc[alldata2.Neighborhood == 'StoneBr', "Neighborhood_Good"] = 1
alldata.loc[alldata2.Neighborhood == 'Somerst', "Neighborhood_Good"] = 1
alldata.loc[alldata2.Neighborhood == 'NoRidge', "Neighborhood_Good"] = 1
alldata["Neighborhood_Good"].fillna(0, inplace=True)
alldata["SaleCondition_PriceDown"] = alldata2.SaleCondition.replace({'Abnorml': 1, 'Alloca': 1, 'AdjLand': 1, 'Family': 1, 'Normal': 0, 'Partial': 0})
# House completed before sale or not
alldata["BoughtOffPlan"] = alldata2.SaleCondition.replace({"Abnorml" : 0, "Alloca" : 0, |