max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
ch10/data.py
|
kxen42/Learn-Python-Programming-Third-Edition
| 19
|
6627651
|
# data.py
def get_clean_data(source):
data = load_data(source)
cleaned_data = clean_data(data)
return cleaned_data
|
# data.py
def get_clean_data(source):
data = load_data(source)
cleaned_data = clean_data(data)
return cleaned_data
|
none
| 1
| 1.623202
| 2
|
|
baselines/jft/experiments/common_fewshot.py
|
dvdzhang/uncertainty-baselines
| 0
|
6627652
|
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Most common few-shot eval configuration."""
import ml_collections
def get_fewshot(batch_size=None, target_resolution=224, resize_resolution=256,
runlocal=False):
"""Returns a standard-ish fewshot eval configuration."""
config = ml_collections.ConfigDict()
if batch_size:
config.batch_size = batch_size
config.representation_layer = 'pre_logits'
config.log_steps = 25_000
config.datasets = { # pylint: disable=g-long-ternary
'birds': ('caltech_birds2011', 'train', 'test'),
'caltech': ('caltech101', 'train', 'test'),
'cars': ('cars196:2.1.0', 'train', 'test'),
'cifar100': ('cifar100', 'train', 'test'),
'col_hist': ('colorectal_histology', 'train[:2000]', 'train[2000:]'),
'dtd': ('dtd', 'train', 'test'),
'imagenet': ('imagenet2012_subset/10pct', 'train', 'validation'),
'pets': ('oxford_iiit_pet', 'train', 'test'),
'uc_merced': ('uc_merced', 'train[:1000]', 'train[1000:]'),
} if not runlocal else {
'pets': ('oxford_iiit_pet', 'train', 'test'),
}
config.pp_train = f'decode|resize({resize_resolution})|central_crop({target_resolution})|value_range(-1,1)|keep("image", "label")'
config.pp_eval = config.pp_train
config.shots = [1, 5, 10, 25]
config.l2_regs = [2.0 ** i for i in range(-10, 20)]
config.walk_first = ('imagenet', 10) if not runlocal else ('pets', 10)
return config
|
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Most common few-shot eval configuration."""
import ml_collections
def get_fewshot(batch_size=None, target_resolution=224, resize_resolution=256,
runlocal=False):
"""Returns a standard-ish fewshot eval configuration."""
config = ml_collections.ConfigDict()
if batch_size:
config.batch_size = batch_size
config.representation_layer = 'pre_logits'
config.log_steps = 25_000
config.datasets = { # pylint: disable=g-long-ternary
'birds': ('caltech_birds2011', 'train', 'test'),
'caltech': ('caltech101', 'train', 'test'),
'cars': ('cars196:2.1.0', 'train', 'test'),
'cifar100': ('cifar100', 'train', 'test'),
'col_hist': ('colorectal_histology', 'train[:2000]', 'train[2000:]'),
'dtd': ('dtd', 'train', 'test'),
'imagenet': ('imagenet2012_subset/10pct', 'train', 'validation'),
'pets': ('oxford_iiit_pet', 'train', 'test'),
'uc_merced': ('uc_merced', 'train[:1000]', 'train[1000:]'),
} if not runlocal else {
'pets': ('oxford_iiit_pet', 'train', 'test'),
}
config.pp_train = f'decode|resize({resize_resolution})|central_crop({target_resolution})|value_range(-1,1)|keep("image", "label")'
config.pp_eval = config.pp_train
config.shots = [1, 5, 10, 25]
config.l2_regs = [2.0 ** i for i in range(-10, 20)]
config.walk_first = ('imagenet', 10) if not runlocal else ('pets', 10)
return config
|
en
| 0.832105
|
# coding=utf-8 # Copyright 2022 The Uncertainty Baselines Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Most common few-shot eval configuration. Returns a standard-ish fewshot eval configuration. # pylint: disable=g-long-ternary
| 1.681057
| 2
|
autotest/osr/osr_proj4.py
|
nyalldawson/gdal
| 1
|
6627653
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test some PROJ.4 specific translation issues.
# Author: <NAME> <<EMAIL>>
#
###############################################################################
# Copyright (c) 2003, <NAME> <<EMAIL>>
# Copyright (c) 2009-2013, <NAME> <even dot rouault at mines-paris dot org>
# Copyright (c) 2014, <NAME> <kyle at pobox dot com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
sys.path.append( '../pymod' )
from osgeo import gdal, osr
import gdaltest
###############################################################################
# Return True if proj is at least 4.8.0
have_proj480_flag = None
def have_proj480():
global have_proj480_flag
if have_proj480_flag is not None:
return have_proj480_flag
try:
import ctypes
except:
print('cannot find ctypes')
have_proj480_flag = False
return have_proj480_flag
handle = None
for name in ["libproj.so", "proj.dll", "proj-9.dll", "libproj-0.dll", "libproj-10.dll", "cygproj-10.dll", "libproj.dylib"]:
try:
handle = ctypes.cdll.LoadLibrary(name)
except:
pass
if handle is None:
print('cannot load libproj.so, proj.dll, proj-9.dll, libproj-0.dll, libproj-10.dll, cygproj-10.dll or libproj.dylib')
have_proj480_flag = False
return have_proj480_flag
try:
handle.pj_init
except:
print('cannot find pj_init symbol : weird')
have_proj480_flag = False
return have_proj480_flag
# Proj4.8.0 has introduced the pj_etmerc() function. Test for it
try:
handle.pj_etmerc
have_proj480_flag = True
return have_proj480_flag
except:
print('cannot find pj_etmerc : PROJ < 4.8.0')
have_proj480_flag = False
return have_proj480_flag
###############################################################################
# Test the +k_0 flag works as well as +k when consuming PROJ.4 format.
# This is from Bugzilla bug 355.
#
def osr_proj4_1():
srs = osr.SpatialReference()
srs.ImportFromProj4( '+proj=tmerc +lat_0=53.5000000000 +lon_0=-8.0000000000 +k_0=1.0000350000 +x_0=200000.0000000000 +y_0=250000.0000000000 +a=6377340.189000 +rf=299.324965 +towgs84=482.530,-130.596,564.557,-1.042,-0.214,-0.631,8.15' )
if abs(srs.GetProjParm( osr.SRS_PP_SCALE_FACTOR )-1.000035) > 0.0000005:
gdaltest.post_reason( '+k_0 not supported on import from PROJ.4?' )
return 'fail'
return 'success'
###############################################################################
# Verify that we can import strings with parameter values that are exponents
# and contain a plus sign. As per bug 355 in GDAL/OGR's bugzilla.
#
def osr_proj4_2():
srs = osr.SpatialReference()
srs.ImportFromProj4( "+proj=lcc +x_0=0.6096012192024384e+06 +y_0=0 +lon_0=90dw +lat_0=42dn +lat_1=44d4'n +lat_2=42d44'n +a=6378206.400000 +rf=294.978698 +nadgrids=conus,ntv1_can.dat +units=m" )
if abs(srs.GetProjParm( osr.SRS_PP_FALSE_EASTING )-609601.219) > 0.0005:
gdaltest.post_reason( 'Parsing exponents not supported?' )
return 'fail'
if srs.Validate() != 0:
gdaltest.post_reason( 'does not validate' )
print(srs.ExportToPrettyWkt())
return 'fail'
return 'success'
###############################################################################
# Verify that empty srs'es don't cause a crash (#1718).
#
def osr_proj4_3():
srs = osr.SpatialReference()
try:
gdal.PushErrorHandler('CPLQuietErrorHandler')
srs.ExportToProj4()
gdal.PopErrorHandler()
except RuntimeError:
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg().find('No translation') != -1:
return 'success'
gdaltest.post_reason( 'empty srs not handled properly' )
return 'fail'
###############################################################################
# Verify that unrecognized projections return an error, not those
# annoying ellipsoid-only results.
#
def osr_proj4_4():
srs = osr.SpatialReference()
srs.SetFromUserInput( '+proj=utm +zone=11 +datum=WGS84' )
srs.SetAttrValue( 'PROJCS|PROJECTION', 'FakeTransverseMercator' )
try:
gdal.PushErrorHandler('CPLQuietErrorHandler')
srs.ExportToProj4()
gdal.PopErrorHandler()
except RuntimeError:
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg().find('No translation') != -1:
return 'success'
gdaltest.post_reason( 'unknown srs not handled properly' )
return 'fail'
###############################################################################
# Verify that prime meridians are preserved when round tripping. (#1940)
#
def osr_proj4_5():
srs = osr.SpatialReference()
srs.ImportFromProj4( '+proj=lcc +lat_1=46.8 +lat_0=46.8 +lon_0=0 +k_0=0.99987742 +x_0=600000 +y_0=2200000 +a=6378249.2 +b=6356515 +towgs84=-168,-60,320,0,0,0,0 +pm=paris +units=m +no_defs' )
if abs(float(srs.GetAttrValue('PRIMEM',1)) - 2.3372291667) > 0.00000001:
gdaltest.post_reason('prime meridian lost?')
return 'fail'
if abs(srs.GetProjParm('central_meridian')) != 0.0:
gdaltest.post_reason( 'central meridian altered?' )
return 'fail'
p4 = srs.ExportToProj4()
srs2 = osr.SpatialReference()
srs2.ImportFromProj4( p4 )
if not srs.IsSame(srs2):
gdaltest.post_reason( 'round trip via PROJ.4 damaged srs?' )
print(srs.ExportToPrettyWkt())
print(srs2.ExportToPrettyWkt())
return 'success'
###############################################################################
# Confirm handling of non-zero latitude of origin mercator (#3026)
#
def osr_proj4_6():
expect_proj4 = '+proj=merc +lon_0=0 +lat_ts=46.1333331 +x_0=1000 +y_0=2000 +datum=WGS84 +units=m +no_defs '
wkt = """PROJCS["unnamed",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4326"]],
PROJECTION["Mercator_1SP"],
PARAMETER["latitude_of_origin",46.1333331],
PARAMETER["central_meridian",0],
PARAMETER["scale_factor",1],
PARAMETER["false_easting",1000],
PARAMETER["false_northing",2000],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]]]"""
srs = osr.SpatialReference()
srs.ImportFromWkt(wkt)
proj4 = srs.ExportToProj4()
if proj4 != expect_proj4:
print('Got:', proj4)
print('Expected:', expect_proj4)
gdaltest.post_reason( 'Failed to translate non-zero lat-of-origin mercator.' )
return 'fail'
# Translate back - should be mercator 1sp
expect_wkt = """PROJCS["unnamed",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]],
PROJECTION["Mercator_2SP"],
PARAMETER["standard_parallel_1",46.1333331],
PARAMETER["central_meridian",0],
PARAMETER["false_easting",1000],
PARAMETER["false_northing",2000],
UNIT["Meter",1]]"""
srs.SetFromUserInput( proj4 )
wkt = srs.ExportToPrettyWkt()
if wkt != expect_wkt:
print('Got: %s' % wkt)
print('Expect:%s' % expect_wkt)
gdaltest.post_reason( 'did not get expected mercator_2sp result.' )
return 'fail'
return 'success'
###############################################################################
# Confirm handling of somerc (#3032).
#
def osr_proj4_7():
srs = osr.SpatialReference()
srs.ImportFromEPSG( 23700 )
proj4 = srs.ExportToProj4()
expected = '+proj=somerc +lat_0=47.14439372222222 +lon_0=19.04857177777778 +k_0=0.99993 +x_0=650000 +y_0=200000 +ellps=GRS67 +towgs84=52.17,-71.82,-14.9,0,0,0,0 +units=m +no_defs '
if proj4 != expected:
gdaltest.post_reason( 'did not get expected proj.4 translation of somerc' )
print('')
print('Got: "%s"' % proj4)
print('Expected:"%s"' % expected)
return 'fail'
srs.ImportFromProj4( proj4 )
expected = """PROJCS["unnamed",
GEOGCS["GRS 67(IUGG 1967)",
DATUM["unknown",
SPHEROID["GRS67",6378160,298.247167427],
TOWGS84[52.17,-71.82,-14.9,0,0,0,0]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433]],
PROJECTION["Hotine_Oblique_Mercator_Azimuth_Center"],
PARAMETER["latitude_of_center",47.14439372222222],
PARAMETER["longitude_of_center",19.04857177777778],
PARAMETER["azimuth",90],
PARAMETER["rectified_grid_angle",90],
PARAMETER["scale_factor",0.99993],
PARAMETER["false_easting",650000],
PARAMETER["false_northing",200000],
UNIT["Meter",1]]"""
srs_expected = osr.SpatialReference( wkt = expected )
if not srs.IsSame(srs_expected):
gdaltest.post_reason( 'did not get expected wkt.' )
print( 'Got: %s' % srs.ExportToPrettyWkt() )
return 'fail'
return 'success'
###############################################################################
# Check EPSG:3857, confirm Google Mercator hackery.
def osr_proj4_8():
srs = osr.SpatialReference()
srs.ImportFromEPSG( 3857 )
proj4 = srs.ExportToProj4()
expected = '+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs'
if proj4 != expected:
gdaltest.post_reason( 'did not get expected EPSG:3857 (google mercator) result.' )
print(proj4)
return 'fail'
srs = osr.SpatialReference()
srs.ImportFromEPSG( 3785 )
proj4 = srs.ExportToProj4()
if proj4 != expected:
gdaltest.post_reason( 'did not get expected EPSG:3785 (google mercator) result.' )
print(proj4)
return 'fail'
return 'success'
###############################################################################
# NAD27 is a bit special - make sure no towgs84 values come through.
#
def osr_proj4_9():
srs = osr.SpatialReference()
srs.ImportFromEPSG( 4267 )
proj4 = srs.ExportToProj4()
expected = '+proj=longlat +datum=NAD27 +no_defs '
if proj4 != expected:
gdaltest.post_reason( 'did not get expected EPSG:4267 (NAD27)' )
print(proj4)
return 'fail'
srs = osr.SpatialReference()
srs.SetFromUserInput( 'NAD27' )
proj4 = srs.ExportToProj4()
if proj4 != expected:
gdaltest.post_reason( 'did not get expected "NAD27"' )
print(proj4)
return 'fail'
return 'success'
###############################################################################
# Does geocentric work okay?
#
def osr_proj4_10():
srs = osr.SpatialReference()
srs.ImportFromProj4( '+proj=geocent +ellps=WGS84 +towgs84=0,0,0 ' )
wkt_expected = 'GEOCCS["Geocentric",DATUM["unknown",SPHEROID["WGS84",6378137,298.257223563],TOWGS84[0,0,0,0,0,0,0]],PRIMEM["Greenwich",0]]'
if not gdaltest.equal_srs_from_wkt( wkt_expected, srs.ExportToWkt() ):
gdaltest.post_reason( 'did not get expected wkt.' )
return 'fail'
p4 = srs.ExportToProj4()
srs2 = osr.SpatialReference()
srs2.ImportFromProj4( p4 )
if not srs.IsSame(srs2):
gdaltest.post_reason( 'round trip via PROJ.4 damaged srs?' )
print(srs.ExportToPrettyWkt())
print(srs2.ExportToPrettyWkt())
return 'fail'
return 'success'
###############################################################################
# Test round-tripping of all supported projection methods
#
def osr_proj4_11():
proj4strlist = [ '+proj=bonne +lon_0=2 +lat_1=1 +x_0=3 +y_0=4',
'+proj=cass +lat_0=1 +lon_0=2 +x_0=3 +y_0=4',
'+proj=nzmg +lat_0=1 +lon_0=2 +x_0=3 +y_0=4',
'+proj=cea +lon_0=2 +lat_ts=1 +x_0=3 +y_0=4',
'+proj=tmerc +lat_0=1 +lon_0=2 +k=5 +x_0=3 +y_0=4',
'+proj=utm +zone=31 +south',
'+proj=merc +lon_0=2 +lat_ts=45 +x_0=3 +y_0=4',
'+proj=merc +lon_0=2 +k=5 +x_0=3 +y_0=4',
'+proj=stere +lat_0=90 +lat_ts=1 +lon_0=2 +k=2 +x_0=3 +y_0=4',
'+proj=stere +lat_0=-90 +lat_ts=-1 +lon_0=2 +k=2 +x_0=3 +y_0=4',
'+proj=sterea +lat_0=45 +lon_0=2 +k=2 +x_0=3 +y_0=4',
#'+proj=stere +lat_0=1 +lon_0=2 +x_0=3 +y_0=4',
'+proj=eqc +lat_ts=0 +lat_0=1 +lon_0=2 +x_0=3 +y_0=4',
#'+proj=eqc +lat_0=1 +lon_0=2 +x_0=3 +y_0=4',
'+proj=gstmerc +lat_0=1 +lon_0=2 +k_0=5 +x_0=3 +y_0=4',
'+proj=gnom +lat_0=1 +lon_0=2 +x_0=3 +y_0=4',
'+proj=ortho +lat_0=1 +lon_0=2 +x_0=3 +y_0=4',
'+proj=laea +lat_0=1 +lon_0=2 +x_0=3 +y_0=4',
'+proj=aeqd +lat_0=1 +lon_0=2 +x_0=3 +y_0=4',
'+proj=eqdc +lat_0=1 +lon_0=2 +lat_1=-2 +lat_2=-1 +x_0=3 +y_0=4',
'+proj=mill +lat_0=1 +lon_0=2 +x_0=3 +y_0=4 +R_A',
'+proj=moll +lon_0=2 +x_0=3 +y_0=4',
'+proj=eck1 +lon_0=2 +x_0=3 +y_0=4',
'+proj=eck2 +lon_0=2 +x_0=3 +y_0=4',
'+proj=eck3 +lon_0=2 +x_0=3 +y_0=4',
'+proj=eck4 +lon_0=2 +x_0=3 +y_0=4',
'+proj=eck5 +lon_0=2 +x_0=3 +y_0=4',
'+proj=eck6 +lon_0=2 +x_0=3 +y_0=4',
'+proj=poly +lat_0=1 +lon_0=2 +x_0=3 +y_0=4',
'+proj=aea +lat_1=-2 +lat_2=-1 +lat_0=1 +lon_0=2 +x_0=3 +y_0=4',
'+proj=robin +lon_0=2 +x_0=3 +y_0=4',
'+proj=vandg +lon_0=2 +x_0=3 +y_0=4 +R_A',
'+proj=sinu +lon_0=2 +x_0=3 +y_0=4',
'+proj=gall +lon_0=2 +x_0=3 +y_0=4',
'+proj=goode +lon_0=2 +x_0=3 +y_0=4',
'+proj=igh',
'+proj=geos +lon_0=2 +h=1 +x_0=3 +y_0=4',
'+proj=lcc +lat_1=1 +lat_0=1 +lon_0=2 +k_0=2 +x_0=3 +y_0=4',
'+proj=lcc +lat_1=-10 +lat_2=30 +lat_0=60 +lon_0=2 +x_0=3 +y_0=4',
'+proj=lcc +lat_1=-10 +lat_2=30 +lat_0=-10 +lon_0=2 +x_0=3 +y_0=4',
'+proj=omerc +lat_0=1 +lonc=2 +alpha=-1 +k=-3 +x_0=3 +y_0=4 +gamma=-2',
'+proj=somerc +lat_0=1 +lon_0=2 +k_0=2 +x_0=3 +y_0=4',
'+proj=krovak +lat_0=1 +lon_0=2 +alpha=0 +k=2 +x_0=3 +y_0=4',
'+proj=iwm_p +lat_1=-2 +lat_2=-1 +lon_0=2 +x_0=3 +y_0=4',
'+proj=wag1 +x_0=3 +y_0=4',
'+proj=wag2 +x_0=3 +y_0=4',
'+proj=wag3 +lat_ts=1 +x_0=3 +y_0=4',
'+proj=wag4 +x_0=3 +y_0=4',
'+proj=wag5 +x_0=3 +y_0=4',
'+proj=wag6 +x_0=3 +y_0=4',
'+proj=wag7 +x_0=3 +y_0=4',
'+proj=tpeqd +lat_1=1 +lon_1=2 +lat_2=3 +lon_2=4 +x_0=5 +y_0=6',
'+proj=utm +zone=31 +south +ellps=WGS84 +units=us-ft +no_defs ',
'+proj=utm +zone=31 +south +ellps=WGS84 +units=ft +no_defs ',
'+proj=utm +zone=31 +south +ellps=WGS84 +units=yd +no_defs ',
'+proj=utm +zone=31 +south +ellps=WGS84 +units=us-yd +no_defs ',
'+proj=etmerc +lat_0=0 +lon_0=9 +k=0.9996 +units=m +x_0=500000 +datum=WGS84 +no_defs',
'+proj=qsc +lat_0=0 +lon_0=0 +ellps=WGS84 +units=m +no_defs ',
'+proj=sch +plat_0=1 +plon_0=2 +phdg_0=3 +h_0=4'
]
for proj4str in proj4strlist:
# Disabled because proj-4.7.0-4.fc15.x86_64 crashes on that
if proj4str.find('sterea') != -1 and not have_proj480():
continue
srs = osr.SpatialReference()
if proj4str.find("+no_defs") == -1:
proj4str = proj4str + " +ellps=WGS84 +units=m +no_defs "
#print(proj4str)
srs.ImportFromProj4(proj4str)
if srs.Validate() != 0:
gdaltest.post_reason( 'does not validate' )
print(proj4str)
print(srs.ExportToPrettyWkt())
return 'fail'
out = srs.ExportToProj4()
if out != proj4str:
gdaltest.post_reason( 'round trip via PROJ.4 failed' )
print(proj4str)
print(out)
return 'fail'
return 'success'
###############################################################################
# Test importing +init=epsg:XXX
#
def osr_proj4_12():
expect_wkt = """GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9108"]],
AUTHORITY["EPSG","4326"]]"""
srs = osr.SpatialReference()
srs.ImportFromProj4("+init=epsg:4326")
wkt = srs.ExportToPrettyWkt()
if wkt.find("""GEOGCS["WGS 84""") != 0:
print('Got:%s' % wkt)
print('Expected:%s' % expect_wkt)
gdaltest.post_reason( 'Did not get expected result.' )
return 'fail'
return 'success'
###############################################################################
# Test error cases
#
def osr_proj4_13():
proj4strlist = [ '',
#None,
'foo',
'+a=5',
'+proj=foo',
'+proj=longlat +a=5',
'+proj=longlat +ellps=wgs72 +towgs84=3']
gdal.PushErrorHandler('CPLQuietErrorHandler')
for proj4str in proj4strlist:
srs = osr.SpatialReference()
gdal.ErrorReset()
if srs.ImportFromProj4(proj4str) == 0 and gdal.GetLastErrorMsg() == '':
gdal.PopErrorHandler()
return 'fail'
gdal.PopErrorHandler()
return 'success'
###############################################################################
# Test etmerc (#4853)
#
def osr_proj4_14():
proj4str = '+proj=etmerc +lat_0=0 +lon_0=9 +k=0.9996 +units=m +x_0=500000 +datum=WGS84 +nodefs'
# Test importing etmerc
srs = osr.SpatialReference()
srs.ImportFromProj4(proj4str)
wkt = srs.ExportToPrettyWkt()
expect_wkt = """PROJCS["unnamed",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]],
PROJECTION["Transverse_Mercator"],
PARAMETER["latitude_of_origin",0],
PARAMETER["central_meridian",9],
PARAMETER["scale_factor",0.9996],
PARAMETER["false_easting",500000],
PARAMETER["false_northing",0],
UNIT["Meter",1],
EXTENSION["PROJ4","+proj=etmerc +lat_0=0 +lon_0=9 +k=0.9996 +units=m +x_0=500000 +datum=WGS84 +nodefs"]]"""
if wkt != expect_wkt:
print('Got:%s' % wkt)
print('Expected:%s' % expect_wkt)
gdaltest.post_reason( 'Did not get expected result.' )
return 'fail'
srs = osr.SpatialReference()
srs.ImportFromEPSG(32600 + 32)
# Test exporting standard Transverse_Mercator, without any particular option
proj4str = srs.ExportToProj4()
expect_proj4str = '+proj=utm +zone=32 +datum=WGS84 +units=m +no_defs '
if proj4str != expect_proj4str:
print('Got:%s' % proj4str)
print('Expected:%s' % expect_proj4str)
gdaltest.post_reason( 'Did not get expected result.' )
return 'fail'
# Test exporting standard Transverse_Mercator, with OSR_USE_ETMERC=YES
gdal.SetConfigOption('OSR_USE_ETMERC', 'YES')
proj4str = srs.ExportToProj4()
gdal.SetConfigOption('OSR_USE_ETMERC', None)
expect_proj4str = '+proj=etmerc +lat_0=0 +lon_0=9 +k=0.9996 +x_0=500000 +y_0=0 +datum=WGS84 +units=m +no_defs '
if proj4str != expect_proj4str:
print('Got:%s' % proj4str)
print('Expected:%s' % expect_proj4str)
gdaltest.post_reason( 'Did not get expected result.' )
return 'fail'
# Test exporting standard Transverse_Mercator, with OSR_USE_ETMERC=NO
gdal.SetConfigOption('OSR_USE_ETMERC', 'NO')
proj4str = srs.ExportToProj4()
gdal.SetConfigOption('OSR_USE_ETMERC', None)
expect_proj4str = '+proj=tmerc +lat_0=0 +lon_0=9 +k=0.9996 +x_0=500000 +y_0=0 +datum=WGS84 +units=m +no_defs '
if proj4str != expect_proj4str:
print('Got:%s' % proj4str)
print('Expected:%s' % expect_proj4str)
gdaltest.post_reason( 'Did not get expected result.' )
return 'fail'
return 'success'
###############################################################################
# Test other authorities than EPSG, e.g. IGNF:XXXX
#
def osr_proj4_15():
srs = osr.SpatialReference()
if srs.ImportFromProj4("+init=IGNF:LAMB93") != 0:
return 'skip'
if srs.GetAuthorityName(None) != 'IGNF' or srs.GetAuthorityCode(None) != 'LAMB93':
gdaltest.post_reason('fail')
print(srs)
return 'fail'
if srs.Validate() != 0:
gdaltest.post_reason('fail')
print(srs)
return 'fail'
return 'success'
###############################################################################
# Test unit parsing
#
def osr_proj4_16():
def almost(a,b):
if abs(a-b) > 0.000000000001:
return False
return True
units = (('km', 1000.),
('m', 1.),
('dm', 1./10.),
('cm', 1./100.),
('mm', 1./1000.),
('kmi', 1852.0),
('in', 0.0254),
('ft', 0.3048),
('yd', 0.9144),
('mi', 1609.344),
('fath', 1.8288),
('ch', 20.1168),
('link', 0.201168),
('us-in', 1./39.37),
('us-ft', 0.304800609601219),
('us-yd', 0.914401828803658),
('us-ch', 20.11684023368047),
('us-mi', 1609.347218694437),
('ind-yd', 0.91439523),
('ind-ft', 0.30479841),
('ind-ch', 20.11669506))
srs = osr.SpatialReference()
for u in units:
if srs.ImportFromProj4('+proj=utm +zone=11 +datum=WGS84 +units=%s' % u[0] ) != 0:
return 'fail'
to_met = srs.GetLinearUnits()
if not almost(to_met, u[1]):
gdaltest.post_reason('Did not get expected units: %s vs %s' % (str(u), str(to_met)))
return 'fail'
return 'success'
###############################################################################
# Test unit parsing for name assignment
#
def osr_proj4_17():
units = (('km', 'kilometre'),
('m', 'Meter'),
('dm', 'Decimeter'),
('cm', 'Centimeter'),
('mm', 'Millimeter'),
('kmi', 'Nautical_Mile_International'),
('in', 'Inch_International'),
('ft', 'Foot (International)'),
('yd', 'Yard_International'),
('mi', 'Statute_Mile_International'),
('fath', 'Fathom_International'),
('ch', 'Chain_International'),
('link', 'Link_International'),
('us-in', 'Inch_US_Surveyor'),
('us-ft', 'Foot_US'),
('us-yd', 'Yard_US_Surveyor'),
('us-ch', 'Chain_US_Surveyor'),
('us-mi', 'Statute_Mile_US_Surveyor'),
('ind-yd', 'Yard_Indian'),
('ind-ft', 'Foot_Indian'),
('ind-ch', 'Chain_Indian'))
srs = osr.SpatialReference()
for u in units:
if srs.ImportFromProj4('+proj=utm +zone=11 +datum=WGS84 +units=%s' % u[0] ) != 0:
return 'fail'
unit_name = srs.GetLinearUnitsName()
if unit_name != u[1]:
gdaltest.post_reason('Did not get expected unit name: %s vs %s' % (str(u), str(unit_name)))
return 'fail'
return 'success'
###############################################################################
# Test fix for #5511
#
def osr_proj4_18():
for p in [ 'no_off', 'no_uoff']:
srs = osr.SpatialReference()
srs.ImportFromProj4('+proj=omerc +lat_0=57 +lonc=-133 +alpha=-36 +k=0.9999 +x_0=5000000 +y_0=-5000000 +%s +datum=NAD83 +units=m +no_defs' % p)
if srs.Validate() != 0:
gdaltest.post_reason( 'does not validate' )
print(srs.ExportToPrettyWkt())
return 'fail'
out = srs.ExportToProj4()
proj4str = '+proj=omerc +lat_0=57 +lonc=-133 +alpha=-36 +k=0.9999 +x_0=5000000 +y_0=-5000000 +no_uoff +gamma=-36 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs '
if out != proj4str:
gdaltest.post_reason( 'round trip via PROJ.4 failed' )
print(p)
print(proj4str)
print(out)
return 'fail'
return 'success'
###############################################################################
# Test EXTENSION and AUTHORITY in DATUM
def osr_proj4_19():
srs = osr.SpatialReference()
srs.ImportFromProj4( "+proj=longlat +datum=WGS84 +nadgrids=@null" )
if srs.ExportToWkt() != 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],EXTENSION["PROJ4_GRIDS","@null"],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]':
gdaltest.post_reason( 'fail' )
print(srs.ExportToWkt())
return 'fail'
if srs.Validate() != 0:
gdaltest.post_reason( 'does not validate' )
print(srs.ExportToPrettyWkt())
return 'fail'
return 'success'
###############################################################################
# Test EXTENSION in GOGCS
def osr_proj4_20():
srs = osr.SpatialReference()
srs.ImportFromProj4( "+proj=longlat +foo=bar +wktext" )
if srs.ExportToWkt() != 'GEOGCS["WGS 84",DATUM["unknown",SPHEROID["WGS84",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433],EXTENSION["PROJ4","+proj=longlat +foo=bar +wktext"]]' and \
srs.ExportToWkt() != 'GEOGCS["unnamed ellipse",DATUM["unknown",SPHEROID["unnamed",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433],EXTENSION["PROJ4","+proj=longlat +foo=bar +wktext"]]':
gdaltest.post_reason( 'fail' )
print(srs.ExportToWkt())
return 'fail'
if srs.Validate() != 0:
gdaltest.post_reason( 'does not validate' )
print(srs.ExportToPrettyWkt())
return 'fail'
return 'success'
###############################################################################
# Test importing datum other than WGS84, WGS72, NAD27 or NAD83
def osr_proj4_21():
srs = osr.SpatialReference()
srs.ImportFromProj4( "+proj=longlat +datum=nzgd49" )
gdal.SetConfigOption('OVERRIDE_PROJ_DATUM_WITH_TOWGS84', 'NO')
got = srs.ExportToProj4()
gdal.SetConfigOption('OVERRIDE_PROJ_DATUM_WITH_TOWGS84', None)
if got.find('+proj=longlat +datum=nzgd49') != 0:
gdaltest.post_reason( 'fail' )
print(got)
return 'fail'
return 'success'
###############################################################################
# Test importing ellipsoid defined with +R
def osr_proj4_22():
srs = osr.SpatialReference()
srs.ImportFromProj4( "+proj=longlat +R=1" )
got = srs.ExportToProj4()
if got.find('+proj=longlat +a=1 +b=1') != 0:
gdaltest.post_reason( 'fail' )
print(got)
return 'fail'
return 'success'
###############################################################################
# Test importing ellipsoid defined with +a and +f
def osr_proj4_23():
# +f=0 particular case
srs = osr.SpatialReference()
srs.ImportFromProj4( "+proj=longlat +a=1 +f=0" )
got = srs.ExportToProj4()
if got.find('+proj=longlat +a=1 +b=1') != 0:
gdaltest.post_reason( 'fail' )
print(got)
return 'fail'
srs = osr.SpatialReference()
srs.ImportFromProj4( "+proj=longlat +a=2 +f=0.5" )
got = srs.ExportToProj4()
if got.find('+proj=longlat +a=2 +b=1') != 0:
gdaltest.post_reason( 'fail' )
print(got)
return 'fail'
return 'success'
###############################################################################
# Test importing linear units defined with +to_meter
def osr_proj4_24():
srs = osr.SpatialReference()
srs.ImportFromProj4( "+proj=merc +to_meter=1.0" )
got = srs.ExportToProj4()
if got.find('+units=m') < 0:
gdaltest.post_reason( 'fail' )
print(got)
return 'fail'
# Intl foot
srs = osr.SpatialReference()
srs.ImportFromProj4( "+proj=merc +to_meter=0.3048" )
got = srs.ExportToProj4()
if got.find('+units=ft') < 0:
gdaltest.post_reason( 'fail' )
print(got)
return 'fail'
# US foot
srs = osr.SpatialReference()
srs.ImportFromProj4( "+proj=merc +to_meter=0.3048006096012192" )
got = srs.ExportToProj4()
if got.find('+units=us-ft') < 0:
gdaltest.post_reason( 'fail' )
print(got)
return 'fail'
# unknown
srs = osr.SpatialReference()
srs.ImportFromProj4( "+proj=merc +to_meter=0.4" )
got = srs.ExportToProj4()
if got.find('+to_meter=0.4') < 0:
gdaltest.post_reason( 'fail' )
print(got)
return 'fail'
return 'success'
###############################################################################
# Test importing linear units defined with +vto_meter
def osr_proj4_25():
    if not have_proj480():
        return 'skip'
    # Same idea as the +to_meter test, but for the vertical unit factor.
    cases = [("+proj=merc +geoidgrids=foo +vto_meter=1.0", '+vunits=m'),
             ("+proj=merc +geoidgrids=foo +vto_meter=0.3048", '+vunits=ft'),
             ("+proj=merc +geoidgrids=foo +vto_meter=0.3048006096012192", '+vunits=us-ft'),
             ("+proj=merc +geoidgrids=foo +vto_meter=0.4", '+vto_meter=0.4')]
    for src, expected in cases:
        srs = osr.SpatialReference()
        srs.ImportFromProj4(src)
        out = srs.ExportToProj4()
        if out.find(expected) < 0:
            gdaltest.post_reason('fail')
            print(out)
            return 'fail'
    return 'success'
###############################################################################
# Test importing linear units defined with +vunits
def osr_proj4_26():
    if not have_proj480():
        return 'skip'
    # Named vertical units must round-trip unchanged.
    cases = [("+proj=merc +geoidgrids=foo +vunits=m", '+vunits=m'),
             ("+proj=merc +geoidgrids=foo +vunits=ft", '+vunits=ft'),
             ("+proj=merc +geoidgrids=foo +vunits=us-yd", '+vunits=us-yd')]
    for src, expected in cases:
        srs = osr.SpatialReference()
        srs.ImportFromProj4(src)
        out = srs.ExportToProj4()
        if out.find(expected) < 0:
            gdaltest.post_reason('fail')
            print(out)
            return 'fail'
    return 'success'
###############################################################################
# Test geostationary +sweep (#6030)
def osr_proj4_27():
    if not have_proj480():
        return 'skip'
    # The +sweep=x parameter must survive a PROJ.4 round trip unchanged.
    geos = "+proj=geos +h=35785831 +lon_0=0 +datum=WGS84 +sweep=x +units=m"
    srs = osr.SpatialReference()
    srs.ImportFromProj4(geos)
    out = srs.ExportToProj4()
    if out.find(geos) < 0:
        gdaltest.post_reason('fail')
        print(out)
        return 'fail'
    return 'success'
###############################################################################
# Test importing +init=epsg: with an override
def osr_proj4_28():
    # When +init=epsg:XXXX is combined with an explicit override, the EPSG
    # authority code must be dropped from the resulting WKT.
    srs = osr.SpatialReference()
    srs.ImportFromProj4("+init=epsg:32631 +units=cm")
    wkt = srs.ExportToWkt()
    if '32631' in wkt:
        gdaltest.post_reason('fail')
        print(wkt)
        return 'fail'
    return 'success'
def osr_proj4_28_missing_proj_epsg_dict():
    # Re-run osr_proj4_28 in a child interpreter whose PROJ_LIB points at a
    # non-existent directory (set up in this file's __main__ handler).
    exe = sys.executable
    if sys.platform == 'win32':
        exe = exe.replace('\\', '/')
    output = gdaltest.runexternal(exe + ' osr_proj4.py osr_proj4_28')
    if 'fail' in output:
        print(output)
        return 'fail'
    return 'success'
# Ordered list of test functions consumed by the gdaltest harness.
gdaltest_list = [
    osr_proj4_1,
    osr_proj4_2,
    osr_proj4_3,
    osr_proj4_4,
    osr_proj4_5,
    osr_proj4_6,
    osr_proj4_7,
    osr_proj4_8,
    osr_proj4_9,
    osr_proj4_10,
    osr_proj4_11,
    osr_proj4_12,
    osr_proj4_13,
    osr_proj4_14,
    osr_proj4_15,
    osr_proj4_16,
    osr_proj4_17,
    osr_proj4_18,
    osr_proj4_19,
    osr_proj4_20,
    osr_proj4_21,
    osr_proj4_22,
    osr_proj4_23,
    osr_proj4_24,
    osr_proj4_25,
    osr_proj4_26,
    osr_proj4_27,
    osr_proj4_28,
    osr_proj4_28_missing_proj_epsg_dict
    ]
if __name__ == '__main__':
    # Child-process mode used by osr_proj4_28_missing_proj_epsg_dict:
    # run only osr_proj4_28 with PROJ_LIB pointing at a missing directory
    # so the PROJ EPSG dictionary cannot be found.
    if len(sys.argv) == 2 and sys.argv[1] == "osr_proj4_28":
        os.putenv('PROJ_LIB', '/i/dont_exist')
        gdaltest.run_tests( [ osr_proj4_28 ] )
        sys.exit(0)
    gdaltest.setup_run( 'osr_proj4' )
    gdaltest.run_tests( gdaltest_list )
    gdaltest.summarize()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test some PROJ.4 specific translation issues.
# Author: <NAME> <<EMAIL>>
#
###############################################################################
# Copyright (c) 2003, <NAME> <<EMAIL>>
# Copyright (c) 2009-2013, <NAME> <even dot rouault at mines-paris dot org>
# Copyright (c) 2014, <NAME> <kyle at pobox dot com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
sys.path.append( '../pymod' )
from osgeo import gdal, osr
import gdaltest
###############################################################################
# Return True if proj is at least 4.8.0
have_proj480_flag = None
def have_proj480():
global have_proj480_flag
if have_proj480_flag is not None:
return have_proj480_flag
try:
import ctypes
except:
print('cannot find ctypes')
have_proj480_flag = False
return have_proj480_flag
handle = None
for name in ["libproj.so", "proj.dll", "proj-9.dll", "libproj-0.dll", "libproj-10.dll", "cygproj-10.dll", "libproj.dylib"]:
try:
handle = ctypes.cdll.LoadLibrary(name)
except:
pass
if handle is None:
print('cannot load libproj.so, proj.dll, proj-9.dll, libproj-0.dll, libproj-10.dll, cygproj-10.dll or libproj.dylib')
have_proj480_flag = False
return have_proj480_flag
try:
handle.pj_init
except:
print('cannot find pj_init symbol : weird')
have_proj480_flag = False
return have_proj480_flag
# Proj4.8.0 has introduced the pj_etmerc() function. Test for it
try:
handle.pj_etmerc
have_proj480_flag = True
return have_proj480_flag
except:
print('cannot find pj_etmerc : PROJ < 4.8.0')
have_proj480_flag = False
return have_proj480_flag
###############################################################################
# Test the +k_0 flag works as well as +k when consuming PROJ.4 format.
# This is from Bugzilla bug 355.
#
def osr_proj4_1():
    # Import an Irish-grid style definition that spells the scale factor
    # as +k_0 instead of +k, then verify the stored SCALE_FACTOR.
    srs = osr.SpatialReference()
    srs.ImportFromProj4('+proj=tmerc +lat_0=53.5000000000 +lon_0=-8.0000000000 +k_0=1.0000350000 +x_0=200000.0000000000 +y_0=250000.0000000000 +a=6377340.189000 +rf=299.324965 +towgs84=482.530,-130.596,564.557,-1.042,-0.214,-0.631,8.15')
    scale = srs.GetProjParm(osr.SRS_PP_SCALE_FACTOR)
    if abs(scale - 1.000035) > 0.0000005:
        gdaltest.post_reason('+k_0 not supported on import from PROJ.4?')
        return 'fail'
    return 'success'
###############################################################################
# Verify that we can import strings with parameter values that are exponents
# and contain a plus sign. As per bug 355 in GDAL/OGR's bugzilla.
#
def osr_proj4_2():
    """Parse an exponent-with-plus-sign parameter value (+x_0=...e+06)."""
    srs = osr.SpatialReference()
    srs.ImportFromProj4( "+proj=lcc +x_0=0.6096012192024384e+06 +y_0=0 +lon_0=90dw +lat_0=42dn +lat_1=44d4'n +lat_2=42d44'n +a=6378206.400000 +rf=294.978698 +nadgrids=conus,ntv1_can.dat +units=m" )
    # 0.6096012192024384e+06 metres is 609601.219 m.
    if abs(srs.GetProjParm( osr.SRS_PP_FALSE_EASTING )-609601.219) > 0.0005:
        gdaltest.post_reason( 'Parsing exponents not supported?' )
        return 'fail'
    if srs.Validate() != 0:
        gdaltest.post_reason( 'does not validate' )
        print(srs.ExportToPrettyWkt())
        return 'fail'
    return 'success'
###############################################################################
# Verify that empty srs'es don't cause a crash (#1718).
#
def osr_proj4_3():
    # Exporting a completely empty SRS must error out cleanly, not crash.
    srs = osr.SpatialReference()
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    try:
        srs.ExportToProj4()
    except RuntimeError:
        pass
    gdal.PopErrorHandler()
    if gdal.GetLastErrorMsg().find('No translation') != -1:
        return 'success'
    gdaltest.post_reason('empty srs not handled properly')
    return 'fail'
###############################################################################
# Verify that unrecognized projections return an error, not those
# annoying ellipsoid-only results.
#
def osr_proj4_4():
    # Corrupt a valid projected SRS with a bogus projection name, then
    # confirm export reports "No translation" instead of succeeding.
    srs = osr.SpatialReference()
    srs.SetFromUserInput('+proj=utm +zone=11 +datum=WGS84')
    srs.SetAttrValue('PROJCS|PROJECTION', 'FakeTransverseMercator')
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    try:
        srs.ExportToProj4()
    except RuntimeError:
        pass
    gdal.PopErrorHandler()
    if gdal.GetLastErrorMsg().find('No translation') != -1:
        return 'success'
    gdaltest.post_reason('unknown srs not handled properly')
    return 'fail'
###############################################################################
# Verify that prime meridians are preserved when round tripping. (#1940)
#
def osr_proj4_5():
    """Check that a +pm=paris prime meridian survives a PROJ.4 round trip."""
    srs = osr.SpatialReference()
    srs.ImportFromProj4( '+proj=lcc +lat_1=46.8 +lat_0=46.8 +lon_0=0 +k_0=0.99987742 +x_0=600000 +y_0=2200000 +a=6378249.2 +b=6356515 +towgs84=-168,-60,320,0,0,0,0 +pm=paris +units=m +no_defs' )
    # PRIMEM's second value is the Greenwich offset of the Paris meridian.
    if abs(float(srs.GetAttrValue('PRIMEM',1)) - 2.3372291667) > 0.00000001:
        gdaltest.post_reason('prime meridian lost?')
        return 'fail'
    # The central meridian itself must remain 0 (relative to the pm).
    if abs(srs.GetProjParm('central_meridian')) != 0.0:
        gdaltest.post_reason('central meridian altered?')
        return 'fail'
    p4 = srs.ExportToProj4()
    srs2 = osr.SpatialReference()
    srs2.ImportFromProj4(p4)
    if not srs.IsSame(srs2):
        gdaltest.post_reason('round trip via PROJ.4 damaged srs?')
        print(srs.ExportToPrettyWkt())
        print(srs2.ExportToPrettyWkt())
        # Bug fix: this branch previously fell through and returned
        # 'success' even though the round-tripped SRS differed.
        return 'fail'
    return 'success'
###############################################################################
# Confirm handling of non-zero latitude of origin mercator (#3026)
#
def osr_proj4_6():
    """Round-trip a Mercator_1SP whose latitude_of_origin is non-zero.

    Export maps latitude_of_origin to +lat_ts; re-importing the PROJ.4
    string then yields a Mercator_2SP with standard_parallel_1.
    """
    expect_proj4 = '+proj=merc +lon_0=0 +lat_ts=46.1333331 +x_0=1000 +y_0=2000 +datum=WGS84 +units=m +no_defs '
    wkt = """PROJCS["unnamed",
    GEOGCS["WGS 84",
        DATUM["WGS_1984",
            SPHEROID["WGS 84",6378137,298.257223563,
                AUTHORITY["EPSG","7030"]],
            AUTHORITY["EPSG","6326"]],
        PRIMEM["Greenwich",0],
        UNIT["degree",0.0174532925199433],
        AUTHORITY["EPSG","4326"]],
    PROJECTION["Mercator_1SP"],
    PARAMETER["latitude_of_origin",46.1333331],
    PARAMETER["central_meridian",0],
    PARAMETER["scale_factor",1],
    PARAMETER["false_easting",1000],
    PARAMETER["false_northing",2000],
    UNIT["metre",1,
        AUTHORITY["EPSG","9001"]]]"""
    srs = osr.SpatialReference()
    srs.ImportFromWkt(wkt)
    proj4 = srs.ExportToProj4()
    if proj4 != expect_proj4:
        print('Got:', proj4)
        print('Expected:', expect_proj4)
        gdaltest.post_reason( 'Failed to translate non-zero lat-of-origin mercator.' )
        return 'fail'
    # Translate back - should become Mercator_2SP (see expect_wkt below).
    expect_wkt = """PROJCS["unnamed",
    GEOGCS["WGS 84",
        DATUM["WGS_1984",
            SPHEROID["WGS 84",6378137,298.257223563,
                AUTHORITY["EPSG","7030"]],
            AUTHORITY["EPSG","6326"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4326"]],
    PROJECTION["Mercator_2SP"],
    PARAMETER["standard_parallel_1",46.1333331],
    PARAMETER["central_meridian",0],
    PARAMETER["false_easting",1000],
    PARAMETER["false_northing",2000],
    UNIT["Meter",1]]"""
    srs.SetFromUserInput( proj4 )
    wkt = srs.ExportToPrettyWkt()
    if wkt != expect_wkt:
        print('Got: %s' % wkt)
        print('Expect:%s' % expect_wkt)
        gdaltest.post_reason( 'did not get expected mercator_2sp result.' )
        return 'fail'
    return 'success'
###############################################################################
# Confirm handling of somerc (#3032).
#
def osr_proj4_7():
    """Export EPSG:23700 to +proj=somerc and re-import it as oblique mercator."""
    srs = osr.SpatialReference()
    srs.ImportFromEPSG( 23700 )
    proj4 = srs.ExportToProj4()
    expected = '+proj=somerc +lat_0=47.14439372222222 +lon_0=19.04857177777778 +k_0=0.99993 +x_0=650000 +y_0=200000 +ellps=GRS67 +towgs84=52.17,-71.82,-14.9,0,0,0,0 +units=m +no_defs '
    if proj4 != expected:
        gdaltest.post_reason( 'did not get expected proj.4 translation of somerc' )
        print('')
        print('Got: "%s"' % proj4)
        print('Expected:"%s"' % expected)
        return 'fail'
    # Re-importing +proj=somerc yields the Hotine oblique mercator
    # (azimuth center) formulation with azimuth/gamma = 90.
    srs.ImportFromProj4( proj4 )
    expected = """PROJCS["unnamed",
    GEOGCS["GRS 67(IUGG 1967)",
        DATUM["unknown",
            SPHEROID["GRS67",6378160,298.247167427],
            TOWGS84[52.17,-71.82,-14.9,0,0,0,0]],
        PRIMEM["Greenwich",0],
        UNIT["degree",0.0174532925199433]],
    PROJECTION["Hotine_Oblique_Mercator_Azimuth_Center"],
    PARAMETER["latitude_of_center",47.14439372222222],
    PARAMETER["longitude_of_center",19.04857177777778],
    PARAMETER["azimuth",90],
    PARAMETER["rectified_grid_angle",90],
    PARAMETER["scale_factor",0.99993],
    PARAMETER["false_easting",650000],
    PARAMETER["false_northing",200000],
    UNIT["Meter",1]]"""
    srs_expected = osr.SpatialReference( wkt = expected )
    if not srs.IsSame(srs_expected):
        gdaltest.post_reason( 'did not get expected wkt.' )
        print( 'Got: %s' % srs.ExportToPrettyWkt() )
        return 'fail'
    return 'success'
###############################################################################
# Check EPSG:3857, confirm Google Mercator hackery.
def osr_proj4_8():
    expected = '+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs'
    # EPSG:3857 and its deprecated alias EPSG:3785 must both export to the
    # special spherical-mercator PROJ.4 string above.
    checks = ((3857, 'did not get expected EPSG:3857 (google mercator) result.'),
              (3785, 'did not get expected EPSG:3785 (google mercator) result.'))
    for code, reason in checks:
        srs = osr.SpatialReference()
        srs.ImportFromEPSG(code)
        proj4 = srs.ExportToProj4()
        if proj4 != expected:
            gdaltest.post_reason(reason)
            print(proj4)
            return 'fail'
    return 'success'
###############################################################################
# NAD27 is a bit special - make sure no towgs84 values come through.
#
def osr_proj4_9():
    expected = '+proj=longlat +datum=NAD27 +no_defs '
    # Via the EPSG code ...
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(4267)
    proj4 = srs.ExportToProj4()
    if proj4 != expected:
        gdaltest.post_reason('did not get expected EPSG:4267 (NAD27)')
        print(proj4)
        return 'fail'
    # ... and via the plain datum name.
    srs = osr.SpatialReference()
    srs.SetFromUserInput('NAD27')
    proj4 = srs.ExportToProj4()
    if proj4 != expected:
        gdaltest.post_reason('did not get expected "NAD27"')
        print(proj4)
        return 'fail'
    return 'success'
###############################################################################
# Does geocentric work okay?
#
def osr_proj4_10():
    """Import +proj=geocent, check the GEOCCS WKT, and round-trip it."""
    srs = osr.SpatialReference()
    srs.ImportFromProj4( '+proj=geocent +ellps=WGS84 +towgs84=0,0,0 ' )
    wkt_expected = 'GEOCCS["Geocentric",DATUM["unknown",SPHEROID["WGS84",6378137,298.257223563],TOWGS84[0,0,0,0,0,0,0]],PRIMEM["Greenwich",0]]'
    if not gdaltest.equal_srs_from_wkt( wkt_expected, srs.ExportToWkt() ):
        gdaltest.post_reason( 'did not get expected wkt.' )
        return 'fail'
    # Round trip through PROJ.4 must preserve the SRS.
    p4 = srs.ExportToProj4()
    srs2 = osr.SpatialReference()
    srs2.ImportFromProj4( p4 )
    if not srs.IsSame(srs2):
        gdaltest.post_reason( 'round trip via PROJ.4 damaged srs?' )
        print(srs.ExportToPrettyWkt())
        print(srs2.ExportToPrettyWkt())
        return 'fail'
    return 'success'
###############################################################################
# Test round-tripping of all supported projection methods
#
def osr_proj4_11():
    """Round-trip each supported +proj= method through WKT and back.

    Strings lacking +no_defs get " +ellps=WGS84 +units=m +no_defs "
    appended before importing, so the exported string can be compared
    byte-for-byte with the input.
    """
    proj4strlist = [ '+proj=bonne +lon_0=2 +lat_1=1 +x_0=3 +y_0=4',
                     '+proj=cass +lat_0=1 +lon_0=2 +x_0=3 +y_0=4',
                     '+proj=nzmg +lat_0=1 +lon_0=2 +x_0=3 +y_0=4',
                     '+proj=cea +lon_0=2 +lat_ts=1 +x_0=3 +y_0=4',
                     '+proj=tmerc +lat_0=1 +lon_0=2 +k=5 +x_0=3 +y_0=4',
                     '+proj=utm +zone=31 +south',
                     '+proj=merc +lon_0=2 +lat_ts=45 +x_0=3 +y_0=4',
                     '+proj=merc +lon_0=2 +k=5 +x_0=3 +y_0=4',
                     '+proj=stere +lat_0=90 +lat_ts=1 +lon_0=2 +k=2 +x_0=3 +y_0=4',
                     '+proj=stere +lat_0=-90 +lat_ts=-1 +lon_0=2 +k=2 +x_0=3 +y_0=4',
                     '+proj=sterea +lat_0=45 +lon_0=2 +k=2 +x_0=3 +y_0=4',
                     #'+proj=stere +lat_0=1 +lon_0=2 +x_0=3 +y_0=4',
                     '+proj=eqc +lat_ts=0 +lat_0=1 +lon_0=2 +x_0=3 +y_0=4',
                     #'+proj=eqc +lat_0=1 +lon_0=2 +x_0=3 +y_0=4',
                     '+proj=gstmerc +lat_0=1 +lon_0=2 +k_0=5 +x_0=3 +y_0=4',
                     '+proj=gnom +lat_0=1 +lon_0=2 +x_0=3 +y_0=4',
                     '+proj=ortho +lat_0=1 +lon_0=2 +x_0=3 +y_0=4',
                     '+proj=laea +lat_0=1 +lon_0=2 +x_0=3 +y_0=4',
                     '+proj=aeqd +lat_0=1 +lon_0=2 +x_0=3 +y_0=4',
                     '+proj=eqdc +lat_0=1 +lon_0=2 +lat_1=-2 +lat_2=-1 +x_0=3 +y_0=4',
                     '+proj=mill +lat_0=1 +lon_0=2 +x_0=3 +y_0=4 +R_A',
                     '+proj=moll +lon_0=2 +x_0=3 +y_0=4',
                     '+proj=eck1 +lon_0=2 +x_0=3 +y_0=4',
                     '+proj=eck2 +lon_0=2 +x_0=3 +y_0=4',
                     '+proj=eck3 +lon_0=2 +x_0=3 +y_0=4',
                     '+proj=eck4 +lon_0=2 +x_0=3 +y_0=4',
                     '+proj=eck5 +lon_0=2 +x_0=3 +y_0=4',
                     '+proj=eck6 +lon_0=2 +x_0=3 +y_0=4',
                     '+proj=poly +lat_0=1 +lon_0=2 +x_0=3 +y_0=4',
                     '+proj=aea +lat_1=-2 +lat_2=-1 +lat_0=1 +lon_0=2 +x_0=3 +y_0=4',
                     '+proj=robin +lon_0=2 +x_0=3 +y_0=4',
                     '+proj=vandg +lon_0=2 +x_0=3 +y_0=4 +R_A',
                     '+proj=sinu +lon_0=2 +x_0=3 +y_0=4',
                     '+proj=gall +lon_0=2 +x_0=3 +y_0=4',
                     '+proj=goode +lon_0=2 +x_0=3 +y_0=4',
                     '+proj=igh',
                     '+proj=geos +lon_0=2 +h=1 +x_0=3 +y_0=4',
                     '+proj=lcc +lat_1=1 +lat_0=1 +lon_0=2 +k_0=2 +x_0=3 +y_0=4',
                     '+proj=lcc +lat_1=-10 +lat_2=30 +lat_0=60 +lon_0=2 +x_0=3 +y_0=4',
                     '+proj=lcc +lat_1=-10 +lat_2=30 +lat_0=-10 +lon_0=2 +x_0=3 +y_0=4',
                     '+proj=omerc +lat_0=1 +lonc=2 +alpha=-1 +k=-3 +x_0=3 +y_0=4 +gamma=-2',
                     '+proj=somerc +lat_0=1 +lon_0=2 +k_0=2 +x_0=3 +y_0=4',
                     '+proj=krovak +lat_0=1 +lon_0=2 +alpha=0 +k=2 +x_0=3 +y_0=4',
                     '+proj=iwm_p +lat_1=-2 +lat_2=-1 +lon_0=2 +x_0=3 +y_0=4',
                     '+proj=wag1 +x_0=3 +y_0=4',
                     '+proj=wag2 +x_0=3 +y_0=4',
                     '+proj=wag3 +lat_ts=1 +x_0=3 +y_0=4',
                     '+proj=wag4 +x_0=3 +y_0=4',
                     '+proj=wag5 +x_0=3 +y_0=4',
                     '+proj=wag6 +x_0=3 +y_0=4',
                     '+proj=wag7 +x_0=3 +y_0=4',
                     '+proj=tpeqd +lat_1=1 +lon_1=2 +lat_2=3 +lon_2=4 +x_0=5 +y_0=6',
                     '+proj=utm +zone=31 +south +ellps=WGS84 +units=us-ft +no_defs ',
                     '+proj=utm +zone=31 +south +ellps=WGS84 +units=ft +no_defs ',
                     '+proj=utm +zone=31 +south +ellps=WGS84 +units=yd +no_defs ',
                     '+proj=utm +zone=31 +south +ellps=WGS84 +units=us-yd +no_defs ',
                     '+proj=etmerc +lat_0=0 +lon_0=9 +k=0.9996 +units=m +x_0=500000 +datum=WGS84 +no_defs',
                     '+proj=qsc +lat_0=0 +lon_0=0 +ellps=WGS84 +units=m +no_defs ',
                     '+proj=sch +plat_0=1 +plon_0=2 +phdg_0=3 +h_0=4'
                     ]
    for proj4str in proj4strlist:
        # Disabled because proj-4.7.0-4.fc15.x86_64 crashes on that
        if proj4str.find('sterea') != -1 and not have_proj480():
            continue
        srs = osr.SpatialReference()
        if proj4str.find("+no_defs") == -1:
            proj4str = proj4str + " +ellps=WGS84 +units=m +no_defs "
        #print(proj4str)
        srs.ImportFromProj4(proj4str)
        if srs.Validate() != 0:
            gdaltest.post_reason( 'does not validate' )
            print(proj4str)
            print(srs.ExportToPrettyWkt())
            return 'fail'
        out = srs.ExportToProj4()
        if out != proj4str:
            gdaltest.post_reason( 'round trip via PROJ.4 failed' )
            print(proj4str)
            print(out)
            return 'fail'
    return 'success'
###############################################################################
# Test importing +init=epsg:XXX
#
def osr_proj4_12():
    """Import "+init=epsg:4326" and check a WGS 84 GEOGCS comes back."""
    expect_wkt = """GEOGCS["WGS 84",
    DATUM["WGS_1984",
        SPHEROID["WGS 84",6378137,298.257223563,
            AUTHORITY["EPSG","7030"]],
        AUTHORITY["EPSG","6326"]],
    PRIMEM["Greenwich",0,
        AUTHORITY["EPSG","8901"]],
    UNIT["degree",0.0174532925199433,
        AUTHORITY["EPSG","9108"]],
    AUTHORITY["EPSG","4326"]]"""
    srs = osr.SpatialReference()
    srs.ImportFromProj4("+init=epsg:4326")
    wkt = srs.ExportToPrettyWkt()
    # Only the leading GEOGCS["WGS 84 prefix is checked, not the full WKT.
    if wkt.find("""GEOGCS["WGS 84""") != 0:
        print('Got:%s' % wkt)
        print('Expected:%s' % expect_wkt)
        gdaltest.post_reason( 'Did not get expected result.' )
        return 'fail'
    return 'success'
###############################################################################
# Test error cases
#
def osr_proj4_13():
    # Each malformed string must either return a non-zero error code or
    # leave an error message behind.
    bad_strings = ['',
                   'foo',
                   '+a=5',
                   '+proj=foo',
                   '+proj=longlat +a=5',
                   '+proj=longlat +ellps=wgs72 +towgs84=3']
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    for bad in bad_strings:
        srs = osr.SpatialReference()
        gdal.ErrorReset()
        if srs.ImportFromProj4(bad) == 0 and gdal.GetLastErrorMsg() == '':
            gdal.PopErrorHandler()
            return 'fail'
    gdal.PopErrorHandler()
    return 'success'
###############################################################################
# Test etmerc (#4853)
#
def osr_proj4_14():
    """Exercise +proj=etmerc import and the OSR_USE_ETMERC export option."""
    # NOTE(review): '+nodefs' (sic, not '+no_defs') is deliberate test input;
    # the expected WKT below embeds the very same string in its EXTENSION node.
    proj4str = '+proj=etmerc +lat_0=0 +lon_0=9 +k=0.9996 +units=m +x_0=500000 +datum=WGS84 +nodefs'
    # Test importing etmerc
    srs = osr.SpatialReference()
    srs.ImportFromProj4(proj4str)
    wkt = srs.ExportToPrettyWkt()
    expect_wkt = """PROJCS["unnamed",
    GEOGCS["WGS 84",
        DATUM["WGS_1984",
            SPHEROID["WGS 84",6378137,298.257223563,
                AUTHORITY["EPSG","7030"]],
            AUTHORITY["EPSG","6326"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4326"]],
    PROJECTION["Transverse_Mercator"],
    PARAMETER["latitude_of_origin",0],
    PARAMETER["central_meridian",9],
    PARAMETER["scale_factor",0.9996],
    PARAMETER["false_easting",500000],
    PARAMETER["false_northing",0],
    UNIT["Meter",1],
    EXTENSION["PROJ4","+proj=etmerc +lat_0=0 +lon_0=9 +k=0.9996 +units=m +x_0=500000 +datum=WGS84 +nodefs"]]"""
    if wkt != expect_wkt:
        print('Got:%s' % wkt)
        print('Expected:%s' % expect_wkt)
        gdaltest.post_reason( 'Did not get expected result.' )
        return 'fail'
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(32600 + 32)
    # Test exporting standard Transverse_Mercator, without any particular option
    proj4str = srs.ExportToProj4()
    expect_proj4str = '+proj=utm +zone=32 +datum=WGS84 +units=m +no_defs '
    if proj4str != expect_proj4str:
        print('Got:%s' % proj4str)
        print('Expected:%s' % expect_proj4str)
        gdaltest.post_reason( 'Did not get expected result.' )
        return 'fail'
    # Test exporting standard Transverse_Mercator, with OSR_USE_ETMERC=YES
    gdal.SetConfigOption('OSR_USE_ETMERC', 'YES')
    proj4str = srs.ExportToProj4()
    gdal.SetConfigOption('OSR_USE_ETMERC', None)
    expect_proj4str = '+proj=etmerc +lat_0=0 +lon_0=9 +k=0.9996 +x_0=500000 +y_0=0 +datum=WGS84 +units=m +no_defs '
    if proj4str != expect_proj4str:
        print('Got:%s' % proj4str)
        print('Expected:%s' % expect_proj4str)
        gdaltest.post_reason( 'Did not get expected result.' )
        return 'fail'
    # Test exporting standard Transverse_Mercator, with OSR_USE_ETMERC=NO
    gdal.SetConfigOption('OSR_USE_ETMERC', 'NO')
    proj4str = srs.ExportToProj4()
    gdal.SetConfigOption('OSR_USE_ETMERC', None)
    expect_proj4str = '+proj=tmerc +lat_0=0 +lon_0=9 +k=0.9996 +x_0=500000 +y_0=0 +datum=WGS84 +units=m +no_defs '
    if proj4str != expect_proj4str:
        print('Got:%s' % proj4str)
        print('Expected:%s' % expect_proj4str)
        gdaltest.post_reason( 'Did not get expected result.' )
        return 'fail'
    return 'success'
###############################################################################
# Test other authorities than EPSG, e.g. IGNF:XXXX
#
def osr_proj4_15():
    srs = osr.SpatialReference()
    # Skip when the local PROJ install has no IGNF dictionary.
    if srs.ImportFromProj4("+init=IGNF:LAMB93") != 0:
        return 'skip'
    # Both the authority name and code must come from the IGNF dictionary,
    # and the resulting SRS must validate.
    wrong_authority = (srs.GetAuthorityName(None) != 'IGNF'
                       or srs.GetAuthorityCode(None) != 'LAMB93')
    if wrong_authority or srs.Validate() != 0:
        gdaltest.post_reason('fail')
        print(srs)
        return 'fail'
    return 'success'
###############################################################################
# Test unit parsing
#
def osr_proj4_16():
    """Each PROJ.4 +units code must map to the expected metres-per-unit."""
    def almost(a,b):
        # Tight absolute tolerance for comparing unit conversion factors.
        if abs(a-b) > 0.000000000001:
            return False
        return True
    # (units code, metres per unit) pairs.
    units = (('km', 1000.),
             ('m', 1.),
             ('dm', 1./10.),
             ('cm', 1./100.),
             ('mm', 1./1000.),
             ('kmi', 1852.0),
             ('in', 0.0254),
             ('ft', 0.3048),
             ('yd', 0.9144),
             ('mi', 1609.344),
             ('fath', 1.8288),
             ('ch', 20.1168),
             ('link', 0.201168),
             ('us-in', 1./39.37),
             ('us-ft', 0.304800609601219),
             ('us-yd', 0.914401828803658),
             ('us-ch', 20.11684023368047),
             ('us-mi', 1609.347218694437),
             ('ind-yd', 0.91439523),
             ('ind-ft', 0.30479841),
             ('ind-ch', 20.11669506))
    srs = osr.SpatialReference()
    for u in units:
        if srs.ImportFromProj4('+proj=utm +zone=11 +datum=WGS84 +units=%s' % u[0] ) != 0:
            return 'fail'
        to_met = srs.GetLinearUnits()
        if not almost(to_met, u[1]):
            gdaltest.post_reason('Did not get expected units: %s vs %s' % (str(u), str(to_met)))
            return 'fail'
    return 'success'
###############################################################################
# Test unit parsing for name assignment
#
def osr_proj4_17():
    """Each PROJ.4 +units code must map to the expected OGC unit name."""
    # (units code, expected GetLinearUnitsName() result) pairs.
    units = (('km', 'kilometre'),
             ('m', 'Meter'),
             ('dm', 'Decimeter'),
             ('cm', 'Centimeter'),
             ('mm', 'Millimeter'),
             ('kmi', 'Nautical_Mile_International'),
             ('in', 'Inch_International'),
             ('ft', 'Foot (International)'),
             ('yd', 'Yard_International'),
             ('mi', 'Statute_Mile_International'),
             ('fath', 'Fathom_International'),
             ('ch', 'Chain_International'),
             ('link', 'Link_International'),
             ('us-in', 'Inch_US_Surveyor'),
             ('us-ft', 'Foot_US'),
             ('us-yd', 'Yard_US_Surveyor'),
             ('us-ch', 'Chain_US_Surveyor'),
             ('us-mi', 'Statute_Mile_US_Surveyor'),
             ('ind-yd', 'Yard_Indian'),
             ('ind-ft', 'Foot_Indian'),
             ('ind-ch', 'Chain_Indian'))
    srs = osr.SpatialReference()
    for u in units:
        if srs.ImportFromProj4('+proj=utm +zone=11 +datum=WGS84 +units=%s' % u[0] ) != 0:
            return 'fail'
        unit_name = srs.GetLinearUnitsName()
        if unit_name != u[1]:
            gdaltest.post_reason('Did not get expected unit name: %s vs %s' % (str(u), str(unit_name)))
            return 'fail'
    return 'success'
###############################################################################
# Test fix for #5511
#
def osr_proj4_18():
    """omerc with +no_off / +no_uoff must validate and round-trip."""
    for p in [ 'no_off', 'no_uoff']:
        srs = osr.SpatialReference()
        srs.ImportFromProj4('+proj=omerc +lat_0=57 +lonc=-133 +alpha=-36 +k=0.9999 +x_0=5000000 +y_0=-5000000 +%s +datum=NAD83 +units=m +no_defs' % p)
        if srs.Validate() != 0:
            gdaltest.post_reason( 'does not validate' )
            print(srs.ExportToPrettyWkt())
            return 'fail'
        out = srs.ExportToProj4()
        # Both spellings normalise to +no_uoff (plus +gamma) on export.
        proj4str = '+proj=omerc +lat_0=57 +lonc=-133 +alpha=-36 +k=0.9999 +x_0=5000000 +y_0=-5000000 +no_uoff +gamma=-36 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs '
        if out != proj4str:
            gdaltest.post_reason( 'round trip via PROJ.4 failed' )
            print(p)
            print(proj4str)
            print(out)
            return 'fail'
    return 'success'
###############################################################################
# Test EXTENSION and AUTHORITY in DATUM
def osr_proj4_19():
    """+nadgrids=@null must be stored as a PROJ4_GRIDS EXTENSION in DATUM."""
    srs = osr.SpatialReference()
    srs.ImportFromProj4( "+proj=longlat +datum=WGS84 +nadgrids=@null" )
    if srs.ExportToWkt() != 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],EXTENSION["PROJ4_GRIDS","@null"],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]':
        gdaltest.post_reason( 'fail' )
        print(srs.ExportToWkt())
        return 'fail'
    if srs.Validate() != 0:
        gdaltest.post_reason( 'does not validate' )
        print(srs.ExportToPrettyWkt())
        return 'fail'
    return 'success'
###############################################################################
# Test EXTENSION in GEOGCS
def osr_proj4_20():
    """Unknown parameters plus +wktext must be kept in a PROJ4 EXTENSION.

    Two WKT variants are accepted because the GEOGCS/SPHEROID naming
    differs between builds; both carry the same EXTENSION node.
    """
    srs = osr.SpatialReference()
    srs.ImportFromProj4( "+proj=longlat +foo=bar +wktext" )
    if srs.ExportToWkt() != 'GEOGCS["WGS 84",DATUM["unknown",SPHEROID["WGS84",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433],EXTENSION["PROJ4","+proj=longlat +foo=bar +wktext"]]' and \
       srs.ExportToWkt() != 'GEOGCS["unnamed ellipse",DATUM["unknown",SPHEROID["unnamed",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433],EXTENSION["PROJ4","+proj=longlat +foo=bar +wktext"]]':
        gdaltest.post_reason( 'fail' )
        print(srs.ExportToWkt())
        return 'fail'
    if srs.Validate() != 0:
        gdaltest.post_reason( 'does not validate' )
        print(srs.ExportToPrettyWkt())
        return 'fail'
    return 'success'
###############################################################################
# Test importing datum other than WGS84, WGS72, NAD27 or NAD83
def osr_proj4_21():
    srs = osr.SpatialReference()
    srs.ImportFromProj4("+proj=longlat +datum=nzgd49")
    # With the TOWGS84 override disabled, the datum name must be kept on
    # export rather than being replaced by shift parameters.
    gdal.SetConfigOption('OVERRIDE_PROJ_DATUM_WITH_TOWGS84', 'NO')
    out = srs.ExportToProj4()
    gdal.SetConfigOption('OVERRIDE_PROJ_DATUM_WITH_TOWGS84', None)
    if not out.startswith('+proj=longlat +datum=nzgd49'):
        gdaltest.post_reason('fail')
        print(out)
        return 'fail'
    return 'success'
###############################################################################
# Test importing ellipsoid defined with +R
def osr_proj4_22():
    # A sphere given as +R must round-trip as an explicit +a/+b pair with
    # equal radii.
    srs = osr.SpatialReference()
    srs.ImportFromProj4("+proj=longlat +R=1")
    out = srs.ExportToProj4()
    if not out.startswith('+proj=longlat +a=1 +b=1'):
        gdaltest.post_reason('fail')
        print(out)
        return 'fail'
    return 'success'
###############################################################################
# Test importing ellipsoid defined with +a and +f
def osr_proj4_23():
    # Both the +f=0 special case (sphere) and a generic flattening must be
    # converted to an explicit +a/+b pair on export.
    cases = [("+proj=longlat +a=1 +f=0", '+proj=longlat +a=1 +b=1'),
             ("+proj=longlat +a=2 +f=0.5", '+proj=longlat +a=2 +b=1')]
    for src, expected in cases:
        srs = osr.SpatialReference()
        srs.ImportFromProj4(src)
        out = srs.ExportToProj4()
        if not out.startswith(expected):
            gdaltest.post_reason('fail')
            print(out)
            return 'fail'
    return 'success'
###############################################################################
# Test importing linear units defined with +to_meter
def osr_proj4_24():
    # Known conversion factors must be recognised as named units on export;
    # an unrecognised factor must be passed through verbatim as +to_meter.
    cases = [("+proj=merc +to_meter=1.0", '+units=m'),
             ("+proj=merc +to_meter=0.3048", '+units=ft'),
             ("+proj=merc +to_meter=0.3048006096012192", '+units=us-ft'),
             ("+proj=merc +to_meter=0.4", '+to_meter=0.4')]
    for src, expected in cases:
        srs = osr.SpatialReference()
        srs.ImportFromProj4(src)
        out = srs.ExportToProj4()
        if out.find(expected) < 0:
            gdaltest.post_reason('fail')
            print(out)
            return 'fail'
    return 'success'
###############################################################################
# Test importing linear units defined with +vto_meter
def osr_proj4_25():
    if not have_proj480():
        return 'skip'
    # Same idea as the +to_meter test, but for the vertical unit factor.
    cases = [("+proj=merc +geoidgrids=foo +vto_meter=1.0", '+vunits=m'),
             ("+proj=merc +geoidgrids=foo +vto_meter=0.3048", '+vunits=ft'),
             ("+proj=merc +geoidgrids=foo +vto_meter=0.3048006096012192", '+vunits=us-ft'),
             ("+proj=merc +geoidgrids=foo +vto_meter=0.4", '+vto_meter=0.4')]
    for src, expected in cases:
        srs = osr.SpatialReference()
        srs.ImportFromProj4(src)
        out = srs.ExportToProj4()
        if out.find(expected) < 0:
            gdaltest.post_reason('fail')
            print(out)
            return 'fail'
    return 'success'
###############################################################################
# Test importing linear units defined with +vunits
def osr_proj4_26():
    if not have_proj480():
        return 'skip'
    # Named vertical units must round-trip unchanged.
    cases = [("+proj=merc +geoidgrids=foo +vunits=m", '+vunits=m'),
             ("+proj=merc +geoidgrids=foo +vunits=ft", '+vunits=ft'),
             ("+proj=merc +geoidgrids=foo +vunits=us-yd", '+vunits=us-yd')]
    for src, expected in cases:
        srs = osr.SpatialReference()
        srs.ImportFromProj4(src)
        out = srs.ExportToProj4()
        if out.find(expected) < 0:
            gdaltest.post_reason('fail')
            print(out)
            return 'fail'
    return 'success'
###############################################################################
# Test geostationary +sweep (#6030)
def osr_proj4_27():
    """Test that the geostationary +sweep parameter round-trips (#6030)."""

    if not have_proj480():
        return 'skip'

    proj4str = '+proj=geos +h=35785831 +lon_0=0 +datum=WGS84 +sweep=x +units=m'
    srs = osr.SpatialReference()
    srs.ImportFromProj4(proj4str)
    got = srs.ExportToProj4()
    if proj4str not in got:
        gdaltest.post_reason('fail')
        print(got)
        return 'fail'

    return 'success'
###############################################################################
# Test importing +init=epsg: with an override
def osr_proj4_28():
    """Test importing +init=epsg: combined with an override parameter.

    The +units=cm override must prevent the result from being identified
    as plain EPSG:32631.
    """

    srs = osr.SpatialReference()
    srs.ImportFromProj4("+init=epsg:32631 +units=cm")
    got = srs.ExportToWkt()
    if '32631' in got:
        gdaltest.post_reason('fail')
        print(got)
        return 'fail'

    return 'success'
def osr_proj4_28_missing_proj_epsg_dict():
    """Re-run osr_proj4_28 in a subprocess where the proj EPSG dictionary
    is unavailable (see the __main__ block, which sets PROJ_LIB to a
    non-existent directory for that mode)."""

    python_exe = sys.executable
    if sys.platform == 'win32':
        python_exe = python_exe.replace('\\', '/')

    output = gdaltest.runexternal(python_exe + ' osr_proj4.py osr_proj4_28')
    if 'fail' in output:
        print(output)
        return 'fail'

    return 'success'
# Registry of all tests in this module, consumed by gdaltest.run_tests()
# in the __main__ block below.
gdaltest_list = [
    osr_proj4_1,
    osr_proj4_2,
    osr_proj4_3,
    osr_proj4_4,
    osr_proj4_5,
    osr_proj4_6,
    osr_proj4_7,
    osr_proj4_8,
    osr_proj4_9,
    osr_proj4_10,
    osr_proj4_11,
    osr_proj4_12,
    osr_proj4_13,
    osr_proj4_14,
    osr_proj4_15,
    osr_proj4_16,
    osr_proj4_17,
    osr_proj4_18,
    osr_proj4_19,
    osr_proj4_20,
    osr_proj4_21,
    osr_proj4_22,
    osr_proj4_23,
    osr_proj4_24,
    osr_proj4_25,
    osr_proj4_26,
    osr_proj4_27,
    osr_proj4_28,
    osr_proj4_28_missing_proj_epsg_dict
]
if __name__ == '__main__':
    # Sub-process mode used by osr_proj4_28_missing_proj_epsg_dict: when the
    # script is invoked with the single argument "osr_proj4_28", point
    # PROJ_LIB at a non-existent directory so that test runs without the
    # proj EPSG dictionary, run only that test, and exit.
    if len(sys.argv) == 2 and sys.argv[1] == "osr_proj4_28":
        os.putenv('PROJ_LIB', '/i/dont_exist')
        gdaltest.run_tests( [ osr_proj4_28 ] )
        sys.exit(0)
    # Normal mode: run the whole suite.
    gdaltest.setup_run( 'osr_proj4' )
    gdaltest.run_tests( gdaltest_list )
    gdaltest.summarize()
|
en
| 0.289862
|
#!/usr/bin/env python # -*- coding: utf-8 -*- ############################################################################### # $Id$ # # Project: GDAL/OGR Test Suite # Purpose: Test some PROJ.4 specific translation issues. # Author: <NAME> <<EMAIL>> # ############################################################################### # Copyright (c) 2003, <NAME> <<EMAIL>> # Copyright (c) 2009-2013, <NAME> <even dot rouault at mines-paris dot org> # Copyright (c) 2014, <NAME> <kyle at pobox dot com> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. ############################################################################### ############################################################################### # Return True if proj is at least 4.8.0 # Proj4.8.0 has introduced the pj_etmerc() function. Test for it ############################################################################### # Test the +k_0 flag works as well as +k when consuming PROJ.4 format. 
# This is from Bugzilla bug 355. # ############################################################################### # Verify that we can import strings with parameter values that are exponents # and contain a plus sign. As per bug 355 in GDAL/OGR's bugzilla. # ############################################################################### # Verify that empty srs'es don't cause a crash (#1718). # ############################################################################### # Verify that unrecognized projections return an error, not those # annoying ellipsoid-only results. # ############################################################################### # Verify that prime meridians are preserved when round tripping. (#1940) # ############################################################################### # Confirm handling of non-zero latitude of origin mercator (#3026) # PROJCS["unnamed", GEOGCS["WGS 84", DATUM["WGS_1984", SPHEROID["WGS 84",6378137,298.257223563, AUTHORITY["EPSG","7030"]], AUTHORITY["EPSG","6326"]], PRIMEM["Greenwich",0], UNIT["degree",0.0174532925199433], AUTHORITY["EPSG","4326"]], PROJECTION["Mercator_1SP"], PARAMETER["latitude_of_origin",46.1333331], PARAMETER["central_meridian",0], PARAMETER["scale_factor",1], PARAMETER["false_easting",1000], PARAMETER["false_northing",2000], UNIT["metre",1, AUTHORITY["EPSG","9001"]]] # Translate back - should be mercator 1sp PROJCS["unnamed", GEOGCS["WGS 84", DATUM["WGS_1984", SPHEROID["WGS 84",6378137,298.257223563, AUTHORITY["EPSG","7030"]], AUTHORITY["EPSG","6326"]], PRIMEM["Greenwich",0, AUTHORITY["EPSG","8901"]], UNIT["degree",0.0174532925199433, AUTHORITY["EPSG","9122"]], AUTHORITY["EPSG","4326"]], PROJECTION["Mercator_2SP"], PARAMETER["standard_parallel_1",46.1333331], PARAMETER["central_meridian",0], PARAMETER["false_easting",1000], PARAMETER["false_northing",2000], UNIT["Meter",1]] ############################################################################### # Confirm handling of somerc (#3032). 
# PROJCS["unnamed", GEOGCS["GRS 67(IUGG 1967)", DATUM["unknown", SPHEROID["GRS67",6378160,298.247167427], TOWGS84[52.17,-71.82,-14.9,0,0,0,0]], PRIMEM["Greenwich",0], UNIT["degree",0.0174532925199433]], PROJECTION["Hotine_Oblique_Mercator_Azimuth_Center"], PARAMETER["latitude_of_center",47.14439372222222], PARAMETER["longitude_of_center",19.04857177777778], PARAMETER["azimuth",90], PARAMETER["rectified_grid_angle",90], PARAMETER["scale_factor",0.99993], PARAMETER["false_easting",650000], PARAMETER["false_northing",200000], UNIT["Meter",1]] ############################################################################### # Check EPSG:3857, confirm Google Mercator hackery. ############################################################################### # NAD27 is a bit special - make sure no towgs84 values come through. # ############################################################################### # Does geocentric work okay? # ############################################################################### # Test round-tripping of all supported projection methods # #'+proj=stere +lat_0=1 +lon_0=2 +x_0=3 +y_0=4', #'+proj=eqc +lat_0=1 +lon_0=2 +x_0=3 +y_0=4', # Disabled because proj-4.7.0-4.fc15.x86_64 crashes on that #print(proj4str) ############################################################################### # Test importing +init=epsg:XXX # GEOGCS["WGS 84", DATUM["WGS_1984", SPHEROID["WGS 84",6378137,298.257223563, AUTHORITY["EPSG","7030"]], AUTHORITY["EPSG","6326"]], PRIMEM["Greenwich",0, AUTHORITY["EPSG","8901"]], UNIT["degree",0.0174532925199433, AUTHORITY["EPSG","9108"]], AUTHORITY["EPSG","4326"]] GEOGCS["WGS 84 ############################################################################### # Test error cases # #None, ############################################################################### # Test etmerc (#4853) # # Test importing etmerc PROJCS["unnamed", GEOGCS["WGS 84", DATUM["WGS_1984", SPHEROID["WGS 84",6378137,298.257223563, 
AUTHORITY["EPSG","7030"]], AUTHORITY["EPSG","6326"]], PRIMEM["Greenwich",0, AUTHORITY["EPSG","8901"]], UNIT["degree",0.0174532925199433, AUTHORITY["EPSG","9122"]], AUTHORITY["EPSG","4326"]], PROJECTION["Transverse_Mercator"], PARAMETER["latitude_of_origin",0], PARAMETER["central_meridian",9], PARAMETER["scale_factor",0.9996], PARAMETER["false_easting",500000], PARAMETER["false_northing",0], UNIT["Meter",1], EXTENSION["PROJ4","+proj=etmerc +lat_0=0 +lon_0=9 +k=0.9996 +units=m +x_0=500000 +datum=WGS84 +nodefs"]] # Test exporting standard Transverse_Mercator, without any particular option # Test exporting standard Transverse_Mercator, with OSR_USE_ETMERC=YES # Test exporting standard Transverse_Mercator, with OSR_USE_ETMERC=NO ############################################################################### # Test other authorities than EPSG, e.g. IGNF:XXXX # ############################################################################### # Test unit parsing # ############################################################################### # Test unit parsing for name assignment # ############################################################################### # Test fix for #5511 # ############################################################################### # Test EXTENSION and AUTHORITY in DATUM ############################################################################### # Test EXTENSION in GOGCS ############################################################################### # Test importing datum other than WGS84, WGS72, NAD27 or NAD83 ############################################################################### # Test importing ellipsoid defined with +R ############################################################################### # Test importing ellipsoid defined with +a and +f # +f=0 particular case ############################################################################### # Test importing linear units defined with +to_meter # Intl foot # US foot # 
unknown ############################################################################### # Test importing linear units defined with +vto_meter # Intl foot # US foot # Unknown ############################################################################### # Test importing linear units defined with +vunits # Intl foot # US yard ############################################################################### # Test geostationary +sweep (#6030) ############################################################################### # Test importing +init=epsg: with an override
| 1.558318
| 2
|
test/aqua/test_hhl.py
|
SooluThomas/qiskit-aqua
| 0
|
6627654
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test HHL """
import unittest
from test.aqua import QiskitAquaTestCase
import numpy as np
from ddt import ddt, idata, unpack
from qiskit import BasicAer
from qiskit.quantum_info import state_fidelity
from qiskit.aqua import aqua_globals, QuantumInstance
from qiskit.aqua.algorithms import HHL, ExactLSsolver
from qiskit.aqua.utils import random_matrix_generator as rmg
from qiskit.aqua.operators import MatrixOperator
from qiskit.aqua.components.eigs import EigsQPE
from qiskit.aqua.components.reciprocals import LookupRotation, LongDivision
from qiskit.aqua.components.qfts import Standard as StandardQFTS
from qiskit.aqua.components.iqfts import Standard as StandardIQFTS
from qiskit.aqua.components.initial_states import Custom
@ddt
class TestHHL(QiskitAquaTestCase):
    """HHL tests.

    Each test solves a small linear system with the quantum HHL algorithm
    and compares the (normalized) solution against the classical
    ExactLSsolver reference via state fidelity.
    """

    def setUp(self):
        super(TestHHL, self).setUp()
        self.random_seed = 0
        aqua_globals.random_seed = self.random_seed

    @staticmethod
    def _create_eigs(matrix, num_ancillae, negative_evals):
        """Build the QPE-based eigenvalue-estimation component for *matrix*.

        When negative eigenvalues are expected, one extra ancilla is added as
        a sign-flag qubit and dedicated (I)QFTs of reduced size are supplied.
        """
        # Adding an additional flag qubit for negative eigenvalues
        ne_qfts = [None, None]
        if negative_evals:
            num_ancillae += 1
            ne_qfts = [StandardQFTS(num_ancillae - 1), StandardIQFTS(num_ancillae - 1)]
        return EigsQPE(MatrixOperator(matrix=matrix),
                       StandardIQFTS(num_ancillae),
                       num_time_slices=1,
                       num_ancillae=num_ancillae,
                       expansion_mode='suzuki',
                       expansion_order=2,
                       evo_time=None,
                       negative_evals=negative_evals,
                       ne_qfts=ne_qfts)

    @staticmethod
    def _statevector_instance():
        """Return a seeded statevector-simulator QuantumInstance."""
        return QuantumInstance(BasicAer.get_backend('statevector_simulator'),
                               seed_simulator=aqua_globals.random_seed,
                               seed_transpiler=aqua_globals.random_seed)

    def _run_hhl(self, matrix, vector, num_ancillae, negative_evals,
                 make_reci, quantum_instance):
        """Run HHL on (matrix, vector) and return the fidelity of its
        normalized solution against the ExactLSsolver reference.

        Args:
            matrix: input system matrix (list or ndarray).
            vector: right-hand-side vector.
            num_ancillae: ancilla count passed to _create_eigs.
            negative_evals: whether negative eigenvalues are expected.
            make_reci: callable taking the eigs component and returning the
                reciprocal-rotation component to use.
            quantum_instance: backend wrapper to execute the circuit on.
        """
        # classical reference solution
        ref_result = ExactLSsolver(matrix, vector).run()
        ref_solution = ref_result['solution']
        ref_normed = ref_solution / np.linalg.norm(ref_solution)
        # run hhl
        orig_size = len(vector)
        matrix, vector, truncate_powerdim, truncate_hermitian = \
            HHL.matrix_resize(matrix, vector)
        # Initialize eigenvalue finding module
        eigs = TestHHL._create_eigs(matrix, num_ancillae, negative_evals)
        num_q, num_a = eigs.get_register_sizes()
        # Initialize initial state module
        init_state = Custom(num_q, state_vector=vector)
        # Initialize reciprocal rotation module
        reci = make_reci(eigs)
        algo = HHL(matrix, vector, truncate_powerdim, truncate_hermitian, eigs,
                   init_state, reci, num_q, num_a, orig_size)
        hhl_result = algo.run(quantum_instance)
        hhl_solution = hhl_result['solution']
        hhl_normed = hhl_solution / np.linalg.norm(hhl_solution)
        # compare results
        fidelity = state_fidelity(ref_normed, hhl_normed)
        self.log.debug('HHL solution vector: %s', hhl_solution)
        self.log.debug('algebraic solution vector: %s', ref_normed)
        self.log.debug('fidelity HHL to algebraic: %s', fidelity)
        self.log.debug('probability of result: %s', hhl_result["probability_result"])
        return fidelity

    @idata([[[0, 1]], [[1, 0]], [[1, 0.1]], [[1, 1]], [[1, 10]]])
    @unpack
    def test_hhl_diagonal(self, vector):
        """ hhl diagonal test """
        self.log.debug('Testing HHL simple test in mode Lookup with statevector simulator')
        fidelity = self._run_hhl(
            [[1, 0], [0, 1]], vector, 3, False,
            lambda eigs: LookupRotation(negative_evals=eigs._negative_evals,
                                        evo_time=eigs._evo_time),
            self._statevector_instance())
        np.testing.assert_approx_equal(fidelity, 1, significant=5)

    @idata([[[-1, 0]], [[0, -1]], [[-1, -1]]])
    @unpack
    def test_hhl_diagonal_negative(self, vector):
        """ hhl diagonal negative test """
        self.log.debug('Testing HHL simple test in mode Lookup with statevector simulator')
        fidelity = self._run_hhl(
            [[1, 0], [0, 1]], vector, 4, True,
            lambda eigs: LookupRotation(negative_evals=eigs._negative_evals,
                                        evo_time=eigs._evo_time),
            self._statevector_instance())
        np.testing.assert_approx_equal(fidelity, 1, significant=5)

    @idata([[[0, 1]], [[1, 0.1]], [[1, 1]]])
    @unpack
    def test_hhl_diagonal_longdivison(self, vector):
        """ hhl diagonal long division test """
        self.log.debug('Testing HHL simple test in mode LongDivision and statevector simulator')
        fidelity = self._run_hhl(
            [[1, 0], [0, 1]], vector, 3, False,
            lambda eigs: LongDivision(scale=1.0,
                                      negative_evals=eigs._negative_evals,
                                      evo_time=eigs._evo_time),
            self._statevector_instance())
        np.testing.assert_approx_equal(fidelity, 1, significant=5)

    @idata([[[0, 1]], [[1, 0]], [[1, 0.1]], [[1, 1]], [[1, 10]]])
    @unpack
    def test_hhl_diagonal_qasm(self, vector):
        """ hhl diagonal qasm test """
        self.log.debug('Testing HHL simple test with qasm simulator')
        qasm_instance = QuantumInstance(BasicAer.get_backend('qasm_simulator'),
                                        shots=1000,
                                        seed_simulator=aqua_globals.random_seed,
                                        seed_transpiler=aqua_globals.random_seed)
        # sampling noise from 1000 shots: only 1 significant digit expected
        fidelity = self._run_hhl(
            [[1, 0], [0, 1]], vector, 3, False,
            lambda eigs: LookupRotation(negative_evals=eigs._negative_evals,
                                        scale=0.5, evo_time=eigs._evo_time),
            qasm_instance)
        np.testing.assert_approx_equal(fidelity, 1, significant=1)

    @idata([[3, 4], [5, 5]])
    @unpack
    def test_hhl_diagonal_other_dim(self, n, num_ancillary):
        """ hhl diagonal other dim test """
        self.log.debug('Testing HHL with matrix dimension other than 2**n')
        matrix = rmg.random_diag(n, eigrange=[0, 1])
        vector = aqua_globals.random.random_sample(n)
        fidelity = self._run_hhl(
            matrix, vector, num_ancillary, True,
            lambda eigs: LookupRotation(negative_evals=eigs._negative_evals,
                                        evo_time=eigs._evo_time),
            self._statevector_instance())
        np.testing.assert_approx_equal(fidelity, 1, significant=1)

    def test_hhl_negative_eigs(self):
        """ hhl negative eigs test """
        self.log.debug('Testing HHL with matrix with negative eigenvalues')
        n = 2
        matrix = rmg.random_diag(n, eigrange=[-1, 1])
        vector = aqua_globals.random.random_sample(n)
        fidelity = self._run_hhl(
            matrix, vector, 4, True,
            lambda eigs: LookupRotation(negative_evals=eigs._negative_evals,
                                        evo_time=eigs._evo_time),
            self._statevector_instance())
        np.testing.assert_approx_equal(fidelity, 1, significant=3)

    def test_hhl_random_hermitian(self):
        """ hhl random hermitian test """
        self.log.debug('Testing HHL with random hermitian matrix')
        n = 2
        matrix = rmg.random_hermitian(n, eigrange=[0, 1])
        vector = aqua_globals.random.random_sample(n)
        fidelity = self._run_hhl(
            matrix, vector, 4, False,
            lambda eigs: LookupRotation(negative_evals=eigs._negative_evals,
                                        evo_time=eigs._evo_time),
            self._statevector_instance())
        np.testing.assert_approx_equal(fidelity, 1, significant=1)

    def test_hhl_non_hermitian(self):
        """ hhl non hermitian test """
        self.log.debug('Testing HHL with simple non-hermitian matrix')
        fidelity = self._run_hhl(
            [[1, 1], [2, 1]], [1, 0], 6, True,
            lambda eigs: LookupRotation(negative_evals=eigs._negative_evals,
                                        evo_time=eigs._evo_time),
            self._statevector_instance())
        # non-hermitian resize loses precision; only a loose bound is checked
        self.assertGreater(fidelity, 0.8)
if __name__ == '__main__':
    # Run the HHL test suite when this file is executed directly.
    unittest.main()
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test HHL """
import unittest
from test.aqua import QiskitAquaTestCase
import numpy as np
from ddt import ddt, idata, unpack
from qiskit import BasicAer
from qiskit.quantum_info import state_fidelity
from qiskit.aqua import aqua_globals, QuantumInstance
from qiskit.aqua.algorithms import HHL, ExactLSsolver
from qiskit.aqua.utils import random_matrix_generator as rmg
from qiskit.aqua.operators import MatrixOperator
from qiskit.aqua.components.eigs import EigsQPE
from qiskit.aqua.components.reciprocals import LookupRotation, LongDivision
from qiskit.aqua.components.qfts import Standard as StandardQFTS
from qiskit.aqua.components.iqfts import Standard as StandardIQFTS
from qiskit.aqua.components.initial_states import Custom
@ddt
class TestHHL(QiskitAquaTestCase):
"""HHL tests."""
def setUp(self):
super(TestHHL, self).setUp()
self.random_seed = 0
aqua_globals.random_seed = self.random_seed
@staticmethod
def _create_eigs(matrix, num_ancillae, negative_evals):
# Adding an additional flag qubit for negative eigenvalues
ne_qfts = [None, None]
if negative_evals:
num_ancillae += 1
ne_qfts = [StandardQFTS(num_ancillae - 1), StandardIQFTS(num_ancillae - 1)]
return EigsQPE(MatrixOperator(matrix=matrix),
StandardIQFTS(num_ancillae),
num_time_slices=1,
num_ancillae=num_ancillae,
expansion_mode='suzuki',
expansion_order=2,
evo_time=None,
negative_evals=negative_evals,
ne_qfts=ne_qfts)
@idata([[[0, 1]], [[1, 0]], [[1, 0.1]], [[1, 1]], [[1, 10]]])
@unpack
def test_hhl_diagonal(self, vector):
""" hhl diagonal test """
self.log.debug('Testing HHL simple test in mode Lookup with statevector simulator')
matrix = [[1, 0], [0, 1]]
# run ExactLSsolver
ref_result = ExactLSsolver(matrix, vector).run()
ref_solution = ref_result['solution']
ref_normed = ref_solution/np.linalg.norm(ref_solution)
# run hhl
orig_size = len(vector)
matrix, vector, truncate_powerdim, truncate_hermitian = HHL.matrix_resize(matrix, vector)
# Initialize eigenvalue finding module
eigs = TestHHL._create_eigs(matrix, 3, False)
num_q, num_a = eigs.get_register_sizes()
# Initialize initial state module
init_state = Custom(num_q, state_vector=vector)
# Initialize reciprocal rotation module
reci = LookupRotation(negative_evals=eigs._negative_evals, evo_time=eigs._evo_time)
algo = HHL(matrix, vector, truncate_powerdim, truncate_hermitian, eigs,
init_state, reci, num_q, num_a, orig_size)
hhl_result = algo.run(QuantumInstance(BasicAer.get_backend('statevector_simulator'),
seed_simulator=aqua_globals.random_seed,
seed_transpiler=aqua_globals.random_seed))
hhl_solution = hhl_result['solution']
hhl_normed = hhl_solution/np.linalg.norm(hhl_solution)
# compare results
fidelity = state_fidelity(ref_normed, hhl_normed)
np.testing.assert_approx_equal(fidelity, 1, significant=5)
self.log.debug('HHL solution vector: %s', hhl_solution)
self.log.debug('algebraic solution vector: %s', ref_solution)
self.log.debug('fidelity HHL to algebraic: %s', fidelity)
self.log.debug('probability of result: %s', hhl_result["probability_result"])
@idata([[[-1, 0]], [[0, -1]], [[-1, -1]]])
@unpack
def test_hhl_diagonal_negative(self, vector):
""" hhl diagonal negative test """
self.log.debug('Testing HHL simple test in mode Lookup with statevector simulator')
matrix = [[1, 0], [0, 1]]
# run ExactLSsolver
ref_result = ExactLSsolver(matrix, vector).run()
ref_solution = ref_result['solution']
ref_normed = ref_solution/np.linalg.norm(ref_solution)
# run hhl
orig_size = len(vector)
matrix, vector, truncate_powerdim, truncate_hermitian = HHL.matrix_resize(matrix, vector)
# Initialize eigenvalue finding module
eigs = TestHHL._create_eigs(matrix, 4, True)
num_q, num_a = eigs.get_register_sizes()
# Initialize initial state module
init_state = Custom(num_q, state_vector=vector)
# Initialize reciprocal rotation module
reci = LookupRotation(negative_evals=eigs._negative_evals, evo_time=eigs._evo_time)
algo = HHL(matrix, vector, truncate_powerdim, truncate_hermitian, eigs,
init_state, reci, num_q, num_a, orig_size)
hhl_result = algo.run(QuantumInstance(BasicAer.get_backend('statevector_simulator'),
seed_simulator=aqua_globals.random_seed,
seed_transpiler=aqua_globals.random_seed))
hhl_solution = hhl_result['solution']
hhl_normed = hhl_solution/np.linalg.norm(hhl_solution)
# compare results
fidelity = state_fidelity(ref_normed, hhl_normed)
np.testing.assert_approx_equal(fidelity, 1, significant=5)
self.log.debug('HHL solution vector: %s', hhl_solution)
self.log.debug('algebraic solution vector: %s', ref_normed)
self.log.debug('fidelity HHL to algebraic: %s', fidelity)
self.log.debug('probability of result: %s', hhl_result["probability_result"])
@idata([[[0, 1]], [[1, 0.1]], [[1, 1]]])
@unpack
def test_hhl_diagonal_longdivison(self, vector):
""" hhl diagonal long division test """
self.log.debug('Testing HHL simple test in mode LongDivision and statevector simulator')
matrix = [[1, 0], [0, 1]]
# run ExactLSsolver
ref_result = ExactLSsolver(matrix, vector).run()
ref_solution = ref_result['solution']
ref_normed = ref_solution/np.linalg.norm(ref_solution)
# run hhl
orig_size = len(vector)
matrix, vector, truncate_powerdim, truncate_hermitian = HHL.matrix_resize(matrix, vector)
# Initialize eigenvalue finding module
eigs = TestHHL._create_eigs(matrix, 3, False)
num_q, num_a = eigs.get_register_sizes()
# Initialize initial state module
init_state = Custom(num_q, state_vector=vector)
# Initialize reciprocal
reci = LongDivision(scale=1.0, negative_evals=eigs._negative_evals, evo_time=eigs._evo_time)
algo = HHL(matrix, vector, truncate_powerdim, truncate_hermitian, eigs,
init_state, reci, num_q, num_a, orig_size)
hhl_result = algo.run(QuantumInstance(BasicAer.get_backend('statevector_simulator'),
seed_simulator=aqua_globals.random_seed,
seed_transpiler=aqua_globals.random_seed))
hhl_solution = hhl_result['solution']
hhl_normed = hhl_solution/np.linalg.norm(hhl_solution)
# compare results
fidelity = state_fidelity(ref_normed, hhl_normed)
np.testing.assert_approx_equal(fidelity, 1, significant=5)
self.log.debug('HHL solution vector: %s', hhl_solution)
self.log.debug('algebraic solution vector: %s', ref_normed)
self.log.debug('fidelity HHL to algebraic: %s', fidelity)
self.log.debug('probability of result: %s', hhl_result["probability_result"])
@idata([[[0, 1]], [[1, 0]], [[1, 0.1]], [[1, 1]], [[1, 10]]])
@unpack
def test_hhl_diagonal_qasm(self, vector):
    """ hhl diagonal qasm test """
    self.log.debug('Testing HHL simple test with qasm simulator')
    matrix = [[1, 0], [0, 1]]
    # Classical reference solution, unit-normalized.
    classical = ExactLSsolver(matrix, vector).run()['solution']
    classical_unit = classical / np.linalg.norm(classical)
    # Resize to a power-of-two, Hermitian problem.
    orig_size = len(vector)
    matrix, vector, truncate_powerdim, truncate_hermitian = HHL.matrix_resize(matrix, vector)
    # Eigenvalue-estimation module with 3 ancillae.
    eigs = TestHHL._create_eigs(matrix, 3, False)
    num_q, num_a = eigs.get_register_sizes()
    # State preparation and lookup-table reciprocal rotation.
    init_state = Custom(num_q, state_vector=vector)
    reci = LookupRotation(negative_evals=eigs._negative_evals,
                          scale=0.5, evo_time=eigs._evo_time)
    quantum_instance = QuantumInstance(BasicAer.get_backend('qasm_simulator'), shots=1000,
                                       seed_simulator=aqua_globals.random_seed,
                                       seed_transpiler=aqua_globals.random_seed)
    hhl_result = HHL(matrix, vector, truncate_powerdim, truncate_hermitian, eigs,
                     init_state, reci, num_q, num_a, orig_size).run(quantum_instance)
    quantum = hhl_result['solution']
    quantum_unit = quantum / np.linalg.norm(quantum)
    # Shot noise on the qasm simulator: only 1 significant digit of agreement.
    fidelity = state_fidelity(classical_unit, quantum_unit)
    np.testing.assert_approx_equal(fidelity, 1, significant=1)
    self.log.debug('HHL solution vector: %s', quantum)
    self.log.debug('algebraic solution vector: %s', classical_unit)
    self.log.debug('fidelity HHL to algebraic: %s', fidelity)
    self.log.debug('probability of result: %s', hhl_result["probability_result"])
@idata([[3, 4], [5, 5]])
@unpack
def test_hhl_diagonal_other_dim(self, n, num_ancillary):
    """ hhl diagonal other dim test """
    self.log.debug('Testing HHL with matrix dimension other than 2**n')
    matrix = rmg.random_diag(n, eigrange=[0, 1])
    vector = aqua_globals.random.random_sample(n)
    # Classical reference solution; keep the raw vector around for logging.
    classical = ExactLSsolver(matrix, vector).run()['solution']
    classical_unit = classical / np.linalg.norm(classical)
    # Pad/resize the non-power-of-two system for HHL.
    orig_size = len(vector)
    matrix, vector, truncate_powerdim, truncate_hermitian = HHL.matrix_resize(matrix, vector)
    # Eigenvalue-estimation module; negative-eigenvalue handling enabled.
    eigs = TestHHL._create_eigs(matrix, num_ancillary, True)
    num_q, num_a = eigs.get_register_sizes()
    # State preparation and lookup-table reciprocal rotation.
    init_state = Custom(num_q, state_vector=vector)
    reci = LookupRotation(negative_evals=eigs._negative_evals, evo_time=eigs._evo_time)
    quantum_instance = QuantumInstance(BasicAer.get_backend('statevector_simulator'),
                                       seed_simulator=aqua_globals.random_seed,
                                       seed_transpiler=aqua_globals.random_seed)
    hhl_result = HHL(matrix, vector, truncate_powerdim, truncate_hermitian, eigs,
                     init_state, reci, num_q, num_a, orig_size).run(quantum_instance)
    quantum = hhl_result['solution']
    quantum_unit = quantum / np.linalg.norm(quantum)
    # Random matrices with few ancillae: accept 1 significant digit.
    fidelity = state_fidelity(classical_unit, quantum_unit)
    np.testing.assert_approx_equal(fidelity, 1, significant=1)
    self.log.debug('HHL solution vector: %s', quantum)
    self.log.debug('algebraic solution vector: %s', classical)
    self.log.debug('fidelity HHL to algebraic: %s', fidelity)
    self.log.debug('probability of result: %s', hhl_result["probability_result"])
def test_hhl_negative_eigs(self):
    """ hhl negative eigs test """
    self.log.debug('Testing HHL with matrix with negative eigenvalues')
    n = 2
    matrix = rmg.random_diag(n, eigrange=[-1, 1])
    vector = aqua_globals.random.random_sample(n)
    # Classical reference solution, unit-normalized.
    classical = ExactLSsolver(matrix, vector).run()['solution']
    classical_unit = classical / np.linalg.norm(classical)
    # Resize to a power-of-two, Hermitian problem.
    orig_size = len(vector)
    matrix, vector, truncate_powerdim, truncate_hermitian = HHL.matrix_resize(matrix, vector)
    # Eigenvalue estimation with 4 ancillae; negative-eigenvalue flag qubit on.
    eigs = TestHHL._create_eigs(matrix, 4, True)
    num_q, num_a = eigs.get_register_sizes()
    # State preparation and lookup-table reciprocal rotation.
    init_state = Custom(num_q, state_vector=vector)
    reci = LookupRotation(negative_evals=eigs._negative_evals, evo_time=eigs._evo_time)
    quantum_instance = QuantumInstance(BasicAer.get_backend('statevector_simulator'),
                                       seed_simulator=aqua_globals.random_seed,
                                       seed_transpiler=aqua_globals.random_seed)
    hhl_result = HHL(matrix, vector, truncate_powerdim, truncate_hermitian, eigs,
                     init_state, reci, num_q, num_a, orig_size).run(quantum_instance)
    quantum = hhl_result["solution"]
    quantum_unit = quantum / np.linalg.norm(quantum)
    # Expect 3 significant digits of agreement with the classical solution.
    fidelity = state_fidelity(classical_unit, quantum_unit)
    np.testing.assert_approx_equal(fidelity, 1, significant=3)
    self.log.debug('HHL solution vector: %s', quantum)
    self.log.debug('algebraic solution vector: %s', classical_unit)
    self.log.debug('fidelity HHL to algebraic: %s', fidelity)
    self.log.debug('probability of result: %s', hhl_result["probability_result"])
def test_hhl_random_hermitian(self):
    """ hhl random hermitian test """
    self.log.debug('Testing HHL with random hermitian matrix')
    n = 2
    matrix = rmg.random_hermitian(n, eigrange=[0, 1])
    vector = aqua_globals.random.random_sample(n)
    # Classical reference solution, unit-normalized.
    classical = ExactLSsolver(matrix, vector).run()['solution']
    classical_unit = classical / np.linalg.norm(classical)
    # Resize to a power-of-two, Hermitian problem.
    orig_size = len(vector)
    matrix, vector, truncate_powerdim, truncate_hermitian = HHL.matrix_resize(matrix, vector)
    # Eigenvalue estimation with 4 ancillae; no negative-eigenvalue handling.
    eigs = TestHHL._create_eigs(matrix, 4, False)
    num_q, num_a = eigs.get_register_sizes()
    # State preparation and lookup-table reciprocal rotation.
    init_state = Custom(num_q, state_vector=vector)
    reci = LookupRotation(negative_evals=eigs._negative_evals, evo_time=eigs._evo_time)
    quantum_instance = QuantumInstance(BasicAer.get_backend('statevector_simulator'),
                                       seed_simulator=aqua_globals.random_seed,
                                       seed_transpiler=aqua_globals.random_seed)
    hhl_result = HHL(matrix, vector, truncate_powerdim, truncate_hermitian, eigs,
                     init_state, reci, num_q, num_a, orig_size).run(quantum_instance)
    quantum = hhl_result['solution']
    quantum_unit = quantum / np.linalg.norm(quantum)
    # Random Hermitian input: accept 1 significant digit of agreement.
    fidelity = state_fidelity(classical_unit, quantum_unit)
    np.testing.assert_approx_equal(fidelity, 1, significant=1)
    self.log.debug('HHL solution vector: %s', quantum)
    self.log.debug('algebraic solution vector: %s', classical_unit)
    self.log.debug('fidelity HHL to algebraic: %s', fidelity)
    self.log.debug('probability of result: %s', hhl_result["probability_result"])
def test_hhl_non_hermitian(self):
    """ hhl non hermitian test """
    self.log.debug('Testing HHL with simple non-hermitian matrix')
    matrix = [[1, 1], [2, 1]]
    vector = [1, 0]
    # Classical reference solution; keep the raw vector for logging.
    classical = ExactLSsolver(matrix, vector).run()['solution']
    classical_unit = classical / np.linalg.norm(classical)
    # The non-Hermitian matrix is embedded into a larger Hermitian one here.
    orig_size = len(vector)
    matrix, vector, truncate_powerdim, truncate_hermitian = HHL.matrix_resize(matrix, vector)
    # Eigenvalue estimation with 6 ancillae; negative eigenvalues enabled.
    eigs = TestHHL._create_eigs(matrix, 6, True)
    num_q, num_a = eigs.get_register_sizes()
    # State preparation and lookup-table reciprocal rotation.
    init_state = Custom(num_q, state_vector=vector)
    reci = LookupRotation(negative_evals=eigs._negative_evals, evo_time=eigs._evo_time)
    quantum_instance = QuantumInstance(BasicAer.get_backend('statevector_simulator'),
                                       seed_simulator=aqua_globals.random_seed,
                                       seed_transpiler=aqua_globals.random_seed)
    hhl_result = HHL(matrix, vector, truncate_powerdim, truncate_hermitian, eigs,
                     init_state, reci, num_q, num_a, orig_size).run(quantum_instance)
    quantum = hhl_result['solution']
    quantum_unit = quantum / np.linalg.norm(quantum)
    # Hermitian embedding is approximate: only require fidelity > 0.8.
    fidelity = state_fidelity(classical_unit, quantum_unit)
    self.assertGreater(fidelity, 0.8)
    self.log.debug('HHL solution vector: %s', quantum)
    self.log.debug('algebraic solution vector: %s', classical)
    self.log.debug('fidelity HHL to algebraic: %s', fidelity)
    self.log.debug('probability of result: %s', hhl_result["probability_result"])
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
|
en
| 0.562061
|
# -*- coding: utf-8 -*- # This code is part of Qiskit. # # (C) Copyright IBM 2018, 2020. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. Test HHL HHL tests. # Adding an additional flag qubit for negative eigenvalues hhl diagonal test # run ExactLSsolver # run hhl # Initialize eigenvalue finding module # Initialize initial state module # Initialize reciprocal rotation module # compare results hhl diagonal negative test # run ExactLSsolver # run hhl # Initialize eigenvalue finding module # Initialize initial state module # Initialize reciprocal rotation module # compare results hhl diagonal long division test # run ExactLSsolver # run hhl # Initialize eigenvalue finding module # Initialize initial state module # Initialize reciprocal # compare results hhl diagonal qasm test # run ExactLSsolver # run hhl # Initialize eigenvalue finding module # Initialize initial state module # Initialize reciprocal rotation module # compare results hhl diagonal other dim test # run ExactLSsolver # run hhl # Initialize eigenvalue finding module # Initialize initial state module # Initialize reciprocal rotation module # compare result hhl negative eigs test # run ExactLSsolver # run hhl # Initialize eigenvalue finding module # Initialize initial state module # Initialize reciprocal rotation module # compare results hhl random hermitian test # run ExactLSsolver # run hhl # Initialize eigenvalue finding module # Initialize initial state module # Initialize reciprocal rotation module # compare result hhl non hermitian test # run ExactLSsolver # run hhl # Initialize eigenvalue finding module # Initialize initial state module # 
Initialize reciprocal rotation module # compare result
| 2.120059
| 2
|
examples/BigBoy/mediumboy_model.py
|
attraylor/poke-env
| 4
|
6627655
|
import gym
import math
import random
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from collections import namedtuple
from itertools import count
from tqdm import trange
from copy import deepcopy
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
# from PIL import Image
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
# import torchvision.transforms as T
from poke_env.player_configuration import PlayerConfiguration
from poke_env.player.env_player import Gen7EnvSinglePlayer
from poke_env.player.random_player import RandomPlayer
from poke_env.server_configuration import LocalhostServerConfiguration
from poke_env.player.player import Player
from sklearn.decomposition import PCA #Grab PCA functions
import matplotlib.pyplot as plt
from poke_env.data import STR_TO_ID, ID_TO_STR, MOVES
from poke_env.utils import to_id_str
import relevant_conditions
def homogenize_vectors(vectors):
    """Convert each element of *vectors* to a rank-2 float tensor.

    Rank-1 inputs gain a leading batch axis; rank-3 inputs drop their
    leading axis; rank-2 inputs pass through unchanged.
    """
    def _as_rank2(raw):
        t = torch.FloatTensor(raw)
        if t.dim() == 1:
            return t.unsqueeze(0)
        if t.dim() == 3:
            return t.squeeze(0)
        return t
    return [_as_rank2(v) for v in vectors]
# One-hot vectors for the Fire/Water/Grass type ids used by this model.
# Id 0 maps to the all-zero vector (presumably "no/unknown type" — TODO confirm
# against poke_env's type-id table).
FWG_ONEHOTS = {0: [0, 0, 0], 7: [1, 0, 0], 10: [0, 1, 0], 18: [0, 0, 1]}
# One-hot vectors keyed by species id for the six species this setup uses.
POKEMONID_ONEHOTS = {
    1036: [1, 0, 0, 0, 0, 0],
    388: [0, 1, 0, 0, 0, 0],
    984: [0, 0, 1, 0, 0, 0],
    152: [0, 0, 0, 1, 0, 0],
    439: [0, 0, 0, 0, 1, 0],
    365: [0, 0, 0, 0, 0, 1],
}
class MediumBoy_DQN(nn.Module):
    """Two-layer DQN head for a simplified Fire/Water/Grass pokemon battle.

    Encodes a battle ``state_dict`` into an 18-dim feature vector
    (4 move powers + four 3-dim FWG type one-hots + own/opponent HP) and
    maps it through an MLP to 22 action values.  The output layer is
    zero-initialized so every action starts with a Q-value of 0.
    """

    def __init__(self, config):
        super(MediumBoy_DQN, self).__init__()
        # BUGFIX: config was never stored although forward() reads self.config.
        self.config = config
        # Embedding dimension sizes: frozen one-hot table over the 18 types.
        self.type_embedding_style = "twohot"
        self.type_embedding = nn.Embedding(18, 18)
        self.type_embedding.weight.data = torch.FloatTensor(np.eye(18))
        self.type_embedding.weight.requires_grad = False
        # 4 move powers + 4 * 3 FWG one-hots + 2 HP scalars = 18 inputs.
        self.input_dim = 18
        self.hidden_dim = config.complete_state_hidden_dim
        self.output_dim = 22
        self.layers = []
        self.layer1 = nn.Linear(self.input_dim, self.hidden_dim)
        self.layer2 = nn.Linear(self.hidden_dim, self.output_dim)
        # Zero-init the head: initial Q-values are exactly 0 for all actions.
        self.layer2.weight.data.fill_(0)
        self.layer2.bias.data.fill_(0)

    def build_model(self):
        """Recompute ``input_dim`` from the config feature flags and rebuild the MLP.

        BUGFIX: this method referenced the bare name ``config`` (NameError at
        runtime) and called ``sys.exit`` without importing ``sys``.  It now
        reads ``self.config`` and raises ``NotImplementedError`` for
        unsupported representations instead of killing the process.
        """
        config = self.config
        self.input_dim = 0
        # How will the model represent typing information?
        if config.represent_types_as in ["onehot", "twohot"]:
            self.type_embedding = nn.Embedding(18, 18)
            self.type_embedding.weight.data = torch.FloatTensor(np.eye(18))
            self.type_embedding.weight.requires_grad = False
            if config.represent_types_as == "onehot":
                self.type_embedding_size = 18 * 2
            else:
                self.type_embedding_size = 18
        else:
            raise NotImplementedError("Typing rep not implemented")
        if config.include_our_pokemon_species_typing == True:
            self.input_dim += self.type_embedding_size * config.number_pokemon
        if config.include_opponent_pokemon_species_typing == True:
            self.input_dim += self.type_embedding_size  # TODO: Make this a little more modular.
        # How does the model represent species information?
        if config.represent_species_as in ["onehot"]:
            self.num_species = 6
            self.species_embedding = nn.Embedding(self.num_species, self.num_species)
            # BUGFIX: the original overwrote the 18x18 type_embedding weights
            # with a 6x6 identity here; the identity belongs to species_embedding.
            self.species_embedding.weight.data = torch.FloatTensor(np.eye(6))
            self.species_embedding.weight.requires_grad = False
            self.species_embedding_size = self.num_species
        else:
            raise NotImplementedError("Species rep not implemented")
        if config.include_our_pokemon_species_embedding == True:
            self.input_dim += self.species_embedding_size * config.number_pokemon
        if config.include_opponent_pokemon_species_embedding == True:
            self.input_dim += self.species_embedding_size  # TODO: Make this a little more modular.
        # How does the model represent health?
        if config.include_our_pokemon_health == True:
            self.input_dim += config.number_pokemon  # Todo: make more modular (just active?)
        if config.include_opponent_pokemon_health == True:
            self.input_dim += 1  # Todo: make more modular (all pokemon?)
        # How does the model represent move information?
        if config.include_our_pokemon_move_power == True:
            self.input_dim += 4  # Todo: represent back pokemon move power as well?
        if config.include_our_pokemon_move_typing == True:
            if config.represent_move_typing_as == "same_as_species":
                self.move_typing_emb_dim = 18  # Hardcoded
            else:
                raise NotImplementedError("move typing emb not implemented")
            self.input_dim += 4 * self.move_typing_emb_dim
        # Make our model
        self.hidden_dim = config.complete_state_hidden_dim
        self.output_dim = 22
        self.layers = []
        self.layer1 = nn.Linear(self.input_dim, self.hidden_dim)
        self.layer2 = nn.Linear(self.hidden_dim, self.output_dim)
        self.layer2.weight.data.fill_(0)
        self.layer2.bias.data.fill_(0)
        return

    def get_features(self, state_dict, config, verbose=False):
        """Build the feature tensor (18-dim, or [batch, 18]) from a battle state.

        BUGFIX: the original had two syntax errors (an unbalanced parenthesis
        in the torch.cat call and ``assert len(features) = ...``), called
        ``len()`` on the integer ``batch_size`` and referenced an undefined
        ``verbose``; ``verbose`` is now an optional keyword (backward
        compatible).
        """
        batch_size = len(state_dict["weather"])
        input_features = []
        # --- Generic, config-driven path.  NOTE(review): this branch looks
        # unfinished (``state_dict["team"][:]["type_ids"]`` would raise on a
        # plain list of dicts) and its result is discarded by the hard-coded
        # FWG path below; it only runs when the config flags enable it.
        if config.include_our_pokemon_species_typing == True:
            species_typing = self.type_embedding(state_dict["team"][:]["type_ids"])
            if config.represent_species_as == "twohot":
                # [batch_size, team_size (6), type_0, type_emb_dim]
                species_typing = species_typing[:, :, 0, :] + species_typing[:, :, 1, :]
            input_features.append(species_typing)
        if config.include_our_pokemon_species_typing == True:
            # TODO: modularity
            opponent_species_typing = self.type_embedding(state_dict["opponent_team"][0]["type_ids"])
            if config.represent_species_as == "twohot":
                opponent_species_typing = opponent_species_typing[:, 0, :] + opponent_species_typing[:, 1, :]
            input_features.append(opponent_species_typing)
        if config.include_our_pokemon_species_embedding == True:
            input_features.append(self.species_embedding(state_dict["team"][:]["species_id"]))
        if config.include_opponent_species_embedding == True:
            input_features.append(self.species_embedding(state_dict["opponent_team"][0]["species_id"]))  # TODO: modularity
        if input_features:
            # BUGFIX: was ``if len(batch_size) == 1`` (TypeError: batch_size is
            # an int); the cat call and assert below did not parse at all.
            if batch_size == 1:
                features = torch.cat(input_features)
            else:
                features = torch.cat(input_features, dim=1)
            assert features.shape[-1] == self.input_dim
        # --- Hard-coded FWG path: the representation actually used below. ---
        active_pokemon = state_dict["team"][0]
        backup_pokemon1 = state_dict["team"][1]
        backup_pokemon2 = state_dict["team"][2]
        move_features = torch.FloatTensor(active_pokemon["move_powers"])
        opponent_pokemon = state_dict["opponent_team"][0]
        opp_health = torch.FloatTensor(state_dict["opponent_team"][0]["hp_percentage"])
        health = torch.FloatTensor(active_pokemon["hp_percentage"])
        if len(move_features.shape) == 1:
            # Unbatched state: one FWG one-hot per (primary) type id.
            active_pokemon_type_ids = torch.FloatTensor(FWG_ONEHOTS[active_pokemon["type_ids"][0]])
            backup_pokemon1_type_ids = torch.FloatTensor(FWG_ONEHOTS[backup_pokemon1["type_ids"][0]])
            backup_pokemon2_type_ids = torch.FloatTensor(FWG_ONEHOTS[backup_pokemon2["type_ids"][0]])
            opponent_type_ids = torch.FloatTensor(FWG_ONEHOTS[opponent_pokemon["type_ids"][0]])
            features = torch.cat([move_features, active_pokemon_type_ids, backup_pokemon1_type_ids,
                                  backup_pokemon2_type_ids, opponent_type_ids, health, opp_health])
        else:
            # Batched state: build the same one-hots row by row.
            active_pokemon_type_ids = torch.FloatTensor([FWG_ONEHOTS[x[0]] for x in active_pokemon["type_ids"]])
            backup_pokemon1_type_ids = torch.FloatTensor([FWG_ONEHOTS[x[0]] for x in backup_pokemon1["type_ids"]])
            backup_pokemon2_type_ids = torch.FloatTensor([FWG_ONEHOTS[x[0]] for x in backup_pokemon2["type_ids"]])
            opponent_type_ids = torch.FloatTensor([FWG_ONEHOTS[x[0]] for x in opponent_pokemon["type_ids"]])
            features = torch.cat([move_features, active_pokemon_type_ids, backup_pokemon1_type_ids,
                                  backup_pokemon2_type_ids, opponent_type_ids, health, opp_health], dim=1)
        if verbose == True:
            print("")
            print(features)
        return features

    def forward(self, state_dict, verbose=False):
        """State representation right now:
        - team: List of pokemon object dictionaries, len = team_size
          - Pokemon: Dict of {id_field : value},
            - Value: is one of:
              - list
              - int ("ids" in id_field name): for an embedding index
              - float: between 0 and 1, scalar value
              - bool: for 0/1 input
        - opponent_team: List of pokemon object dictionaries
        """
        features = self.get_features(state_dict, self.config, verbose=verbose)
        # 2-layer MLP: ReLU hidden layer, linear head over the 22 actions.
        # (Dead commented-out scratch code from the original was removed.)
        state_embedding = self.layer2(F.relu(self.layer1(features)))
        # TODO (longterm): move residuals
        return state_embedding
|
import gym
import math
import random
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from collections import namedtuple
from itertools import count
from tqdm import trange
from copy import deepcopy
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
# from PIL import Image
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
# import torchvision.transforms as T
from poke_env.player_configuration import PlayerConfiguration
from poke_env.player.env_player import Gen7EnvSinglePlayer
from poke_env.player.random_player import RandomPlayer
from poke_env.server_configuration import LocalhostServerConfiguration
from poke_env.player.player import Player
from sklearn.decomposition import PCA #Grab PCA functions
import matplotlib.pyplot as plt
from poke_env.data import STR_TO_ID, ID_TO_STR, MOVES
from poke_env.utils import to_id_str
import relevant_conditions
def homogenize_vectors(vectors):
    """Convert each element of *vectors* to a rank-2 float tensor.

    Rank-1 inputs gain a leading batch axis; rank-3 inputs drop their
    leading axis; rank-2 inputs pass through unchanged.
    """
    def _as_rank2(raw):
        t = torch.FloatTensor(raw)
        if t.dim() == 1:
            return t.unsqueeze(0)
        if t.dim() == 3:
            return t.squeeze(0)
        return t
    return [_as_rank2(v) for v in vectors]
# One-hot vectors for the Fire/Water/Grass type ids used by this model.
# Id 0 maps to the all-zero vector (presumably "no/unknown type" — TODO confirm
# against poke_env's type-id table).
FWG_ONEHOTS = {0: [0, 0, 0], 7: [1, 0, 0], 10: [0, 1, 0], 18: [0, 0, 1]}
# One-hot vectors keyed by species id for the six species this setup uses.
POKEMONID_ONEHOTS = {
    1036: [1, 0, 0, 0, 0, 0],
    388: [0, 1, 0, 0, 0, 0],
    984: [0, 0, 1, 0, 0, 0],
    152: [0, 0, 0, 1, 0, 0],
    439: [0, 0, 0, 0, 1, 0],
    365: [0, 0, 0, 0, 0, 1],
}
class MediumBoy_DQN(nn.Module):
    """Two-layer DQN head for a simplified Fire/Water/Grass pokemon battle.

    Encodes a battle ``state_dict`` into an 18-dim feature vector
    (4 move powers + four 3-dim FWG type one-hots + own/opponent HP) and
    maps it through an MLP to 22 action values.  The output layer is
    zero-initialized so every action starts with a Q-value of 0.
    """

    def __init__(self, config):
        super(MediumBoy_DQN, self).__init__()
        # BUGFIX: config was never stored although forward() reads self.config.
        self.config = config
        # Embedding dimension sizes: frozen one-hot table over the 18 types.
        self.type_embedding_style = "twohot"
        self.type_embedding = nn.Embedding(18, 18)
        self.type_embedding.weight.data = torch.FloatTensor(np.eye(18))
        self.type_embedding.weight.requires_grad = False
        # 4 move powers + 4 * 3 FWG one-hots + 2 HP scalars = 18 inputs.
        self.input_dim = 18
        self.hidden_dim = config.complete_state_hidden_dim
        self.output_dim = 22
        self.layers = []
        self.layer1 = nn.Linear(self.input_dim, self.hidden_dim)
        self.layer2 = nn.Linear(self.hidden_dim, self.output_dim)
        # Zero-init the head: initial Q-values are exactly 0 for all actions.
        self.layer2.weight.data.fill_(0)
        self.layer2.bias.data.fill_(0)

    def build_model(self):
        """Recompute ``input_dim`` from the config feature flags and rebuild the MLP.

        BUGFIX: this method referenced the bare name ``config`` (NameError at
        runtime) and called ``sys.exit`` without importing ``sys``.  It now
        reads ``self.config`` and raises ``NotImplementedError`` for
        unsupported representations instead of killing the process.
        """
        config = self.config
        self.input_dim = 0
        # How will the model represent typing information?
        if config.represent_types_as in ["onehot", "twohot"]:
            self.type_embedding = nn.Embedding(18, 18)
            self.type_embedding.weight.data = torch.FloatTensor(np.eye(18))
            self.type_embedding.weight.requires_grad = False
            if config.represent_types_as == "onehot":
                self.type_embedding_size = 18 * 2
            else:
                self.type_embedding_size = 18
        else:
            raise NotImplementedError("Typing rep not implemented")
        if config.include_our_pokemon_species_typing == True:
            self.input_dim += self.type_embedding_size * config.number_pokemon
        if config.include_opponent_pokemon_species_typing == True:
            self.input_dim += self.type_embedding_size  # TODO: Make this a little more modular.
        # How does the model represent species information?
        if config.represent_species_as in ["onehot"]:
            self.num_species = 6
            self.species_embedding = nn.Embedding(self.num_species, self.num_species)
            # BUGFIX: the original overwrote the 18x18 type_embedding weights
            # with a 6x6 identity here; the identity belongs to species_embedding.
            self.species_embedding.weight.data = torch.FloatTensor(np.eye(6))
            self.species_embedding.weight.requires_grad = False
            self.species_embedding_size = self.num_species
        else:
            raise NotImplementedError("Species rep not implemented")
        if config.include_our_pokemon_species_embedding == True:
            self.input_dim += self.species_embedding_size * config.number_pokemon
        if config.include_opponent_pokemon_species_embedding == True:
            self.input_dim += self.species_embedding_size  # TODO: Make this a little more modular.
        # How does the model represent health?
        if config.include_our_pokemon_health == True:
            self.input_dim += config.number_pokemon  # Todo: make more modular (just active?)
        if config.include_opponent_pokemon_health == True:
            self.input_dim += 1  # Todo: make more modular (all pokemon?)
        # How does the model represent move information?
        if config.include_our_pokemon_move_power == True:
            self.input_dim += 4  # Todo: represent back pokemon move power as well?
        if config.include_our_pokemon_move_typing == True:
            if config.represent_move_typing_as == "same_as_species":
                self.move_typing_emb_dim = 18  # Hardcoded
            else:
                raise NotImplementedError("move typing emb not implemented")
            self.input_dim += 4 * self.move_typing_emb_dim
        # Make our model
        self.hidden_dim = config.complete_state_hidden_dim
        self.output_dim = 22
        self.layers = []
        self.layer1 = nn.Linear(self.input_dim, self.hidden_dim)
        self.layer2 = nn.Linear(self.hidden_dim, self.output_dim)
        self.layer2.weight.data.fill_(0)
        self.layer2.bias.data.fill_(0)
        return

    def get_features(self, state_dict, config, verbose=False):
        """Build the feature tensor (18-dim, or [batch, 18]) from a battle state.

        BUGFIX: the original had two syntax errors (an unbalanced parenthesis
        in the torch.cat call and ``assert len(features) = ...``), called
        ``len()`` on the integer ``batch_size`` and referenced an undefined
        ``verbose``; ``verbose`` is now an optional keyword (backward
        compatible).
        """
        batch_size = len(state_dict["weather"])
        input_features = []
        # --- Generic, config-driven path.  NOTE(review): this branch looks
        # unfinished (``state_dict["team"][:]["type_ids"]`` would raise on a
        # plain list of dicts) and its result is discarded by the hard-coded
        # FWG path below; it only runs when the config flags enable it.
        if config.include_our_pokemon_species_typing == True:
            species_typing = self.type_embedding(state_dict["team"][:]["type_ids"])
            if config.represent_species_as == "twohot":
                # [batch_size, team_size (6), type_0, type_emb_dim]
                species_typing = species_typing[:, :, 0, :] + species_typing[:, :, 1, :]
            input_features.append(species_typing)
        if config.include_our_pokemon_species_typing == True:
            # TODO: modularity
            opponent_species_typing = self.type_embedding(state_dict["opponent_team"][0]["type_ids"])
            if config.represent_species_as == "twohot":
                opponent_species_typing = opponent_species_typing[:, 0, :] + opponent_species_typing[:, 1, :]
            input_features.append(opponent_species_typing)
        if config.include_our_pokemon_species_embedding == True:
            input_features.append(self.species_embedding(state_dict["team"][:]["species_id"]))
        if config.include_opponent_species_embedding == True:
            input_features.append(self.species_embedding(state_dict["opponent_team"][0]["species_id"]))  # TODO: modularity
        if input_features:
            # BUGFIX: was ``if len(batch_size) == 1`` (TypeError: batch_size is
            # an int); the cat call and assert below did not parse at all.
            if batch_size == 1:
                features = torch.cat(input_features)
            else:
                features = torch.cat(input_features, dim=1)
            assert features.shape[-1] == self.input_dim
        # --- Hard-coded FWG path: the representation actually used below. ---
        active_pokemon = state_dict["team"][0]
        backup_pokemon1 = state_dict["team"][1]
        backup_pokemon2 = state_dict["team"][2]
        move_features = torch.FloatTensor(active_pokemon["move_powers"])
        opponent_pokemon = state_dict["opponent_team"][0]
        opp_health = torch.FloatTensor(state_dict["opponent_team"][0]["hp_percentage"])
        health = torch.FloatTensor(active_pokemon["hp_percentage"])
        if len(move_features.shape) == 1:
            # Unbatched state: one FWG one-hot per (primary) type id.
            active_pokemon_type_ids = torch.FloatTensor(FWG_ONEHOTS[active_pokemon["type_ids"][0]])
            backup_pokemon1_type_ids = torch.FloatTensor(FWG_ONEHOTS[backup_pokemon1["type_ids"][0]])
            backup_pokemon2_type_ids = torch.FloatTensor(FWG_ONEHOTS[backup_pokemon2["type_ids"][0]])
            opponent_type_ids = torch.FloatTensor(FWG_ONEHOTS[opponent_pokemon["type_ids"][0]])
            features = torch.cat([move_features, active_pokemon_type_ids, backup_pokemon1_type_ids,
                                  backup_pokemon2_type_ids, opponent_type_ids, health, opp_health])
        else:
            # Batched state: build the same one-hots row by row.
            active_pokemon_type_ids = torch.FloatTensor([FWG_ONEHOTS[x[0]] for x in active_pokemon["type_ids"]])
            backup_pokemon1_type_ids = torch.FloatTensor([FWG_ONEHOTS[x[0]] for x in backup_pokemon1["type_ids"]])
            backup_pokemon2_type_ids = torch.FloatTensor([FWG_ONEHOTS[x[0]] for x in backup_pokemon2["type_ids"]])
            opponent_type_ids = torch.FloatTensor([FWG_ONEHOTS[x[0]] for x in opponent_pokemon["type_ids"]])
            features = torch.cat([move_features, active_pokemon_type_ids, backup_pokemon1_type_ids,
                                  backup_pokemon2_type_ids, opponent_type_ids, health, opp_health], dim=1)
        if verbose == True:
            print("")
            print(features)
        return features

    def forward(self, state_dict, verbose=False):
        """State representation right now:
        - team: List of pokemon object dictionaries, len = team_size
          - Pokemon: Dict of {id_field : value},
            - Value: is one of:
              - list
              - int ("ids" in id_field name): for an embedding index
              - float: between 0 and 1, scalar value
              - bool: for 0/1 input
        - opponent_team: List of pokemon object dictionaries
        """
        features = self.get_features(state_dict, self.config, verbose=verbose)
        # 2-layer MLP: ReLU hidden layer, linear head over the 22 actions.
        # (Dead commented-out scratch code from the original was removed.)
        state_embedding = self.layer2(F.relu(self.layer1(features)))
        # TODO (longterm): move residuals
        return state_embedding
|
en
| 0.586502
|
# from PIL import Image # import torchvision.transforms as T #Grab PCA functions #Batch size is 1: #Batch size is 1: #Embedding dimension sizes #self.layers.append(nn.Linear(self.input_dim,config.hidden_dim)) #for i in range(1, config.num_layers): # self.layers.append(nn.Linear(config.hidden_dim,config.hidden_dim)) #self.layers.append(nn.Linear(self.hidden_dim,config.output_dim)) #How will the model represent typing information? #TODO: Make this a little more modular. #How does the model represent species information? #TODO: Make this a little more modular. #How does the model represent health? #Todo: make more modular (just active?) #Todo: make more modular (all pokemon?) #How does the model represent move information? #Todo: represent back pokemon move power as well? #Hardcoded #Make our model #[batch_size, team_size (6), type_0, type_emb_dim] #TODO: modularity #[batch_size, team_size (6), type_0, type_emb_dim] #TODO: modularity #type_ids = torch.LongTensor(active_pokemon["move_type_ids"]) for feature in feats: print(feature.shape) State representation right now: - team: List of pokemon object dictionaries, len = team_size - Pokemon: Dict of {id_field : value}, -Value: is one of: -list -int ("ids" in id_field name): for an embedding index -float: between 0 and 1, scalar value -bool: for 0/1 input - opponent_team: List of pokemon object dictionaries move_powers = np.zeros(4) moves_dmg_multiplier = np.zeros(4) team_health = np.zeros(2) active_pokemon = state_dict["team"][0] moves = active_pokemon["move_ids"] for idx, move_idx in moves: move_name = ID_TO_STR[move_idx] move_power = MOVES[move_name]["basePower"] move_power = move_power * 1.0 / 150 move_powers[idx] = move_power move_type = STR_TO_ID[MOVES[move_name]["type"]] opponent_types = state_dict["opponent_team"][0]["type_ids"] moves_dmg_multiplier x = complete_state_concatenation for layer in self.complete_state_linear_layers[:-1]: x = F.relu(layer(x)) state_embedding = self.complete_state_linear_layers[-1](x) 
#TODO (longterm): move residuals
| 1.848829
| 2
|
embedded/uvacbot/ui/button.py
|
dpm76/Microvacbot
| 1
|
6627656
|
<reponame>dpm76/Microvacbot<filename>embedded/uvacbot/ui/button.py<gh_stars>1-10
'''
Created on 1 may. 2020
@author: David
'''
from micropython import schedule
from pyb import Pin, Timer
from utime import sleep_ms
class Button(object):
    '''
    This button can handle short and long press
    '''
    # NOTE(review): this block came from a whitespace-stripped dump; the
    # nesting below is the only reading consistent with the visible lines —
    # confirm against the original source.

    def __init__(self, pin, timerId = 6, thresholdTime = 1000, lowOnPress=False):
        '''
        Constructor
        @param pin: Pin object where the button is
        @param timerId: (default=6) Timer to determine the long press
        @param thresholdTime: Waiting time to determine a long press as milliseconds
        @param lowOnPress: Indicates whether the value is 0 when the button is pressed (pull-down)
        The user (blue) button on the NUCLEO-L476RG board must have this parameter as True,
        but for the case of the NUCLEO-F767ZI board, this parameter must be False
        '''
        self._pin = Pin(pin)
        self._pin.init(mode=Pin.IN)
        # Fire _onPinIrq on pin edges (press and release).
        self._pin.irq(handler=self._onPinIrq)
        self._lowOnPress = lowOnPress
        self._thresholdTime = thresholdTime
        self._pressTime = 0
        # Timer that detects the button still being held after thresholdTime ms.
        self._timer = Timer(timerId)
        self._shortPressHandler = None
        self._longPressHandler = None
        # True once _onTimeout has fired for the current press.
        self._isTimeout = False

    def _onPinIrq(self, _):
        # Disarm the pin IRQ and the long-press timer while this edge is
        # processed; both are re-armed below as needed.
        self._pin.irq(handler=None)
        self._timer.callback(None)
        self._timer.deinit()
        #debounce signal
        sleep_ms(50)
        # After debouncing, the pin level identifies the edge: the "released"
        # level is 1 for a pull-down (lowOnPress) wiring, 0 otherwise.
        if self._pin.value() == (1 if self._lowOnPress else 0):
            # Release edge: report a short press unless the long-press
            # timeout already fired for this press.
            if not self._isTimeout:
                schedule(self._shortPressHandler, self)
        else:
            # Press edge: clear any stale timeout flag and start the
            # long-press timer (period of thresholdTime ms).
            self._isTimeout = False
            self._timer.init(freq=1000/self._thresholdTime, callback=self._onTimeout)
        # Re-arm the pin interrupt for the next edge.
        self._pin.irq(handler=self._onPinIrq)

    def _onTimeout(self, t):
        # Long-press threshold reached while the button is still held:
        # mark the press as long and schedule the handler.
        t.deinit()
        self._isTimeout = True
        schedule(self._longPressHandler, self)

    def setShortPressHandler(self, handler):
        '''
        Sets the handler for short press
        '''
        self._shortPressHandler = handler
        return self

    def setLongPressHandler(self, handler):
        '''
        Sets the handler for long press
        '''
        self._longPressHandler = handler
        return self

    def cleanup(self):
        '''
        Releases resources
        Deinits the timer and removes handler for the pin's IRQ
        '''
        self._timer.deinit()
        self._pin.irq(handler=None)
|
'''
Created on 1 may. 2020
@author: David
'''
from micropython import schedule
from pyb import Pin, Timer
from utime import sleep_ms
class Button(object):
    '''
    This button can handle short and long press
    '''
    def __init__(self, pin, timerId = 6, thresholdTime = 1000, lowOnPress=False):
        '''
        Constructor
        @param pin: Pin object where the button is
        @param timerId: (default=6) Timer to determine the long press
        @param thresholdTime: Waiting time to determine a long press as milliseconds
        @param lowOnPress: Indicates whether the value is 0 when the button is pressed (pull-down)
        The user (blue) button on the NUCLEO-L476RG board must have this parameter as True,
        but for the case of the NUCLEO-F767ZI board, this parameter must be False
        '''
        self._pin = Pin(pin)
        self._pin.init(mode=Pin.IN)
        # Edge IRQ fires on both press and release.
        self._pin.irq(handler=self._onPinIrq)
        self._lowOnPress = lowOnPress
        self._thresholdTime = thresholdTime
        # NOTE(review): _pressTime is unused in this class.
        self._pressTime = 0
        self._timer = Timer(timerId)
        self._shortPressHandler = None
        self._longPressHandler = None
        # True once _onTimeout has already reported a long press.
        self._isTimeout = False
    def _onPinIrq(self, _):
        # Mask IRQ and cancel the long-press timer while handling this edge.
        self._pin.irq(handler=None)
        self._timer.callback(None)
        self._timer.deinit()
        #debounce signal
        sleep_ms(50)
        if self._pin.value() == (1 if self._lowOnPress else 0):
            # Button released.
            if not self._isTimeout:
                # Released before the threshold -> short press.
                schedule(self._shortPressHandler, self)
            else:
                # Long press was already dispatched; reset the flag.
                self._isTimeout = False
        else:
            # Button pressed: start the long-press timer.
            self._timer.init(freq=1000/self._thresholdTime, callback=self._onTimeout)
        self._pin.irq(handler=self._onPinIrq)
    def _onTimeout(self, t):
        # Held past thresholdTime: report a long press.
        t.deinit()
        self._isTimeout = True
        schedule(self._longPressHandler, self)
    def setShortPressHandler(self, handler):
        '''
        Sets the handler for short press
        '''
        self._shortPressHandler = handler
        return self
    def setLongPressHandler(self, handler):
        '''
        Sets the handler for long press
        '''
        self._longPressHandler = handler
        return self
    def cleanup(self):
        '''
        Releases resources
        Deinits the timer and removes handler for the pin's IRQ
        '''
        self._timer.deinit()
        self._pin.irq(handler=None)
|
en
| 0.658647
|
Created on 1 may. 2020
@author: David This button can handle short and long press Constructor
@param pin: Pin object where the button is
@param timerId: (default=6) Timer to determine the long press
@param thresholdTime: Waiting time to determine a long press as milliseconds
@param lowOnPress: Indicates whether the value is 0 when the button is pressed (pull-down)
The user (blue) button on the NUCLEO-L476RG board must have this parameter as True,
but for the case of the NUCLEO-F767ZI board, this parameter must be False #debounce signal Sets the handler for short press Sets the handler for long press Releases resources
Deinits the timer and removes handler for the pin's IRQ
| 3.011693
| 3
|
homeassistant/components/device_tracker/device_trigger.py
|
SNoof85/core
| 2
|
6627657
|
"""Provides device automations for Device Tracker."""
from __future__ import annotations
from typing import Any, Final
import voluptuous as vol
from homeassistant.components.automation import (
AutomationActionType,
AutomationTriggerInfo,
)
from homeassistant.components.device_automation import DEVICE_TRIGGER_BASE_SCHEMA
from homeassistant.components.zone import DOMAIN as DOMAIN_ZONE, trigger as zone
from homeassistant.const import (
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_EVENT,
CONF_PLATFORM,
CONF_TYPE,
CONF_ZONE,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers import config_validation as cv, entity_registry
from homeassistant.helpers.typing import ConfigType
from .const import DOMAIN
TRIGGER_TYPES: Final[set[str]] = {"enters", "leaves"}
TRIGGER_SCHEMA: Final = DEVICE_TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(TRIGGER_TYPES),
vol.Required(CONF_ZONE): cv.entity_domain(DOMAIN_ZONE),
}
)
async def async_get_triggers(
    hass: HomeAssistant, device_id: str
) -> list[dict[str, Any]]:
    """List device triggers for Device Tracker devices.

    Every device_tracker entity on the device contributes two triggers:
    zone "enters" and zone "leaves".
    """
    registry = entity_registry.async_get(hass)
    entries = entity_registry.async_entries_for_device(registry, device_id)
    return [
        {
            CONF_PLATFORM: "device",
            CONF_DEVICE_ID: device_id,
            CONF_DOMAIN: DOMAIN,
            CONF_ENTITY_ID: entry.entity_id,
            CONF_TYPE: trigger_type,
        }
        for entry in entries
        if entry.domain == DOMAIN
        for trigger_type in ("enters", "leaves")
    ]
async def async_attach_trigger(
    hass: HomeAssistant,
    config: ConfigType,
    action: AutomationActionType,
    automation_info: AutomationTriggerInfo,
) -> CALLBACK_TYPE:
    """Attach a trigger by delegating to the zone integration."""
    # Map the device-trigger type onto the corresponding zone event.
    event = zone.EVENT_ENTER if config[CONF_TYPE] == "enters" else zone.EVENT_LEAVE
    zone_config = await zone.async_validate_trigger_config(
        hass,
        {
            CONF_PLATFORM: DOMAIN_ZONE,
            CONF_ENTITY_ID: config[CONF_ENTITY_ID],
            CONF_ZONE: config[CONF_ZONE],
            CONF_EVENT: event,
        },
    )
    return await zone.async_attach_trigger(
        hass, zone_config, action, automation_info, platform_type="device"
    )
async def async_get_trigger_capabilities(
    hass: HomeAssistant, config: ConfigType
) -> dict[str, vol.Schema]:
    """List trigger capabilities: every known zone, sorted by friendly name."""
    zone_states = hass.states.async_all(DOMAIN_ZONE)
    zone_states.sort(key=lambda state: state.name)
    zones = {state.entity_id: state.name for state in zone_states}
    schema = vol.Schema({vol.Required(CONF_ZONE): vol.In(zones)})
    return {"extra_fields": schema}
|
"""Provides device automations for Device Tracker."""
from __future__ import annotations
from typing import Any, Final
import voluptuous as vol
from homeassistant.components.automation import (
AutomationActionType,
AutomationTriggerInfo,
)
from homeassistant.components.device_automation import DEVICE_TRIGGER_BASE_SCHEMA
from homeassistant.components.zone import DOMAIN as DOMAIN_ZONE, trigger as zone
from homeassistant.const import (
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_EVENT,
CONF_PLATFORM,
CONF_TYPE,
CONF_ZONE,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers import config_validation as cv, entity_registry
from homeassistant.helpers.typing import ConfigType
from .const import DOMAIN
TRIGGER_TYPES: Final[set[str]] = {"enters", "leaves"}
TRIGGER_SCHEMA: Final = DEVICE_TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(TRIGGER_TYPES),
vol.Required(CONF_ZONE): cv.entity_domain(DOMAIN_ZONE),
}
)
async def async_get_triggers(
    hass: HomeAssistant, device_id: str
) -> list[dict[str, Any]]:
    """List device triggers for Device Tracker devices."""
    registry = entity_registry.async_get(hass)
    triggers = []
    # Get all the integrations entities for this device
    for entry in entity_registry.async_entries_for_device(registry, device_id):
        if entry.domain != DOMAIN:
            continue
        # Each device_tracker entity offers two triggers: zone "enters" ...
        triggers.append(
            {
                CONF_PLATFORM: "device",
                CONF_DEVICE_ID: device_id,
                CONF_DOMAIN: DOMAIN,
                CONF_ENTITY_ID: entry.entity_id,
                CONF_TYPE: "enters",
            }
        )
        # ... and zone "leaves".
        triggers.append(
            {
                CONF_PLATFORM: "device",
                CONF_DEVICE_ID: device_id,
                CONF_DOMAIN: DOMAIN,
                CONF_ENTITY_ID: entry.entity_id,
                CONF_TYPE: "leaves",
            }
        )
    return triggers
async def async_attach_trigger(
    hass: HomeAssistant,
    config: ConfigType,
    action: AutomationActionType,
    automation_info: AutomationTriggerInfo,
) -> CALLBACK_TYPE:
    """Attach a trigger."""
    # Map the device-trigger type onto the corresponding zone event.
    if config[CONF_TYPE] == "enters":
        event = zone.EVENT_ENTER
    else:
        event = zone.EVENT_LEAVE
    # Build an equivalent zone trigger config and delegate attachment to it.
    zone_config = {
        CONF_PLATFORM: DOMAIN_ZONE,
        CONF_ENTITY_ID: config[CONF_ENTITY_ID],
        CONF_ZONE: config[CONF_ZONE],
        CONF_EVENT: event,
    }
    zone_config = await zone.async_validate_trigger_config(hass, zone_config)
    return await zone.async_attach_trigger(
        hass, zone_config, action, automation_info, platform_type="device"
    )
async def async_get_trigger_capabilities(
    hass: HomeAssistant, config: ConfigType
) -> dict[str, vol.Schema]:
    """List trigger capabilities."""
    # Offer every known zone, sorted by friendly name, as the CONF_ZONE choice.
    zones = {
        ent.entity_id: ent.name
        for ent in sorted(hass.states.async_all(DOMAIN_ZONE), key=lambda ent: ent.name)
    }
    return {
        "extra_fields": vol.Schema(
            {
                vol.Required(CONF_ZONE): vol.In(zones),
            }
        )
    }
|
en
| 0.791259
|
Provides device automations for Device Tracker. List device triggers for Device Tracker devices. # Get all the integrations entities for this device Attach a trigger. List trigger capabilities.
| 2.056854
| 2
|
wx_app.py
|
wolfg1969/my-wechat-app
| 1
|
6627658
|
<reponame>wolfg1969/my-wechat-app
# coding=utf-8
# Flask application bootstrap: wires up settings, Redis and the WeChat SDK.
from flask import Flask
# NOTE(review): `flask.ext.*` import hooks were removed in Flask 1.0, so this
# requires an old Flask; modern code uses `from flask_redis import FlaskRedis`.
from flask.ext.redis import FlaskRedis
from redis import StrictRedis
from wechat_sdk import WechatConf
app = Flask(__name__)
# Settings file path comes from the MY_WECHAT_APP_SETTINGS environment variable.
app.config.from_envvar('MY_WECHAT_APP_SETTINGS')
# Redis client backed by StrictRedis, configured from the Flask app config.
redis_store = FlaskRedis.from_custom_provider(StrictRedis, app)
# WeChat SDK configuration built from the WX_* settings.
wechat_conf = WechatConf(
    token=app.config['WX_TOKEN'],
    appid=app.config['WX_APP_ID'],
    appsecret=app.config['WX_APP_SECRET'],
    encrypt_mode=app.config['WX_ENCRYPT_MODE'],
    encoding_aes_key=app.config['WX_ENCODING_AES_KEY']
)
|
# coding=utf-8
# Flask application bootstrap: wires up settings, Redis and the WeChat SDK.
from flask import Flask
# NOTE(review): `flask.ext.*` imports were removed in Flask 1.0 -- confirm the
# pinned Flask version; the modern form is `from flask_redis import FlaskRedis`.
from flask.ext.redis import FlaskRedis
from redis import StrictRedis
from wechat_sdk import WechatConf
app = Flask(__name__)
# Settings file path comes from the MY_WECHAT_APP_SETTINGS environment variable.
app.config.from_envvar('MY_WECHAT_APP_SETTINGS')
# Redis client backed by StrictRedis, configured from the Flask app config.
redis_store = FlaskRedis.from_custom_provider(StrictRedis, app)
# WeChat SDK configuration built from the WX_* settings.
wechat_conf = WechatConf(
    token=app.config['WX_TOKEN'],
    appid=app.config['WX_APP_ID'],
    appsecret=app.config['WX_APP_SECRET'],
    encrypt_mode=app.config['WX_ENCRYPT_MODE'],
    encoding_aes_key=app.config['WX_ENCODING_AES_KEY']
)
|
en
| 0.644078
|
# coding=utf-8
| 1.843225
| 2
|
pollbot/telegram/keyboard/date_picker.py
|
3wille/ultimate-poll-bot
| 0
|
6627659
|
<filename>pollbot/telegram/keyboard/date_picker.py
"""Reply keyboards."""
import calendar
from datetime import date
from telegram import (
InlineKeyboardButton,
)
from pollbot.helper.enums import CallbackType
def get_datepicker_buttons(poll):
    """Build the inline-keyboard button grid for the datepicker.

    Returns a list of button rows: a month/year headline, a weekday header,
    one row per calendar week, and a previous/next-month footer.
    """
    current_date = poll.current_date
    if current_date is None:
        # BUG FIX: datetime.date has no now(); date.today() is the correct
        # way to get the current day (the original raised AttributeError).
        current_date = date.today()
        poll.current_date = current_date
    buttons = []
    # Payload for buttons that should do nothing when pressed.
    ignore_payload = f'{CallbackType.ignore.value}:0:0'
    # Add headline
    headline = f'{calendar.month_name[current_date.month]} {current_date.year}'
    buttons.append([InlineKeyboardButton(headline, callback_data=ignore_payload)])
    # Create the week-day column description
    row = []
    for day in ["Mo", "Tu", "We", "Th", "Fr", "Sa", "Su"]:
        row.append(InlineKeyboardButton(day, callback_data=ignore_payload))
    buttons.append(row)
    # Iterate through all days and create respective buttons
    calendar_month = calendar.monthcalendar(current_date.year, current_date.month)
    for week in calendar_month:
        row = []
        for day in week:
            # Format the text. The currently chosen day should be surrounded
            # by brackets e.g (26). Button text must be a string.
            day_text = str(day)
            if day > 0:
                this_date = date(year=current_date.year, month=current_date.month, day=day)
                if this_date == current_date:
                    day_text = f'({day})'
            # Only create real buttons for actual days of the month
            # (monthcalendar pads weeks with 0 for out-of-month slots).
            if day == 0:
                row.append(InlineKeyboardButton(" ", callback_data=ignore_payload))
            else:
                day_date = date(current_date.year, current_date.month, day)
                payload = f'{CallbackType.set_date.value}:{poll.id}:{day_date.isoformat()}'
                row.append(InlineKeyboardButton(day_text, callback_data=payload))
        buttons.append(row)
    # Month navigation footer.
    previous_payload = f'{CallbackType.previous_month.value}:{poll.id}:0'
    next_payload = f'{CallbackType.next_month.value}:{poll.id}:0'
    buttons.append([
        InlineKeyboardButton('<', callback_data=previous_payload),
        InlineKeyboardButton('>', callback_data=next_payload),
    ])
    return buttons
|
<filename>pollbot/telegram/keyboard/date_picker.py
"""Reply keyboards."""
import calendar
from datetime import date
from telegram import (
InlineKeyboardButton,
)
from pollbot.helper.enums import CallbackType
def get_datepicker_buttons(poll):
    """Build the inline-keyboard button grid for the datepicker.

    Returns a list of button rows: a month/year headline, a weekday header,
    one row per calendar week, and a previous/next-month footer.
    """
    current_date = poll.current_date
    if current_date is None:
        # BUG FIX: datetime.date has no now(); use date.today() instead
        # (the original raised AttributeError on this path).
        current_date = date.today()
        poll.current_date = current_date
    buttons = []
    # Payload for buttons that should do nothing when pressed.
    ignore_payload = f'{CallbackType.ignore.value}:0:0'
    # Add headline
    headline = f'{calendar.month_name[current_date.month]} {current_date.year}'
    buttons.append([InlineKeyboardButton(headline, callback_data=ignore_payload)])
    # Create the week-day column description
    row = []
    for day in ["Mo", "Tu", "We", "Th", "Fr", "Sa", "Su"]:
        row.append(InlineKeyboardButton(day, callback_data=ignore_payload))
    buttons.append(row)
    # Iterate through all days and create respective buttons
    calendar_month = calendar.monthcalendar(current_date.year, current_date.month)
    for week in calendar_month:
        row = []
        for day in week:
            # Format the text. The currently chosen day should be surrounded
            # by brackets e.g (26). Button text must be a string.
            day_text = str(day)
            if day > 0:
                this_date = date(year=current_date.year, month=current_date.month, day=day)
                if this_date == current_date:
                    day_text = f'({day})'
            # Only create real buttons for actual days of the month
            # (monthcalendar pads weeks with 0 for out-of-month slots).
            if day == 0:
                row.append(InlineKeyboardButton(" ", callback_data=ignore_payload))
            else:
                day_date = date(current_date.year, current_date.month, day)
                payload = f'{CallbackType.set_date.value}:{poll.id}:{day_date.isoformat()}'
                row.append(InlineKeyboardButton(day_text, callback_data=payload))
        buttons.append(row)
    # Month navigation footer.
    previous_payload = f'{CallbackType.previous_month.value}:{poll.id}:0'
    next_payload = f'{CallbackType.next_month.value}:{poll.id}:0'
    buttons.append([
        InlineKeyboardButton('<', callback_data=previous_payload),
        InlineKeyboardButton('>', callback_data=next_payload),
    ])
    return buttons
|
en
| 0.75588
|
Reply keyboards. Get the buttons for the datepicker. # Add headline # Create the week-day column description # Iterate through all days and create respective buttons # Format the text. The currently chosen day should be surrounded by brackets e.g (26) # Only create real buttons for actual days of the month
| 3.373142
| 3
|
musicbatch/transcoder/app.py
|
sio/musicbatch
| 0
|
6627660
|
'''
CLI application for transcoding music files
'''
import os
import json
import platform
import time
import sys
from argparse import ArgumentParser
from contextlib import contextmanager
from datetime import datetime
from functools import partial
from pkg_resources import resource_string
from subprocess import Popen, DEVNULL
from threading import Thread
import mutagen
from jsonschema import Draft7Validator as JSONSchemaValidator
from ruamel import yaml
from musicbatch.transcoder import (
CONFIG_ENCODING,
DEFAULT_CONFIG,
LOSSLESS_EXTENSIONS,
)
from musicbatch.transcoder.encoders import (
AACTranscoder,
LameTranscoder,
OpusTranscoder,
SymlinkCreator,
VerbatimFileCopy,
VorbisTranscoder,
)
from musicbatch.transcoder.cover import copy_coverart
from musicbatch.transcoder.lyrics import copy_lyrics, read_lyrics
from musicbatch.transcoder.progress import (
TranscodingStats,
show_progress,
)
from musicbatch.transcoder.queue import (
TranscodingQueue,
execute_in_threadqueue,
)
from musicbatch.lyrics.db import LyricsStorage
import logging
log = logging.getLogger(__name__)
def run(*a, **ka):
    '''
    CLI entry point

    Arguments are passed through to parse_args (and so to argparse).
    '''
    # 1. Load config from YAML
    # 2. Find relevant music files and add them to queue
    # 3. Concurrently process each file in the queue:
    # - Calculate target location
    # - Transcode
    # - Fill tags
    # - Copy lyrics
    # - Copy cover art
    args = parse_args(*a, **ka)
    if args.newconfig:
        # Write the bundled sample config, hand it to the user's editor, exit.
        with open(args.config, 'wb') as config:
            config.write(resource_string(__name__.rsplit('.', 1)[0], 'sample.yml'))
        edit_file(args.config)
        return
    job = TranscodingJob(args.config)
    tasks = TranscodingQueue(job.inputs, job.output_pattern)
    # restore_stdin() undoes terminal damage done by pydub's subprocesses.
    with restore_stdin():
        show_progress(job) # start progress report thread
        execute_in_threadqueue(job.transcode, tasks, buffer_size=20)
        job.finished = True # terminate progress report thread
    job.write_report()
def parse_args(*a, prog=None, **ka):
    """Parse command-line arguments for the transcoder CLI.

    Positional args/kwargs are forwarded to argparse's parse_args, which
    makes the function easy to drive from tests.
    """
    arg_parser = ArgumentParser(
        prog=prog,
        description='Batch transcode music files according to the provided configuration file',
        epilog='This program relies on FFmpeg <http://ffmpeg.org> for audio encoding. Please make sure it\'s installed',
    )
    arg_parser.add_argument(
        'config',
        metavar='CONFIG',
        help='Path to YAML description of the transcoding job',
    )
    arg_parser.add_argument(
        '--newconfig',
        action='store_true',
        default=False,
        help='Create new configuration file from template and open it for editing',
    )
    parsed = arg_parser.parse_args(*a, **ka)
    # Refuse to overwrite an existing file when creating a new config template.
    if parsed.newconfig and os.path.exists(parsed.config):
        arg_parser.error('File already exists: {}'.format(parsed.config))
    return parsed
@contextmanager
def restore_stdin():
    '''Restore the terminal's standard streams after the body runs
    (pydub's subprocesses are known to mess with them).'''
    saved_streams = (sys.stdin, sys.stdout, sys.stderr)
    yield
    # Best effort: re-enable terminal echo; ignore missing stty etc.
    try:
        Popen(['stty', 'echo'], stdout=DEVNULL, stderr=DEVNULL)
    except Exception:
        pass
    sys.stdin, sys.stdout, sys.stderr = saved_streams
def edit_file(path):
    '''
    Open text file for editing in default application. Current script process
    will be immediately terminated and replaced with editor.
    '''
    try:
        # Windows: os.startfile opens the file with its associated application.
        os.startfile(path)
    except AttributeError:
        # os.startfile does not exist outside Windows; choose a platform opener.
        if platform.system() == 'Darwin':
            command = ['open', path]
        elif os.environ.get('EDITOR'):
            command = [os.environ.get('EDITOR'), path]
        elif os.environ.get('VISUAL'):
            command = [os.environ.get('VISUAL'), path]
        else:
            command = ['xdg-open', path]
        # exec* replaces the current process image -- nothing after this runs.
        os.execvp(command[0], command)
class TranscodingJob:
    '''Store essential parameters of the transcoding job'''
    # Maps the configured output 'format' string to an encoder class.
    # The None key is the fallback used when no format is configured.
    ENCODERS = {
        None: VorbisTranscoder,
        'copy': VerbatimFileCopy,
        'symlink': SymlinkCreator,
        'vorbis': VorbisTranscoder,
        'lame': LameTranscoder,
        'mp3': LameTranscoder,
        'aac': AACTranscoder,
        'm4a': AACTranscoder,
        'opus': OpusTranscoder,
    }
    def __init__(self, config_file):
        '''Initialize transcoding job from a YAML configuration file'''
        self.stats = TranscodingStats()
        self.finished = False
        self.config_file = config_file
        # Set lazily by transcode() when the first task starts.
        self._timestamp = None
        with open(config_file, encoding=CONFIG_ENCODING) as f:
            config = yaml.load(f, Loader=yaml.RoundTripLoader)
            output = config.get('output', {})
            extras = config.get('extras', {})
        self.validate(config)
        self.job_id = config.get('name', DEFAULT_CONFIG['name'])
        self.inputs = config.get('input', [])
        self.output_dir = output.get('directory')
        self.output_pattern = output.get('pattern', DEFAULT_CONFIG['pattern'])
        self.cover_size = extras.get('cover', DEFAULT_CONFIG['cover'])
        # Category filtering: a blacklist takes precedence over a whitelist.
        if output.get('category_blacklist'):
            self.select_mode = 'blacklist'
            self.select = set(output.get('category_blacklist'))
        elif output.get('category_whitelist'):
            self.select_mode = 'whitelist'
            self.select = set(output.get('category_whitelist'))
        else:
            self.select_mode = None
            self.select = set()
        # Lyrics can come from a directory of files or a LyricsStorage
        # database file; anything else disables lyrics copying.
        lyrics_source = extras.get('lyrics', DEFAULT_CONFIG['lyrics'])
        if not lyrics_source:
            self.get_lyrics = None
        elif os.path.isdir(lyrics_source):
            self.get_lyrics = partial(read_lyrics, lyricsdir=lyrics_source)
        elif os.path.isfile(lyrics_source):
            database = LyricsStorage(lyrics_source)
            self.get_lyrics = database.get
        else:
            self.get_lyrics = None
        encoder = output.get('format', DEFAULT_CONFIG['format'])
        quality = output.get('quality', DEFAULT_CONFIG['quality'])
        self.transcoder = self.ENCODERS.get(encoder)(quality)
        # Decide what to do with sources that are already lossy.
        lossy_action = output.get('lossy_source', DEFAULT_CONFIG['lossy_source'])
        if lossy_action == 'allow_bad_transcodes'\
                or encoder == 'symlink' \
                or encoder == 'copy':
            self.lossy_action = self.transcoder
        elif lossy_action == 'copy':
            self.lossy_action = VerbatimFileCopy()
        elif lossy_action == 'skip':
            # A stand-in "worker" that just marks the task as skipped.
            skip_marker = self.transcoder.STATUS_SKIP
            self.lossy_action = lambda infile, outfile: (infile, skip_marker)
            self.lossy_action.STATUS_SKIP = skip_marker
        # NOTE(review): any other lossy_source value leaves self.lossy_action
        # unset here -- presumably the schema in validate() rules that out; confirm.
        os.makedirs(self.output_dir, exist_ok=True)
        log.debug('Initialized {}'.format(self))
    def __repr__(self):
        return '{cls}({config!r})'.format(
            cls=self.__class__.__name__,
            config=self.config_file,
        )
    def transcode(self, task):
        '''Execute a single transcoding task'''
        log.debug('Started {task}'.format(task=task))
        # Category filter: skip blacklisted (or non-whitelisted) tasks.
        if (self.select_mode == 'blacklist' and self.select.intersection(task.categories)) \
                or (self.select_mode == 'whitelist' and not self.select.intersection(task.categories)):
            self.stats.record_skip()
            log.debug('Skipped {task}'.format(task=task))
            return
        # Lossless sources go through the configured transcoder; lossy ones
        # through whatever lossy_action was chosen in __init__.
        source_format = os.path.splitext(task.source)[1][1:].lower()
        if source_format in LOSSLESS_EXTENSIONS:
            worker = self.transcoder
        else:
            worker = self.lossy_action
        if self._timestamp is None: # record the time of first transcoding task
            self._timestamp = int(time.time())
        # Step 1: Transcode
        task.result, task.status = worker(
            task.source,
            os.path.join(self.output_dir, task.target)
        )
        # Step 1a: Process extras (cover art, lyrics) in background threads
        if self.cover_size:
            Thread(
                target=copy_coverart,
                kwargs=dict(task=task, size=self.cover_size)
            ).start()
        if self.get_lyrics:
            Thread(
                target=copy_lyrics,
                kwargs=dict(task=task, lyrics_finder=self.get_lyrics),
            ).start()
        # Handle skipped transcodes
        if task.status is worker.STATUS_SKIP:
            # A "skipped" target modified after this job started means two
            # different sources mapped to the same output path.
            if os.path.getmtime(task.result) > self.timestamp:
                raise RuntimeError('Target path collision for {}'.format(task.result))
            self.stats.record_skip()
            log.debug('Skipped {task}'.format(task=task))
            return
        # Step 2: Copy music tags
        if not task.status is worker.STATUS_SKIPTAGS:
            result = mutagen.File(task.result, easy=True)
            for key in task.tags.keys(): # mutagen is inconsistent about `for k in t.tags`
                if hasattr(result.tags, 'valid_keys') \
                        and key not in result.tags.valid_keys:
                    continue
                result.tags[key] = task.tags[key]
            result.save()
        self.stats.record_done()
        log.debug('Finished {task}'.format(task=task))
    @property
    def timestamp(self):
        '''
        Date and time of starting the first transcoding task in this job
        (in Unix time format)
        '''
        return self._timestamp
    def write_report(self):
        '''Keep a journal of transcoder runs'''
        # Append a single timestamped stats line to transcoding.log.
        log_entry = '{time}Z: {stats}\n'.format(
            time = datetime.utcnow().replace(microsecond=0),
            stats = self.stats.show().strip(),
        )
        with open(os.path.join(self.output_dir, 'transcoding.log'), 'a') as logfile:
            logfile.write(log_entry)
    def validate(self, config):
        '''Validate transcoding job configuration'''
        # Build the JSON-schema validator once and cache it on the instance.
        try:
            self.validator
        except AttributeError:
            package = __name__.rsplit('.', 1)[0]
            path = 'schema.json'
            schema = json.loads(resource_string(package, path).decode())
            self.validator = JSONSchemaValidator(schema)
        error_messages = []
        for error in self.validator.iter_errors(config):
            error_messages.append(' - {}: {}'.format(
                '.'.join(error.path) if error.path else '[config]',
                error.message
            ))
        if error_messages:
            raise ValueError('invalid configuration values:\n{}'.format(
                '\n'.join(sorted(error_messages))
            ))
|
'''
CLI application for transcoding music files
'''
import os
import json
import platform
import time
import sys
from argparse import ArgumentParser
from contextlib import contextmanager
from datetime import datetime
from functools import partial
from pkg_resources import resource_string
from subprocess import Popen, DEVNULL
from threading import Thread
import mutagen
from jsonschema import Draft7Validator as JSONSchemaValidator
from ruamel import yaml
from musicbatch.transcoder import (
CONFIG_ENCODING,
DEFAULT_CONFIG,
LOSSLESS_EXTENSIONS,
)
from musicbatch.transcoder.encoders import (
AACTranscoder,
LameTranscoder,
OpusTranscoder,
SymlinkCreator,
VerbatimFileCopy,
VorbisTranscoder,
)
from musicbatch.transcoder.cover import copy_coverart
from musicbatch.transcoder.lyrics import copy_lyrics, read_lyrics
from musicbatch.transcoder.progress import (
TranscodingStats,
show_progress,
)
from musicbatch.transcoder.queue import (
TranscodingQueue,
execute_in_threadqueue,
)
from musicbatch.lyrics.db import LyricsStorage
import logging
log = logging.getLogger(__name__)
def run(*a, **ka):
    '''
    CLI entry point

    All arguments are forwarded to parse_args (and so to argparse).
    '''
    # 1. Load config from YAML
    # 2. Find relevant music files and add them to queue
    # 3. Concurrently process each file in the queue:
    # - Calculate target location
    # - Transcode
    # - Fill tags
    # - Copy lyrics
    # - Copy cover art
    args = parse_args(*a, **ka)
    if args.newconfig:
        # Write the bundled sample config, then open it in the user's editor.
        with open(args.config, 'wb') as config:
            config.write(resource_string(__name__.rsplit('.', 1)[0], 'sample.yml'))
        edit_file(args.config)
        return
    job = TranscodingJob(args.config)
    tasks = TranscodingQueue(job.inputs, job.output_pattern)
    # restore_stdin() undoes terminal damage done by pydub's subprocesses.
    with restore_stdin():
        show_progress(job) # start progress report thread
        execute_in_threadqueue(job.transcode, tasks, buffer_size=20)
        job.finished = True # terminate progress report thread
    job.write_report()
def parse_args(*a, prog=None, **ka):
    '''Parse CLI arguments; extra args/kwargs go straight to argparse.'''
    parser = ArgumentParser(
        description='Batch transcode music files according to the provided configuration file',
        epilog='This program relies on FFmpeg <http://ffmpeg.org> for audio encoding. Please make sure it\'s installed',
        prog=prog,
    )
    parser.add_argument(
        'config',
        metavar='CONFIG',
        help='Path to YAML description of the transcoding job',
    )
    parser.add_argument(
        '--newconfig',
        action='store_true',
        default=False,
        help='Create new configuration file from template and open it for editing',
    )
    args = parser.parse_args(*a, **ka)
    # Refuse to overwrite an existing file when creating a new config template.
    if args.newconfig and os.path.exists(args.config):
        parser.error('File already exists: {}'.format(args.config))
    return args
@contextmanager
def restore_stdin():
    '''Restore standard input in terminal (pydub's subprocesses mess with it)'''
    # Save the current stream objects; restore them after the body finishes.
    stdin, stdout, stderr = sys.stdin, sys.stdout, sys.stderr
    yield
    # Best effort: re-enable terminal echo; ignore a missing stty binary etc.
    try:
        Popen(['stty', 'echo'], stdout=DEVNULL, stderr=DEVNULL)
    except Exception:
        pass
    sys.stdin, sys.stdout, sys.stderr = stdin, stdout, stderr
def edit_file(path):
    '''
    Open text file for editing in default application. Current script process
    will be immediately terminated and replaced with editor.
    '''
    try:
        # Windows path: startfile opens the file with its associated program.
        os.startfile(path)
    except AttributeError:
        # Non-Windows: pick an opener by platform / environment.
        if platform.system() == 'Darwin':
            command = ['open', path]
        elif os.environ.get('EDITOR'):
            command = [os.environ.get('EDITOR'), path]
        elif os.environ.get('VISUAL'):
            command = [os.environ.get('VISUAL'), path]
        else:
            command = ['xdg-open', path]
        # exec* replaces the current process -- nothing after this line runs.
        os.execvp(command[0], command)
class TranscodingJob:
    '''Store essential parameters of the transcoding job'''
    # Output 'format' string -> encoder class; None is the default (Vorbis).
    ENCODERS = {
        None: VorbisTranscoder,
        'copy': VerbatimFileCopy,
        'symlink': SymlinkCreator,
        'vorbis': VorbisTranscoder,
        'lame': LameTranscoder,
        'mp3': LameTranscoder,
        'aac': AACTranscoder,
        'm4a': AACTranscoder,
        'opus': OpusTranscoder,
    }
    def __init__(self, config_file):
        '''Initialize transcoding job'''
        self.stats = TranscodingStats()
        self.finished = False
        self.config_file = config_file
        # Set lazily by transcode() when the first task starts.
        self._timestamp = None
        with open(config_file, encoding=CONFIG_ENCODING) as f:
            config = yaml.load(f, Loader=yaml.RoundTripLoader)
            output = config.get('output', {})
            extras = config.get('extras', {})
        self.validate(config)
        self.job_id = config.get('name', DEFAULT_CONFIG['name'])
        self.inputs = config.get('input', [])
        self.output_dir = output.get('directory')
        self.output_pattern = output.get('pattern', DEFAULT_CONFIG['pattern'])
        self.cover_size = extras.get('cover', DEFAULT_CONFIG['cover'])
        # Category filtering: blacklist wins over whitelist.
        if output.get('category_blacklist'):
            self.select_mode = 'blacklist'
            self.select = set(output.get('category_blacklist'))
        elif output.get('category_whitelist'):
            self.select_mode = 'whitelist'
            self.select = set(output.get('category_whitelist'))
        else:
            self.select_mode = None
            self.select = set()
        # Lyrics source: directory of files, LyricsStorage database, or none.
        lyrics_source = extras.get('lyrics', DEFAULT_CONFIG['lyrics'])
        if not lyrics_source:
            self.get_lyrics = None
        elif os.path.isdir(lyrics_source):
            self.get_lyrics = partial(read_lyrics, lyricsdir=lyrics_source)
        elif os.path.isfile(lyrics_source):
            database = LyricsStorage(lyrics_source)
            self.get_lyrics = database.get
        else:
            self.get_lyrics = None
        encoder = output.get('format', DEFAULT_CONFIG['format'])
        quality = output.get('quality', DEFAULT_CONFIG['quality'])
        self.transcoder = self.ENCODERS.get(encoder)(quality)
        # Decide how to treat sources that are already lossy.
        lossy_action = output.get('lossy_source', DEFAULT_CONFIG['lossy_source'])
        if lossy_action == 'allow_bad_transcodes'\
                or encoder == 'symlink' \
                or encoder == 'copy':
            self.lossy_action = self.transcoder
        elif lossy_action == 'copy':
            self.lossy_action = VerbatimFileCopy()
        elif lossy_action == 'skip':
            # Stand-in "worker" that marks the task as skipped.
            skip_marker = self.transcoder.STATUS_SKIP
            self.lossy_action = lambda infile, outfile: (infile, skip_marker)
            self.lossy_action.STATUS_SKIP = skip_marker
        # NOTE(review): an unrecognized lossy_source value would leave
        # self.lossy_action unset -- presumably prevented by validate(); confirm.
        os.makedirs(self.output_dir, exist_ok=True)
        log.debug('Initialized {}'.format(self))
    def __repr__(self):
        return '{cls}({config!r})'.format(
            cls=self.__class__.__name__,
            config=self.config_file,
        )
    def transcode(self, task):
        '''Execute a single transcoding task'''
        log.debug('Started {task}'.format(task=task))
        # Category filter: skip blacklisted (or non-whitelisted) tasks.
        if (self.select_mode == 'blacklist' and self.select.intersection(task.categories)) \
                or (self.select_mode == 'whitelist' and not self.select.intersection(task.categories)):
            self.stats.record_skip()
            log.debug('Skipped {task}'.format(task=task))
            return
        # Lossless sources use the configured transcoder; lossy ones use
        # the lossy_action chosen in __init__.
        source_format = os.path.splitext(task.source)[1][1:].lower()
        if source_format in LOSSLESS_EXTENSIONS:
            worker = self.transcoder
        else:
            worker = self.lossy_action
        if self._timestamp is None: # record the time of first transcoding task
            self._timestamp = int(time.time())
        # Step 1: Transcode
        task.result, task.status = worker(
            task.source,
            os.path.join(self.output_dir, task.target)
        )
        # Step 1a: Process extras (cover art, lyrics) in background threads
        if self.cover_size:
            Thread(
                target=copy_coverart,
                kwargs=dict(task=task, size=self.cover_size)
            ).start()
        if self.get_lyrics:
            Thread(
                target=copy_lyrics,
                kwargs=dict(task=task, lyrics_finder=self.get_lyrics),
            ).start()
        # Handle skipped transcodes
        if task.status is worker.STATUS_SKIP:
            # A skipped target modified after this job started means two
            # different sources mapped to the same output path.
            if os.path.getmtime(task.result) > self.timestamp:
                raise RuntimeError('Target path collision for {}'.format(task.result))
            self.stats.record_skip()
            log.debug('Skipped {task}'.format(task=task))
            return
        # Step 2: Copy music tags
        if not task.status is worker.STATUS_SKIPTAGS:
            result = mutagen.File(task.result, easy=True)
            for key in task.tags.keys(): # mutagen is inconsistent about `for k in t.tags`
                if hasattr(result.tags, 'valid_keys') \
                        and key not in result.tags.valid_keys:
                    continue
                result.tags[key] = task.tags[key]
            result.save()
        self.stats.record_done()
        log.debug('Finished {task}'.format(task=task))
    @property
    def timestamp(self):
        '''
        Date and time of starting the first transcoding task in this job
        (in Unix time format)
        '''
        return self._timestamp
    def write_report(self):
        '''Keep a journal of transcoder runs'''
        # Append one timestamped stats line to transcoding.log per run.
        log_entry = '{time}Z: {stats}\n'.format(
            time = datetime.utcnow().replace(microsecond=0),
            stats = self.stats.show().strip(),
        )
        with open(os.path.join(self.output_dir, 'transcoding.log'), 'a') as logfile:
            logfile.write(log_entry)
    def validate(self, config):
        '''Validate transcoding job configuration'''
        # Build the JSON-schema validator once and cache it on the instance.
        try:
            self.validator
        except AttributeError:
            package = __name__.rsplit('.', 1)[0]
            path = 'schema.json'
            schema = json.loads(resource_string(package, path).decode())
            self.validator = JSONSchemaValidator(schema)
        error_messages = []
        for error in self.validator.iter_errors(config):
            error_messages.append(' - {}: {}'.format(
                '.'.join(error.path) if error.path else '[config]',
                error.message
            ))
        if error_messages:
            raise ValueError('invalid configuration values:\n{}'.format(
                '\n'.join(sorted(error_messages))
            ))
|
en
| 0.773996
|
CLI application for transcoding music files CLI entry point # 1. Load config from YAML # 2. Find relevant music files and add them to queue # 3. Concurrently process each file in the queue: # - Calculate target location # - Transcode # - Fill tags # - Copy lyrics # - Copy cover art # start progress report thread # terminate progress report thread Restore standard input in terminal (pydub's subprocesses mess with it) Open text file for editing in default application. Current script process will be immediately terminated and replaced with editor. Store essential parameters of the transcoding job Initialize transcoding job Execute a single transcoding task # record the time of first transcoding task # Step 1: Transcode # Step 1a: Process extras (cover art, lyrics) # Handle skipped transcodes # Step 2: Copy music tags # mutagen is inconsistent about `for k in t.tags` Date and time of starting the first transcoding task in this job (in Unix time format) Keep a journal of transcoder runs Validate transcoding job configuration
| 2.466315
| 2
|
timer.py
|
niclaswue/retimer
| 0
|
6627661
|
#!/usr/local/bin/python3
import argparse
import tempfile
import subprocess
# Command-line interface: one or more interval strings (e.g. "1h30m") or
# "@HH:MM" absolute times, plus an optional reminder name.
parser = argparse.ArgumentParser(description="Set timer")
parser.add_argument(
    "commands",
    metavar="N",
    type=str,
    nargs="+",
    help="end dates for timer or intervals",
)
parser.add_argument("--name", type=str, help="event name")
args = parser.parse_args()
commands = args.commands
def parse_interval_string(time_interval: str) -> int:
"""Return minutes from interval string
Args:
time_interval (str): time interval string
Returns:
int: minutes in interval
"""
# append stop char
time_interval = f"{time_interval}!"
interval_minutes = 0
if "d" in time_interval:
days, time_interval = time_interval.split("d")
interval_minutes += int(days) * 60 * 24
if "h" in time_interval:
hours, time_interval = time_interval.split("h")
interval_minutes += int(hours) * 60
if "m" in time_interval:
minutes, time_interval = time_interval.split("m")
interval_minutes += int(minutes)
assert time_interval == "!"
return interval_minutes
def set_reminder(minutes: int, set_time: bool = False, name: str = "Timer"):
name = name or "Timer"
script = """
tell application "Reminders"
# Calculate date time for midnight today
set currentDay to (current date) - (time of (current date))
"""
if set_time:
script += f"set theDate to currentDay + ({minutes} * minutes)"
else:
script += f"set theDate to currentDay + (time of (current date)) + ({minutes} * minutes)"
script += f"""
# Select the relevant list in Reminders.app
set myList to list "Timer"
tell myList
# Create the reminder
set newReminder to make new reminder
set name of newReminder to "{name}"
set remind me date of newReminder to theDate
end tell
end tell
"""
with tempfile.NamedTemporaryFile(suffix="applescript") as temp:
temp.write(bytes(script, encoding="utf-8"))
temp.flush()
subprocess.call(["osascript", temp.name])
print(f"✅ Set Timer '{name}' for {minutes} minutes. Specific time is {set_time}")
if __name__ == "__main__":
for cmd in commands:
if cmd[0] == "@":
# specific time today like @17:20
hours, minutes = [int(v) for v in cmd.replace("@", "").split(":")]
set_reminder(hours * 60 + minutes, set_time=True, name=args.name)
elif "x" in cmd:
# periodic event 5x20m
amount, rest = cmd.split("x")
amount = int(amount)
if "+" in rest:
# periodic event with break like 5x20m+5m
interval, break_time = rest.split("+")
period = parse_interval_string(interval)
break_time = parse_interval_string(break_time)
for i in range(1, amount + 1):
set_reminder(
(i * period + (i - 1) * break_time),
set_time=False,
name=f"{args.name} - Begin Break {i}",
)
set_reminder(
(i * period + i * break_time),
set_time=False,
name=f"{args.name} - End Break {i}",
)
else:
period = parse_interval_string(rest)
for i in range(1, amount + 1):
set_reminder(i * period, set_time=False, name=f"{args.name} - {i}")
elif cmd[-1] in ["h", "m", "d"]:
# time delta like 5h or 10m or 2d
set_reminder(parse_interval_string(cmd), set_time=False, name=args.name)
else:
raise NotImplementedError(str(args.commands))
|
#!/usr/local/bin/python3
import argparse
import tempfile
import subprocess
parser = argparse.ArgumentParser(description="Set timer")
parser.add_argument(
"commands",
metavar="N",
type=str,
nargs="+",
help="end dates for timer or intervals",
)
parser.add_argument("--name", type=str, help="event name")
args = parser.parse_args()
commands = args.commands
def parse_interval_string(time_interval: str) -> int:
"""Return minutes from interval string
Args:
time_interval (str): time interval string
Returns:
int: minutes in interval
"""
# append stop char
time_interval = f"{time_interval}!"
interval_minutes = 0
if "d" in time_interval:
days, time_interval = time_interval.split("d")
interval_minutes += int(days) * 60 * 24
if "h" in time_interval:
hours, time_interval = time_interval.split("h")
interval_minutes += int(hours) * 60
if "m" in time_interval:
minutes, time_interval = time_interval.split("m")
interval_minutes += int(minutes)
assert time_interval == "!"
return interval_minutes
def set_reminder(minutes: int, set_time: bool = False, name: str = "Timer"):
name = name or "Timer"
script = """
tell application "Reminders"
# Calculate date time for midnight today
set currentDay to (current date) - (time of (current date))
"""
if set_time:
script += f"set theDate to currentDay + ({minutes} * minutes)"
else:
script += f"set theDate to currentDay + (time of (current date)) + ({minutes} * minutes)"
script += f"""
# Select the relevant list in Reminders.app
set myList to list "Timer"
tell myList
# Create the reminder
set newReminder to make new reminder
set name of newReminder to "{name}"
set remind me date of newReminder to theDate
end tell
end tell
"""
with tempfile.NamedTemporaryFile(suffix="applescript") as temp:
temp.write(bytes(script, encoding="utf-8"))
temp.flush()
subprocess.call(["osascript", temp.name])
print(f"✅ Set Timer '{name}' for {minutes} minutes. Specific time is {set_time}")
if __name__ == "__main__":
for cmd in commands:
if cmd[0] == "@":
# specific time today like @17:20
hours, minutes = [int(v) for v in cmd.replace("@", "").split(":")]
set_reminder(hours * 60 + minutes, set_time=True, name=args.name)
elif "x" in cmd:
# periodic event 5x20m
amount, rest = cmd.split("x")
amount = int(amount)
if "+" in rest:
# periodic event with break like 5x20m+5m
interval, break_time = rest.split("+")
period = parse_interval_string(interval)
break_time = parse_interval_string(break_time)
for i in range(1, amount + 1):
set_reminder(
(i * period + (i - 1) * break_time),
set_time=False,
name=f"{args.name} - Begin Break {i}",
)
set_reminder(
(i * period + i * break_time),
set_time=False,
name=f"{args.name} - End Break {i}",
)
else:
period = parse_interval_string(rest)
for i in range(1, amount + 1):
set_reminder(i * period, set_time=False, name=f"{args.name} - {i}")
elif cmd[-1] in ["h", "m", "d"]:
# time delta like 5h or 10m or 2d
set_reminder(parse_interval_string(cmd), set_time=False, name=args.name)
else:
raise NotImplementedError(str(args.commands))
|
en
| 0.732912
|
#!/usr/local/bin/python3 Return minutes from interval string Args: time_interval (str): time interval string Returns: int: minutes in interval # append stop char tell application "Reminders" # Calculate date time for midnight today set currentDay to (current date) - (time of (current date)) # Select the relevant list in Reminders.app set myList to list "Timer" tell myList # Create the reminder set newReminder to make new reminder set name of newReminder to "{name}" set remind me date of newReminder to theDate end tell end tell # specific time today like @17:20 # periodic event 5x20m # periodic event with break like 5x20m+5m # time delta like 5h or 10m or 2d
| 3.275675
| 3
|
serve.py
|
Hindol/python-gcm-server
| 1
|
6627662
|
__author__ = 'hadhya'
from gcm import GcmClient
from config import CONFIG
from optparse import OptionParser
import logging
def start_xmpp_server():
client = GcmClient(CONFIG['GCM_API_KEY'])
client.listen(CONFIG['GCM_SENDER_ID'], on_message)
def on_message(message):
print message
if __name__ == '__main__':
# Setup the command line arguments.
optp = OptionParser()
# Output verbosity options.
optp.add_option('-q', '--quiet', help='set logging to ERROR',
action='store_const', dest='loglevel',
const=logging.ERROR, default=logging.INFO)
optp.add_option('-d', '--debug', help='set logging to DEBUG',
action='store_const', dest='loglevel',
const=logging.DEBUG, default=logging.INFO)
optp.add_option('-v', '--verbose', help='set logging to COMM',
action='store_const', dest='loglevel',
const=5, default=logging.INFO)
opts, args = optp.parse_args()
# Setup logging.
logging.basicConfig(level=opts.loglevel,
format='%(levelname)-8s %(message)s')
start_xmpp_server()
|
__author__ = 'hadhya'
from gcm import GcmClient
from config import CONFIG
from optparse import OptionParser
import logging
def start_xmpp_server():
client = GcmClient(CONFIG['GCM_API_KEY'])
client.listen(CONFIG['GCM_SENDER_ID'], on_message)
def on_message(message):
print message
if __name__ == '__main__':
# Setup the command line arguments.
optp = OptionParser()
# Output verbosity options.
optp.add_option('-q', '--quiet', help='set logging to ERROR',
action='store_const', dest='loglevel',
const=logging.ERROR, default=logging.INFO)
optp.add_option('-d', '--debug', help='set logging to DEBUG',
action='store_const', dest='loglevel',
const=logging.DEBUG, default=logging.INFO)
optp.add_option('-v', '--verbose', help='set logging to COMM',
action='store_const', dest='loglevel',
const=5, default=logging.INFO)
opts, args = optp.parse_args()
# Setup logging.
logging.basicConfig(level=opts.loglevel,
format='%(levelname)-8s %(message)s')
start_xmpp_server()
|
en
| 0.664972
|
# Setup the command line arguments. # Output verbosity options. # Setup logging.
| 2.249662
| 2
|
scripts/2.1.split data into train and validation.py
|
nmningmei/BOLD5000_autoencoder
| 9
|
6627663
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 26 13:25:01 2019
@author: nmei
"""
import os
import numpy as np
from glob import glob
from tqdm import tqdm
from shutil import copyfile
from sklearn.model_selection import train_test_split
data_dir = '../../BOLD5000_data_store/data/volume_of_interest/'
train_dir = '../data/train/'
validation_dir = '../data/validation/'
for d in [train_dir,validation_dir]:
if not os.path.exists(d):
os.makedirs(d)
subsample_for_test = int(1e4)
all_files = glob(os.path.join(data_dir,"*.nii.gz"))
if type(subsample_for_test) == int:
all_files = np.random.choice(all_files,size = subsample_for_test,replace = True,)
train,test = train_test_split(all_files,test_size = 0.2,random_state = 12345)
for f in tqdm(train):
copyfile(f,os.path.join(train_dir,f.split('/')[-1]))
for f in tqdm(test):
copyfile(f,os.path.join(validation_dir,f.split('/')[-1]))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 26 13:25:01 2019
@author: nmei
"""
import os
import numpy as np
from glob import glob
from tqdm import tqdm
from shutil import copyfile
from sklearn.model_selection import train_test_split
data_dir = '../../BOLD5000_data_store/data/volume_of_interest/'
train_dir = '../data/train/'
validation_dir = '../data/validation/'
for d in [train_dir,validation_dir]:
if not os.path.exists(d):
os.makedirs(d)
subsample_for_test = int(1e4)
all_files = glob(os.path.join(data_dir,"*.nii.gz"))
if type(subsample_for_test) == int:
all_files = np.random.choice(all_files,size = subsample_for_test,replace = True,)
train,test = train_test_split(all_files,test_size = 0.2,random_state = 12345)
for f in tqdm(train):
copyfile(f,os.path.join(train_dir,f.split('/')[-1]))
for f in tqdm(test):
copyfile(f,os.path.join(validation_dir,f.split('/')[-1]))
|
en
| 0.434291
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Mon Aug 26 13:25:01 2019 @author: nmei
| 2.226265
| 2
|
src/train_interface.py
|
sunprinceS/MetaASR-CrossAccent
| 9
|
6627664
|
<reponame>sunprinceS/MetaASR-CrossAccent
import pickle
import time
from shutil import rmtree
from pathlib import Path
from tqdm import tqdm
from src.marcos import *
from src.io.dataset import get_loader
from src.monitor.metric import Metric
import src.monitor.logger as logger
from torchexp.stat import RunningAvgDict
class TrainInterface:
def __init__(self, config, paras, id2accent):
### config setting
self.config = config
self.paras = paras
self.train_type = 'evaluation'
self.is_memmap = paras.is_memmap
self.is_bucket = paras.is_bucket
self.model_name = paras.model_name
self.eval_ival = config['solver']['eval_ival']
self.log_ival = config['solver']['log_ival']
self.half_batch_ilen = config['solver']['half_batch_ilen'],
self.dev_max_ilen = config['solver']['dev_max_ilen']
self.best_wer = INIT_BEST_ER
self.best_cer = INIT_BEST_ER
if self.paras.model_name == 'transformer':
self.id2units = [SOS_SYMBOL]
with open(config['solver']['spm_mapping']) as fin:
for line in fin.readlines():
self.id2units.append(line.rstrip().split(' ')[0])
self.id2units.append(EOS_SYMBOL)
self.metric_observer = Metric(config['solver']['spm_model'], self.id2units, 0, len(self.id2units)-1)
elif self.paras.model_name == 'blstm':
self.id2units = [BLANK_SYMBOL]
with open(config['solver']['spm_mapping']) as fin:
for line in fin.readlines():
self.id2units.append(line.rstrip().split(' ')[0])
self.id2units.append(EOS_SYMBOL)
self.metric_observer = Metric(config['solver']['spm_model'], self.id2units, len(self.id2units)-1, len(self.id2units)-1)
else:
raise ValueError(f"Unknown model name {self.paras.model_name}")
self.save_verbose = paras.save_verbose
#######################################################################
### Set path
cur_path = Path.cwd()
if paras.pretrain:
assert paras.pretrain_suffix or paras.pretrain_model_path, \
"You should specify pretrain model and the corresponding prefix"
if paras.pretrain_model_path:
self.pretrain_model_path = Path(paras.pretrain_model_path)
else:
assert paras.pretrain_suffix and paras.pretrain_setting and paras.pretrain_step > 0, "Should specify pretrain_setting"
self.pretrain_model_path = Path(cur_path, LOG_DIR, 'pretrain', \
paras.pretrain_setting, paras.algo, \
paras.pretrain_suffix, \
id2accent[paras.pretrain_tgt_accent],\
str(paras.pretrain_runs), f"snapshot.step.{paras.pretrain_step}")
assert self.pretrain_model_path.exists(), \
f"Pretrain model path {self.pretrain_model_path} not exists"
self.pretrain_module = config['solver']['pretrain_module']
else:
assert paras.pretrain_suffix is None and paras.algo == 'no', \
f"Training from scratch shouldn't have meta-learner {paras.algo} and pretrain_suffix"
paras.pretrain_suffix = paras.eval_suffix
self.accent= id2accent[paras.accent]
self.data_dir = Path(config['solver']['data_root'], self.accent)
self.log_dir = Path(cur_path, LOG_DIR,self.train_type, \
config['solver']['setting'], paras.algo, \
paras.pretrain_suffix, paras.eval_suffix, \
self.accent, str(paras.runs))
########################################################################
### Resume mechanism
if not paras.resume:
if self.log_dir.exists():
assert paras.overwrite, \
f"Path exists ({self.log_dir}). Use --overwrite or change suffix"
# time.sleep(10)
logger.warning('Overwrite existing directory')
rmtree(self.log_dir)
self.log_dir.mkdir(parents=True)
self.train_info = RunningAvgDict(decay_rate=0.99)
self.global_step = 1
self.ep = 0
else:
self.resume_model_path = self.log_dir.joinpath('snapshot.latest')
info_dict_path = self.log_dir.joinpath('info_dict.latest')
self.optimizer_path = self.log_dir.joinpath('optimizer.latest')
assert self.optimizer_path.exists(), \
f"Optimizer state {self.optimizer_path} not exists..."
with open(Path(self.log_dir, 'epoch'),'r') as f:
self.ep = int(f.read().strip())
with open(Path(self.log_dir, 'global_step'),'r') as f:
self.global_step = int(f.read().strip())
with open(Path(self.log_dir, 'best_wer'), 'r') as f:
self.best_wer = float(f.read().strip().split(' ')[1])
with open(Path(self.log_dir, 'best_cer'),'r') as f:
self.best_cer = float(f.read().strip().split(' ')[1])
assert self.resume_model_path.exists(),\
f"{self.resume_model_path} not exists..."
assert info_dict_path.exists(),\
f"Training info {info_dict_path} not exists..."
with open(info_dict_path, 'rb') as fin:
self.train_info = pickle.load(fin)
if paras.use_tensorboard:
from src.monitor.tb_dashboard import Dashboard
logger.warning("Use tensorboard instead of comet")
else:
from src.monitor.dashboard import Dashboard
self.dashboard = Dashboard(config, paras, self.log_dir, \
self.train_type, paras.resume)
def load_data(self):
logger.notice(f"Loading data from {self.data_dir} with {self.paras.njobs} threads")
#TODO: combine the following with Metric
self.id2ch = self.id2units
setattr(self, 'train_set', get_loader(
self.data_dir.joinpath('train'),
batch_size=self.config['solver']['batch_size'],
min_ilen = self.config['solver']['min_ilen'],
max_ilen = self.config['solver']['max_ilen'],
half_batch_ilen = self.config['solver']['half_batch_ilen'],
# bucket_reverse=True,
bucket_reverse=False,
is_memmap = self.is_memmap,
is_bucket = self.is_bucket,
num_workers = self.paras.njobs,
# shuffle=False, #debug
))
setattr(self, 'dev_set', get_loader(
self.data_dir.joinpath('dev'),
batch_size = self.config['solver']['dev_batch_size'],
is_memmap = self.is_memmap,
is_bucket = False,
shuffle = False,
num_workers = self.paras.njobs,
))
def write_log(self, k, v):
with open(self.log_dir.joinpath(k),'a') as fout:
print(f'{self.global_step} {v}', file=fout)
def log_msg(self,lr=None):
if self.global_step % self.log_ival == 0:
logger.log_info(self.train_info, prefix='train')
self.dashboard.log_info('train', self.train_info)
if lr is not None: # transformer
self.dashboard.log_other('lr', lr)
def write_logs(self, dev_info):
for k, v in dev_info.items():
self.write_log(f"dev_{k}", float(v))
for k, v in self.train_info.items():
self.write_log(f"train_{k}", float(v))
|
import pickle
import time
from shutil import rmtree
from pathlib import Path
from tqdm import tqdm
from src.marcos import *
from src.io.dataset import get_loader
from src.monitor.metric import Metric
import src.monitor.logger as logger
from torchexp.stat import RunningAvgDict
class TrainInterface:
def __init__(self, config, paras, id2accent):
### config setting
self.config = config
self.paras = paras
self.train_type = 'evaluation'
self.is_memmap = paras.is_memmap
self.is_bucket = paras.is_bucket
self.model_name = paras.model_name
self.eval_ival = config['solver']['eval_ival']
self.log_ival = config['solver']['log_ival']
self.half_batch_ilen = config['solver']['half_batch_ilen'],
self.dev_max_ilen = config['solver']['dev_max_ilen']
self.best_wer = INIT_BEST_ER
self.best_cer = INIT_BEST_ER
if self.paras.model_name == 'transformer':
self.id2units = [SOS_SYMBOL]
with open(config['solver']['spm_mapping']) as fin:
for line in fin.readlines():
self.id2units.append(line.rstrip().split(' ')[0])
self.id2units.append(EOS_SYMBOL)
self.metric_observer = Metric(config['solver']['spm_model'], self.id2units, 0, len(self.id2units)-1)
elif self.paras.model_name == 'blstm':
self.id2units = [BLANK_SYMBOL]
with open(config['solver']['spm_mapping']) as fin:
for line in fin.readlines():
self.id2units.append(line.rstrip().split(' ')[0])
self.id2units.append(EOS_SYMBOL)
self.metric_observer = Metric(config['solver']['spm_model'], self.id2units, len(self.id2units)-1, len(self.id2units)-1)
else:
raise ValueError(f"Unknown model name {self.paras.model_name}")
self.save_verbose = paras.save_verbose
#######################################################################
### Set path
cur_path = Path.cwd()
if paras.pretrain:
assert paras.pretrain_suffix or paras.pretrain_model_path, \
"You should specify pretrain model and the corresponding prefix"
if paras.pretrain_model_path:
self.pretrain_model_path = Path(paras.pretrain_model_path)
else:
assert paras.pretrain_suffix and paras.pretrain_setting and paras.pretrain_step > 0, "Should specify pretrain_setting"
self.pretrain_model_path = Path(cur_path, LOG_DIR, 'pretrain', \
paras.pretrain_setting, paras.algo, \
paras.pretrain_suffix, \
id2accent[paras.pretrain_tgt_accent],\
str(paras.pretrain_runs), f"snapshot.step.{paras.pretrain_step}")
assert self.pretrain_model_path.exists(), \
f"Pretrain model path {self.pretrain_model_path} not exists"
self.pretrain_module = config['solver']['pretrain_module']
else:
assert paras.pretrain_suffix is None and paras.algo == 'no', \
f"Training from scratch shouldn't have meta-learner {paras.algo} and pretrain_suffix"
paras.pretrain_suffix = paras.eval_suffix
self.accent= id2accent[paras.accent]
self.data_dir = Path(config['solver']['data_root'], self.accent)
self.log_dir = Path(cur_path, LOG_DIR,self.train_type, \
config['solver']['setting'], paras.algo, \
paras.pretrain_suffix, paras.eval_suffix, \
self.accent, str(paras.runs))
########################################################################
### Resume mechanism
if not paras.resume:
if self.log_dir.exists():
assert paras.overwrite, \
f"Path exists ({self.log_dir}). Use --overwrite or change suffix"
# time.sleep(10)
logger.warning('Overwrite existing directory')
rmtree(self.log_dir)
self.log_dir.mkdir(parents=True)
self.train_info = RunningAvgDict(decay_rate=0.99)
self.global_step = 1
self.ep = 0
else:
self.resume_model_path = self.log_dir.joinpath('snapshot.latest')
info_dict_path = self.log_dir.joinpath('info_dict.latest')
self.optimizer_path = self.log_dir.joinpath('optimizer.latest')
assert self.optimizer_path.exists(), \
f"Optimizer state {self.optimizer_path} not exists..."
with open(Path(self.log_dir, 'epoch'),'r') as f:
self.ep = int(f.read().strip())
with open(Path(self.log_dir, 'global_step'),'r') as f:
self.global_step = int(f.read().strip())
with open(Path(self.log_dir, 'best_wer'), 'r') as f:
self.best_wer = float(f.read().strip().split(' ')[1])
with open(Path(self.log_dir, 'best_cer'),'r') as f:
self.best_cer = float(f.read().strip().split(' ')[1])
assert self.resume_model_path.exists(),\
f"{self.resume_model_path} not exists..."
assert info_dict_path.exists(),\
f"Training info {info_dict_path} not exists..."
with open(info_dict_path, 'rb') as fin:
self.train_info = pickle.load(fin)
if paras.use_tensorboard:
from src.monitor.tb_dashboard import Dashboard
logger.warning("Use tensorboard instead of comet")
else:
from src.monitor.dashboard import Dashboard
self.dashboard = Dashboard(config, paras, self.log_dir, \
self.train_type, paras.resume)
def load_data(self):
logger.notice(f"Loading data from {self.data_dir} with {self.paras.njobs} threads")
#TODO: combine the following with Metric
self.id2ch = self.id2units
setattr(self, 'train_set', get_loader(
self.data_dir.joinpath('train'),
batch_size=self.config['solver']['batch_size'],
min_ilen = self.config['solver']['min_ilen'],
max_ilen = self.config['solver']['max_ilen'],
half_batch_ilen = self.config['solver']['half_batch_ilen'],
# bucket_reverse=True,
bucket_reverse=False,
is_memmap = self.is_memmap,
is_bucket = self.is_bucket,
num_workers = self.paras.njobs,
# shuffle=False, #debug
))
setattr(self, 'dev_set', get_loader(
self.data_dir.joinpath('dev'),
batch_size = self.config['solver']['dev_batch_size'],
is_memmap = self.is_memmap,
is_bucket = False,
shuffle = False,
num_workers = self.paras.njobs,
))
def write_log(self, k, v):
with open(self.log_dir.joinpath(k),'a') as fout:
print(f'{self.global_step} {v}', file=fout)
def log_msg(self,lr=None):
if self.global_step % self.log_ival == 0:
logger.log_info(self.train_info, prefix='train')
self.dashboard.log_info('train', self.train_info)
if lr is not None: # transformer
self.dashboard.log_other('lr', lr)
def write_logs(self, dev_info):
for k, v in dev_info.items():
self.write_log(f"dev_{k}", float(v))
for k, v in self.train_info.items():
self.write_log(f"train_{k}", float(v))
|
de
| 0.529322
|
### config setting ####################################################################### ### Set path ######################################################################## ### Resume mechanism # time.sleep(10) #TODO: combine the following with Metric # bucket_reverse=True, # shuffle=False, #debug # transformer
| 1.9192
| 2
|
benchmarks/streamable.py
|
ftruzzi/chia-blockchain
| 1
|
6627665
|
<gh_stars>1-10
from dataclasses import dataclass
from enum import Enum
from statistics import stdev
from time import process_time as clock
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
import click
from utils import EnumType, rand_bytes, rand_full_block, rand_hash
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.full_block import FullBlock
from chia.util.ints import uint8, uint64
from chia.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class BenchmarkInner(Streamable):
a: str
@dataclass(frozen=True)
@streamable
class BenchmarkMiddle(Streamable):
a: uint64
b: List[bytes32]
c: Tuple[str, bool, uint8, List[bytes]]
d: Tuple[BenchmarkInner, BenchmarkInner]
e: BenchmarkInner
@dataclass(frozen=True)
@streamable
class BenchmarkClass(Streamable):
a: Optional[BenchmarkMiddle]
b: Optional[BenchmarkMiddle]
c: BenchmarkMiddle
d: List[BenchmarkMiddle]
e: Tuple[BenchmarkMiddle, BenchmarkMiddle, BenchmarkMiddle]
def get_random_inner() -> BenchmarkInner:
return BenchmarkInner(rand_bytes(20).hex())
def get_random_middle() -> BenchmarkMiddle:
a: uint64 = uint64(10)
b: List[bytes32] = [rand_hash() for _ in range(a)]
c: Tuple[str, bool, uint8, List[bytes]] = ("benchmark", False, uint8(1), [rand_bytes(a) for _ in range(a)])
d: Tuple[BenchmarkInner, BenchmarkInner] = (get_random_inner(), get_random_inner())
e: BenchmarkInner = get_random_inner()
return BenchmarkMiddle(a, b, c, d, e)
def get_random_benchmark_object() -> BenchmarkClass:
a: Optional[BenchmarkMiddle] = None
b: Optional[BenchmarkMiddle] = get_random_middle()
c: BenchmarkMiddle = get_random_middle()
d: List[BenchmarkMiddle] = [get_random_middle() for _ in range(5)]
e: Tuple[BenchmarkMiddle, BenchmarkMiddle, BenchmarkMiddle] = (
get_random_middle(),
get_random_middle(),
get_random_middle(),
)
return BenchmarkClass(a, b, c, d, e)
def print_row(
*,
mode: str,
us_per_iteration: Union[str, float],
stdev_us_per_iteration: Union[str, float],
avg_iterations: Union[str, int],
stdev_iterations: Union[str, float],
end: str = "\n",
) -> None:
mode = "{0:<10}".format(f"{mode}")
us_per_iteration = "{0:<12}".format(f"{us_per_iteration}")
stdev_us_per_iteration = "{0:>20}".format(f"{stdev_us_per_iteration}")
avg_iterations = "{0:>18}".format(f"{avg_iterations}")
stdev_iterations = "{0:>22}".format(f"{stdev_iterations}")
print(f"{mode} | {us_per_iteration} | {stdev_us_per_iteration} | {avg_iterations} | {stdev_iterations}", end=end)
# The strings in this Enum are by purpose. See benchmark.utils.EnumType.
class Data(str, Enum):
all = "all"
benchmark = "benchmark"
full_block = "full_block"
# The strings in this Enum are by purpose. See benchmark.utils.EnumType.
class Mode(str, Enum):
all = "all"
creation = "creation"
to_bytes = "to_bytes"
from_bytes = "from_bytes"
to_json = "to_json"
from_json = "from_json"
def to_bytes(obj: Any) -> bytes:
return bytes(obj)
@dataclass
class ModeParameter:
conversion_cb: Callable[[Any], Any]
preparation_cb: Optional[Callable[[Any], Any]] = None
@dataclass
class BenchmarkParameter:
data_class: Type[Any]
object_creation_cb: Callable[[], Any]
mode_parameter: Dict[Mode, Optional[ModeParameter]]
benchmark_parameter: Dict[Data, BenchmarkParameter] = {
Data.benchmark: BenchmarkParameter(
BenchmarkClass,
get_random_benchmark_object,
{
Mode.creation: None,
Mode.to_bytes: ModeParameter(to_bytes),
Mode.from_bytes: ModeParameter(BenchmarkClass.from_bytes, to_bytes),
Mode.to_json: ModeParameter(BenchmarkClass.to_json_dict),
Mode.from_json: ModeParameter(BenchmarkClass.from_json_dict, BenchmarkClass.to_json_dict),
},
),
Data.full_block: BenchmarkParameter(
FullBlock,
rand_full_block,
{
Mode.creation: None,
Mode.to_bytes: ModeParameter(to_bytes),
Mode.from_bytes: ModeParameter(FullBlock.from_bytes, to_bytes),
Mode.to_json: ModeParameter(FullBlock.to_json_dict),
Mode.from_json: ModeParameter(FullBlock.from_json_dict, FullBlock.to_json_dict),
},
),
}
def run_for_ms(cb: Callable[[], Any], ms_to_run: int = 100) -> List[int]:
us_iteration_results: List[int] = []
start = clock()
while int((clock() - start) * 1000) < ms_to_run:
start_iteration = clock()
cb()
stop_iteration = clock()
us_iteration_results.append(int((stop_iteration - start_iteration) * 1000 * 1000))
return us_iteration_results
def calc_stdev_percent(iterations: List[int], avg: float) -> float:
deviation = 0 if len(iterations) < 2 else int(stdev(iterations) * 100) / 100
return int((deviation / avg * 100) * 100) / 100
@click.command()
@click.option("-d", "--data", default=Data.all, type=EnumType(Data))
@click.option("-m", "--mode", default=Mode.all, type=EnumType(Mode))
@click.option("-r", "--runs", default=100, help="Number of benchmark runs to average results")
@click.option("-t", "--ms", default=50, help="Milliseconds per run")
@click.option("--live/--no-live", default=False, help="Print live results (slower)")
def run(data: Data, mode: Mode, runs: int, ms: int, live: bool) -> None:
results: Dict[Data, Dict[Mode, List[List[int]]]] = {}
for current_data, parameter in benchmark_parameter.items():
results[current_data] = {}
if data == Data.all or current_data == data:
print(f"\nruns: {runs}, ms/run: {ms}, benchmarks: {mode.name}, data: {parameter.data_class.__name__}")
print_row(
mode="mode",
us_per_iteration="µs/iteration",
stdev_us_per_iteration="stdev µs/iteration %",
avg_iterations="avg iterations/run",
stdev_iterations="stdev iterations/run %",
)
for current_mode, current_mode_parameter in parameter.mode_parameter.items():
results[current_data][current_mode] = []
if mode == Mode.all or current_mode == mode:
us_iteration_results: List[int]
all_results: List[List[int]] = results[current_data][current_mode]
obj = parameter.object_creation_cb()
def print_results(print_run: int, final: bool) -> None:
all_runtimes: List[int] = [x for inner in all_results for x in inner]
total_iterations: int = len(all_runtimes)
total_elapsed_us: int = sum(all_runtimes)
avg_iterations: float = total_iterations / print_run
stdev_iterations: float = calc_stdev_percent([len(x) for x in all_results], avg_iterations)
stdev_us_per_iteration: float = calc_stdev_percent(
all_runtimes, total_elapsed_us / total_iterations
)
print_row(
mode=current_mode.name,
us_per_iteration=int(total_elapsed_us / total_iterations * 100) / 100,
stdev_us_per_iteration=stdev_us_per_iteration,
avg_iterations=int(avg_iterations),
stdev_iterations=stdev_iterations,
end="\n" if final else "\r",
)
current_run: int = 0
while current_run < runs:
current_run += 1
if current_mode == Mode.creation:
cls = type(obj)
us_iteration_results = run_for_ms(lambda: cls(**obj.__dict__), ms)
else:
assert current_mode_parameter is not None
conversion_cb = current_mode_parameter.conversion_cb
assert conversion_cb is not None
prepared_obj = parameter.object_creation_cb()
if current_mode_parameter.preparation_cb is not None:
prepared_obj = current_mode_parameter.preparation_cb(obj)
us_iteration_results = run_for_ms(lambda: conversion_cb(prepared_obj), ms)
all_results.append(us_iteration_results)
if live:
print_results(current_run, False)
assert current_run == runs
print_results(runs, True)
if __name__ == "__main__":
run() # pylint: disable = no-value-for-parameter
|
from dataclasses import dataclass
from enum import Enum
from statistics import stdev
from time import process_time as clock
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
import click
from utils import EnumType, rand_bytes, rand_full_block, rand_hash
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.full_block import FullBlock
from chia.util.ints import uint8, uint64
from chia.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class BenchmarkInner(Streamable):
a: str
@dataclass(frozen=True)
@streamable
class BenchmarkMiddle(Streamable):
a: uint64
b: List[bytes32]
c: Tuple[str, bool, uint8, List[bytes]]
d: Tuple[BenchmarkInner, BenchmarkInner]
e: BenchmarkInner
@dataclass(frozen=True)
@streamable
class BenchmarkClass(Streamable):
a: Optional[BenchmarkMiddle]
b: Optional[BenchmarkMiddle]
c: BenchmarkMiddle
d: List[BenchmarkMiddle]
e: Tuple[BenchmarkMiddle, BenchmarkMiddle, BenchmarkMiddle]
def get_random_inner() -> BenchmarkInner:
return BenchmarkInner(rand_bytes(20).hex())
def get_random_middle() -> BenchmarkMiddle:
a: uint64 = uint64(10)
b: List[bytes32] = [rand_hash() for _ in range(a)]
c: Tuple[str, bool, uint8, List[bytes]] = ("benchmark", False, uint8(1), [rand_bytes(a) for _ in range(a)])
d: Tuple[BenchmarkInner, BenchmarkInner] = (get_random_inner(), get_random_inner())
e: BenchmarkInner = get_random_inner()
return BenchmarkMiddle(a, b, c, d, e)
def get_random_benchmark_object() -> BenchmarkClass:
    """Build a fully populated BenchmarkClass (optional field ``a`` stays None)."""
    optional_middle: Optional[BenchmarkMiddle] = get_random_middle()
    single_middle: BenchmarkMiddle = get_random_middle()
    five_middles: List[BenchmarkMiddle] = [get_random_middle() for _ in range(5)]
    middle_triple: Tuple[BenchmarkMiddle, BenchmarkMiddle, BenchmarkMiddle] = (
        get_random_middle(),
        get_random_middle(),
        get_random_middle(),
    )
    return BenchmarkClass(None, optional_middle, single_middle, five_middles, middle_triple)
def print_row(
    *,
    mode: str,
    us_per_iteration: Union[str, float],
    stdev_us_per_iteration: Union[str, float],
    avg_iterations: Union[str, int],
    stdev_iterations: Union[str, float],
    end: str = "\n",
) -> None:
    """Print one aligned result row.

    *end* lets live mode pass ``"\\r"`` so the row overwrites itself in place.
    """
    cells = (
        f"{mode}".ljust(10),
        f"{us_per_iteration}".ljust(12),
        f"{stdev_us_per_iteration}".rjust(20),
        f"{avg_iterations}".rjust(18),
        f"{stdev_iterations}".rjust(22),
    )
    print(" | ".join(cells), end=end)
# The string values are deliberate: benchmark.utils.EnumType resolves the
# user-supplied CLI string against them.
class Data(str, Enum):
    """Which data class to benchmark (or ``all``)."""

    all = "all"
    benchmark = "benchmark"
    full_block = "full_block"
# The string values are deliberate: benchmark.utils.EnumType resolves the
# user-supplied CLI string against them.
class Mode(str, Enum):
    """Which operation to time (or ``all``)."""

    all = "all"
    creation = "creation"
    to_bytes = "to_bytes"
    from_bytes = "from_bytes"
    to_json = "to_json"
    from_json = "from_json"
def to_bytes(obj: Any) -> bytes:
    """Serialize *obj* through its ``__bytes__`` protocol."""
    serialized = bytes(obj)
    return serialized
@dataclass
class ModeParameter:
    """Callbacks describing one benchmark mode.

    ``conversion_cb`` is the operation being timed; ``preparation_cb`` (optional)
    converts a fresh object into the input the conversion expects (e.g. bytes
    for ``from_bytes``).
    """

    conversion_cb: Callable[[Any], Any]
    preparation_cb: Optional[Callable[[Any], Any]] = None
@dataclass
class BenchmarkParameter:
    """Everything needed to benchmark one data class.

    ``data_class`` is used for display; ``object_creation_cb`` builds fresh
    instances; ``mode_parameter`` maps each Mode to its callbacks (None for
    Mode.creation, where construction itself is the timed operation).
    """

    data_class: Type[Any]
    object_creation_cb: Callable[[], Any]
    mode_parameter: Dict[Mode, Optional[ModeParameter]]
# Registry of benchmark targets: for each Data choice, the class, a factory for
# random instances, and the per-Mode callbacks.  Mode.creation maps to None
# because object construction itself is the timed operation.
benchmark_parameter: Dict[Data, BenchmarkParameter] = {
    Data.benchmark: BenchmarkParameter(
        BenchmarkClass,
        get_random_benchmark_object,
        {
            Mode.creation: None,
            Mode.to_bytes: ModeParameter(to_bytes),
            Mode.from_bytes: ModeParameter(BenchmarkClass.from_bytes, to_bytes),
            Mode.to_json: ModeParameter(BenchmarkClass.to_json_dict),
            Mode.from_json: ModeParameter(BenchmarkClass.from_json_dict, BenchmarkClass.to_json_dict),
        },
    ),
    Data.full_block: BenchmarkParameter(
        FullBlock,
        rand_full_block,
        {
            Mode.creation: None,
            Mode.to_bytes: ModeParameter(to_bytes),
            Mode.from_bytes: ModeParameter(FullBlock.from_bytes, to_bytes),
            Mode.to_json: ModeParameter(FullBlock.to_json_dict),
            Mode.from_json: ModeParameter(FullBlock.from_json_dict, FullBlock.to_json_dict),
        },
    ),
}
def run_for_ms(cb: Callable[[], Any], ms_to_run: int = 100) -> List[int]:
    """Call *cb* repeatedly for roughly *ms_to_run* milliseconds of process time.

    Returns the duration of every completed call in microseconds.
    """
    durations_us: List[int] = []
    begin = clock()
    while int((clock() - begin) * 1000) < ms_to_run:
        call_start = clock()
        cb()
        call_stop = clock()
        durations_us.append(int((call_stop - call_start) * 1000 * 1000))
    return durations_us
def calc_stdev_percent(iterations: List[int], avg: float) -> float:
    """Sample standard deviation of *iterations* as a percentage of *avg*.

    Both the deviation and the final percentage are truncated to two decimals;
    fewer than two samples yields a deviation of zero.
    """
    if len(iterations) < 2:
        deviation = 0
    else:
        deviation = int(stdev(iterations) * 100) / 100
    return int((deviation / avg * 100) * 100) / 100
@click.command()
@click.option("-d", "--data", default=Data.all, type=EnumType(Data))
@click.option("-m", "--mode", default=Mode.all, type=EnumType(Mode))
@click.option("-r", "--runs", default=100, help="Number of benchmark runs to average results")
@click.option("-t", "--ms", default=50, help="Milliseconds per run")
@click.option("--live/--no-live", default=False, help="Print live results (slower)")
def run(data: Data, mode: Mode, runs: int, ms: int, live: bool) -> None:
    """Benchmark creation/serialization of the selected data classes and modes."""
    # results[data][mode] is a list of per-run lists of per-iteration µs timings.
    results: Dict[Data, Dict[Mode, List[List[int]]]] = {}
    for current_data, parameter in benchmark_parameter.items():
        results[current_data] = {}
        if data == Data.all or current_data == data:
            print(f"\nruns: {runs}, ms/run: {ms}, benchmarks: {mode.name}, data: {parameter.data_class.__name__}")
            print_row(
                mode="mode",
                us_per_iteration="µs/iteration",
                stdev_us_per_iteration="stdev µs/iteration %",
                avg_iterations="avg iterations/run",
                stdev_iterations="stdev iterations/run %",
            )
            for current_mode, current_mode_parameter in parameter.mode_parameter.items():
                results[current_data][current_mode] = []
                if mode == Mode.all or current_mode == mode:
                    us_iteration_results: List[int]
                    all_results: List[List[int]] = results[current_data][current_mode]
                    obj = parameter.object_creation_cb()

                    # Closure over all_results/current_mode: prints a summary row
                    # for the runs completed so far ("\r" end lets live rows
                    # overwrite themselves until the final row).
                    def print_results(print_run: int, final: bool) -> None:
                        all_runtimes: List[int] = [x for inner in all_results for x in inner]
                        total_iterations: int = len(all_runtimes)
                        total_elapsed_us: int = sum(all_runtimes)
                        avg_iterations: float = total_iterations / print_run
                        stdev_iterations: float = calc_stdev_percent([len(x) for x in all_results], avg_iterations)
                        stdev_us_per_iteration: float = calc_stdev_percent(
                            all_runtimes, total_elapsed_us / total_iterations
                        )
                        print_row(
                            mode=current_mode.name,
                            us_per_iteration=int(total_elapsed_us / total_iterations * 100) / 100,
                            stdev_us_per_iteration=stdev_us_per_iteration,
                            avg_iterations=int(avg_iterations),
                            stdev_iterations=stdev_iterations,
                            end="\n" if final else "\r",
                        )

                    current_run: int = 0
                    while current_run < runs:
                        current_run += 1
                        if current_mode == Mode.creation:
                            # Creation mode: time re-constructing the object from its fields.
                            cls = type(obj)
                            us_iteration_results = run_for_ms(lambda: cls(**obj.__dict__), ms)
                        else:
                            assert current_mode_parameter is not None
                            conversion_cb = current_mode_parameter.conversion_cb
                            assert conversion_cb is not None
                            # NOTE(review): when preparation_cb is set, the freshly
                            # created object is discarded and *obj* is prepared
                            # instead — confirm this is intentional.
                            prepared_obj = parameter.object_creation_cb()
                            if current_mode_parameter.preparation_cb is not None:
                                prepared_obj = current_mode_parameter.preparation_cb(obj)
                            us_iteration_results = run_for_ms(lambda: conversion_cb(prepared_obj), ms)
                        all_results.append(us_iteration_results)
                        if live:
                            print_results(current_run, False)
                    assert current_run == runs
                    print_results(runs, True)


if __name__ == "__main__":
    run()  # pylint: disable = no-value-for-parameter
|
en
| 0.800402
|
# The strings in this Enum are by purpose. See benchmark.utils.EnumType. # The strings in this Enum are by purpose. See benchmark.utils.EnumType. # pylint: disable = no-value-for-parameter
| 2.272369
| 2
|
cs15211/DistinctSubsequencesII.py
|
JulyKikuAkita/PythonPrac
| 1
|
6627666
|
__source__ = 'https://leetcode.com/problems/distinct-subsequences-ii/'
# Time: O()
# Space: O()
#
# Description: Leetcode # 940. Distinct Subsequences II
#
# Given a string S, count the number of distinct, non-empty subsequences of S .
#
# Since the result may be large, return the answer modulo 10^9 + 7.
#
# Example 1:
#
# Input: "abc"
# Output: 7
# Explanation: The 7 distinct subsequences are "a", "b", "c", "ab", "ac", "bc", and "abc".
# Example 2:
#
# Input: "aba"
# Output: 6
# Explanation: The 6 distinct subsequences are "a", "b", "ab", "ba", "aa" and "aba".
# Example 3:
#
# Input: "aaa"
# Output: 3
# Explanation: The 3 distinct subsequences are "a", "aa" and "aaa".
#
# Note:
#
# S contains only lowercase letters.
# 1 <= S.length <= 2000
#
import unittest
# 40ms 84.31%
class Solution(object):
    def distinctSubseqII(self, S):
        """Count distinct non-empty subsequences of S, modulo 10^9 + 7.

        counts[k] holds the number of distinct subsequences (including the
        empty one) of the first k characters; a repeated character would
        double-count everything that ended at its previous occurrence, so
        that earlier total is subtracted.

        :type S: str
        :rtype: int
        """
        counts = [1]
        prev_index = {}
        for idx, ch in enumerate(S):
            nxt = counts[-1] * 2
            if ch in prev_index:
                nxt -= counts[prev_index[ch]]
            counts.append(nxt)
            prev_index[ch] = idx
        # Drop the empty subsequence before taking the modulus.
        return (counts[-1] - 1) % (10 ** 9 + 7)
class TestMethods(unittest.TestCase):
    """Placeholder test case; keeps the file runnable under unittest."""

    def test_Local(self):
        # Sanity no-op assertion.
        self.assertEqual(1, 1)


if __name__ == '__main__':
    unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/distinct-subsequences-ii/solution/
Approach 1: Dynamic Programming
Complexity Analysis
Time Complexity: O(N), where N is the length of S.
Space Complexity: O(N). It is possible to adapt this solution to take O(1) space.
Naively, for say, S = "abcx", we have dp[k] = dp[k-1] * 2.
This is because for dp[2] which counts ("", "a", "b", "c", "ab", "ac", "bc", "abc"),
dp[3] counts all of those, plus all of those with the x ending,
like ("x", "ax", "bx", "cx", "abx", "acx", "bcx", "abcx").
However, for something like S = "abab", let's play around with it. We have:
dp[0] = 2, as it counts ("", "a")
dp[1] = 4, as it counts ("", "a", "b", "ab");
dp[2] = 7 as it counts ("", "a", "b", "aa", "ab", "ba", "aba");
dp[3] = 12, as it counts ("", "a", "b", "aa", "ab", "ba", "bb", "aab", "aba", "abb", "bab", "abab").
We have that dp[3]countsdp[2], plus("b", "aa", "ab", "ba", "aba")with"b"added to it.
Notice that("", "a")are missing from this list, as they get double counted.
In general, the sequences that resulted from putting"b"the last time (ie."b", "ab"`) will get double counted.
This insight leads to the recurrence:
Dp = dp[k] = 2 * dp[k-1] - dp[last[S[k]] - 1] (in case of double counting)
# 10ms 66.47%
class Solution {
public int distinctSubseqII(String S) {
int MOD = 1_000_000_007; // 10e7;
int N = S.length();
int[] dp = new int[N + 1];
dp[0] = 1;
int[] last = new int[26];
Arrays.fill(last, -1);
for (int i = 0; i < N; ++i) {
int x = S.charAt(i) - 'a';
dp[i + 1] = dp[i] * 2 % MOD;
if (last[x] >= 0) {
dp[i + 1] -= dp[last[x]];
}
dp[i + 1] %= MOD;
last[x] = i;
}
dp[N]--;
if (dp[N] < 0) dp[N] += MOD;
return dp[N];
}
}
# 11ms 60.16%
class Solution {
public int distinctSubseqII(String S) {
// a b c
// a -> a
// b -> ab b
// c -> ac abc bc c
// 1 append 2 skip
// a b a
// a -> a a ""
// b -> ab b ab b a ""
// a -> aa aba ba a aba ba aa a ab b a "" -> exclude all answers end with current char
long[] counter = new long[26];
long prevSum = 0;
long mod = (long)(1e9 + 7);
long curr = 0;
for (int i = 0; i < S.length(); i++) {
// append
curr = prevSum + 1; // append to all previous sub sequences or append itself
int index = S.charAt(i) - 'a';
counter[index] = curr;
for (int j = 0; j < 26; j++) {
if (j != index) {
curr += counter[j];
}
}
curr = curr % mod;
prevSum = curr; // result of substring [0, i]
}
return (int)curr;
}
}
# 8ms 79.79%
class Solution {
public int distinctSubseqII(String S) {
int[] dict = new int[26]; // Save 'total' count when a character appears.
int total = 1; //Empty string, starting at count 1
for (char c : S.toCharArray()) {
int combo = total * 2 - dict[c - 'a']; // New - Duplicates
dict[c - 'a'] = total; // if 'c' ever appears again, it will clash with the current combos.
total = combo < 0 ? combo + 1000000007 : combo % 1000000007; // mod and fix negative mods
}
return total - 1; // Subtract the empty string
}
}
'''
|
__source__ = 'https://leetcode.com/problems/distinct-subsequences-ii/'
# Time: O()
# Space: O()
#
# Description: Leetcode # 940. Distinct Subsequences II
#
# Given a string S, count the number of distinct, non-empty subsequences of S .
#
# Since the result may be large, return the answer modulo 10^9 + 7.
#
# Example 1:
#
# Input: "abc"
# Output: 7
# Explanation: The 7 distinct subsequences are "a", "b", "c", "ab", "ac", "bc", and "abc".
# Example 2:
#
# Input: "aba"
# Output: 6
# Explanation: The 6 distinct subsequences are "a", "b", "ab", "ba", "aa" and "aba".
# Example 3:
#
# Input: "aaa"
# Output: 3
# Explanation: The 3 distinct subsequences are "a", "aa" and "aaa".
#
# Note:
#
# S contains only lowercase letters.
# 1 <= S.length <= 2000
#
import unittest
# 40ms 84.31%
class Solution(object):
    def distinctSubseqII(self, S):
        """Count distinct non-empty subsequences of S, modulo 10^9 + 7.

        counts[k] holds the number of distinct subsequences (including the
        empty one) of the first k characters; a repeated character would
        double-count everything that ended at its previous occurrence, so
        that earlier total is subtracted.

        :type S: str
        :rtype: int
        """
        counts = [1]
        prev_index = {}
        for idx, ch in enumerate(S):
            nxt = counts[-1] * 2
            if ch in prev_index:
                nxt -= counts[prev_index[ch]]
            counts.append(nxt)
            prev_index[ch] = idx
        # Drop the empty subsequence before taking the modulus.
        return (counts[-1] - 1) % (10 ** 9 + 7)
class TestMethods(unittest.TestCase):
    """Placeholder test case; keeps the file runnable under unittest."""

    def test_Local(self):
        # Sanity no-op assertion.
        self.assertEqual(1, 1)


if __name__ == '__main__':
    unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/distinct-subsequences-ii/solution/
Approach 1: Dynamic Programming
Complexity Analysis
Time Complexity: O(N), where N is the length of S.
Space Complexity: O(N). It is possible to adapt this solution to take O(1) space.
Naively, for say, S = "abcx", we have dp[k] = dp[k-1] * 2.
This is because for dp[2] which counts ("", "a", "b", "c", "ab", "ac", "bc", "abc"),
dp[3] counts all of those, plus all of those with the x ending,
like ("x", "ax", "bx", "cx", "abx", "acx", "bcx", "abcx").
However, for something like S = "abab", let's play around with it. We have:
dp[0] = 2, as it counts ("", "a")
dp[1] = 4, as it counts ("", "a", "b", "ab");
dp[2] = 7 as it counts ("", "a", "b", "aa", "ab", "ba", "aba");
dp[3] = 12, as it counts ("", "a", "b", "aa", "ab", "ba", "bb", "aab", "aba", "abb", "bab", "abab").
We have that dp[3]countsdp[2], plus("b", "aa", "ab", "ba", "aba")with"b"added to it.
Notice that("", "a")are missing from this list, as they get double counted.
In general, the sequences that resulted from putting"b"the last time (ie."b", "ab"`) will get double counted.
This insight leads to the recurrence:
Dp = dp[k] = 2 * dp[k-1] - dp[last[S[k]] - 1] (in case of double counting)
# 10ms 66.47%
class Solution {
public int distinctSubseqII(String S) {
int MOD = 1_000_000_007; // 10e7;
int N = S.length();
int[] dp = new int[N + 1];
dp[0] = 1;
int[] last = new int[26];
Arrays.fill(last, -1);
for (int i = 0; i < N; ++i) {
int x = S.charAt(i) - 'a';
dp[i + 1] = dp[i] * 2 % MOD;
if (last[x] >= 0) {
dp[i + 1] -= dp[last[x]];
}
dp[i + 1] %= MOD;
last[x] = i;
}
dp[N]--;
if (dp[N] < 0) dp[N] += MOD;
return dp[N];
}
}
# 11ms 60.16%
class Solution {
public int distinctSubseqII(String S) {
// a b c
// a -> a
// b -> ab b
// c -> ac abc bc c
// 1 append 2 skip
// a b a
// a -> a a ""
// b -> ab b ab b a ""
// a -> aa aba ba a aba ba aa a ab b a "" -> exclude all answers end with current char
long[] counter = new long[26];
long prevSum = 0;
long mod = (long)(1e9 + 7);
long curr = 0;
for (int i = 0; i < S.length(); i++) {
// append
curr = prevSum + 1; // append to all previous sub sequences or append itself
int index = S.charAt(i) - 'a';
counter[index] = curr;
for (int j = 0; j < 26; j++) {
if (j != index) {
curr += counter[j];
}
}
curr = curr % mod;
prevSum = curr; // result of substring [0, i]
}
return (int)curr;
}
}
# 8ms 79.79%
class Solution {
public int distinctSubseqII(String S) {
int[] dict = new int[26]; // Save 'total' count when a character appears.
int total = 1; //Empty string, starting at count 1
for (char c : S.toCharArray()) {
int combo = total * 2 - dict[c - 'a']; // New - Duplicates
dict[c - 'a'] = total; // if 'c' ever appears again, it will clash with the current combos.
total = combo < 0 ? combo + 1000000007 : combo % 1000000007; // mod and fix negative mods
}
return total - 1; // Subtract the empty string
}
}
'''
|
en
| 0.64429
|
# Time: O() # Space: O() # # Description: Leetcode # 940. Distinct Subsequences II # # Given a string S, count the number of distinct, non-empty subsequences of S . # # Since the result may be large, return the answer modulo 10^9 + 7. # # Example 1: # # Input: "abc" # Output: 7 # Explanation: The 7 distinct subsequences are "a", "b", "c", "ab", "ac", "bc", and "abc". # Example 2: # # Input: "aba" # Output: 6 # Explanation: The 6 distinct subsequences are "a", "b", "ab", "ba", "aa" and "aba". # Example 3: # # Input: "aaa" # Output: 3 # Explanation: The 3 distinct subsequences are "a", "aa" and "aaa". # # Note: # # S contains only lowercase letters. # 1 <= S.length <= 2000 # # 40ms 84.31% :type S: str :rtype: int # Thought: https://leetcode.com/problems/distinct-subsequences-ii/solution/ Approach 1: Dynamic Programming Complexity Analysis Time Complexity: O(N), where N is the length of S. Space Complexity: O(N). It is possible to adapt this solution to take O(1) space. Naively, for say, S = "abcx", we have dp[k] = dp[k-1] * 2. This is because for dp[2] which counts ("", "a", "b", "c", "ab", "ac", "bc", "abc"), dp[3] counts all of those, plus all of those with the x ending, like ("x", "ax", "bx", "cx", "abx", "acx", "bcx", "abcx"). However, for something like S = "abab", let's play around with it. We have: dp[0] = 2, as it counts ("", "a") dp[1] = 4, as it counts ("", "a", "b", "ab"); dp[2] = 7 as it counts ("", "a", "b", "aa", "ab", "ba", "aba"); dp[3] = 12, as it counts ("", "a", "b", "aa", "ab", "ba", "bb", "aab", "aba", "abb", "bab", "abab"). We have that dp[3]countsdp[2], plus("b", "aa", "ab", "ba", "aba")with"b"added to it. Notice that("", "a")are missing from this list, as they get double counted. In general, the sequences that resulted from putting"b"the last time (ie."b", "ab"`) will get double counted. 
This insight leads to the recurrence: Dp = dp[k] = 2 * dp[k-1] - dp[last[S[k]] - 1] (in case of double counting) # 10ms 66.47% class Solution { public int distinctSubseqII(String S) { int MOD = 1_000_000_007; // 10e7; int N = S.length(); int[] dp = new int[N + 1]; dp[0] = 1; int[] last = new int[26]; Arrays.fill(last, -1); for (int i = 0; i < N; ++i) { int x = S.charAt(i) - 'a'; dp[i + 1] = dp[i] * 2 % MOD; if (last[x] >= 0) { dp[i + 1] -= dp[last[x]]; } dp[i + 1] %= MOD; last[x] = i; } dp[N]--; if (dp[N] < 0) dp[N] += MOD; return dp[N]; } } # 11ms 60.16% class Solution { public int distinctSubseqII(String S) { // a b c // a -> a // b -> ab b // c -> ac abc bc c // 1 append 2 skip // a b a // a -> a a "" // b -> ab b ab b a "" // a -> aa aba ba a aba ba aa a ab b a "" -> exclude all answers end with current char long[] counter = new long[26]; long prevSum = 0; long mod = (long)(1e9 + 7); long curr = 0; for (int i = 0; i < S.length(); i++) { // append curr = prevSum + 1; // append to all previous sub sequences or append itself int index = S.charAt(i) - 'a'; counter[index] = curr; for (int j = 0; j < 26; j++) { if (j != index) { curr += counter[j]; } } curr = curr % mod; prevSum = curr; // result of substring [0, i] } return (int)curr; } } # 8ms 79.79% class Solution { public int distinctSubseqII(String S) { int[] dict = new int[26]; // Save 'total' count when a character appears. int total = 1; //Empty string, starting at count 1 for (char c : S.toCharArray()) { int combo = total * 2 - dict[c - 'a']; // New - Duplicates dict[c - 'a'] = total; // if 'c' ever appears again, it will clash with the current combos. total = combo < 0 ? combo + 1000000007 : combo % 1000000007; // mod and fix negative mods } return total - 1; // Subtract the empty string } }
| 3.573824
| 4
|
pytorch_optimizer/ralamb.py
|
gheyret/pytorch_optimizer
| 20
|
6627667
|
import math
import torch
from torch.optim import Optimizer
from pytorch_optimizer.base_optimizer import BaseOptimizer
from pytorch_optimizer.types import BETAS, CLOSURE, DEFAULTS, LOSS, PARAMETERS
class RaLamb(Optimizer, BaseOptimizer):
    """RAdam + LAMB: rectified Adam steps rescaled by a layer-wise trust ratio.

    Reference : https://gist.github.com/redknightlois/c4023d393eb8f92bb44b2ab582d7ec20
    Example :
        from pytorch_optimizer import RaLamb
        ...
        model = YourModel()
        optimizer = RaLamb(model.parameters())
        ...
        for input, output in data:
            optimizer.zero_grad()
            loss = loss_function(output, model(input))
            loss.backward()
            optimizer.step()
    """

    # Upper bound applied to the layer-wise weight norm before the trust ratio.
    clamp: float = 10.0

    def __init__(
        self,
        params: PARAMETERS,
        lr: float = 1e-3,
        betas: BETAS = (0.9, 0.999),
        eps: float = 1e-8,
        weight_decay: float = 0.0,
        adamd_debias_term: bool = False,
        pre_norm: bool = False,
        n_sma_threshold: int = 5,
        degenerated_to_sgd: bool = False,
    ):
        """RaLamb

        :param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups
        :param lr: float. learning rate
        :param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace
        :param eps: float. term added to the denominator to improve numerical stability
        :param weight_decay: float. weight decay (L2 penalty)
        :param adamd_debias_term: bool. Only correct the denominator to avoid inflating step sizes early in training
        :param pre_norm: bool. perform pre-normalization of all gradients
        :param n_sma_threshold: int. number of SMA threshold (recommended is 5)
        :param degenerated_to_sgd: bool. perform SGD update when variance of gradient is high
        """
        self.lr = lr
        self.betas = betas
        self.weight_decay = weight_decay
        self.eps = eps
        self.adamd_debias_term = adamd_debias_term
        self.pre_norm = pre_norm
        self.n_sma_threshold = n_sma_threshold
        self.degenerated_to_sgd = degenerated_to_sgd

        self.validate_parameters()

        # ``buffer`` caches (step, n_sma, step_size) keyed by step index mod 10,
        # so the RAdam rectification term is not recomputed per parameter.
        defaults: DEFAULTS = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            adamd_debias_term=adamd_debias_term,
            buffer=[[None, None, None] for _ in range(10)],
        )
        super().__init__(params, defaults)

    def validate_parameters(self):
        # Delegated to BaseOptimizer; raises on out-of-range hyper-parameters.
        self.validate_learning_rate(self.lr)
        self.validate_betas(self.betas)
        self.validate_weight_decay(self.weight_decay)
        self.validate_epsilon(self.eps)

    @torch.no_grad()
    def reset(self):
        """Re-initialize optimizer state (step counter and moment estimates)."""
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]

                state['step'] = 0
                state['exp_avg'] = torch.zeros_like(p)
                state['exp_avg_sq'] = torch.zeros_like(p)

    @torch.no_grad()
    def get_gradient_norm(self) -> float:
        """Global L2 norm over all parameter gradients (used by pre_norm)."""
        norm_sq: float = 0.0
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue

                norm_sq += torch.linalg.norm(p.grad).cpu().numpy() ** 2

        norm = math.sqrt(norm_sq)

        return norm

    @torch.no_grad()
    def step(self, closure: CLOSURE = None) -> LOSS:
        """Perform a single optimization step.

        :param closure: CLOSURE. optional closure that re-evaluates the model
            and returns the loss.
        """
        loss: LOSS = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        grad_norm: float = 1.0
        if self.pre_norm:
            grad_norm = self.get_gradient_norm()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue

                if self.pre_norm:
                    # In-place: divides the stored gradient by the global norm.
                    p.grad /= grad_norm

                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError('RaLamb does not support sparse gradients')

                # Work in fp32 for half-precision parameters/gradients.
                if grad.dtype in (torch.float16, torch.bfloat16):
                    grad = grad.float()

                p_fp32 = p
                if p.dtype in (torch.float16, torch.bfloat16):
                    p_fp32 = p_fp32.float()

                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                # Adam first/second moment updates (in place).
                exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)

                state['step'] += 1
                buffered = group['buffer'][state['step'] % 10]

                bias_correction1 = 1.0 - beta1 ** state['step']

                if state['step'] == buffered[0]:
                    # Reuse cached n_sma/step_size for this step index.
                    n_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    # RAdam: length of the approximated simple moving average.
                    n_sma_max = 2 / (1 - beta2) - 1
                    n_sma = n_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = n_sma

                    # more conservative since it's an approximated value
                    if n_sma >= self.n_sma_threshold:
                        # Variance rectification term (RAdam paper's r_t).
                        rt = math.sqrt(
                            (1 - beta2_t)
                            * (n_sma - 4)
                            / (n_sma_max - 4)
                            * (n_sma - 2)
                            / n_sma
                            * n_sma_max
                            / (n_sma_max - 2)
                        )

                        step_size = rt
                        if not group['adamd_debias_term']:
                            step_size /= bias_correction1
                    elif self.degenerated_to_sgd:
                        step_size = 1.0 / bias_correction1
                    else:
                        # NOTE(review): this branch scales by group['lr'] while the
                        # other branches do not — confirm intended interaction with
                        # the trust-ratio scaling below.
                        step_size = group['lr'] / bias_correction1
                    buffered[2] = step_size

                if group['weight_decay'] != 0:
                    # Weight decay applied directly to the weights, scaled by lr.
                    p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * group['lr'])

                # Hypothetical RAdam update, materialized only to measure its norm.
                radam_step = p_fp32.clone()
                if n_sma >= self.n_sma_threshold:
                    de_nom = exp_avg_sq.sqrt().add_(group['eps'])
                    radam_step.addcdiv_(exp_avg, de_nom, value=-step_size)
                else:
                    radam_step.add_(exp_avg, alpha=-step_size)

                radam_step = radam_step.pow(2).sum().sqrt()
                weight_norm = p.pow(2).sum().sqrt().clamp(0.0, self.clamp)
                if weight_norm == 0 or radam_step == 0:
                    trust_ratio = 1.0
                else:
                    # LAMB layer-wise trust ratio: ||w|| / ||update||.
                    trust_ratio = weight_norm / radam_step

                state['weight_norm'] = weight_norm
                state['adam_norm'] = radam_step
                state['trust_ratio'] = trust_ratio

                # Actual parameter update, rescaled by the trust ratio.
                if n_sma >= self.n_sma_threshold:
                    p_fp32.addcdiv_(exp_avg, de_nom, value=-step_size * trust_ratio)
                else:
                    p_fp32.add_(exp_avg, alpha=-step_size * trust_ratio)

                if p.dtype in (torch.float16, torch.bfloat16):
                    # Copy the fp32 result back into the half-precision parameter.
                    p.copy_(p_fp32)

        return loss
|
import math
import torch
from torch.optim import Optimizer
from pytorch_optimizer.base_optimizer import BaseOptimizer
from pytorch_optimizer.types import BETAS, CLOSURE, DEFAULTS, LOSS, PARAMETERS
class RaLamb(Optimizer, BaseOptimizer):
    """RAdam + LAMB: rectified Adam steps rescaled by a layer-wise trust ratio.

    Reference : https://gist.github.com/redknightlois/c4023d393eb8f92bb44b2ab582d7ec20
    Example :
        from pytorch_optimizer import RaLamb
        ...
        model = YourModel()
        optimizer = RaLamb(model.parameters())
        ...
        for input, output in data:
            optimizer.zero_grad()
            loss = loss_function(output, model(input))
            loss.backward()
            optimizer.step()
    """

    # Upper bound applied to the layer-wise weight norm before the trust ratio.
    clamp: float = 10.0

    def __init__(
        self,
        params: PARAMETERS,
        lr: float = 1e-3,
        betas: BETAS = (0.9, 0.999),
        eps: float = 1e-8,
        weight_decay: float = 0.0,
        adamd_debias_term: bool = False,
        pre_norm: bool = False,
        n_sma_threshold: int = 5,
        degenerated_to_sgd: bool = False,
    ):
        """RaLamb

        :param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups
        :param lr: float. learning rate
        :param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace
        :param eps: float. term added to the denominator to improve numerical stability
        :param weight_decay: float. weight decay (L2 penalty)
        :param adamd_debias_term: bool. Only correct the denominator to avoid inflating step sizes early in training
        :param pre_norm: bool. perform pre-normalization of all gradients
        :param n_sma_threshold: int. number of SMA threshold (recommended is 5)
        :param degenerated_to_sgd: bool. perform SGD update when variance of gradient is high
        """
        self.lr = lr
        self.betas = betas
        self.weight_decay = weight_decay
        self.eps = eps
        self.adamd_debias_term = adamd_debias_term
        self.pre_norm = pre_norm
        self.n_sma_threshold = n_sma_threshold
        self.degenerated_to_sgd = degenerated_to_sgd

        self.validate_parameters()

        # ``buffer`` caches (step, n_sma, step_size) keyed by step index mod 10,
        # so the RAdam rectification term is not recomputed per parameter.
        defaults: DEFAULTS = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            adamd_debias_term=adamd_debias_term,
            buffer=[[None, None, None] for _ in range(10)],
        )
        super().__init__(params, defaults)

    def validate_parameters(self):
        # Delegated to BaseOptimizer; raises on out-of-range hyper-parameters.
        self.validate_learning_rate(self.lr)
        self.validate_betas(self.betas)
        self.validate_weight_decay(self.weight_decay)
        self.validate_epsilon(self.eps)

    @torch.no_grad()
    def reset(self):
        """Re-initialize optimizer state (step counter and moment estimates)."""
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]

                state['step'] = 0
                state['exp_avg'] = torch.zeros_like(p)
                state['exp_avg_sq'] = torch.zeros_like(p)

    @torch.no_grad()
    def get_gradient_norm(self) -> float:
        """Global L2 norm over all parameter gradients (used by pre_norm)."""
        norm_sq: float = 0.0
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue

                norm_sq += torch.linalg.norm(p.grad).cpu().numpy() ** 2

        norm = math.sqrt(norm_sq)

        return norm

    @torch.no_grad()
    def step(self, closure: CLOSURE = None) -> LOSS:
        """Perform a single optimization step.

        :param closure: CLOSURE. optional closure that re-evaluates the model
            and returns the loss.
        """
        loss: LOSS = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        grad_norm: float = 1.0
        if self.pre_norm:
            grad_norm = self.get_gradient_norm()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue

                if self.pre_norm:
                    # In-place: divides the stored gradient by the global norm.
                    p.grad /= grad_norm

                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError('RaLamb does not support sparse gradients')

                # Work in fp32 for half-precision parameters/gradients.
                if grad.dtype in (torch.float16, torch.bfloat16):
                    grad = grad.float()

                p_fp32 = p
                if p.dtype in (torch.float16, torch.bfloat16):
                    p_fp32 = p_fp32.float()

                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                # Adam first/second moment updates (in place).
                exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)

                state['step'] += 1
                buffered = group['buffer'][state['step'] % 10]

                bias_correction1 = 1.0 - beta1 ** state['step']

                if state['step'] == buffered[0]:
                    # Reuse cached n_sma/step_size for this step index.
                    n_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    # RAdam: length of the approximated simple moving average.
                    n_sma_max = 2 / (1 - beta2) - 1
                    n_sma = n_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = n_sma

                    # more conservative since it's an approximated value
                    if n_sma >= self.n_sma_threshold:
                        # Variance rectification term (RAdam paper's r_t).
                        rt = math.sqrt(
                            (1 - beta2_t)
                            * (n_sma - 4)
                            / (n_sma_max - 4)
                            * (n_sma - 2)
                            / n_sma
                            * n_sma_max
                            / (n_sma_max - 2)
                        )

                        step_size = rt
                        if not group['adamd_debias_term']:
                            step_size /= bias_correction1
                    elif self.degenerated_to_sgd:
                        step_size = 1.0 / bias_correction1
                    else:
                        # NOTE(review): this branch scales by group['lr'] while the
                        # other branches do not — confirm intended interaction with
                        # the trust-ratio scaling below.
                        step_size = group['lr'] / bias_correction1
                    buffered[2] = step_size

                if group['weight_decay'] != 0:
                    # Weight decay applied directly to the weights, scaled by lr.
                    p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * group['lr'])

                # Hypothetical RAdam update, materialized only to measure its norm.
                radam_step = p_fp32.clone()
                if n_sma >= self.n_sma_threshold:
                    de_nom = exp_avg_sq.sqrt().add_(group['eps'])
                    radam_step.addcdiv_(exp_avg, de_nom, value=-step_size)
                else:
                    radam_step.add_(exp_avg, alpha=-step_size)

                radam_step = radam_step.pow(2).sum().sqrt()
                weight_norm = p.pow(2).sum().sqrt().clamp(0.0, self.clamp)
                if weight_norm == 0 or radam_step == 0:
                    trust_ratio = 1.0
                else:
                    # LAMB layer-wise trust ratio: ||w|| / ||update||.
                    trust_ratio = weight_norm / radam_step

                state['weight_norm'] = weight_norm
                state['adam_norm'] = radam_step
                state['trust_ratio'] = trust_ratio

                # Actual parameter update, rescaled by the trust ratio.
                if n_sma >= self.n_sma_threshold:
                    p_fp32.addcdiv_(exp_avg, de_nom, value=-step_size * trust_ratio)
                else:
                    p_fp32.add_(exp_avg, alpha=-step_size * trust_ratio)

                if p.dtype in (torch.float16, torch.bfloat16):
                    # Copy the fp32 result back into the half-precision parameter.
                    p.copy_(p_fp32)

        return loss
|
en
| 0.623785
|
Reference : https://gist.github.com/redknightlois/c4023d393eb8f92bb44b2ab582d7ec20 Example : from pytorch_optimizer import RaLamb ... model = YourModel() optimizer = RaLamb(model.parameters()) ... for input, output in data: optimizer.zero_grad() loss = loss_function(output, model(input)) loss.backward() optimizer.step() RaLamb :param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups :param lr: float. learning rate :param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace :param eps: float. term added to the denominator to improve numerical stability :param weight_decay: float. weight decay (L2 penalty) :param adamd_debias_term: bool. Only correct the denominator to avoid inflating step sizes early in training :param pre_norm: bool. perform pre-normalization of all gradients :param n_sma_threshold: int. (recommended is 5) :param degenerated_to_sgd: float. degenerated to SGD # more conservative since it's an approximated value
| 3.1071
| 3
|
klever/core/vtg/emg/generators/__init__.py
|
lutovna/klever
| 1
|
6627668
|
#
# Copyright (c) 2019 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
import importlib
from klever.core.utils import get_search_dirs
from klever.core.vtg.emg.common import get_or_die
from klever.core.vtg.emg.common.process.serialization import CollectionEncoder
def generate_processes(logger, conf, collection, abstract_task_desc, source):
    """
    Generate processes of the environment model in the intermediate representation.

    From the configuration, the function reads the list of generator names and runs
    them one by one to obtain a final set of processes before translating them into
    C code.

    :param logger: logging.Logger plugin object.
    :param conf: EMG configuration dict.
    :param collection: ProcessCollection object, updated in place by generators.
    :param abstract_task_desc: Description dict.
    :param source: Source collection object.
    :return: Dict with reports collected from all generators.
    """
    # Run process generators in the specific order given by the configuration.
    generator_names = ((e, '.vtg.emg.generators.{}'.format(e)) for e in
                       [list(e.keys())[0] for e in get_or_die(conf, "generators options")])
    configurations = [list(e.values())[0] for e in get_or_die(conf, "generators options")]
    specifications_set = conf.get('specifications set')

    # Import generator modules.
    modules = [(shortname, importlib.import_module(name, 'klever.core')) for shortname, name in generator_names]

    # Collect directories where specifications for each kind of generator may live.
    possible_locations = [root for root, *_ in os.walk(os.path.dirname(conf['specifications dir']))] + \
                         list(get_search_dirs(conf['main working directory']))
    reports = dict()
    for index, (shortname, generator_module) in enumerate(modules):
        # Propagate the debug option to each generator's configuration.
        configurations[index]['keep intermediate files'] = conf.get('keep intermediate files')
        generator = generator_module.ScenarioModelgenerator(logger, configurations[index])
        specifications = generator.import_specifications(specifications_set, possible_locations)
        reports.update(generator.make_scenarios(abstract_task_desc, collection, source, specifications))

        # Save intermediate artifacts for debugging if requested.
        if conf.get('keep intermediate files'):
            # Save specifications used by this generator.
            for kind in specifications:
                file_name = "{} {}.json".format(shortname, kind)
                generator.save_specification(specifications[kind], file_name)
            # Save processes accumulated so far.
            with open('%s intermediate model.json' % str(shortname), mode='w', encoding='utf8') as fp:
                json.dump(collection, fp, cls=CollectionEncoder, sort_keys=True, indent=2)
            # Save images of processes.
            collection.save_digraphs('images')
    return reports
|
#
# Copyright (c) 2019 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
import importlib
from klever.core.utils import get_search_dirs
from klever.core.vtg.emg.common import get_or_die
from klever.core.vtg.emg.common.process.serialization import CollectionEncoder
def generate_processes(logger, conf, collection, abstract_task_desc, source):
    """
    Generate processes of the environment model in the intermediate representation.

    From the configuration, the function reads the list of generator names and runs
    them one by one to obtain a final set of processes before translating them into
    C code.

    :param logger: logging.Logger plugin object.
    :param conf: EMG configuration dict.
    :param collection: ProcessCollection object, updated in place by generators.
    :param abstract_task_desc: Description dict.
    :param source: Source collection object.
    :return: Dict with reports collected from all generators.
    """
    # Run process generators in the specific order given by the configuration.
    generator_names = ((e, '.vtg.emg.generators.{}'.format(e)) for e in
                       [list(e.keys())[0] for e in get_or_die(conf, "generators options")])
    configurations = [list(e.values())[0] for e in get_or_die(conf, "generators options")]
    specifications_set = conf.get('specifications set')

    # Import generator modules.
    modules = [(shortname, importlib.import_module(name, 'klever.core')) for shortname, name in generator_names]

    # Collect directories where specifications for each kind of generator may live.
    possible_locations = [root for root, *_ in os.walk(os.path.dirname(conf['specifications dir']))] + \
                         list(get_search_dirs(conf['main working directory']))
    reports = dict()
    for index, (shortname, generator_module) in enumerate(modules):
        # Propagate the debug option to each generator's configuration.
        configurations[index]['keep intermediate files'] = conf.get('keep intermediate files')
        generator = generator_module.ScenarioModelgenerator(logger, configurations[index])
        specifications = generator.import_specifications(specifications_set, possible_locations)
        reports.update(generator.make_scenarios(abstract_task_desc, collection, source, specifications))

        # Save intermediate artifacts for debugging if requested.
        if conf.get('keep intermediate files'):
            # Save specifications used by this generator.
            for kind in specifications:
                file_name = "{} {}.json".format(shortname, kind)
                generator.save_specification(specifications[kind], file_name)
            # Save processes accumulated so far.
            with open('%s intermediate model.json' % str(shortname), mode='w', encoding='utf8') as fp:
                json.dump(collection, fp, cls=CollectionEncoder, sort_keys=True, indent=2)
            # Save images of processes.
            collection.save_digraphs('images')
    return reports
|
en
| 0.809391
|
# # Copyright (c) 2019 ISP RAS (http://www.ispras.ru) # Ivannikov Institute for System Programming of the Russian Academy of Sciences # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This is the main function for generating processes of the environment model in the intermediate representation. From the configuration, the function reads the list of generators names and runs them one by one to obtain a final set of processes before translation them into C code. :param logger: logging.Logger plugin object. :param conf: EMG configuration dict. :param collection: ProcessCollection object. :param abstract_task_desc: Description dict. :param source: Source collection object. :return: ProcessCollection object. # In a specific order start proess generators # Find genererators # Get specifications for each kind of a agenerator # Set debug option # Now save specifications # Save specifications # Save processes # Save images of processes
| 2.017265
| 2
|
pkg/releasing/git.bzl
|
velentr/rules_pkg
| 0
|
6627669
|
# Copyright 2021 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A rule to extract the git changelog."""
def _git_changelog_impl(ctx):
    """Implements the git_changelog rule."""
    args = ctx.actions.args()
    tools = []
    toolchain = ctx.toolchains["@rules_pkg//toolchains/git:git_toolchain_type"].git
    if not toolchain.valid:
        fail("The git_toolchain is not properly configured: " +
             toolchain.name)
    if toolchain.path:
        args.add("--git_path", toolchain.path)
    else:
        executable = toolchain.label.files_to_run.executable
        tools.append(executable)
        # Use extend, not append: to_list() yields a list of Files, and the
        # `tools` argument of ctx.actions.run must be a flat sequence of
        # File objects, not a list containing a nested list.
        tools.extend(toolchain.label.default_runfiles.files.to_list())
        args.add("--git_path", executable.path)
    args.add("--git_root", toolchain.client_top)
    args.add("--from_ref", ctx.attr.from_ref)
    args.add("--to_ref", ctx.attr.to_ref)
    args.add("--out", ctx.outputs.out.path)
    if ctx.attr.verbose:
        args.add("--verbose")
    ctx.actions.run(
        mnemonic = "GitChangelog",
        executable = ctx.executable._git_changelog,
        use_default_shell_env = True,
        arguments = [args],
        outputs = [ctx.outputs.out],
        # Force UTF-8 so changelog text is encoded/decoded consistently.
        env = {
            "LANG": "en_US.UTF-8",
            "LC_CTYPE": "UTF-8",
            "PYTHONIOENCODING": "UTF-8",
            "PYTHONUTF8": "1",
        },
        # Must run locally: the action inspects the live git client state.
        execution_requirements = {
            "local": "1",
        },
        tools = tools,
    )
# Define the private rule. Users should call the git_changelog() macro below,
# which adds the git-toolchain availability guard.
_git_changelog = rule(
    doc = "Extracts the git changelog between two refs.",
    attrs = {
        "from_ref": attr.string(
            doc = "lower commit ref. The default is to use the latest tag",
            default = "_LATEST_TAG_",
        ),
        "to_ref": attr.string(
            doc = "upper commit ref. The default is HEAD",
            default = "HEAD",
        ),
        "out": attr.output(mandatory = True),
        "verbose": attr.bool(
            doc = "Be verbose",
            default = False,
        ),
        # Implicit dependency on the changelog extraction tool.
        "_git_changelog": attr.label(
            default = Label("//releasing:git_changelog_private"),
            cfg = "exec",
            executable = True,
            allow_files = True,
        ),
    },
    implementation = _git_changelog_impl,
    toolchains = ["@rules_pkg//toolchains/git:git_toolchain_type"],
)
def git_changelog(name, **kwargs):
    """Public macro wrapping the _git_changelog rule.

    Marks the target as incompatible when no git toolchain is configured, so
    builds on machines without git skip the target instead of failing.
    """
    _git_changelog(
        name = name,
        # This requires bazel 4.x
        target_compatible_with = select({
            "//toolchains/git:have_git": [],
            "//conditions:default": ["//:not_compatible"],
        }),
        **kwargs,
    )
|
# Copyright 2021 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A rule to extract the git changelog."""
def _git_changelog_impl(ctx):
    """Implements the git_changelog rule."""
    args = ctx.actions.args()
    tools = []
    toolchain = ctx.toolchains["@rules_pkg//toolchains/git:git_toolchain_type"].git
    if not toolchain.valid:
        fail("The git_toolchain is not properly configured: " +
             toolchain.name)
    if toolchain.path:
        args.add("--git_path", toolchain.path)
    else:
        executable = toolchain.label.files_to_run.executable
        tools.append(executable)
        # Use extend, not append: to_list() yields a list of Files, and the
        # `tools` argument of ctx.actions.run must be a flat sequence of
        # File objects, not a list containing a nested list.
        tools.extend(toolchain.label.default_runfiles.files.to_list())
        args.add("--git_path", executable.path)
    args.add("--git_root", toolchain.client_top)
    args.add("--from_ref", ctx.attr.from_ref)
    args.add("--to_ref", ctx.attr.to_ref)
    args.add("--out", ctx.outputs.out.path)
    if ctx.attr.verbose:
        args.add("--verbose")
    ctx.actions.run(
        mnemonic = "GitChangelog",
        executable = ctx.executable._git_changelog,
        use_default_shell_env = True,
        arguments = [args],
        outputs = [ctx.outputs.out],
        # Force UTF-8 so changelog text is encoded/decoded consistently.
        env = {
            "LANG": "en_US.UTF-8",
            "LC_CTYPE": "UTF-8",
            "PYTHONIOENCODING": "UTF-8",
            "PYTHONUTF8": "1",
        },
        # Must run locally: the action inspects the live git client state.
        execution_requirements = {
            "local": "1",
        },
        tools = tools,
    )
# Define the private rule. Users should call the git_changelog() macro below,
# which adds the git-toolchain availability guard.
_git_changelog = rule(
    doc = "Extracts the git changelog between two refs.",
    attrs = {
        "from_ref": attr.string(
            doc = "lower commit ref. The default is to use the latest tag",
            default = "_LATEST_TAG_",
        ),
        "to_ref": attr.string(
            doc = "upper commit ref. The default is HEAD",
            default = "HEAD",
        ),
        "out": attr.output(mandatory = True),
        "verbose": attr.bool(
            doc = "Be verbose",
            default = False,
        ),
        # Implicit dependency on the changelog extraction tool.
        "_git_changelog": attr.label(
            default = Label("//releasing:git_changelog_private"),
            cfg = "exec",
            executable = True,
            allow_files = True,
        ),
    },
    implementation = _git_changelog_impl,
    toolchains = ["@rules_pkg//toolchains/git:git_toolchain_type"],
)
def git_changelog(name, **kwargs):
    """Public macro wrapping the _git_changelog rule.

    Marks the target as incompatible when no git toolchain is configured, so
    builds on machines without git skip the target instead of failing.
    """
    _git_changelog(
        name = name,
        # This requires bazel 4.x
        target_compatible_with = select({
            "//toolchains/git:have_git": [],
            "//conditions:default": ["//:not_compatible"],
        }),
        **kwargs,
    )
|
en
| 0.840864
|
# Copyright 2021 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. A rule to extract the git changelog. Implements to git_changelog rule. # Define the rule. # This requires bazel 4.x
| 1.820385
| 2
|
oi/algs/Graph/tree-gen.py
|
Riteme/test
| 3
|
6627670
|
<filename>oi/algs/Graph/tree-gen.py
#!/usr/bin/env pypy
# Random tree generator: prints the node count N, then N-1 "parent child"
# edges where each node's parent is drawn from the previously placed nodes.
import sys
from random import *

assert len(sys.argv) > 1

node_count = int(sys.argv[1])
labels = list(range(1, node_count + 1))
shuffle(labels)

print(node_count)
for idx in range(1, node_count):
    parent = choice(labels[:idx])
    print("{} {}".format(parent, labels[idx]))
|
<filename>oi/algs/Graph/tree-gen.py
#!/usr/bin/env pypy
# Random tree generator: prints the node count N, then N-1 "parent child"
# edges where each node's parent is drawn from the previously placed nodes.
import sys
from random import *

assert len(sys.argv) > 1

node_count = int(sys.argv[1])
labels = list(range(1, node_count + 1))
shuffle(labels)

print(node_count)
for idx in range(1, node_count):
    parent = choice(labels[:idx])
    print("{} {}".format(parent, labels[idx]))
|
ru
| 0.30987
|
#!/usr/bin/env pypy
| 2.941744
| 3
|
config.py
|
appKom/watchdog
| 0
|
6627671
|
<filename>config.py
# These are the different configuration options available in the script.
# Change them here and they change everywhere.

# ICS calendar location
icsLocation = 'https://calendar.google.com/calendar/ical/b72fgdhuv6g5mpoqa0bdvj095k%40group.calendar.google.com/public/basic.ics'

# Mail variables
frommail = "mail from here"
frommailpass = "<PASSWORD> local"
tomail = "to this mail"

# Directory holding the local database files.
dblocation = '/home/pi/watchdog/'

# report mode. 'daily' for a daily report and 'weekly' for a weekly report
reportMode = 'daily'
|
<filename>config.py
# These are the different configuration options available in the script.
# Change them here and they change everywhere.

# ICS calendar location
icsLocation = 'https://calendar.google.com/calendar/ical/b72fgdhuv6g5mpoqa0bdvj095k%40group.calendar.google.com/public/basic.ics'

# Mail variables
frommail = "mail from here"
frommailpass = "<PASSWORD> local"
tomail = "to this mail"

# Directory holding the local database files.
dblocation = '/home/pi/watchdog/'

# report mode. 'daily' for a daily report and 'weekly' for a weekly report
reportMode = 'daily'
|
no
| 0.544923
|
# Her er de forskjellige konfigureringsmulighetene som finnes i scriptet. # Endrer du disse her, så endres de alle steder. # ICS plassering # Mail variables # report mode. 'daily' for a daily report and 'weekly' for a weekly report
| 1.63076
| 2
|
python_packaging/src/test/conftest.py
|
jlmaccal/gromacs
| 3
|
6627672
|
#
# This file is part of the GROMACS molecular simulation package.
#
# Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
# <NAME>, <NAME>, <NAME>, and <NAME>,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
#
# GROMACS is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# GROMACS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with GROMACS; if not, see
# http://www.gnu.org/licenses, or write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# If you want to redistribute modifications to GROMACS, please
# consider that scientific software is very special. Version
# control is crucial - bugs must be traceable. We will be happy to
# consider code for inclusion in the official distribution, but
# derived work must not be called official GROMACS. Details are found
# in the README & COPYING files - if they are missing, get the
# official version at http://www.gromacs.org.
#
# To help us fund GROMACS development, we humbly ask that you cite
# the research papers on the package. Check out http://www.gromacs.org.
"""Configuration and fixtures for pytest."""
import json
import logging
import os
import pytest
pytest_plugins = ('gmxapi.testsupport',)

# Detect whether we run under MPI. When mpi4py is available, record the rank
# and communicator size so log records from different ranks can be told apart;
# otherwise fall back to single-process defaults.
try:
    from mpi4py import MPI
    rank_number = MPI.COMM_WORLD.Get_rank()
    comm_size = MPI.COMM_WORLD.Get_size()
except ImportError:
    rank_number = 0
    comm_size = 1
    rank_tag = ''
    MPI = None
else:
    rank_tag = 'rank{}:'.format(rank_number)

old_factory = logging.getLogRecordFactory()


def record_factory(*args, **kwargs):
    # Wrap the default LogRecord factory so every record carries a
    # `rank_tag` attribute (empty string when not running under MPI).
    record = old_factory(*args, **kwargs)
    record.rank_tag = rank_tag
    return record


logging.setLogRecordFactory(record_factory)
@pytest.fixture(scope='session')
def spc_water_box_collection(gmxcli, remove_tempdir):
    """Provide a collection of simulation input items for a simple simulation.

    Prepare the MD input in a freshly created working directory.
    Solvate a 5nm cubic box with spc water. Return a dictionary of the artifacts produced.
    """
    import gmxapi as gmx
    # TODO: Remove this import when the spc_water_box fixture is migrated to gmxapi.testsupport
    from gmxapi.testsupport import _cleandir

    # TODO: (#2896) Fetch MD input from package / library data.
    # Example:
    #     import pkg_resources
    #     # Note: importing pkg_resources means setuptools is required for running this test.
    #     # Get or build TPR file from data bundled via setup(package_data=...)
    #     # Ref https://setuptools.readthedocs.io/en/latest/setuptools.html#including-data-files
    #     from gmx.data import tprfilename

    with _cleandir(remove_tempdir) as tempdir:
        # Test data (solvent topology, etc.) lives next to this conftest.
        testdir = os.path.dirname(__file__)
        with open(os.path.join(testdir, 'testdata.json'), 'r') as fh:
            testdata = json.load(fh)

        # TODO: (#2756) Don't rely on so many automagical behaviors (as described in comments below)

        structurefile = os.path.join(tempdir, 'structure.gro')
        # We let `gmx solvate` use the default solvent. Otherwise, we would do
        #     gro_input = testdata['solvent_structure']
        #     with open(structurefile, 'w') as fh:
        #         fh.write('\n'.join(gro_input))
        #         fh.write('\n')

        topfile = os.path.join(tempdir, 'topology.top')
        top_input = testdata['solvent_topology']
        # `gmx solvate` will append a line to the provided file with the molecule count,
        # so we strip the last line from the input topology.
        with open(topfile, 'w') as fh:
            fh.write('\n'.join(top_input[:-1]))
            fh.write('\n')
        assert os.path.exists(topfile)
        solvate = gmx.commandline_operation(gmxcli,
                                            arguments=['solvate', '-box', '5', '5', '5'],
                                            # We use the default solvent instead of specifying one.
                                            # input_files={'-cs': structurefile},
                                            output_files={'-p': topfile,
                                                          '-o': structurefile,
                                                          }
                                            )
        assert os.path.exists(topfile)
        if solvate.output.returncode.result() != 0:
            logging.debug(solvate.output.stderr.result())
            raise RuntimeError('solvate failed in spc_water_box testing fixture.')

        # Choose an exactly representable dt of 2^-9 ps (approximately 0.002)
        dt = 2. ** -9.
        mdp_input = [('integrator', 'md'),
                     ('dt', dt),
                     ('cutoff-scheme', 'Verlet'),
                     ('nsteps', 2),
                     ('nstxout', 1),
                     ('nstvout', 1),
                     ('nstfout', 1),
                     ('tcoupl', 'v-rescale'),
                     ('tc-grps', 'System'),
                     ('tau-t', 1),
                     ('ref-t', 298)]
        # Render the key/value pairs into MDP file syntax ("key = value" lines).
        mdp_input = '\n'.join([' = '.join([str(item) for item in kvpair]) for kvpair in mdp_input])
        mdpfile = os.path.join(tempdir, 'md.mdp')
        with open(mdpfile, 'w') as fh:
            fh.write(mdp_input)
            fh.write('\n')
        tprfile = os.path.join(tempdir, 'topol.tpr')
        # We don't use mdout_mdp, but if we don't specify it to grompp,
        # it will be created in the current working directory.
        mdout_mdp = os.path.join(tempdir, 'mdout.mdp')

        grompp = gmx.commandline_operation(gmxcli, 'grompp',
                                           input_files={
                                               '-f': mdpfile,
                                               '-p': solvate.output.file['-p'],
                                               '-c': solvate.output.file['-o'],
                                               '-po': mdout_mdp,
                                           },
                                           output_files={'-o': tprfile})
        tprfilename = grompp.output.file['-o'].result()
        if grompp.output.returncode.result() != 0:
            logging.debug(grompp.output.stderr.result())
            raise RuntimeError('grompp failed in spc_water_box testing fixture.')
        # TODO: more inspection of grompp errors...
        assert os.path.exists(tprfilename)
        collection = {
            'tpr_filename': tprfilename,
            'mdp_input_filename': mdpfile,
            'mdp_output_filename': mdout_mdp,
            'topology_filename': solvate.output.file['-p'].result(),
            'gro_filename': solvate.output.file['-o'].result(),
            # NOTE(review): by this point mdp_input has been joined into a
            # single string, despite the 'list' in the key name.
            'mdp_input_list': mdp_input
        }
        yield collection
@pytest.fixture(scope='session')
def spc_water_box(spc_water_box_collection):
    """Provide a TPR input file for a simple simulation.

    Convenience wrapper that extracts just the TPR filename from the
    spc_water_box_collection fixture.
    """
    yield spc_water_box_collection['tpr_filename']
|
#
# This file is part of the GROMACS molecular simulation package.
#
# Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
# <NAME>, <NAME>, <NAME>, and <NAME>,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
#
# GROMACS is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# GROMACS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with GROMACS; if not, see
# http://www.gnu.org/licenses, or write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# If you want to redistribute modifications to GROMACS, please
# consider that scientific software is very special. Version
# control is crucial - bugs must be traceable. We will be happy to
# consider code for inclusion in the official distribution, but
# derived work must not be called official GROMACS. Details are found
# in the README & COPYING files - if they are missing, get the
# official version at http://www.gromacs.org.
#
# To help us fund GROMACS development, we humbly ask that you cite
# the research papers on the package. Check out http://www.gromacs.org.
"""Configuration and fixtures for pytest."""
import json
import logging
import os
import pytest
pytest_plugins = ('gmxapi.testsupport',)

# Detect whether we run under MPI. When mpi4py is available, record the rank
# and communicator size so log records from different ranks can be told apart;
# otherwise fall back to single-process defaults.
try:
    from mpi4py import MPI
    rank_number = MPI.COMM_WORLD.Get_rank()
    comm_size = MPI.COMM_WORLD.Get_size()
except ImportError:
    rank_number = 0
    comm_size = 1
    rank_tag = ''
    MPI = None
else:
    rank_tag = 'rank{}:'.format(rank_number)

old_factory = logging.getLogRecordFactory()


def record_factory(*args, **kwargs):
    # Wrap the default LogRecord factory so every record carries a
    # `rank_tag` attribute (empty string when not running under MPI).
    record = old_factory(*args, **kwargs)
    record.rank_tag = rank_tag
    return record


logging.setLogRecordFactory(record_factory)
@pytest.fixture(scope='session')
def spc_water_box_collection(gmxcli, remove_tempdir):
    """Provide a collection of simulation input items for a simple simulation.

    Prepare the MD input in a freshly created working directory.
    Solvate a 5nm cubic box with spc water. Return a dictionary of the artifacts produced.
    """
    import gmxapi as gmx
    # TODO: Remove this import when the spc_water_box fixture is migrated to gmxapi.testsupport
    from gmxapi.testsupport import _cleandir

    # TODO: (#2896) Fetch MD input from package / library data.
    # Example:
    #     import pkg_resources
    #     # Note: importing pkg_resources means setuptools is required for running this test.
    #     # Get or build TPR file from data bundled via setup(package_data=...)
    #     # Ref https://setuptools.readthedocs.io/en/latest/setuptools.html#including-data-files
    #     from gmx.data import tprfilename

    with _cleandir(remove_tempdir) as tempdir:
        # Test data (solvent topology, etc.) lives next to this conftest.
        testdir = os.path.dirname(__file__)
        with open(os.path.join(testdir, 'testdata.json'), 'r') as fh:
            testdata = json.load(fh)

        # TODO: (#2756) Don't rely on so many automagical behaviors (as described in comments below)

        structurefile = os.path.join(tempdir, 'structure.gro')
        # We let `gmx solvate` use the default solvent. Otherwise, we would do
        #     gro_input = testdata['solvent_structure']
        #     with open(structurefile, 'w') as fh:
        #         fh.write('\n'.join(gro_input))
        #         fh.write('\n')

        topfile = os.path.join(tempdir, 'topology.top')
        top_input = testdata['solvent_topology']
        # `gmx solvate` will append a line to the provided file with the molecule count,
        # so we strip the last line from the input topology.
        with open(topfile, 'w') as fh:
            fh.write('\n'.join(top_input[:-1]))
            fh.write('\n')
        assert os.path.exists(topfile)
        solvate = gmx.commandline_operation(gmxcli,
                                            arguments=['solvate', '-box', '5', '5', '5'],
                                            # We use the default solvent instead of specifying one.
                                            # input_files={'-cs': structurefile},
                                            output_files={'-p': topfile,
                                                          '-o': structurefile,
                                                          }
                                            )
        assert os.path.exists(topfile)
        if solvate.output.returncode.result() != 0:
            logging.debug(solvate.output.stderr.result())
            raise RuntimeError('solvate failed in spc_water_box testing fixture.')

        # Choose an exactly representable dt of 2^-9 ps (approximately 0.002)
        dt = 2. ** -9.
        mdp_input = [('integrator', 'md'),
                     ('dt', dt),
                     ('cutoff-scheme', 'Verlet'),
                     ('nsteps', 2),
                     ('nstxout', 1),
                     ('nstvout', 1),
                     ('nstfout', 1),
                     ('tcoupl', 'v-rescale'),
                     ('tc-grps', 'System'),
                     ('tau-t', 1),
                     ('ref-t', 298)]
        # Render the key/value pairs into MDP file syntax ("key = value" lines).
        mdp_input = '\n'.join([' = '.join([str(item) for item in kvpair]) for kvpair in mdp_input])
        mdpfile = os.path.join(tempdir, 'md.mdp')
        with open(mdpfile, 'w') as fh:
            fh.write(mdp_input)
            fh.write('\n')
        tprfile = os.path.join(tempdir, 'topol.tpr')
        # We don't use mdout_mdp, but if we don't specify it to grompp,
        # it will be created in the current working directory.
        mdout_mdp = os.path.join(tempdir, 'mdout.mdp')

        grompp = gmx.commandline_operation(gmxcli, 'grompp',
                                           input_files={
                                               '-f': mdpfile,
                                               '-p': solvate.output.file['-p'],
                                               '-c': solvate.output.file['-o'],
                                               '-po': mdout_mdp,
                                           },
                                           output_files={'-o': tprfile})
        tprfilename = grompp.output.file['-o'].result()
        if grompp.output.returncode.result() != 0:
            logging.debug(grompp.output.stderr.result())
            raise RuntimeError('grompp failed in spc_water_box testing fixture.')
        # TODO: more inspection of grompp errors...
        assert os.path.exists(tprfilename)
        collection = {
            'tpr_filename': tprfilename,
            'mdp_input_filename': mdpfile,
            'mdp_output_filename': mdout_mdp,
            'topology_filename': solvate.output.file['-p'].result(),
            'gro_filename': solvate.output.file['-o'].result(),
            # NOTE(review): by this point mdp_input has been joined into a
            # single string, despite the 'list' in the key name.
            'mdp_input_list': mdp_input
        }
        yield collection
@pytest.fixture(scope='session')
def spc_water_box(spc_water_box_collection):
    """Provide a TPR input file for a simple simulation.

    Convenience wrapper that extracts just the TPR filename from the
    spc_water_box_collection fixture.
    """
    yield spc_water_box_collection['tpr_filename']
|
en
| 0.816433
|
# # This file is part of the GROMACS molecular simulation package. # # Copyright (c) 2019,2020,2021, by the GROMACS development team, led by # <NAME>, <NAME>, <NAME>, and <NAME>, # and including many others, as listed in the AUTHORS file in the # top-level source directory and at http://www.gromacs.org. # # GROMACS is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public License # as published by the Free Software Foundation; either version 2.1 # of the License, or (at your option) any later version. # # GROMACS is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with GROMACS; if not, see # http://www.gnu.org/licenses, or write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # If you want to redistribute modifications to GROMACS, please # consider that scientific software is very special. Version # control is crucial - bugs must be traceable. We will be happy to # consider code for inclusion in the official distribution, but # derived work must not be called official GROMACS. Details are found # in the README & COPYING files - if they are missing, get the # official version at http://www.gromacs.org. # # To help us fund GROMACS development, we humbly ask that you cite # the research papers on the package. Check out http://www.gromacs.org. Configuration and fixtures for pytest. Provide a collection of simulation input items for a simple simulation. Prepare the MD input in a freshly created working directory. Solvate a 5nm cubic box with spc water. Return a dictionary of the artifacts produced. 
# TODO: Remove this import when the the spc_water_box fixture is migrated to gmxapi.testsupport # TODO: (#2896) Fetch MD input from package / library data. # Example: # import pkg_resources # # Note: importing pkg_resources means setuptools is required for running this test. # # Get or build TPR file from data bundled via setup(package_data=...) # # Ref https://setuptools.readthedocs.io/en/latest/setuptools.html#including-data-files # from gmx.data import tprfilename # TODO: (#2756) Don't rely on so many automagical behaviors (as described in comments below) # We let `gmx solvate` use the default solvent. Otherwise, we would do # gro_input = testdata['solvent_structure'] # with open(structurefile, 'w') as fh: # fh.write('\n'.join(gro_input)) # fh.write('\n') # `gmx solvate` will append a line to the provided file with the molecule count, # so we strip the last line from the input topology. # We use the default solvent instead of specifying one. # input_files={'-cs': structurefile}, # Choose an exactly representable dt of 2^-9 ps (approximately 0.002) # We don't use mdout_mdp, but if we don't specify it to grompp, # it will be created in the current working directory. # TODO: more inspection of grompp errors... Provide a TPR input file for a simple simulation.
| 1.57343
| 2
|
src/ecco/lm.py
|
intellicommtech/ecco
| 0
|
6627673
|
from collections import defaultdict
import inspect
import json
import os
import random
import torch
import transformers
from transformers import BatchEncoding
import ecco
import numpy as np
from IPython import display as d
from torch.nn import functional as F
from ecco.attribution import compute_primary_attributions_scores
from ecco.output import OutputSeq
from typing import Optional, Any, List, Tuple, Dict, Union
from operator import attrgetter
import re
from ecco.util import is_partial_token, strip_tokenizer_prefix
from packaging import version
class LM(object):
"""
Ecco's central class. A wrapper around language models. We use it to run the language models
and collect important data like input saliency and neuron activations.
A LM object is typically not created directly by users,
it is returned by `ecco.from_pretrained()`.
Usage:
```python
import ecco
lm = ecco.from_pretrained('distilgpt2')
output = lm.generate("Hello computer")
```
"""
    def __init__(self,
                 model: transformers.PreTrainedModel,
                 tokenizer: transformers.PreTrainedTokenizerFast,
                 model_name: str,
                 config: Dict[str, Any],
                 collect_activations_flag: Optional[bool] = False,
                 collect_activations_layer_nums: Optional[List[int]] = None,  # None --> collect for all layers
                 verbose: Optional[bool] = True,
                 gpu: Optional[bool] = True
                 ):
        """
        Creates an LM object given a model and tokenizer.
        Args:
            model: HuggingFace Transformers Pytorch language model.
            tokenizer: The tokenizer associated with the model
            model_name: The name of the model. Used to retrieve required settings (like what the embedding layer is called)
            config: Configuration that has the information about the layer whose activations we will collect
            collect_activations_flag: True if we want to collect activations
            collect_activations_layer_nums: If collecting activations, we can use this parameter to indicate which layers
                to track. By default this would be None and we'd collect activations for all layers.
            verbose: If True, model.generate() displays output tokens in HTML as they're generated.
            gpu: Set to False to force using the CPU even if a GPU exists.
        Raises:
            ValueError: if the model's entry in Ecco's 'model-config.yaml' is missing a required key.
        """
        self.model_name = model_name
        self.model = model
        # Move the model to the GPU when one is available and the caller allows it.
        if torch.cuda.is_available() and gpu:
            self.model = model.to('cuda')
        # Record where the model actually ended up; self.to() relies on this.
        self.device = 'cuda' if torch.cuda.is_available() \
                               and self.model.device.type == 'cuda' \
            else 'cpu'
        self.tokenizer = tokenizer
        self.verbose = verbose
        # Package install directory; used to locate the bundled html/js assets.
        self._path = os.path.dirname(ecco.__file__)
        # Neuron Activation collection settings
        self.collect_activations_flag = collect_activations_flag
        self.collect_activations_layer_nums = collect_activations_layer_nums
        # For each model, this config indicates the layer whose activations
        # we will collect, plus the embedding layer's attribute path.
        self.model_config = config
        try:
            self.model_type = self.model_config['type']
            embeddings_layer_name = self.model_config['embedding']
            # attrgetter resolves dotted paths like 'transformer.wte.weight'.
            embed_retriever = attrgetter(embeddings_layer_name)
            self.model_embeddings = embed_retriever(self.model)
            self.collect_activations_layer_name_sig = self.model_config['activations'][0]
        except KeyError:
            raise ValueError(
                f"The model '{self.model_name}' is not correctly configured in Ecco's 'model-config.yaml' file"
            ) from KeyError()
        assert self.model_type in ['causal', 'mlm', 'enc-dec'], f"model type {self.model_type} not found"
        self._reset()
        # If running in Jupyter, outputting setup this in one cell is enough. But for colab
        # we're running it before every d.HTML cell
        # d.display(d.HTML(filename=os.path.join(self._path, "html", "setup.html")))
def _reset(self):
self._all_activations_dict = defaultdict(dict)
self.activations = defaultdict(dict)
self.all_activations = []
self.generation_activations = []
self.neurons_to_inhibit = {}
self.neurons_to_induce = {}
self._hooks = {}
def to(self, tensor: Union[torch.Tensor, BatchEncoding]):
if self.device == 'cuda':
return tensor.to('cuda')
return tensor
    def _analyze_token(self,
                       encoder_input_embeds: torch.Tensor,
                       encoder_attention_mask: Optional,  # TODO: use encoder mask and also decoder mask
                       decoder_input_embeds: Optional[torch.Tensor],
                       prediction_id: torch.Tensor,
                       attribution_flags: Optional[List[str]] = []) -> None:
        """
        Analyzes a predicted token.
        Currently this method computes the primary attribution explainability scores for each given token.
        Args:
            encoder_input_embeds: embeddings of the (encoder) input tokens.
            encoder_attention_mask: attention mask for the encoder input (accepted but not used here yet).
            decoder_input_embeds: embeddings fed to the decoder, or None for decoder-only models.
            prediction_id: id of the predicted token whose attribution is computed.
            attribution_flags: names of attribution methods to run; an empty list runs none.
        """
        for attr_method in attribution_flags:
            # deactivate hooks: attr method can perform multiple forward steps
            self._remove_hooks()
            # Add attribution scores to self.attributions (one entry per generated token)
            self.attributions[attr_method].append(
                compute_primary_attributions_scores(
                    attr_method=attr_method,
                    model=self.model,
                    forward_kwargs={
                        'inputs_embeds': encoder_input_embeds,
                        'decoder_inputs_embeds': decoder_input_embeds
                    },
                    prediction_id=prediction_id
                ).cpu().detach().numpy()
            )
    def generate(self, input_str: str,
                 max_length: Optional[int] = 8,
                 temperature: Optional[float] = None,
                 top_k: Optional[int] = None,
                 top_p: Optional[float] = None,
                 do_sample: Optional[bool] = False,
                 attribution: Optional[List[str]] = [],
                 generate: Optional[int] = None,
                 beam_size: int = 1,
                 num_return_sequences: int = 1,
                 **generate_kwargs: Any):
        """
        Generate tokens in response to an input prompt.
        Works with Language models like GPT2, not masked language models like BERT.
        Args:
            input_str: Input prompt. # TODO: accept batch of input strings
            generate: Number of tokens to generate.
            max_length: max length of sequence (input + output tokens)
            temperature: Adjust the probability distribution of output candidate tokens.
            top_k: Specify top-k tokens to consider in decoding. Only used when do_sample is True.
            top_p: Specify top-p to consider in decoding. Only used when do_sample is True.
            do_sample: Decoding parameter. If set to False, the model always
                chooses the highest scoring candidate output token. This may lead
                to repetitive text. If set to True, the model consults top_k
                and/or top_p to generate more interesting output.
            attribution: List of attribution methods to be calculated. By default, it does not calculate anything.
            beam_size: Beam size to consider while generating
            num_return_sequences: number of sequences to return
            generate_kwargs: Other arguments to be passed directly to self.model.generate
        """
        assert self.model_type in ['enc-dec', 'causal'], f"generate method not supported for model type '{self.model_type}'"
        assert num_return_sequences <= beam_size, "number of return sequences should be less than or equal to beam size"
        # Fall back to the model's configured decoding parameters for anything not given.
        top_k = top_k if top_k is not None else self.model.config.top_k
        top_p = top_p if top_p is not None else self.model.config.top_p
        temperature = temperature if temperature is not None else self.model.config.temperature
        do_sample = do_sample if do_sample is not None else self.model.config.task_specific_params.get('text-generation', {}).get('do_sample', False)
        pad_token_id = self.model.config.pad_token_id
        eos_token_id = self.model.config.eos_token_id
        # We need this as a batch in order to collect activations.
        input_tokenized_info = self.tokenizer(input_str, return_tensors="pt")
        input_tokenized_info = self.to(input_tokenized_info)
        input_ids, attention_mask = input_tokenized_info['input_ids'], input_tokenized_info['attention_mask']
        n_input_tokens = len(input_ids[0])
        cur_len = n_input_tokens
        # 'generate' (a token count), when given, overrides 'max_length' (a total length).
        if generate is not None:
            max_length = n_input_tokens + generate
        if cur_len >= max_length:
            raise ValueError(
                "max_length set to {} while input token has more tokens ({}). Consider increasing max_length" \
                .format(max_length, cur_len))
        # Get decoder input ids
        if self.model_type == 'enc-dec':  # FIXME: only done because causal LMs like GPT-2 have the _prepare_decoder_input_ids_for_generation method but do not use it
            assert len(input_ids.size()) == 2  # will break otherwise
            # The signature of _prepare_decoder_input_ids_for_generation changed in transformers 4.13
            # (it takes a batch size instead of the input ids tensor).
            if version.parse(transformers.__version__) >= version.parse('4.13'):
                decoder_input_ids = self.model._prepare_decoder_input_ids_for_generation(input_ids.shape[0], None, None)
            else:
                decoder_input_ids = self.model._prepare_decoder_input_ids_for_generation(input_ids, None, None)
        else:
            decoder_input_ids = None
        # Print output
        n_printed_tokens = n_input_tokens
        if self.verbose:
            viz_id = self.display_input_sequence(input_ids[0])
        # Get model output
        self._remove_hooks()  # deactivate hooks: we will run them for the last model forward only
        output = self.model.generate(
            input_ids=input_ids,
            attention_mask=attention_mask,
            num_beams=beam_size,
            # FIXME: +1 in max_length to account for first start token in decoder, find a better way to do this
            max_length=(generate or max_length - cur_len) + 1 if self.model_type == 'enc-dec' else max_length,
            do_sample=do_sample,
            top_p=top_p,
            top_k=top_k,
            temperature=temperature,
            return_dict_in_generate=True,
            output_scores=True,
            num_return_sequences=num_return_sequences,
            **generate_kwargs
        )
        # Get prediction logits for each chosen prediction id
        prediction_logits, prediction_ids = [], []
        if output.__class__.__name__.endswith("EncoderDecoderOutput"):
            # Skip the decoder start token at position 0.
            prediction_ids, prediction_scores = output.sequences[0][1:], output.scores
        elif output.__class__.__name__.endswith("DecoderOnlyOutput"):
            # Generated tokens come after the prompt tokens.
            prediction_ids, prediction_scores = output.sequences[0][n_input_tokens:], output.scores
        else:
            raise NotImplementedError(f"Unexpected output type: {type(output)}")
        assert prediction_ids != []
        if beam_size == 1:
            assert len(prediction_ids) == len(prediction_scores)
        # print the generated sequences & confidence scores
        # NOTE(review): output.sequences_scores is only populated by beam search
        # (beam_size > 1); confirm this block is safe when beam_size == 1.
        generated_tokens = output.sequences.reshape((beam_size, output.sequences.shape[1]))
        for i in range(num_return_sequences):
            pred_seq = "".join(self.tokenizer.batch_decode(generated_tokens[i, :], skip_special_tokens=True))
            pred_score = np.exp(np.array(output.sequences_scores.cpu()[i]))
            print(f"Result {i+1}:")
            print(f"Sequence: {pred_seq}")
            print("Score: %.2f" % pred_score)
        for pred_id, scores in zip(prediction_ids, prediction_scores):
            prediction_logits.append(scores[0][pred_id])
        # Analyze each generated token
        self.attributions = defaultdict(list)  # reset attributions dict
        for pred_index, prediction_id in enumerate(prediction_ids):
            # First get encoder/decoder input embeddings
            encoder_input_embeds, _ = self._get_embeddings(input_ids)
            # TODO: This is only okay as long as encoder and decoder share the embeddings
            # Should make separate ones for more flexibility
            if decoder_input_ids is not None:
                decoder_input_embeds, _ = self._get_embeddings(decoder_input_ids)
            else:
                decoder_input_embeds= None
            if pred_index == len(prediction_ids) - 1:  # -1 because we want to catch the inputs for the last generated token
                # attach hooks and run last forward step
                # TODO: collect activation for more than 1 step
                self._attach_hooks(self.model)
                extra_forward_kwargs = {'attention_mask': attention_mask, 'decoder_inputs_embeds': decoder_input_embeds}
                # Only pass kwargs that this model's forward() signature actually accepts.
                forward_kwargs = {
                    'inputs_embeds': encoder_input_embeds,
                    'use_cache': False,
                    'return_dict': True,
                    **{k: v for k, v in extra_forward_kwargs.items() if k in inspect.signature(self.model.forward).parameters}
                }
                _ = self.model(**forward_kwargs)
            # Get primary attributions for produced token
            self._analyze_token(
                encoder_input_embeds=encoder_input_embeds,
                encoder_attention_mask=attention_mask,
                decoder_input_embeds=decoder_input_embeds,
                attribution_flags=attribution,
                prediction_id=prediction_id
            )
            # Recomputing inputs ids, attention mask and decoder input ids
            if decoder_input_ids is not None:
                assert len(decoder_input_ids.size()) == 2  # will break otherwise
                decoder_input_ids = torch.cat(
                    [decoder_input_ids, torch.tensor([[prediction_id]], device=decoder_input_ids.device)],
                    dim=-1
                )
            else:
                input_ids = torch.cat(
                    [input_ids, torch.tensor([[prediction_id]], device=input_ids.device)],
                    dim=-1
                )
            # Recomputing Attention Mask
            if getattr(self.model, '_prepare_attention_mask_for_generation'):
                assert len(input_ids.size()) == 2  # will break otherwise
                attention_mask = self.model._prepare_attention_mask_for_generation(input_ids, pad_token_id, eos_token_id)
                attention_mask = self.to(attention_mask)
            # For enc-dec models, displayed positions are offset by the prompt length.
            offset = n_input_tokens if decoder_input_ids is not None else 0
            generated_token_ids = decoder_input_ids if decoder_input_ids is not None else input_ids
            # More than one token can be generated at once (e.g., automatic split/pad tokens)
            while len(generated_token_ids[0]) + offset != n_printed_tokens:
                # Display token
                if self.verbose:
                    self.display_token(
                        viz_id,
                        generated_token_ids[0][n_printed_tokens - offset].cpu().numpy(),
                        cur_len
                    )
                n_printed_tokens += 1
                # Add a zero vector to the attributions vector, if we did not reach the last predicted token
                if len(generated_token_ids[0]) + offset != n_printed_tokens:
                    for k in self.attributions:
                        self.attributions[k].insert(-1, np.zeros_like(self.attributions[k][-1]))
            cur_len += 1
        # Get encoder/decoder hidden states
        embedding_states = None
        for attributes in ["hidden_states", "encoder_hidden_states", "decoder_hidden_states"]:
            out_attr = getattr(output, attributes, None)
            if out_attr is not None:
                tokens_hs_list = []
                for token_out_attr in out_attr:
                    hs_list = []
                    for idx, layer_hs in enumerate(token_out_attr):
                        # in Hugging Face Transformers v4, there's an extra index for batch
                        if len(layer_hs.shape) == 3:  # If there's a batch dimension, pick the first one
                            hs = layer_hs.cpu().detach()[0].unsqueeze(0)  # Adding a dimension to concat to later
                        # Earlier versions are only 2 dimensional
                        # But also, in v4, for GPT2, all except the last one would have 3 dims, the last layer
                        # would only have two dims
                        else:
                            hs = layer_hs.cpu().detach().unsqueeze(0)
                        hs_list.append(hs)
                    # First hidden state is the embedding layer, skip it
                    # FIXME: do this in a cleaner way
                    hs_list = torch.cat(hs_list, dim=0)
                    embedding_states = hs_list[0]
                    hidden_states = hs_list[1:]
                    tokens_hs_list.append(hidden_states)
                setattr(output, attributes, tokens_hs_list)
        # Pass 'hidden_states' to 'decoder_hidden_states'
        if getattr(output, "hidden_states", None) is not None:
            assert getattr(output, "encoder_hidden_states", None) is None \
                   and getattr(output, "decoder_hidden_states", None) is None, \
                "Not expected to have encoder_hidden_states/decoder_hidden_states with 'hidden_states'"
            setattr(output, "decoder_hidden_states", output.hidden_states)
        encoder_hidden_states = getattr(output, "encoder_hidden_states", None)
        decoder_hidden_states = getattr(output, "hidden_states", getattr(output, "decoder_hidden_states", None))
        # Turn activations from dict to a proper array
        activations_dict = self._all_activations_dict
        for layer_type, activations in activations_dict.items():
            self.activations[layer_type] = activations_dict_to_array(activations)
        if decoder_input_ids is not None:
            assert len(decoder_input_ids.size()) == 2
            all_token_ids = torch.cat([input_ids, decoder_input_ids], dim=-1)[0]
        else:
            all_token_ids = input_ids[0]
        tokens = self.tokenizer.convert_ids_to_tokens(all_token_ids)
        # tokens = []
        # for i in all_token_ids:
        #     token = self.tokenizer.decode([i])
        #     tokens.append(token)
        attributions = self.attributions
        attn = getattr(output, "attentions", None)
        return OutputSeq(**{'tokenizer': self.tokenizer,
                            'token_ids': all_token_ids.unsqueeze(0),  # Add a batch dimension
                            'n_input_tokens': n_input_tokens,
                            'output_text': self.tokenizer.decode(all_token_ids),
                            'tokens': [tokens],  # Add a batch dimension
                            'encoder_hidden_states': encoder_hidden_states,
                            'decoder_hidden_states': decoder_hidden_states,
                            'embedding_states': embedding_states,
                            'attention': attn,
                            'attribution': attributions,
                            'activations': self.activations,
                            'collect_activations_layer_nums': self.collect_activations_layer_nums,
                            'lm_head': self.model.lm_head,
                            'model_type': self.model_type,
                            'device': self.device,
                            'config': self.model_config})
def __call__(self, input_tokens: torch.Tensor):
"""
Run a forward pass through the model. For when we don't care about output tokens.
Currently only support activations collection. No attribution/saliency.
Usage:
```python
inputs = lm.tokenizer("Hello computer", return_tensors="pt")
output = lm(inputs)
```
Args:
input_tokens: tuple returned by tokenizer( TEXT, return_tensors="pt").
contains key 'input_ids', its value tensor with input token ids.
Shape is (batch_size, sequence_length).
Also a key for masked tokens
"""
if 'input_ids' not in input_tokens:
raise ValueError("Parameter 'input_tokens' needs to have the attribute 'input_ids'."
"Verify it was produced by the appropriate tokenizer with the "
"parameter return_tensors=\"pt\".")
# Move inputs to GPU if the model is on GPU
if self.model.device.type == "cuda" and input_tokens['input_ids'].device.type == "cpu":
input_tokens = self.to(input_tokens)
# Remove downstream. For now setting to batch length
n_input_tokens = len(input_tokens['input_ids'][0])
# attach hooks
self._attach_hooks(self.model)
# model
if self.model_type == 'mlm':
output = self.model(**input_tokens, return_dict=True)
lm_head = None
elif self.model_type == 'causal':
output = self.model(**input_tokens, return_dict=True, use_cache=False)
lm_head = self.model.lm_head
elif self.model_type == 'enc-dec':
decoder_input_ids = self.model._prepare_decoder_input_ids_for_generation(input_tokens['input_ids'], None, None)
output = self.model(**input_tokens, decoder_input_ids=decoder_input_ids, return_dict=True, use_cache=False)
lm_head = self.model.lm_head
else:
raise NotImplemented(f"model type {self.model_type} not found")
# Turn activations from dict to a proper array
activations_dict = self._all_activations_dict
for layer_type, activations in activations_dict.items():
self.activations[layer_type] = activations_dict_to_array(activations)
encoder_hidden_states = getattr(output, "encoder_hidden_states", None)
decoder_hidden_states = getattr(output, "hidden_states", getattr(output, "decoder_hidden_states", None))
if self.model_type in ['causal', 'mlm']:
# First hidden state of the causal model is the embedding layer, skip it
# FIXME: do this in a cleaner way
embedding_states = decoder_hidden_states[0]
decoder_hidden_states = decoder_hidden_states[1:]
elif self.model_type == 'enc-dec':
embedding_states = encoder_hidden_states[0]
encoder_hidden_states = encoder_hidden_states[1:]
else:
raise NotImplemented(f"model type {self.model_type} not found")
tokens = []
for i in input_tokens['input_ids']:
token = self.tokenizer.convert_ids_to_tokens(i)
tokens.append(token)
attn = getattr(output, "attentions", None)
return OutputSeq(**{'tokenizer': self.tokenizer,
'token_ids': input_tokens['input_ids'],
'n_input_tokens': n_input_tokens,
'tokens': tokens,
'encoder_hidden_states': encoder_hidden_states,
'decoder_hidden_states': decoder_hidden_states,
'embedding_states': embedding_states,
'attention': attn,
'activations': self.activations,
'collect_activations_layer_nums': self.collect_activations_layer_nums,
'lm_head': lm_head,
'model_type': self.model_type,
'device': self.device,
'config': self.model_config})
def _get_embeddings(self, input_ids) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
"""
Get token embeddings and one-hot vector into vocab. It's done via matrix multiplication
so that gradient attribution is available when needed.
Args:
input_ids: Int tensor containing token ids. Of length (sequence length).
Generally returned from the the tokenizer such as
lm.tokenizer(text, return_tensors="pt")['input_ids'][0]
Returns:
inputs_embeds: Embeddings of the tokens. Dimensions are (sequence_len, d_embed)
token_ids_tensor_one_hot: Dimensions are (sequence_len, vocab_size)
"""
embedding_matrix = self.model_embeddings
vocab_size = embedding_matrix.shape[0]
one_hot_tensor = self.to(_one_hot_batched(input_ids, vocab_size))
token_ids_tensor_one_hot = one_hot_tensor.clone().requires_grad_(True)
inputs_embeds = torch.matmul(token_ids_tensor_one_hot, embedding_matrix)
return inputs_embeds, token_ids_tensor_one_hot
    def _attach_hooks(self, model):
        """
        Register hooks on every module whose name matches the configured
        activations-layer signature: a forward hook that records activations
        (when collection is enabled) and a pre-forward hook that applies
        neuron inhibition/induction. No-op if hooks are already attached.
        """
        # TODO: Collect activations for more than 1 step
        if self._hooks:
            # skip if hooks are already attached
            return
        for name, module in model.named_modules():
            # Add hooks to capture activations in every FFNN
            if re.search(self.collect_activations_layer_name_sig, name):
                if self.collect_activations_flag:
                    # name=name binds the loop variable at definition time,
                    # avoiding Python's late-binding closure pitfall.
                    self._hooks[name] = module.register_forward_hook(
                        lambda self_, input_, output,
                               name=name: self._get_activations_hook(name, input_))
                # Register neuron inhibition hook (runs before the module's forward)
                self._hooks[name + '_inhibit'] = module.register_forward_pre_hook(
                    lambda self_, input_, name=name: \
                        self._inhibit_neurons_hook(name, input_)
                )
def _remove_hooks(self):
for handle in self._hooks.values():
handle.remove()
self._hooks = {}
def _get_activations_hook(self, name: str, input_):
"""
Collects the activation for all tokens (input and output).
The default activations collection method.
Args:
input_: activation tuple to capture. A tuple containing one tensor of
dimensions (batch_size, sequence_length, neurons)
"""
# print('_get_activations_hook', name)
# pprint(input_)
# print(type(input_), len(input_), type(input_[0]), input_[0].shape, len(input_[0]), input_[0][0].shape)
# in distilGPT and GPT2, the layer name is 'transformer.h.0.mlp.c_fc'
# Extract the number of the layer from the name
# TODO: it will not always be 2 for other models. Move to model-config
# layer_number = int(name.split('.')[2])
# Get the layer number. This will be an int with periods before aand after it.
# (?<=\.) means look for a period before the int
# \d+ means look for one or multiple digits
# (?=\.) means look for a period after the int
layer_number = re.search("(?<=\.)\d+(?=\.)", name).group(0)
layer_type = 'encoder' if name.startswith('encoder.') else 'decoder'
# print("layer number: ", layer_number)
collecting_this_layer = (self.collect_activations_layer_nums is None) or (
layer_number in self.collect_activations_layer_nums)
if collecting_this_layer:
# Initialize the layer's key the first time we encounter it
if layer_number not in self._all_activations_dict:
self._all_activations_dict[layer_type][layer_number] = [0]
# For MLM, we only run one inference step. We save it.
# For Causal LM, we could be running multiple inference steps with generate(). In that case,
# overwrite the previous step activations. This collects all activations in the last step
# Assuming all input tokens are presented as input, no "past"
# The inputs to c_proj already pass through the gelu activation function
self._all_activations_dict[layer_type][layer_number] = input_[0].detach().cpu().numpy()
    def _inhibit_neurons_hook(self, name: str, input_tensor):
        """
        After being attached as a pre-forward hook, it sets to zero the activation value
        of the neurons indicated in self.neurons_to_inhibit, and multiplies by 10 the
        neurons indicated in self.neurons_to_induce. Edits are applied in place to the
        last token position only.
        Args:
            name: hooked module's name; the layer number is parsed out of it.
            input_tensor: the module's input tuple; modified in place and returned.
        """
        layer_number = re.search("(?<=\.)\d+(?=\.)", name).group(0)
        # NOTE(review): layer_number is a *string* here, so the keys of
        # neurons_to_inhibit / neurons_to_induce must be strings for these
        # lookups to match — confirm against callers that populate them.
        if layer_number in self.neurons_to_inhibit.keys():
            for n in self.neurons_to_inhibit[layer_number]:
                input_tensor[0][0][-1][n] = 0  # tuple, batch, position
        if layer_number in self.neurons_to_induce.keys():
            for n in self.neurons_to_induce[layer_number]:
                # Amplify the neuron's activation tenfold.
                input_tensor[0][0][-1][n] = input_tensor[0][0][-1][n] * 10  # tuple, batch, position
        return input_tensor
    def display_input_sequence(self, input_ids):
        """
        Render the prompt's tokens in the notebook and create the JS output view
        that display_token() will later append generated tokens to.
        Args:
            input_ids: 1-D sequence/tensor of the prompt's token ids.
        Returns:
            viz_id: id of the created view, to be passed to display_token().
        """
        tokens = []
        for idx, token_id in enumerate(input_ids):
            type = "input"
            raw_token = self.tokenizer.convert_ids_to_tokens([token_id])[0]
            clean_token = self.tokenizer.decode(token_id)
            # Strip prefixes because bert decode still has ## for partials even after decode()
            clean_token = strip_tokenizer_prefix(self.model_config, clean_token)
            tokens.append({
                'token': clean_token,
                'is_partial': is_partial_token(self.model_config, raw_token),
                'position': idx,
                'token_id': int(token_id),
                'type': type})
        data = {'tokens': tokens}
        # Inject the JS/CSS scaffolding; needed before every HTML cell on Colab.
        d.display(d.HTML(filename=os.path.join(self._path, "html", "setup.html")))
        viz_id = f'viz_{round(random.random() * 1000000)}'
        # TODO: Stop passing tokenization_config to JS now that
        # it's handled with the is_partial parameter
        js = f"""
         requirejs( ['basic', 'ecco'], function(basic, ecco){{
            basic.init('{viz_id}') // Python needs to know the viz id. Used for each output token.
            window.ecco['{viz_id}'] = new ecco.renderOutputSequence({{
                    parentDiv: '{viz_id}',
                    data: {json.dumps(data)},
                    tokenization_config: {json.dumps(self.model_config['tokenizer_config'])}
            }})
         }}, function (err) {{
            console.log(err);
        }})"""
        d.display(d.Javascript(js))
        return viz_id
    def display_token(self, viz_id, token_id, position):
        """
        Append one generated token to an existing output view.
        Args:
            viz_id: id returned by display_input_sequence().
            token_id: id of the token to display.
            position: position of the token in the generated sequence.
        """
        raw_token = self.tokenizer.convert_ids_to_tokens([token_id])[0]
        clean_token = self.tokenizer.decode(token_id)
        # Strip prefixes because bert decode still has ## for partials even after decode()
        clean_token = strip_tokenizer_prefix(self.model_config, clean_token)
        token = {
            'token': clean_token,
            'is_partial': is_partial_token(self.model_config, raw_token),
            'token_id': int(token_id),
            'position': position,
            'type': 'output'
        }
        js = f"""
        // We don't really need these require scripts. But this is to avert
        //this code from running before display_input_sequence which DOES require external files
        requirejs(['basic', 'ecco'], function(basic, ecco){{
                console.log('addToken viz_id', '{viz_id}');
                window.ecco['{viz_id}'].addToken({json.dumps(token)})
                window.ecco['{viz_id}'].redraw()
        }})
        """
        d.display(d.Javascript(js))
def predict_token(self, inputs, topk=50, temperature=1.0):
output = self.model(**inputs)
scores = output[0][0][-1] / temperature
s = scores.detach().numpy()
sorted_predictions = s.argsort()[::-1]
sm = F.softmax(scores, dim=-1).detach().numpy()
tokens = [self.tokenizer.decode([t]) for t in sorted_predictions[:topk]]
probs = sm[sorted_predictions[:topk]]
prediction_data = []
for idx, (token, prob) in enumerate(zip(tokens, probs)):
# print(idx, token, prob)
prediction_data.append({'token': token,
'prob': str(prob),
'ranking': idx + 1,
'token_id': str(sorted_predictions[idx])
})
params = prediction_data
viz_id = 'viz_{}'.format(round(random.random() * 1000000))
d.display(d.HTML(filename=os.path.join(self._path, "html", "predict_token.html")))
js = """
requirejs(['predict_token'], function(predict_token){{
if (window.predict === undefined)
window.predict = {{}}
window.predict["{}"] = new predict_token.predictToken("{}", {})
}}
)
""".format(viz_id, viz_id, json.dumps(params))
d.display(d.Javascript(js))
def sample_output_token(scores, do_sample, temperature, top_k, top_p):
    """
    Pick the next token id from a logits tensor.

    Greedy argmax when do_sample is False; otherwise applies temperature and
    top-k / nucleus (top-p) filtering, then samples from the softmax.

    Args:
        scores: logits tensor of shape (batch, vocab_size).
        do_sample: sample from the filtered distribution instead of argmax.
        temperature: >1 flattens the distribution, <1 sharpens it.
        top_k: keep only the k highest-scoring candidates (sampling only).
        top_p: keep the smallest candidate set with cumulative prob >= p (sampling only).

    Returns:
        A squeezed tensor holding the chosen token id.
    """
    # TODO: Add beam search in here
    if not do_sample:
        # Greedy decoding: always take the highest-scoring token.
        return torch.argmax(scores, dim=-1).squeeze()
    # Temperature (higher temperature => more likely to sample low probability tokens)
    if temperature != 1.0:
        scores = scores / temperature
    # Top-p/top-k filtering
    filtered_logscores = transformers.generation_utils. \
        top_k_top_p_filtering(scores,
                              top_k=top_k,
                              top_p=top_p)
    # Sample from the renormalized distribution
    probs = F.softmax(filtered_logscores, dim=-1)
    return torch.multinomial(probs, num_samples=1).squeeze()
def _one_hot(token_ids: torch.Tensor, vocab_size: int) -> torch.Tensor:
return torch.zeros(len(token_ids), vocab_size, device=token_ids.device).scatter_(1, token_ids.unsqueeze(1), 1.)
def _one_hot_batched(token_ids: torch.Tensor, vocab_size: int) -> torch.Tensor:
batch_size, num_tokens = token_ids.shape
return torch.zeros(batch_size, num_tokens, vocab_size, device=token_ids.device).scatter_(-1, token_ids.unsqueeze(-1), 1.)
def activations_dict_to_array(activations_dict):
    """
    Converts the dict used to collect activations into an array of the
    shape (batch, layers, neurons, token position).

    Args:
        activations_dict: python dictionary. Contains a key/value for each layer
            in the model whose activations were collected. Key is the layer id
            ('0', '1'). Value is an array of shape (batch, position, neurons).

    Returns:
        numpy array of shape (batch, layers, neurons, position).
    """
    # Fix: sort layer ids numerically. The keys are digit strings, so the
    # original plain sorted() was lexicographic and ordered layer '10'
    # before layer '2', scrambling the layer axis for models with >= 10 layers.
    layers = [activations_dict[key] for key in sorted(activations_dict.keys(), key=int)]
    activations = np.array(layers)
    # 'activations' now is in the shape (layer, batch, position, neurons)
    activations = np.swapaxes(activations, 2, 3)  # -> (layer, batch, neurons, position)
    activations = np.swapaxes(activations, 0, 1)  # -> (batch, layer, neurons, position)
    return activations
|
from collections import defaultdict
import inspect
import json
import os
import random
import torch
import transformers
from transformers import BatchEncoding
import ecco
import numpy as np
from IPython import display as d
from torch.nn import functional as F
from ecco.attribution import compute_primary_attributions_scores
from ecco.output import OutputSeq
from typing import Optional, Any, List, Tuple, Dict, Union
from operator import attrgetter
import re
from ecco.util import is_partial_token, strip_tokenizer_prefix
from packaging import version
class LM(object):
"""
Ecco's central class. A wrapper around language models. We use it to run the language models
and collect important data like input saliency and neuron activations.
A LM object is typically not created directly by users,
it is returned by `ecco.from_pretrained()`.
Usage:
```python
import ecco
lm = ecco.from_pretrained('distilgpt2')
output = lm.generate("Hello computer")
```
"""
def __init__(self,
model: transformers.PreTrainedModel,
tokenizer: transformers.PreTrainedTokenizerFast,
model_name: str,
config: Dict[str, Any],
collect_activations_flag: Optional[bool] = False,
collect_activations_layer_nums: Optional[List[int]] = None, # None --> collect for all layers
verbose: Optional[bool] = True,
gpu: Optional[bool] = True
):
"""
Creates an LM object given a model and tokenizer.
Args:
model: HuggingFace Transformers Pytorch language model.
tokenizer: The tokenizer associated with the model
model_name: The name of the model. Used to retrieve required settings (like what the embedding layer is called)
config: Configuration that has the information about the layer whose activations we will collect
collect_activations_flag: True if we want to collect activations
collect_activations_layer_nums: If collecting activations, we can use this parameter to indicate which layers
to track. By default this would be None and we'd collect activations for all layers.
verbose: If True, model.generate() displays output tokens in HTML as they're generated.
gpu: Set to False to force using the CPU even if a GPU exists.
"""
self.model_name = model_name
self.model = model
if torch.cuda.is_available() and gpu:
self.model = model.to('cuda')
self.device = 'cuda' if torch.cuda.is_available() \
and self.model.device.type == 'cuda' \
else 'cpu'
self.tokenizer = tokenizer
self.verbose = verbose
self._path = os.path.dirname(ecco.__file__)
# Neuron Activation
self.collect_activations_flag = collect_activations_flag
self.collect_activations_layer_nums = collect_activations_layer_nums
# For each model, this indicates the layer whose activations
# we will collect
self.model_config = config
try:
self.model_type = self.model_config['type']
embeddings_layer_name = self.model_config['embedding']
embed_retriever = attrgetter(embeddings_layer_name)
self.model_embeddings = embed_retriever(self.model)
self.collect_activations_layer_name_sig = self.model_config['activations'][0]
except KeyError:
raise ValueError(
f"The model '{self.model_name}' is not correctly configured in Ecco's 'model-config.yaml' file"
) from KeyError()
assert self.model_type in ['causal', 'mlm', 'enc-dec'], f"model type {self.model_type} not found"
self._reset()
# If running in Jupyer, outputting setup this in one cell is enough. But for colab
# we're running it before every d.HTML cell
# d.display(d.HTML(filename=os.path.join(self._path, "html", "setup.html")))
def _reset(self):
self._all_activations_dict = defaultdict(dict)
self.activations = defaultdict(dict)
self.all_activations = []
self.generation_activations = []
self.neurons_to_inhibit = {}
self.neurons_to_induce = {}
self._hooks = {}
def to(self, tensor: Union[torch.Tensor, BatchEncoding]):
if self.device == 'cuda':
return tensor.to('cuda')
return tensor
def _analyze_token(self,
encoder_input_embeds: torch.Tensor,
encoder_attention_mask: Optional, # TODO: use encoder mask and also decoder mask
decoder_input_embeds: Optional[torch.Tensor],
prediction_id: torch.Tensor,
attribution_flags: Optional[List[str]] = []) -> None:
"""
Analyzes a predicted token.
Currently this methods computes the primary attribution explainability scores for each given token.
"""
for attr_method in attribution_flags:
# deactivate hooks: attr method can perform multiple forward steps
self._remove_hooks()
# Add attribution scores to self.attributions
self.attributions[attr_method].append(
compute_primary_attributions_scores(
attr_method=attr_method,
model=self.model,
forward_kwargs={
'inputs_embeds': encoder_input_embeds,
'decoder_inputs_embeds': decoder_input_embeds
},
prediction_id=prediction_id
).cpu().detach().numpy()
)
def generate(self, input_str: str,
             max_length: Optional[int] = 8,
             temperature: Optional[float] = None,
             top_k: Optional[int] = None,
             top_p: Optional[float] = None,
             do_sample: Optional[bool] = False,
             attribution: Optional[List[str]] = [],
             generate: Optional[int] = None,
             beam_size: int = 1,
             num_return_sequences: int = 1,
             **generate_kwargs: Any):
    """
    Generate tokens in response to an input prompt.

    Works with language models like GPT2, not masked language models like BERT.

    Args:
        input_str: Input prompt. # TODO: accept batch of input strings
        max_length: Max length of sequence (input + output tokens).
        temperature: Adjusts the probability distribution of output candidate tokens.
        top_k: Only consider the top-k candidate tokens. Only used when do_sample is True.
        top_p: Nucleus-sampling threshold. Only used when do_sample is True.
        do_sample: If False, the model always chooses the highest-scoring candidate
            output token (may lead to repetitive text). If True, sampling consults
            top_k and/or top_p to generate more interesting output.
        attribution: List of attribution methods to compute. Empty by default.
        generate: Number of tokens to generate; when set, overrides max_length.
        beam_size: Beam size to consider while generating.
        num_return_sequences: Number of sequences to return (must be <= beam_size).
        generate_kwargs: Other arguments passed directly to self.model.generate.

    Returns:
        An OutputSeq bundling tokens, hidden states, activations and attributions.
    """
    assert self.model_type in ['enc-dec', 'causal'], f"generate method not supported for model type '{self.model_type}'"
    assert num_return_sequences <= beam_size, "number of return sequences should be less than or equal to beam size"

    # Fall back to the model's configured decoding defaults when not given.
    top_k = top_k if top_k is not None else self.model.config.top_k
    top_p = top_p if top_p is not None else self.model.config.top_p
    temperature = temperature if temperature is not None else self.model.config.temperature
    do_sample = do_sample if do_sample is not None else self.model.config.task_specific_params.get('text-generation', {}).get('do_sample', False)
    pad_token_id = self.model.config.pad_token_id
    eos_token_id = self.model.config.eos_token_id

    # Tokenize as a batch of one; we need batched tensors to collect activations.
    input_tokenized_info = self.tokenizer(input_str, return_tensors="pt")
    input_tokenized_info = self.to(input_tokenized_info)
    input_ids, attention_mask = input_tokenized_info['input_ids'], input_tokenized_info['attention_mask']
    n_input_tokens = len(input_ids[0])
    cur_len = n_input_tokens

    if generate is not None:
        max_length = n_input_tokens + generate

    if cur_len >= max_length:
        raise ValueError(
            "max_length set to {} while input token has more tokens ({}). Consider increasing max_length" \
            .format(max_length, cur_len))

    # Get decoder input ids
    if self.model_type == 'enc-dec':  # FIXME: only done because causal LMs like GPT-2 have the _prepare_decoder_input_ids_for_generation method but do not use it
        assert len(input_ids.size()) == 2  # will break otherwise
        # The signature of _prepare_decoder_input_ids_for_generation changed in transformers 4.13.
        if version.parse(transformers.__version__) >= version.parse('4.13'):
            decoder_input_ids = self.model._prepare_decoder_input_ids_for_generation(input_ids.shape[0], None, None)
        else:
            decoder_input_ids = self.model._prepare_decoder_input_ids_for_generation(input_ids, None, None)
    else:
        decoder_input_ids = None

    # Set up live display of the input tokens
    n_printed_tokens = n_input_tokens
    if self.verbose:
        viz_id = self.display_input_sequence(input_ids[0])

    # Get model output
    self._remove_hooks()  # deactivate hooks: we will run them for the last model forward only
    output = self.model.generate(
        input_ids=input_ids,
        attention_mask=attention_mask,
        num_beams=beam_size,
        # FIXME: +1 in max_length to account for first start token in decoder, find a better way to do this
        max_length=(generate or max_length - cur_len) + 1 if self.model_type == 'enc-dec' else max_length,
        do_sample=do_sample,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        return_dict_in_generate=True,
        output_scores=True,
        num_return_sequences=num_return_sequences,
        **generate_kwargs
    )

    # Get prediction logits for each chosen prediction id
    prediction_logits, prediction_ids = [], []
    if output.__class__.__name__.endswith("EncoderDecoderOutput"):
        # Skip the decoder start token.
        prediction_ids, prediction_scores = output.sequences[0][1:], output.scores
    elif output.__class__.__name__.endswith("DecoderOnlyOutput"):
        # Keep only the newly generated tokens, not the prompt.
        prediction_ids, prediction_scores = output.sequences[0][n_input_tokens:], output.scores
    else:
        raise NotImplementedError(f"Unexpected output type: {type(output)}")
    assert prediction_ids != []
    if beam_size == 1:
        assert len(prediction_ids) == len(prediction_scores)

    # Print the generated sequences & confidence scores.
    # NOTE(review): output.sequences_scores only exists for beam-search outputs
    # (beam_size > 1); this block looks like it should be guarded by
    # `beam_size > 1` -- confirm against upstream.
    generated_tokens = output.sequences.reshape((beam_size, output.sequences.shape[1]))
    for i in range(num_return_sequences):
        pred_seq = "".join(self.tokenizer.batch_decode(generated_tokens[i, :], skip_special_tokens=True))
        pred_score = np.exp(np.array(output.sequences_scores.cpu()[i]))
        print(f"Result {i+1}:")
        print(f"Sequence: {pred_seq}")
        print("Score: %.2f" % pred_score)
    for pred_id, scores in zip(prediction_ids, prediction_scores):
        prediction_logits.append(scores[0][pred_id])

    # Analyze each generated token
    self.attributions = defaultdict(list)  # reset attributions dict
    for pred_index, prediction_id in enumerate(prediction_ids):
        # First get encoder/decoder input embeddings
        encoder_input_embeds, _ = self._get_embeddings(input_ids)
        # TODO: This is only okay as long as encoder and decoder share the embeddings
        # Should make separate ones for more flexibility
        if decoder_input_ids is not None:
            decoder_input_embeds, _ = self._get_embeddings(decoder_input_ids)
        else:
            decoder_input_embeds= None
        if pred_index == len(prediction_ids) - 1:  # -1 because we want to catch the inputs for the last generated token
            # attach hooks and run last forward step
            # TODO: collect activation for more than 1 step
            self._attach_hooks(self.model)
            extra_forward_kwargs = {'attention_mask': attention_mask, 'decoder_inputs_embeds': decoder_input_embeds}
            # Only forward the extra kwargs this model's forward() actually accepts.
            forward_kwargs = {
                'inputs_embeds': encoder_input_embeds,
                'use_cache': False,
                'return_dict': True,
                **{k: v for k, v in extra_forward_kwargs.items() if k in inspect.signature(self.model.forward).parameters}
            }
            _ = self.model(**forward_kwargs)
        # Get primary attributions for produced token
        self._analyze_token(
            encoder_input_embeds=encoder_input_embeds,
            encoder_attention_mask=attention_mask,
            decoder_input_embeds=decoder_input_embeds,
            attribution_flags=attribution,
            prediction_id=prediction_id
        )
        # Recompute input ids, attention mask and decoder input ids by
        # appending the freshly generated token.
        if decoder_input_ids is not None:
            assert len(decoder_input_ids.size()) == 2  # will break otherwise
            decoder_input_ids = torch.cat(
                [decoder_input_ids, torch.tensor([[prediction_id]], device=decoder_input_ids.device)],
                dim=-1
            )
        else:
            input_ids = torch.cat(
                [input_ids, torch.tensor([[prediction_id]], device=input_ids.device)],
                dim=-1
            )
        # Recompute attention mask.
        # NOTE(review): getattr without a default raises AttributeError when the
        # attribute is missing, and a bound method is always truthy -- confirm intent.
        if getattr(self.model, '_prepare_attention_mask_for_generation'):
            assert len(input_ids.size()) == 2  # will break otherwise
            attention_mask = self.model._prepare_attention_mask_for_generation(input_ids, pad_token_id, eos_token_id)
            attention_mask = self.to(attention_mask)
        offset = n_input_tokens if decoder_input_ids is not None else 0
        generated_token_ids = decoder_input_ids if decoder_input_ids is not None else input_ids
        # More than one token can be generated at once (e.g., automatic split/pad tokens)
        while len(generated_token_ids[0]) + offset != n_printed_tokens:
            # Display token
            if self.verbose:
                self.display_token(
                    viz_id,
                    generated_token_ids[0][n_printed_tokens - offset].cpu().numpy(),
                    cur_len
                )
            n_printed_tokens += 1
            # Add a zero vector to the attributions vector, if we did not reach the last predicted token
            if len(generated_token_ids[0]) + offset != n_printed_tokens:
                for k in self.attributions:
                    self.attributions[k].insert(-1, np.zeros_like(self.attributions[k][-1]))
        cur_len += 1

    # Get encoder/decoder hidden states, normalizing shapes across
    # transformers versions, and split off the embedding layer's state.
    embedding_states = None
    for attributes in ["hidden_states", "encoder_hidden_states", "decoder_hidden_states"]:
        out_attr = getattr(output, attributes, None)
        if out_attr is not None:
            tokens_hs_list = []
            for token_out_attr in out_attr:
                hs_list = []
                for idx, layer_hs in enumerate(token_out_attr):
                    # in Hugging Face Transformers v4, there's an extra index for batch
                    if len(layer_hs.shape) == 3:  # If there's a batch dimension, pick the first one
                        hs = layer_hs.cpu().detach()[0].unsqueeze(0)  # Adding a dimension to concat to later
                    # Earlier versions are only 2 dimensional
                    # But also, in v4, for GPT2, all except the last one would have 3 dims, the last layer
                    # would only have two dims
                    else:
                        hs = layer_hs.cpu().detach().unsqueeze(0)
                    hs_list.append(hs)
                # First hidden state is the embedding layer, skip it
                # FIXME: do this in a cleaner way
                hs_list = torch.cat(hs_list, dim=0)
                embedding_states = hs_list[0]
                hidden_states = hs_list[1:]
                tokens_hs_list.append(hidden_states)
            setattr(output, attributes, tokens_hs_list)

    # Pass 'hidden_states' to 'decoder_hidden_states'
    if getattr(output, "hidden_states", None) is not None:
        assert getattr(output, "encoder_hidden_states", None) is None \
               and getattr(output, "decoder_hidden_states", None) is None, \
            "Not expected to have encoder_hidden_states/decoder_hidden_states with 'hidden_states'"
        setattr(output, "decoder_hidden_states", output.hidden_states)
    encoder_hidden_states = getattr(output, "encoder_hidden_states", None)
    decoder_hidden_states = getattr(output, "hidden_states", getattr(output, "decoder_hidden_states", None))

    # Turn activations from dict to a proper array
    activations_dict = self._all_activations_dict
    for layer_type, activations in activations_dict.items():
        self.activations[layer_type] = activations_dict_to_array(activations)

    if decoder_input_ids is not None:
        assert len(decoder_input_ids.size()) == 2
        all_token_ids = torch.cat([input_ids, decoder_input_ids], dim=-1)[0]
    else:
        all_token_ids = input_ids[0]
    tokens = self.tokenizer.convert_ids_to_tokens(all_token_ids)

    attributions = self.attributions
    attn = getattr(output, "attentions", None)
    return OutputSeq(**{'tokenizer': self.tokenizer,
                        'token_ids': all_token_ids.unsqueeze(0),  # Add a batch dimension
                        'n_input_tokens': n_input_tokens,
                        'output_text': self.tokenizer.decode(all_token_ids),
                        'tokens': [tokens],  # Add a batch dimension
                        'encoder_hidden_states': encoder_hidden_states,
                        'decoder_hidden_states': decoder_hidden_states,
                        'embedding_states': embedding_states,
                        'attention': attn,
                        'attribution': attributions,
                        'activations': self.activations,
                        'collect_activations_layer_nums': self.collect_activations_layer_nums,
                        'lm_head': self.model.lm_head,
                        'model_type': self.model_type,
                        'device': self.device,
                        'config': self.model_config})
def __call__(self, input_tokens: torch.Tensor):
"""
Run a forward pass through the model. For when we don't care about output tokens.
Currently only support activations collection. No attribution/saliency.
Usage:
```python
inputs = lm.tokenizer("Hello computer", return_tensors="pt")
output = lm(inputs)
```
Args:
input_tokens: tuple returned by tokenizer( TEXT, return_tensors="pt").
contains key 'input_ids', its value tensor with input token ids.
Shape is (batch_size, sequence_length).
Also a key for masked tokens
"""
if 'input_ids' not in input_tokens:
raise ValueError("Parameter 'input_tokens' needs to have the attribute 'input_ids'."
"Verify it was produced by the appropriate tokenizer with the "
"parameter return_tensors=\"pt\".")
# Move inputs to GPU if the model is on GPU
if self.model.device.type == "cuda" and input_tokens['input_ids'].device.type == "cpu":
input_tokens = self.to(input_tokens)
# Remove downstream. For now setting to batch length
n_input_tokens = len(input_tokens['input_ids'][0])
# attach hooks
self._attach_hooks(self.model)
# model
if self.model_type == 'mlm':
output = self.model(**input_tokens, return_dict=True)
lm_head = None
elif self.model_type == 'causal':
output = self.model(**input_tokens, return_dict=True, use_cache=False)
lm_head = self.model.lm_head
elif self.model_type == 'enc-dec':
decoder_input_ids = self.model._prepare_decoder_input_ids_for_generation(input_tokens['input_ids'], None, None)
output = self.model(**input_tokens, decoder_input_ids=decoder_input_ids, return_dict=True, use_cache=False)
lm_head = self.model.lm_head
else:
raise NotImplemented(f"model type {self.model_type} not found")
# Turn activations from dict to a proper array
activations_dict = self._all_activations_dict
for layer_type, activations in activations_dict.items():
self.activations[layer_type] = activations_dict_to_array(activations)
encoder_hidden_states = getattr(output, "encoder_hidden_states", None)
decoder_hidden_states = getattr(output, "hidden_states", getattr(output, "decoder_hidden_states", None))
if self.model_type in ['causal', 'mlm']:
# First hidden state of the causal model is the embedding layer, skip it
# FIXME: do this in a cleaner way
embedding_states = decoder_hidden_states[0]
decoder_hidden_states = decoder_hidden_states[1:]
elif self.model_type == 'enc-dec':
embedding_states = encoder_hidden_states[0]
encoder_hidden_states = encoder_hidden_states[1:]
else:
raise NotImplemented(f"model type {self.model_type} not found")
tokens = []
for i in input_tokens['input_ids']:
token = self.tokenizer.convert_ids_to_tokens(i)
tokens.append(token)
attn = getattr(output, "attentions", None)
return OutputSeq(**{'tokenizer': self.tokenizer,
'token_ids': input_tokens['input_ids'],
'n_input_tokens': n_input_tokens,
'tokens': tokens,
'encoder_hidden_states': encoder_hidden_states,
'decoder_hidden_states': decoder_hidden_states,
'embedding_states': embedding_states,
'attention': attn,
'activations': self.activations,
'collect_activations_layer_nums': self.collect_activations_layer_nums,
'lm_head': lm_head,
'model_type': self.model_type,
'device': self.device,
'config': self.model_config})
def _get_embeddings(self, input_ids) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
    """
    Return token embeddings together with a differentiable one-hot encoding.

    The embeddings are obtained by multiplying the one-hot matrix with the
    embedding table (instead of an index lookup) so that gradient attribution
    can flow back to the one-hot tensor when needed.

    Args:
        input_ids: Int tensor containing token ids, e.g.
            lm.tokenizer(text, return_tensors="pt")['input_ids'][0]

    Returns:
        inputs_embeds: token embeddings, dimensions (sequence_len, d_embed).
        token_ids_tensor_one_hot: one-hot tensor with requires_grad enabled,
            dimensions (sequence_len, vocab_size).
    """
    weights = self.model_embeddings
    n_vocab = weights.shape[0]
    token_ids_tensor_one_hot = self.to(
        _one_hot_batched(input_ids, n_vocab)
    ).clone().requires_grad_(True)
    inputs_embeds = torch.matmul(token_ids_tensor_one_hot, weights)
    return inputs_embeds, token_ids_tensor_one_hot
def _attach_hooks(self, model):
# TODO: Collect activations for more than 1 step
if self._hooks:
# skip if hooks are already attached
return
for name, module in model.named_modules():
# Add hooks to capture activations in every FFNN
if re.search(self.collect_activations_layer_name_sig, name):
# print("mlp.c_proj", self.collect_activations_flag , name)
if self.collect_activations_flag:
self._hooks[name] = module.register_forward_hook(
lambda self_, input_, output,
name=name: self._get_activations_hook(name, input_))
# Register neuron inhibition hook
self._hooks[name + '_inhibit'] = module.register_forward_pre_hook(
lambda self_, input_, name=name: \
self._inhibit_neurons_hook(name, input_)
)
def _remove_hooks(self):
for handle in self._hooks.values():
handle.remove()
self._hooks = {}
def _get_activations_hook(self, name: str, input_):
"""
Collects the activation for all tokens (input and output).
The default activations collection method.
Args:
input_: activation tuple to capture. A tuple containing one tensor of
dimensions (batch_size, sequence_length, neurons)
"""
# print('_get_activations_hook', name)
# pprint(input_)
# print(type(input_), len(input_), type(input_[0]), input_[0].shape, len(input_[0]), input_[0][0].shape)
# in distilGPT and GPT2, the layer name is 'transformer.h.0.mlp.c_fc'
# Extract the number of the layer from the name
# TODO: it will not always be 2 for other models. Move to model-config
# layer_number = int(name.split('.')[2])
# Get the layer number. This will be an int with periods before aand after it.
# (?<=\.) means look for a period before the int
# \d+ means look for one or multiple digits
# (?=\.) means look for a period after the int
layer_number = re.search("(?<=\.)\d+(?=\.)", name).group(0)
layer_type = 'encoder' if name.startswith('encoder.') else 'decoder'
# print("layer number: ", layer_number)
collecting_this_layer = (self.collect_activations_layer_nums is None) or (
layer_number in self.collect_activations_layer_nums)
if collecting_this_layer:
# Initialize the layer's key the first time we encounter it
if layer_number not in self._all_activations_dict:
self._all_activations_dict[layer_type][layer_number] = [0]
# For MLM, we only run one inference step. We save it.
# For Causal LM, we could be running multiple inference steps with generate(). In that case,
# overwrite the previous step activations. This collects all activations in the last step
# Assuming all input tokens are presented as input, no "past"
# The inputs to c_proj already pass through the gelu activation function
self._all_activations_dict[layer_type][layer_number] = input_[0].detach().cpu().numpy()
def _inhibit_neurons_hook(self, name: str, input_tensor):
"""
After being attached as a pre-forward hook, it sets to zero the activation value
of the neurons indicated in self.neurons_to_inhibit
"""
layer_number = re.search("(?<=\.)\d+(?=\.)", name).group(0)
if layer_number in self.neurons_to_inhibit.keys():
# print('layer_number', layer_number, input_tensor[0].shape)
for n in self.neurons_to_inhibit[layer_number]:
# print('inhibiting', layer_number, n)
input_tensor[0][0][-1][n] = 0 # tuple, batch, position
if layer_number in self.neurons_to_induce.keys():
# print('layer_number', layer_number, input_tensor[0].shape)
for n in self.neurons_to_induce[layer_number]:
# print('inhibiting', layer_number, n)
input_tensor[0][0][-1][n] = input_tensor[0][0][-1][n] * 10 # tuple, batch, position
return input_tensor
def display_input_sequence(self, input_ids):
    """
    Render the input prompt tokens as an HTML/JS visualization in the notebook
    and return the visualization id so generated tokens can be appended later.

    Args:
        input_ids: 1-D sequence of input token ids.

    Returns:
        viz_id: string id of the created JS visualization element.
    """
    tokens = []
    for idx, token_id in enumerate(input_ids):
        type = "input"
        raw_token = self.tokenizer.convert_ids_to_tokens([token_id])[0]
        clean_token = self.tokenizer.decode(token_id)
        # Strip prefixes because bert decode still has ## for partials even after decode()
        clean_token = strip_tokenizer_prefix(self.model_config, clean_token)
        tokens.append({
            'token': clean_token,
            'is_partial': is_partial_token(self.model_config, raw_token),
            'position': idx,
            'token_id': int(token_id),
            'type': type})
    data = {'tokens': tokens}

    # Inject the JS/CSS setup before the cell output (needed for Colab, where
    # a one-time setup in another cell is not enough).
    d.display(d.HTML(filename=os.path.join(self._path, "html", "setup.html")))
    viz_id = f'viz_{round(random.random() * 1000000)}'
    # TODO: Stop passing tokenization_config to JS now that
    # it's handled with the is_partial parameter
    js = f"""
     requirejs( ['basic', 'ecco'], function(basic, ecco){{
        basic.init('{viz_id}') // Python needs to know the viz id. Used for each output token.
        window.ecco['{viz_id}'] = new ecco.renderOutputSequence({{
            parentDiv: '{viz_id}',
            data: {json.dumps(data)},
            tokenization_config: {json.dumps(self.model_config['tokenizer_config'])}
        }})
     }}, function (err) {{
        console.log(err);
    }})"""
    d.display(d.Javascript(js))
    return viz_id
def display_token(self, viz_id, token_id, position):
    """
    Append one generated token to an existing notebook visualization.

    Args:
        viz_id: id returned by display_input_sequence().
        token_id: id of the token to display.
        position: the token's position in the overall sequence.
    """
    raw_token = self.tokenizer.convert_ids_to_tokens([token_id])[0]
    clean_token = self.tokenizer.decode(token_id)
    # Strip prefixes because bert decode still has ## for partials even after decode()
    clean_token = strip_tokenizer_prefix(self.model_config, clean_token)
    token = {
        'token': clean_token,
        'is_partial': is_partial_token(self.model_config, raw_token),
        'token_id': int(token_id),
        'position': position,
        'type': 'output'
    }
    js = f"""
    // We don't really need these require scripts. But this is to avert
    //this code from running before display_input_sequence which DOES require external files
    requirejs(['basic', 'ecco'], function(basic, ecco){{
            console.log('addToken viz_id', '{viz_id}');
            window.ecco['{viz_id}'].addToken({json.dumps(token)})
            window.ecco['{viz_id}'].redraw()
    }})
    """
    d.display(d.Javascript(js))
def predict_token(self, inputs, topk=50, temperature=1.0):
    """
    Display an interactive visualization of the model's top-k next-token
    predictions (with softmax probabilities) for the given inputs.

    Args:
        inputs: tokenizer output (mapping with 'input_ids' etc.) for the prompt.
        topk: number of candidate tokens to show.
        temperature: softmax temperature applied to the logits.
    """
    output = self.model(**inputs)
    # Logits at the last position of the first batch item.
    scores = output[0][0][-1] / temperature
    # NOTE(review): .numpy() on a CUDA tensor raises -- this path appears to
    # assume a CPU model (no .cpu() call). Confirm before using on GPU.
    s = scores.detach().numpy()
    sorted_predictions = s.argsort()[::-1]
    sm = F.softmax(scores, dim=-1).detach().numpy()

    tokens = [self.tokenizer.decode([t]) for t in sorted_predictions[:topk]]
    probs = sm[sorted_predictions[:topk]]

    prediction_data = []
    for idx, (token, prob) in enumerate(zip(tokens, probs)):
        prediction_data.append({'token': token,
                                'prob': str(prob),
                                'ranking': idx + 1,
                                'token_id': str(sorted_predictions[idx])
                                })

    params = prediction_data
    viz_id = 'viz_{}'.format(round(random.random() * 1000000))
    d.display(d.HTML(filename=os.path.join(self._path, "html", "predict_token.html")))
    js = """
     requirejs(['predict_token'], function(predict_token){{
        if (window.predict === undefined)
            window.predict = {{}}
        window.predict["{}"] = new predict_token.predictToken("{}", {})
     }}
     )
     """.format(viz_id, viz_id, json.dumps(params))
    d.display(d.Javascript(js))
def sample_output_token(scores, do_sample, temperature, top_k, top_p):
    """Pick the next token id from *scores*.

    Greedy argmax when do_sample is False; otherwise temperature-scaled
    top-k/top-p (nucleus) sampling. Returns a squeezed id tensor.
    """
    # TODO: Add beam search in here
    if not do_sample:
        # Greedy decoding: always take the highest-scoring candidate.
        return torch.argmax(scores, dim=-1).squeeze()

    # Temperature (higher temperature => more likely to sample low probability tokens)
    if temperature != 1.0:
        scores = scores / temperature
    # Top-p/top-k filtering
    filtered_logscores = transformers.generation_utils. \
        top_k_top_p_filtering(scores,
                              top_k=top_k,
                              top_p=top_p)
    # Sample from the filtered distribution
    sampled = torch.multinomial(F.softmax(filtered_logscores, dim=-1),
                                num_samples=1)
    return sampled.squeeze()
def _one_hot(token_ids: torch.Tensor, vocab_size: int) -> torch.Tensor:
return torch.zeros(len(token_ids), vocab_size, device=token_ids.device).scatter_(1, token_ids.unsqueeze(1), 1.)
def _one_hot_batched(token_ids: torch.Tensor, vocab_size: int) -> torch.Tensor:
batch_size, num_tokens = token_ids.shape
return torch.zeros(batch_size, num_tokens, vocab_size, device=token_ids.device).scatter_(-1, token_ids.unsqueeze(-1), 1.)
def activations_dict_to_array(activations_dict):
    """
    Convert collected activations into one array of shape
    (batch, layers, neurons, token position).

    Args:
        activations_dict: dict with one key/value per collected layer. Keys are
            layer ids ('0', '1', ...); values are arrays of shape
            (batch, position, neurons).

    Returns:
        numpy array of shape (batch, layers, neurons, position), with layers
        ordered by sorted key.
    """
    per_layer = [activations_dict[key] for key in sorted(activations_dict.keys())]
    stacked = np.array(per_layer)           # (layer, batch, position, neurons)
    stacked = np.swapaxes(stacked, 2, 3)    # (layer, batch, neurons, position)
    return np.swapaxes(stacked, 0, 1)       # (batch, layer, neurons, position)
|
en
| 0.737584
|
Ecco's central class. A wrapper around language models. We use it to run the language models and collect important data like input saliency and neuron activations. A LM object is typically not created directly by users, it is returned by `ecco.from_pretrained()`. Usage: ```python import ecco lm = ecco.from_pretrained('distilgpt2') output = lm.generate("Hello computer") ``` # None --> collect for all layers Creates an LM object given a model and tokenizer. Args: model: HuggingFace Transformers Pytorch language model. tokenizer: The tokenizer associated with the model model_name: The name of the model. Used to retrieve required settings (like what the embedding layer is called) config: Configuration that has the information about the layer whose activations we will collect collect_activations_flag: True if we want to collect activations collect_activations_layer_nums: If collecting activations, we can use this parameter to indicate which layers to track. By default this would be None and we'd collect activations for all layers. verbose: If True, model.generate() displays output tokens in HTML as they're generated. gpu: Set to False to force using the CPU even if a GPU exists. # Neuron Activation # For each model, this indicates the layer whose activations # we will collect # If running in Jupyer, outputting setup this in one cell is enough. But for colab # we're running it before every d.HTML cell # d.display(d.HTML(filename=os.path.join(self._path, "html", "setup.html"))) # TODO: use encoder mask and also decoder mask Analyzes a predicted token. Currently this methods computes the primary attribution explainability scores for each given token. # deactivate hooks: attr method can perform multiple forward steps # Add attribution scores to self.attributions Generate tokens in response to an input prompt. Works with Language models like GPT2, not masked language models like BERT. Args: input_str: Input prompt. 
# TODO: accept batch of input strings generate: Number of tokens to generate. max_length: max length of sequence (input + output tokens) temperature: Adjust the probability distibution of output candidate tokens. top_k: Specify top-k tokens to consider in decoding. Only used when do_sample is True. top_p: Specify top-p to consider in decoding. Only used when do_sample is True. do_sample: Decoding parameter. If set to False, the model always always chooses the highest scoring candidate output token. This may lead to repetitive text. If set to True, the model considers consults top_k and/or top_p to generate more interesting output. attribution: List of attribution methods to be calculated. By default, it does not calculate anything. beam_size: Beam size to consider while generating num_return_sequences: number of sequences to return generate_kwargs: Other arguments to be passed directly to self.model.generate # We need this as a batch in order to collect activations. # Get decoder input ids # FIXME: only done because causal LMs like GPT-2 have the _prepare_decoder_input_ids_for_generation method but do not use it # will break otherwise # Print output # Get model output # deactivate hooks: we will run them for the last model forward only # FIXME: +1 in max_length to account for first start token in decoder, find a better way to do this # Get prediction logits for each chosen prediction id # print the generated sequences & confidence scores # Analyze each generated token # reset attributions dict # First get encoder/decoder input embeddings # TODO: This is only okay as long as encoder and decoder share the embeddings # Should make separate ones for more flexibility # -1 because we want to catch the inputs for the last generated token # attach hooks and run last forward step # TODO: collect activation for more than 1 step # Get primary attributions for produced token # Recomputing inputs ids, attention mask and decoder input ids # will break otherwise # Recomputing 
Attention Mask # will break otherwise # More than one token can be generated at once (e.g., automatic split/pad tokens) # Display token # Add a zero vector to the attributions vector, if we did not reach the last predicted token # Get encoder/decoder hidden states # in Hugging Face Transformers v4, there's an extra index for batch # If there's a batch dimension, pick the first oen # Adding a dimension to concat to later # Earlier versions are only 2 dimensional # But also, in v4, for GPT2, all except the last one would have 3 dims, the last layer # would only have two dims # First hidden state is the embedding layer, skip it # FIXME: do this in a cleaner way # Pass 'hidden_states' to 'decoder_hidden_states' # Turn activations from dict to a proper array # tokens = [] # for i in all_token_ids: # token = self.tokenizer.decode([i]) # tokens.append(token) # Add a batch dimension # Add a batch dimension Run a forward pass through the model. For when we don't care about output tokens. Currently only support activations collection. No attribution/saliency. Usage: ```python inputs = lm.tokenizer("Hello computer", return_tensors="pt") output = lm(inputs) ``` Args: input_tokens: tuple returned by tokenizer( TEXT, return_tensors="pt"). contains key 'input_ids', its value tensor with input token ids. Shape is (batch_size, sequence_length). Also a key for masked tokens # Move inputs to GPU if the model is on GPU # Remove downstream. For now setting to batch length # attach hooks # model # Turn activations from dict to a proper array # First hidden state of the causal model is the embedding layer, skip it # FIXME: do this in a cleaner way Get token embeddings and one-hot vector into vocab. It's done via matrix multiplication so that gradient attribution is available when needed. Args: input_ids: Int tensor containing token ids. Of length (sequence length). 
Generally returned from the the tokenizer such as lm.tokenizer(text, return_tensors="pt")['input_ids'][0] Returns: inputs_embeds: Embeddings of the tokens. Dimensions are (sequence_len, d_embed) token_ids_tensor_one_hot: Dimensions are (sequence_len, vocab_size) # TODO: Collect activations for more than 1 step # skip if hooks are already attached # Add hooks to capture activations in every FFNN # print("mlp.c_proj", self.collect_activations_flag , name) # Register neuron inhibition hook Collects the activation for all tokens (input and output). The default activations collection method. Args: input_: activation tuple to capture. A tuple containing one tensor of dimensions (batch_size, sequence_length, neurons) # print('_get_activations_hook', name) # pprint(input_) # print(type(input_), len(input_), type(input_[0]), input_[0].shape, len(input_[0]), input_[0][0].shape) # in distilGPT and GPT2, the layer name is 'transformer.h.0.mlp.c_fc' # Extract the number of the layer from the name # TODO: it will not always be 2 for other models. Move to model-config # layer_number = int(name.split('.')[2]) # Get the layer number. This will be an int with periods before aand after it. # (?<=\.) means look for a period before the int # \d+ means look for one or multiple digits # (?=\.) means look for a period after the int # print("layer number: ", layer_number) # Initialize the layer's key the first time we encounter it # For MLM, we only run one inference step. We save it. # For Causal LM, we could be running multiple inference steps with generate(). In that case, # overwrite the previous step activations. 
This collects all activations in the last step # Assuming all input tokens are presented as input, no "past" # The inputs to c_proj already pass through the gelu activation function After being attached as a pre-forward hook, it sets to zero the activation value of the neurons indicated in self.neurons_to_inhibit # print('layer_number', layer_number, input_tensor[0].shape) # print('inhibiting', layer_number, n) # tuple, batch, position # print('layer_number', layer_number, input_tensor[0].shape) # print('inhibiting', layer_number, n) # tuple, batch, position # Strip prefixes because bert decode still has ## for partials even after decode() # 'token': self.tokenizer.decode([token_id]), # TODO: Stop passing tokenization_config to JS now that # it's handled with the is_partial parameter requirejs( ['basic', 'ecco'], function(basic, ecco){{ basic.init('{viz_id}') // Python needs to know the viz id. Used for each output token. window.ecco['{viz_id}'] = new ecco.renderOutputSequence({{ parentDiv: '{viz_id}', data: {json.dumps(data)}, tokenization_config: {json.dumps(self.model_config['tokenizer_config'])} }}) }}, function (err) {{ console.log(err); }}) # Strip prefixes because bert decode still has ## for partials even after decode() # 'token': self.tokenizer.decode([token_id]), // We don't really need these require scripts. 
But this is to avert //this code from running before display_input_sequence which DOES require external files requirejs(['basic', 'ecco'], function(basic, ecco){{ console.log('addToken viz_id', '{viz_id}'); window.ecco['{viz_id}'].addToken({json.dumps(token)}) window.ecco['{viz_id}'].redraw() }}) # print(js) # print(idx, token, prob) requirejs(['predict_token'], function(predict_token){{ if (window.predict === undefined) window.predict = {{}} window.predict["{}"] = new predict_token.predictToken("{}", {}) }} ) # TODO: Add beam search in here # Temperature (higher temperature => more likely to sample low probability tokens) # Top-p/top-k filtering # Sample # Greedy decoding Converts the dict used to collect activations into an array of the shape (batch, layers, neurons, token position). Args: activations_dict: python dictionary. Contains a key/value for each layer in the model whose activations were collected. Key is the layer id ('0', '1'). Value is a tensor of shape (batch, position, neurons). # 'activations' now is in the shape (layer, batch, position, neurons) # print('after swapping: ', activations.shape)
| 2.451873
| 2
|
numpymate/packages/convert2/util.py
|
MacHu-GWU/numpymate-project
| 1
|
6627674
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def extract_number_from_string(text):
    """Extract number-like substrings from ``text``.

    Scans ``text`` for maximal runs of digit/dot characters, then keeps only
    the runs that parse as a float (so ``"1.2.3"`` and ``"."`` are dropped).

    :param text: arbitrary string to scan.
    :return: list of number-like substrings, in order of appearance.
    """
    candidates = []
    chunk = []
    for char in text:
        if char.isdigit() or char == ".":
            chunk.append(char)
        elif chunk:
            # A non-number character ends the current run.
            candidates.append("".join(chunk))
            chunk = []
    if chunk:
        # Flush a trailing run at end of input.
        candidates.append("".join(chunk))
    # Keep only runs that are valid numbers.  The original used a bare
    # ``except:``; ``float()`` on a str only raises ValueError here.
    valid = []
    for candidate in candidates:
        try:
            float(candidate)
        except ValueError:
            continue
        valid.append(candidate)
    return valid
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def extract_number_from_string(text):
    """Take number like string out of text.

    Collects maximal runs of digit/dot characters and returns those runs
    that can be parsed as a float, preserving their original order.
    """
    runs, buf = [], ""
    for ch in text:
        if ch.isdigit() or ch == ".":
            buf += ch
            continue
        if buf:
            runs.append(buf)
            buf = ""
    if buf:
        runs.append(buf)
    # Filter out runs that are not valid numbers (e.g. "." or "1.2.3").
    result = []
    for run in runs:
        try:
            float(run)
        except ValueError:
            continue
        result.append(run)
    return result
|
en
| 0.401762
|
#!/usr/bin/env python # -*- coding: utf-8 -*- Take number like string out of text.
| 4.128644
| 4
|
python/project/src/causal_system.py
|
DTUComputeCognitiveSystems/IntroductionToCausalInference
| 5
|
6627675
|
from ast import literal_eval
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from networkx.drawing.nx_pydot import graphviz_layout
class CausalSystem:
    """Base class for simulated structural causal models.

    Subclasses implement :meth:`_sample` and assign node values with
    ``self["node"] = values``.  Reading ``self["node"]`` inside ``_sample``
    registers that node as an ancestor of the next node assigned, which is
    how the causal graph is reconstructed automatically.
    """

    # Optional password; when supplied as ``password=...`` to sample(), the
    # hidden ("_"-prefixed) nodes are included in the returned table.
    _project_password = None

    def _sample(self, n_samples):
        """Subclass hook: populate every node with ``n_samples`` draws."""
        raise NotImplementedError

    ##########################################################################################
    # Internal

    # noinspection PyTypeChecker
    def __init__(self):
        self._interventions = None  # type: dict
        self._samples = None  # type: dict
        self._n_samples = None  # type: int
        # Ordering of node assignment (also defines column order).
        self.__ordering = None  # type: list
        self._node_nr = dict()
        # Graph bookkeeping, filled while sampling.
        self._create_graph = False
        self._current_ancestors = []
        self._ancestors = dict()
        self._descendants = dict()
        # Always ensure a single sample (this also builds the graph).
        _ = self.sample(1)

    def sample(self, n_samples, **interventions):
        """Draw ``n_samples`` joint samples, optionally under interventions.

        :param n_samples: number of samples for every node.
        :param interventions: ``node=value`` pairs; each intervened node is
            clamped to the given value (the do-operator).
        :return: ``pandas.DataFrame`` with one column per visible node.
        """
        # Set up per-call state.
        self._interventions = interventions
        self._samples = dict()
        self.__ordering = []
        self._node_nr = dict()
        self._n_samples = n_samples
        # BUG FIX: the graph dicts were never cleared between calls, so
        # repeated sampling accumulated duplicate descendant entries.
        self._ancestors = dict()
        self._descendants = dict()
        self._current_ancestors = []
        # Compute while recording the graph structure.
        self._create_graph = True
        self._sample(n_samples=n_samples)
        self._create_graph = False
        # Node number per node name (assignment order).
        self._node_nr = {key: nr for nr, key in enumerate(self.__ordering)}
        # Hidden ("_"-prefixed) nodes are only exposed with the password.
        if self._project_password is not None and interventions.get("password", None) == self._project_password:
            index = self.__ordering
        else:
            index = [key for key in self.__ordering if key[0] != "_"]
        # One column per node, one row per sample.
        table = pd.DataFrame(data=[self._samples[key] for key in index], index=index, dtype=float).T
        # Tear down per-call state.
        self._interventions = None
        self._samples = None
        self._n_samples = None
        return table

    def __getitem__(self, item):
        """Return the samples of ``item`` (recording it as an ancestor)."""
        if self._create_graph:
            self._current_ancestors.append(item)
        return self._samples[item]

    def __setitem__(self, key, value):
        """Assign samples to node ``key`` (applying interventions, if any)."""
        # Nodes may only be assigned once per sample() call.
        assert isinstance(self._samples, dict) and key not in self._samples
        self.__ordering.append(key)
        self._samples[key] = np.array(value)
        # Clamp the node if it is intervened upon.
        if isinstance(self._interventions, dict) and key in self._interventions:
            self._samples[key] = np.ones_like(self._samples[key]) * self._interventions[key]
        # Freeze: values can no longer be changed (no circular graphs anyway).
        self._samples[key].flags.writeable = False
        if self._create_graph:
            # Register this node as descendant of every node read since the
            # last assignment, then reset the read log.
            self._descendants[key] = []
            for ancestor in self._current_ancestors:
                self._descendants[ancestor].append(key)
            self._ancestors[key] = self._current_ancestors
            self._current_ancestors = []

    @property
    def ancestors(self):
        """Dict mapping each node to the nodes it was computed from."""
        return self._ancestors

    @property
    def descendants(self):
        """Dict mapping each node to the nodes computed from it."""
        return self._descendants

    @property
    def n_nodes(self):
        """Number of nodes in the system."""
        return len(self.__ordering)

    @property
    def nodes(self):
        """Node names in assignment order."""
        return self.__ordering

    @property
    def adjacency_matrix(self):
        """Binary adjacency matrix (row: ancestor, column: descendant)."""
        # BUG FIX: ``np.int`` was removed in NumPy 1.24; use builtin ``int``.
        graph = np.zeros((self.n_nodes, self.n_nodes), dtype=int)
        for ancestor, descendants in self.descendants.items():
            for descendant in descendants:
                graph[self._node_nr[ancestor], self._node_nr[descendant]] = 1
        graph = pd.DataFrame(
            data=graph, index=self.__ordering, columns=self.__ordering
        )
        return graph

    @property
    def edges(self):
        """Set of (ancestor, descendant) edges of the causal graph."""
        edge_set = set()
        for ancestor, descendants in self.descendants.items():
            for descendant in descendants:
                edge_set.add((ancestor, descendant))
        return edge_set

    def check_correct_graph(self, edge_list):
        """Return True if ``edge_list`` matches this system's causal graph.

        Comparison is case-insensitive; a graph lacking the hidden
        ("_"-prefixed) nodes is also accepted.  ``edge_list`` may be a
        (possibly repeatedly stringified) list of (from, to) pairs.
        """
        # Unwrap up to three levels of stringification.
        for _ in range(3):
            if isinstance(edge_list, str):
                edge_list = literal_eval(edge_list.strip())
        edge_set = set([(str(from_node).lower(), str(to_node).lower()) for from_node, to_node in edge_list])
        true_edge_set = {(from_node.lower(), to_node.lower()) for from_node, to_node in self.edges}
        true_edge_set_wo_hidden = {(from_node, to_node) for from_node, to_node in true_edge_set
                                   if "_" not in (from_node[0], to_node[0])}
        return edge_set == true_edge_set or edge_set == true_edge_set_wo_hidden

    @property
    def _ordering(self):
        return self.__ordering

    @_ordering.setter
    def _ordering(self, val):
        assert set(val) == set(self.__ordering), f"Ordering must contain all elements of causal graph.\n" \
                                                 f"Graph: {self._ordering}\n" \
                                                 f"New order: {val}\n" \
                                                 f"Difference: {set(val) ^ set(self._ordering)}"
        self.__ordering = val

    def draw_causal_graph(self):
        """Draw the causal graph with networkx/matplotlib (graphviz layout)."""
        # Ensure the graph structure reflects the current system.
        _ = self.sample(1)
        G = nx.DiGraph()
        G.add_nodes_from(self.nodes)
        # Fixed typo: the loop variable was spelled "ancesor".
        for ancestor, descendants in self.descendants.items():
            G.add_edges_from([(ancestor, val) for val in descendants])
        # Sizes, positions and labels for the plot.
        node_size = 2000
        pos = graphviz_layout(G, prog="dot")
        labels = [val.strip("_") for val in self.nodes]
        plt.close("all")
        plt.title("Causal Graph", fontsize=20)
        nx.draw(
            G, with_labels=True, pos=pos, labels=dict(zip(G.nodes, labels)),
            arrowsize=40,
            width=3,
            node_size=node_size,
            node_color="#ffffff",
            edgecolors="#000000",
            style="solid",
            linewidths=3,
            font_size=20,
        )
        # Widen the axis limits so nodes are not clipped.
        offset = np.sqrt(node_size) / 2
        y_lim = min([val for _, val in pos.values()]) - offset, max([val for _, val in pos.values()]) + offset
        x_lim = min([val for val, _ in pos.values()]) - offset, max([val for val, _ in pos.values()]) + offset
        plt.xlim(x_lim)
        plt.ylim(y_lim)

    ####################
    # Pre-made distributions

    def normal(self, mu, std):
        """Gaussian samples with mean ``mu`` and standard deviation ``std``."""
        return np.random.randn(self._n_samples) * std + mu

    def categorical(self, probabilities):
        """Categorical samples with values ``0.0 .. len(probabilities)-1``."""
        probabilities = np.array(probabilities) / np.sum(probabilities)
        # BUG FIX: ``np.float`` was removed in NumPy 1.24; use builtin ``float``.
        choices = np.array(list(range(len(probabilities))), dtype=float)
        return np.random.choice(a=choices, size=self._n_samples, replace=True, p=probabilities)

    def binary(self, p_success):
        """Bernoulli samples: 1.0 with probability ``p_success``, else 0.0."""
        return self.categorical(probabilities=np.array([1 - p_success, p_success]))
|
from ast import literal_eval
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from networkx.drawing.nx_pydot import graphviz_layout
class CausalSystem:
    """Base class for simulated structural causal models.

    Subclasses implement :meth:`_sample` and assign node values with
    ``self["node"] = values``.  Reading ``self["node"]`` inside ``_sample``
    registers that node as an ancestor of the next node assigned, which is
    how the causal graph is reconstructed automatically.
    """

    # Optional password; when supplied as ``password=...`` to sample(), the
    # hidden ("_"-prefixed) nodes are included in the returned table.
    _project_password = None

    def _sample(self, n_samples):
        """Subclass hook: populate every node with ``n_samples`` draws."""
        raise NotImplementedError

    ##########################################################################################
    # Internal

    # noinspection PyTypeChecker
    def __init__(self):
        self._interventions = None  # type: dict
        self._samples = None  # type: dict
        self._n_samples = None  # type: int
        # Ordering of node assignment (also defines column order).
        self.__ordering = None  # type: list
        self._node_nr = dict()
        # Graph bookkeeping, filled while sampling.
        self._create_graph = False
        self._current_ancestors = []
        self._ancestors = dict()
        self._descendants = dict()
        # Always ensure a single sample (this also builds the graph).
        _ = self.sample(1)

    def sample(self, n_samples, **interventions):
        """Draw ``n_samples`` joint samples, optionally under interventions.

        :param n_samples: number of samples for every node.
        :param interventions: ``node=value`` pairs; each intervened node is
            clamped to the given value (the do-operator).
        :return: ``pandas.DataFrame`` with one column per visible node.
        """
        # Set up per-call state.
        self._interventions = interventions
        self._samples = dict()
        self.__ordering = []
        self._node_nr = dict()
        self._n_samples = n_samples
        # BUG FIX: the graph dicts were never cleared between calls, so
        # repeated sampling accumulated duplicate descendant entries.
        self._ancestors = dict()
        self._descendants = dict()
        self._current_ancestors = []
        # Compute while recording the graph structure.
        self._create_graph = True
        self._sample(n_samples=n_samples)
        self._create_graph = False
        # Node number per node name (assignment order).
        self._node_nr = {key: nr for nr, key in enumerate(self.__ordering)}
        # Hidden ("_"-prefixed) nodes are only exposed with the password.
        if self._project_password is not None and interventions.get("password", None) == self._project_password:
            index = self.__ordering
        else:
            index = [key for key in self.__ordering if key[0] != "_"]
        # One column per node, one row per sample.
        table = pd.DataFrame(data=[self._samples[key] for key in index], index=index, dtype=float).T
        # Tear down per-call state.
        self._interventions = None
        self._samples = None
        self._n_samples = None
        return table

    def __getitem__(self, item):
        """Return the samples of ``item`` (recording it as an ancestor)."""
        if self._create_graph:
            self._current_ancestors.append(item)
        return self._samples[item]

    def __setitem__(self, key, value):
        """Assign samples to node ``key`` (applying interventions, if any)."""
        # Nodes may only be assigned once per sample() call.
        assert isinstance(self._samples, dict) and key not in self._samples
        self.__ordering.append(key)
        self._samples[key] = np.array(value)
        # Clamp the node if it is intervened upon.
        if isinstance(self._interventions, dict) and key in self._interventions:
            self._samples[key] = np.ones_like(self._samples[key]) * self._interventions[key]
        # Freeze: values can no longer be changed (no circular graphs anyway).
        self._samples[key].flags.writeable = False
        if self._create_graph:
            # Register this node as descendant of every node read since the
            # last assignment, then reset the read log.
            self._descendants[key] = []
            for ancestor in self._current_ancestors:
                self._descendants[ancestor].append(key)
            self._ancestors[key] = self._current_ancestors
            self._current_ancestors = []

    @property
    def ancestors(self):
        """Dict mapping each node to the nodes it was computed from."""
        return self._ancestors

    @property
    def descendants(self):
        """Dict mapping each node to the nodes computed from it."""
        return self._descendants

    @property
    def n_nodes(self):
        """Number of nodes in the system."""
        return len(self.__ordering)

    @property
    def nodes(self):
        """Node names in assignment order."""
        return self.__ordering

    @property
    def adjacency_matrix(self):
        """Binary adjacency matrix (row: ancestor, column: descendant)."""
        # BUG FIX: ``np.int`` was removed in NumPy 1.24; use builtin ``int``.
        graph = np.zeros((self.n_nodes, self.n_nodes), dtype=int)
        for ancestor, descendants in self.descendants.items():
            for descendant in descendants:
                graph[self._node_nr[ancestor], self._node_nr[descendant]] = 1
        graph = pd.DataFrame(
            data=graph, index=self.__ordering, columns=self.__ordering
        )
        return graph

    @property
    def edges(self):
        """Set of (ancestor, descendant) edges of the causal graph."""
        edge_set = set()
        for ancestor, descendants in self.descendants.items():
            for descendant in descendants:
                edge_set.add((ancestor, descendant))
        return edge_set

    def check_correct_graph(self, edge_list):
        """Return True if ``edge_list`` matches this system's causal graph.

        Comparison is case-insensitive; a graph lacking the hidden
        ("_"-prefixed) nodes is also accepted.  ``edge_list`` may be a
        (possibly repeatedly stringified) list of (from, to) pairs.
        """
        # Unwrap up to three levels of stringification.
        for _ in range(3):
            if isinstance(edge_list, str):
                edge_list = literal_eval(edge_list.strip())
        edge_set = set([(str(from_node).lower(), str(to_node).lower()) for from_node, to_node in edge_list])
        true_edge_set = {(from_node.lower(), to_node.lower()) for from_node, to_node in self.edges}
        true_edge_set_wo_hidden = {(from_node, to_node) for from_node, to_node in true_edge_set
                                   if "_" not in (from_node[0], to_node[0])}
        return edge_set == true_edge_set or edge_set == true_edge_set_wo_hidden

    @property
    def _ordering(self):
        return self.__ordering

    @_ordering.setter
    def _ordering(self, val):
        assert set(val) == set(self.__ordering), f"Ordering must contain all elements of causal graph.\n" \
                                                 f"Graph: {self._ordering}\n" \
                                                 f"New order: {val}\n" \
                                                 f"Difference: {set(val) ^ set(self._ordering)}"
        self.__ordering = val

    def draw_causal_graph(self):
        """Draw the causal graph with networkx/matplotlib (graphviz layout)."""
        # Ensure the graph structure reflects the current system.
        _ = self.sample(1)
        G = nx.DiGraph()
        G.add_nodes_from(self.nodes)
        # Fixed typo: the loop variable was spelled "ancesor".
        for ancestor, descendants in self.descendants.items():
            G.add_edges_from([(ancestor, val) for val in descendants])
        # Sizes, positions and labels for the plot.
        node_size = 2000
        pos = graphviz_layout(G, prog="dot")
        labels = [val.strip("_") for val in self.nodes]
        plt.close("all")
        plt.title("Causal Graph", fontsize=20)
        nx.draw(
            G, with_labels=True, pos=pos, labels=dict(zip(G.nodes, labels)),
            arrowsize=40,
            width=3,
            node_size=node_size,
            node_color="#ffffff",
            edgecolors="#000000",
            style="solid",
            linewidths=3,
            font_size=20,
        )
        # Widen the axis limits so nodes are not clipped.
        offset = np.sqrt(node_size) / 2
        y_lim = min([val for _, val in pos.values()]) - offset, max([val for _, val in pos.values()]) + offset
        x_lim = min([val for val, _ in pos.values()]) - offset, max([val for val, _ in pos.values()]) + offset
        plt.xlim(x_lim)
        plt.ylim(y_lim)

    ####################
    # Pre-made distributions

    def normal(self, mu, std):
        """Gaussian samples with mean ``mu`` and standard deviation ``std``."""
        return np.random.randn(self._n_samples) * std + mu

    def categorical(self, probabilities):
        """Categorical samples with values ``0.0 .. len(probabilities)-1``."""
        probabilities = np.array(probabilities) / np.sum(probabilities)
        # BUG FIX: ``np.float`` was removed in NumPy 1.24; use builtin ``float``.
        choices = np.array(list(range(len(probabilities))), dtype=float)
        return np.random.choice(a=choices, size=self._n_samples, replace=True, p=probabilities)

    def binary(self, p_success):
        """Bernoulli samples: 1.0 with probability ``p_success``, else 0.0."""
        return self.categorical(probabilities=np.array([1 - p_success, p_success]))
|
en
| 0.474828
|
########################################################################################## # Internal # noinspection PyTypeChecker # type: dict # type: dict # type: int # Ordering # type: list # For graph # Always ensure a single sample # Set # Compute # Set node-nr # Filter keys # Make table # Reset # Return # Remember as ancestor if building graph # Return # Assert new item # Set item # Intervene if needed # Can no longer be changed (we do not allow circular graphs anyway) # Make graph # Set descendants # Set ancestors # Reset temporary variables # Ensure python object # Format # Get truth without caring about casing # Get truth without hidden nodes # Check # Ensure sampled # Make graph # Ensure nodes # Add edges # Sizes positions and labels for plot # Plot # arrowstyle=ArrowStyle("simple", head_length=1.3, head_width=1.3, tail_width=.1), # Fix limits #################### # Pre-made distributions
| 2.31946
| 2
|
mysite/album/views.py
|
rysnee/DoAn
| 0
|
6627676
|
from django.shortcuts import render, redirect
from django.contrib import messages
from .models import Photo, ResultPhoto
from .forms import PhotoForm
from .VLADlib.VLAD import *
from .VLADlib.Descriptors import *
import itertools
import argparse
import glob
import cv2
def retrieval_k(img, k):
    """Retrieve the ``k`` most similar indexed images for a query image.

    Loads the pre-built VLAD visual dictionary and ball-tree index from disk,
    queries the tree with the image's ORB descriptors, and returns the
    matching image ids together with their distances.

    :param img: query image (whatever ``query`` accepts, typically a path).
    :param k: number of nearest neighbours to retrieve.
    :return: ``(image_ids, distances)`` on success; ``(None, None)`` when the
        query yields no results (e.g. the image is too small for descriptors).
    """
    descriptorName = "ORB"
    pathVD = "VLADdata/visualDictionary/visualDictionary2ORB.pickle"
    treeIndex = "VLADdata/ballTreeIndexes/index_ORB_W2.pickle"
    # load the index; structure = (image ids, ball tree, image data path)
    with open(treeIndex, 'rb') as f:
        indexStructure = pickle.load(f)
    # load the visual dictionary
    with open(pathVD, 'rb') as f:
        visualDictionary = pickle.load(f)
    imageID = indexStructure[0]
    tree = indexStructure[1]
    # computing descriptors and querying the ball tree
    dist, ind = query(img, k, descriptorName, visualDictionary, tree)
    dist = dist.flatten()
    ind = ind.flatten()
    # BUG FIX: the original tested ``dist is 0`` (identity against an int
    # literal, never true for an array) and bare-returned ``None``, which
    # would crash the caller's tuple unpacking.  Detect an empty result and
    # return an explicit pair instead.
    if dist.size == 0:
        return None, None
    print(dist)
    print(ind)
    # loop over the results, collecting the image id for each neighbour
    k_image = [imageID[i] for i in ind]
    return k_image, dist
def photo_list(request):
    """Render the photo gallery; on POST, run image retrieval on the upload.

    GET: show all photos plus any previous retrieval results.
    POST: validate the uploaded photo, query the index for the 10 most
    similar images, persist them as ``ResultPhoto`` rows and render the
    result page.
    """
    photos = Photo.objects.all()
    result_photos = ResultPhoto.objects.all()
    if request.method == 'POST':
        # Each new query replaces the previous result set.
        ResultPhoto.objects.all().delete()
        form = PhotoForm(request.POST, request.FILES)
        if form.is_valid():
            img = form.save()
            image = img.file.path
            # Query the index for the 10 nearest images.  ``retrieval_k``
            # may bail out (image too small); guard against a ``None``
            # return instead of crashing on tuple unpacking.
            retrieved = retrieval_k(image, 10)
            result, scores = retrieved if retrieved is not None else (None, None)
            print('result =', result)
            print('scores =', scores)
            if result is None:  # was ``== None``; identity check is correct here
                messages.info(request, 'Image is too small!')
            else:
                # Querysets are lazy, so this re-binding is evaluated at
                # template render time, after the rows below are saved.
                result_photos = ResultPhoto.objects.all()
                print(ResultPhoto.objects.all().count())
                for index, score in zip(result, scores):
                    ResultPhoto(index=index, score=score).save()
            return render(request, 'album/result_photo_list.html',
                          {'form': form, 'photos': photos, 'result_photos': result_photos})
    else:
        form = PhotoForm()
    return render(request, 'album/photo_list.html',
                  {'form': form, 'photos': photos, 'result_photos': result_photos})
def result_photo_list(request):
    # Placeholder view: results are currently rendered by ``photo_list``
    # directly, so this returns ``None`` (not a valid HTTP response yet).
    return
|
from django.shortcuts import render, redirect
from django.contrib import messages
from .models import Photo, ResultPhoto
from .forms import PhotoForm
from .VLADlib.VLAD import *
from .VLADlib.Descriptors import *
import itertools
import argparse
import glob
import cv2
def retrieval_k(img, k):
    """Retrieve the ``k`` most similar indexed images for a query image.

    Loads the pre-built VLAD visual dictionary and ball-tree index from disk,
    queries the tree with the image's ORB descriptors, and returns the
    matching image ids together with their distances.

    :param img: query image (whatever ``query`` accepts, typically a path).
    :param k: number of nearest neighbours to retrieve.
    :return: ``(image_ids, distances)`` on success; ``(None, None)`` when the
        query yields no results (e.g. the image is too small for descriptors).
    """
    descriptorName = "ORB"
    pathVD = "VLADdata/visualDictionary/visualDictionary2ORB.pickle"
    treeIndex = "VLADdata/ballTreeIndexes/index_ORB_W2.pickle"
    # load the index; structure = (image ids, ball tree, image data path)
    with open(treeIndex, 'rb') as f:
        indexStructure = pickle.load(f)
    # load the visual dictionary
    with open(pathVD, 'rb') as f:
        visualDictionary = pickle.load(f)
    imageID = indexStructure[0]
    tree = indexStructure[1]
    # computing descriptors and querying the ball tree
    dist, ind = query(img, k, descriptorName, visualDictionary, tree)
    dist = dist.flatten()
    ind = ind.flatten()
    # BUG FIX: the original tested ``dist is 0`` (identity against an int
    # literal, never true for an array) and bare-returned ``None``, which
    # would crash the caller's tuple unpacking.  Detect an empty result and
    # return an explicit pair instead.
    if dist.size == 0:
        return None, None
    print(dist)
    print(ind)
    # loop over the results, collecting the image id for each neighbour
    k_image = [imageID[i] for i in ind]
    return k_image, dist
def photo_list(request):
    """Render the photo gallery; on POST, run image retrieval on the upload.

    GET: show all photos plus any previous retrieval results.
    POST: validate the uploaded photo, query the index for the 10 most
    similar images, persist them as ``ResultPhoto`` rows and render the
    result page.
    """
    photos = Photo.objects.all()
    result_photos = ResultPhoto.objects.all()
    if request.method == 'POST':
        # Each new query replaces the previous result set.
        ResultPhoto.objects.all().delete()
        form = PhotoForm(request.POST, request.FILES)
        if form.is_valid():
            img = form.save()
            image = img.file.path
            # Query the index for the 10 nearest images.  ``retrieval_k``
            # may bail out (image too small); guard against a ``None``
            # return instead of crashing on tuple unpacking.
            retrieved = retrieval_k(image, 10)
            result, scores = retrieved if retrieved is not None else (None, None)
            print('result =', result)
            print('scores =', scores)
            if result is None:  # was ``== None``; identity check is correct here
                messages.info(request, 'Image is too small!')
            else:
                # Querysets are lazy, so this re-binding is evaluated at
                # template render time, after the rows below are saved.
                result_photos = ResultPhoto.objects.all()
                print(ResultPhoto.objects.all().count())
                for index, score in zip(result, scores):
                    ResultPhoto(index=index, score=score).save()
            return render(request, 'album/result_photo_list.html',
                          {'form': form, 'photos': photos, 'result_photos': result_photos})
    else:
        form = PhotoForm()
    return render(request, 'album/photo_list.html',
                  {'form': form, 'photos': photos, 'result_photos': result_photos})
def result_photo_list(request):
    # Placeholder view: results are currently rendered by ``photo_list``
    # directly, so this returns ``None`` (not a valid HTTP response yet).
    return
|
en
| 0.358408
|
#load the index #load the visual dictionary # computing descriptors #ind = list(itertools.chain.from_iterable(ind)) # loop over the results # load the result image and display it #Query the image
| 2.24057
| 2
|
salt/modules/win_timezone.py
|
johnskopis/salt
| 5
|
6627677
|
<reponame>johnskopis/salt<gh_stars>1-10
# -*- coding: utf-8 -*-
'''
Module for managing timezone on Windows systems.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import logging
from datetime import datetime
# Import Salt libs
from salt.exceptions import CommandExecutionError
# Import 3rd party libs
try:
import pytz
HAS_PYTZ = True
except ImportError:
HAS_PYTZ = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'timezone'
class TzMapper(object):
    '''
    Bidirectional, case-insensitive mapping between Windows and Unix
    timezone names.

    Constructed from a ``{windows_name: unix_name}`` dict.  Lookup keys are
    lowercased; stored values keep their original casing.  (Note: despite
    its name, the constructor argument maps Windows names to Unix names.)
    '''
    def __init__(self, unix_to_win):
        # windows-name (lowercased) -> unix name
        self.win_to_unix = dict()
        # unix-name (lowercased) -> windows name
        self.unix_to_win = dict()
        for win_name, unix_name in unix_to_win.items():
            self.win_to_unix[win_name.lower()] = unix_name
            self.unix_to_win[unix_name.lower()] = win_name

    def add(self, k, v):
        '''Register a new pair: ``k`` is the Unix name, ``v`` the Windows name.'''
        self.unix_to_win[k.lower()] = v
        self.win_to_unix[v.lower()] = k

    def remove(self, k):
        '''Drop the pair registered under Unix name ``k``.'''
        win_name = self.unix_to_win.pop(k.lower())
        self.win_to_unix.pop(win_name.lower())

    def get_win(self, key, default=None):
        '''Return the Windows name for Unix name ``key``.'''
        return self.unix_to_win.get(key.lower(), default)

    def get_unix(self, key, default=None):
        '''Return the Unix name for Windows name ``key``.'''
        return self.win_to_unix.get(key.lower(), default)

    def list_win(self):
        '''Return all Windows timezone names, sorted.'''
        return sorted(self.unix_to_win.values())

    def list_unix(self):
        '''Return all Unix timezone names, sorted.'''
        return sorted(self.win_to_unix.values())
mapper = TzMapper({
'AUS Central Standard Time': 'Australia/Darwin',
'AUS Eastern Standard Time': 'Australia/Sydney',
'Afghanistan Standard Time': 'Asia/Kabul',
'Alaskan Standard Time': 'America/Anchorage',
'Aleutian Standard Time': 'America/Adak',
'Altai Standard Time': 'Asia/Barnaul',
'Arab Standard Time': 'Asia/Riyadh',
'Arabian Standard Time': 'Asia/Dubai',
'Arabic Standard Time': 'Asia/Baghdad',
'Argentina Standard Time': 'America/Buenos_Aires',
'Astrakhan Standard Time': 'Europe/Astrakhan',
'Atlantic Standard Time': 'America/Halifax',
'Aus Central W. Standard Time': 'Australia/Eucla',
'Azerbaijan Standard Time': 'Asia/Baku',
'Azores Standard Time': 'Atlantic/Azores',
'Bahia Standard Time': 'America/Bahia',
'Bangladesh Standard Time': 'Asia/Dhaka',
'Belarus Standard Time': 'Europe/Minsk',
'Bougainville Standard Time': 'Pacific/Bougainville',
'Canada Central Standard Time': 'America/Regina',
'Cape Verde Standard Time': 'Atlantic/Cape_Verde',
'Caucasus Standard Time': 'Asia/Yerevan',
'Cen. Australia Standard Time': 'Australia/Adelaide',
'Central America Standard Time': 'America/Guatemala',
'Central Asia Standard Time': 'Asia/Almaty',
'Central Brazilian Standard Time': 'America/Cuiaba',
'Central Europe Standard Time': 'Europe/Budapest',
'Central European Standard Time': 'Europe/Warsaw',
'Central Pacific Standard Time': 'Pacific/Guadalcanal',
'Central Standard Time': 'America/Chicago',
'Central Standard Time (Mexico)': 'America/Mexico_City',
'Chatham Islands Standard Time': 'Pacific/Chatham',
'China Standard Time': 'Asia/Shanghai',
'Cuba Standard Time': 'America/Havana',
'Dateline Standard Time': 'Etc/GMT+12',
'E. Africa Standard Time': 'Africa/Nairobi',
'E. Australia Standard Time': 'Australia/Brisbane',
'E. Europe Standard Time': 'Europe/Chisinau',
'E. South America Standard Time': 'America/Sao_Paulo',
'Easter Island Standard Time': 'Pacific/Easter',
'Eastern Standard Time': 'America/New_York',
'Eastern Standard Time (Mexico)': 'America/Cancun',
'Egypt Standard Time': 'Africa/Cairo',
'Ekaterinburg Standard Time': 'Asia/Yekaterinburg',
'FLE Standard Time': 'Europe/Kiev',
'Fiji Standard Time': 'Pacific/Fiji',
'GMT Standard Time': 'Europe/London',
'GTB Standard Time': 'Europe/Bucharest',
'Georgian Standard Time': 'Asia/Tbilisi',
'Greenland Standard Time': 'America/Godthab',
'Greenwich Standard Time': 'Atlantic/Reykjavik',
'Haiti Standard Time': 'America/Port-au-Prince',
'Hawaiian Standard Time': 'Pacific/Honolulu',
'India Standard Time': 'Asia/Calcutta',
'Iran Standard Time': 'Asia/Tehran',
'Israel Standard Time': 'Asia/Jerusalem',
'Jordan Standard Time': 'Asia/Amman',
'Kaliningrad Standard Time': 'Europe/Kaliningrad',
'Korea Standard Time': 'Asia/Seoul',
'Libya Standard Time': 'Africa/Tripoli',
'Line Islands Standard Time': 'Pacific/Kiritimati',
'Lord Howe Standard Time': 'Australia/Lord_Howe',
'Magadan Standard Time': 'Asia/Magadan',
'Magallanes Standard Time': 'America/Punta_Arenas',
'Marquesas Standard Time': 'Pacific/Marquesas',
'Mauritius Standard Time': 'Indian/Mauritius',
'Middle East Standard Time': 'Asia/Beirut',
'Montevideo Standard Time': 'America/Montevideo',
'Morocco Standard Time': 'Africa/Casablanca',
'Mountain Standard Time': 'America/Denver',
'Mountain Standard Time (Mexico)': 'America/Chihuahua',
'Myanmar Standard Time': 'Asia/Rangoon',
'N. Central Asia Standard Time': 'Asia/Novosibirsk',
'Namibia Standard Time': 'Africa/Windhoek',
'Nepal Standard Time': 'Asia/Katmandu',
'New Zealand Standard Time': 'Pacific/Auckland',
'Newfoundland Standard Time': 'America/St_Johns',
'Norfolk Standard Time': 'Pacific/Norfolk',
'North Asia East Standard Time': 'Asia/Irkutsk',
'North Asia Standard Time': 'Asia/Krasnoyarsk',
'North Korea Standard Time': 'Asia/Pyongyang',
'Omsk Standard Time': 'Asia/Omsk',
'Pacific SA Standard Time': 'America/Santiago',
'Pacific Standard Time': 'America/Los_Angeles',
'Pacific Standard Time (Mexico)': 'America/Tijuana',
'Pakistan Standard Time': 'Asia/Karachi',
'Paraguay Standard Time': 'America/Asuncion',
'Romance Standard Time': 'Europe/Paris',
'Russia Time Zone 10': 'Asia/Srednekolymsk',
'Russia Time Zone 11': 'Asia/Kamchatka',
'Russia Time Zone 3': 'Europe/Samara',
'Russian Standard Time': 'Europe/Moscow',
'SA Eastern Standard Time': 'America/Cayenne',
'SA Pacific Standard Time': 'America/Bogota',
'SA Western Standard Time': 'America/La_Paz',
'SE Asia Standard Time': 'Asia/Bangkok',
'Saint Pierre Standard Time': 'America/Miquelon',
'Sakhalin Standard Time': 'Asia/Sakhalin',
'Samoa Standard Time': 'Pacific/Apia',
'Saratov Standard Time': 'Europe/Saratov',
'Singapore Standard Time': 'Asia/Singapore',
'South Africa Standard Time': 'Africa/Johannesburg',
'Sri Lanka Standard Time': 'Asia/Colombo',
'Syria Standard Time': 'Asia/Damascus',
'Taipei Standard Time': 'Asia/Taipei',
'Tasmania Standard Time': 'Australia/Hobart',
'Tocantins Standard Time': 'America/Araguaina',
'Tokyo Standard Time': 'Asia/Tokyo',
'Tomsk Standard Time': 'Asia/Tomsk',
'Tonga Standard Time': 'Pacific/Tongatapu',
'Transbaikal Standard Time': 'Asia/Chita',
'Turkey Standard Time': 'Europe/Istanbul',
'Turks And Caicos Standard Time': 'America/Grand_Turk',
'US Eastern Standard Time': 'America/Indianapolis',
'US Mountain Standard Time': 'America/Phoenix',
'UTC': 'Etc/GMT',
'UTC+12': 'Etc/GMT-12',
'UTC+13': 'Etc/GMT-13',
'UTC-02': 'Etc/GMT+2',
'UTC-08': 'Etc/GMT+8',
'UTC-09': 'Etc/GMT+9',
'UTC-11': 'Etc/GMT+11',
'Ulaanbaatar Standard Time': 'Asia/Ulaanbaatar',
'Venezuela Standard Time': 'America/Caracas',
'Vladivostok Standard Time': 'Asia/Vladivostok',
'W. Australia Standard Time': 'Australia/Perth',
'W. Central Africa Standard Time': 'Africa/Lagos',
'W. Europe Standard Time': 'Europe/Berlin',
'W. Mongolia Standard Time': 'Asia/Hovd',
'West Asia Standard Time': 'Asia/Tashkent',
'West Bank Standard Time': 'Asia/Hebron',
'West Pacific Standard Time': 'Pacific/Port_Moresby',
'Yakutsk Standard Time': 'Asia/Yakutsk'})
def __virtual__():
    '''
    Only load on windows.

    Requires pytz and the ``tzutil`` executable to be present.
    '''
    # ``__utils__`` is injected into the module namespace by the Salt loader.
    if not __utils__['platform.is_windows']():
        return False, "Module win_timezone: Not on Windows client"
    if not HAS_PYTZ:
        return False, "Module win_timezone: pytz not found"
    if not __utils__['path.which']('tzutil'):
        return False, "Module win_timezone: tzutil not found"
    return __virtualname__
def get_zone():
    '''
    Get current timezone (i.e. America/Denver)

    Returns:
        str: Timezone in unix format

    Raises:
        CommandExecutionError: If timezone could not be gathered

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_zone
    '''
    res = __salt__['cmd.run_all'](['tzutil', '/g'], python_shell=False)
    # A non-zero exit code or empty output both indicate failure.
    if res['retcode'] or not res['stdout']:
        raise CommandExecutionError(
            'tzutil encountered an error getting timezone', info=res)
    # Translate the Windows name reported by tzutil into a Unix name.
    return mapper.get_unix(res['stdout'].lower(), 'Unknown')
def get_offset():
    '''
    Get current numeric timezone offset from UTC (i.e. -0700)

    Returns:
        str: Offset from UTC

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_offset
    '''
    # http://craigglennie.com/programming/python/2013/07/21/working-with-timezones-using-Python-and-pytz-localize-vs-normalize/
    tz = pytz.timezone(get_zone())
    local_now = pytz.utc.localize(datetime.utcnow()).astimezone(tz)
    # normalize() re-applies DST rules after the conversion.
    return tz.normalize(local_now).strftime('%z')
def get_zonecode():
    '''
    Get current timezone (i.e. PST, MDT, etc)

    Returns:
        str: An abbreviated timezone code

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_zonecode
    '''
    tz_object = pytz.timezone(get_zone())
    # BUG FIX: the previous code localized the naive UTC timestamp directly
    # into the target zone, treating UTC wall time as local wall time; around
    # DST transitions this returns the wrong abbreviation.  Convert from UTC
    # into the local zone first (mirrors get_offset).
    utc_time = pytz.utc.localize(datetime.utcnow())
    loc_time = utc_time.astimezone(tz_object)
    return loc_time.tzname()
def set_zone(timezone):
    '''
    Sets the timezone using the tzutil.

    Args:
        timezone (str): A valid timezone

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    Raises:
        CommandExecutionError: If invalid timezone is passed

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_zone 'America/Denver'
    '''
    lowered = timezone.lower()
    if lowered in mapper.win_to_unix:
        # Already a Windows-style name: pass it straight through.
        win_zone = timezone
    elif lowered in mapper.unix_to_win:
        # Unix-style name: translate to its Windows equivalent.
        win_zone = mapper.get_win(timezone)
    else:
        # Neither a Windows nor a Unix timezone name.
        raise CommandExecutionError('Invalid timezone passed: {0}'.format(timezone))
    # Apply the setting via tzutil.
    res = __salt__['cmd.run_all'](['tzutil', '/s', win_zone], python_shell=False)
    if res['retcode']:
        raise CommandExecutionError(
            'tzutil encountered an error setting timezone: {0}'.format(timezone),
            info=res)
    return zone_compare(timezone)
def zone_compare(timezone):
    '''
    Compares the given timezone with the machine timezone. Mostly useful for
    running state checks.

    Args:
        timezone (str):
            The timezone to compare. This can be in Windows or Unix format. Can
            be any of the values returned by the ``timezone.list`` function

    Returns:
        bool: ``True`` if they match, otherwise ``False``

    Example:

    .. code-block:: bash

        salt '*' timezone.zone_compare 'America/Denver'
    '''
    lowered = timezone.lower()
    if lowered in mapper.win_to_unix:
        # Already a Windows-style name.
        check_zone = timezone
    elif lowered in mapper.unix_to_win:
        # Unix-style name: translate to its Windows equivalent first.
        check_zone = mapper.get_win(timezone)
    else:
        # Neither a Windows nor a Unix timezone name.
        raise CommandExecutionError('Invalid timezone passed: {0}'.format(timezone))
    # Compare both sides in Unix format.
    return get_zone() == mapper.get_unix(check_zone, 'Unknown')
def list(unix_style=True):
    '''
    Return a list of Timezones that this module supports. These can be in either
    Unix or Windows format.

    .. versionadded:: 2018.3.3

    Args:
        unix_style (bool):
            ``True`` returns Unix-style timezones. ``False`` returns
            Windows-style timezones. Default is ``True``

    Returns:
        list: A list of supported timezones

    CLI Example:

    .. code-block:: bash

        # Unix-style timezones
        salt '*' timezone.list

        # Windows-style timezones
        salt '*' timezone.list unix_style=False
    '''
    # NOTE: shadows the ``list`` builtin; the name is kept for API
    # compatibility with callers of ``timezone.list``.
    return mapper.list_unix() if unix_style else mapper.list_win()
def get_hwclock():
    '''
    Get current hardware clock setting (UTC or localtime)

    .. note::
        The hardware clock is always local time on Windows so this will always
        return "localtime"

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_hwclock
    '''
    # Windows keeps the hardware clock in local time, so the answer is fixed.
    return 'localtime'
def set_hwclock(clock):
    '''
    Sets the hardware clock to be either UTC or localtime

    .. note::
        The hardware clock is always local time on Windows so this will always
        return ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_hwclock UTC
    '''
    # Windows keeps the hardware clock in local time; changing it is not
    # supported, so report failure unconditionally.
    return False
|
# -*- coding: utf-8 -*-
'''
Module for managing timezone on Windows systems.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import Python libs
import logging
from datetime import datetime
# Import Salt libs
from salt.exceptions import CommandExecutionError
# Import 3rd party libs
try:
import pytz
HAS_PYTZ = True
except ImportError:
HAS_PYTZ = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'timezone'
class TzMapper(object):
    '''
    Bidirectional, case-insensitive mapping between Windows and Unix
    timezone names.

    Constructed from a ``{windows_name: unix_name}`` dict.  Lookup keys are
    lowercased; stored values keep their original casing.  (Note: despite
    its name, the constructor argument maps Windows names to Unix names.)
    '''
    def __init__(self, unix_to_win):
        # windows-name (lowercased) -> unix name
        self.win_to_unix = dict()
        # unix-name (lowercased) -> windows name
        self.unix_to_win = dict()
        for win_name, unix_name in unix_to_win.items():
            self.win_to_unix[win_name.lower()] = unix_name
            self.unix_to_win[unix_name.lower()] = win_name

    def add(self, k, v):
        '''Register a new pair: ``k`` is the Unix name, ``v`` the Windows name.'''
        self.unix_to_win[k.lower()] = v
        self.win_to_unix[v.lower()] = k

    def remove(self, k):
        '''Drop the pair registered under Unix name ``k``.'''
        win_name = self.unix_to_win.pop(k.lower())
        self.win_to_unix.pop(win_name.lower())

    def get_win(self, key, default=None):
        '''Return the Windows name for Unix name ``key``.'''
        return self.unix_to_win.get(key.lower(), default)

    def get_unix(self, key, default=None):
        '''Return the Unix name for Windows name ``key``.'''
        return self.win_to_unix.get(key.lower(), default)

    def list_win(self):
        '''Return all Windows timezone names, sorted.'''
        return sorted(self.unix_to_win.values())

    def list_unix(self):
        '''Return all Unix timezone names, sorted.'''
        return sorted(self.win_to_unix.values())
# Canonical timezone name mapping. NOTE(review): the TzMapper constructor's
# parameter is named ``unix_to_win`` but actually receives these
# {windows_name: unix_name} pairs; TzMapper inverts them internally.
mapper = TzMapper({
    'AUS Central Standard Time': 'Australia/Darwin',
    'AUS Eastern Standard Time': 'Australia/Sydney',
    'Afghanistan Standard Time': 'Asia/Kabul',
    'Alaskan Standard Time': 'America/Anchorage',
    'Aleutian Standard Time': 'America/Adak',
    'Altai Standard Time': 'Asia/Barnaul',
    'Arab Standard Time': 'Asia/Riyadh',
    'Arabian Standard Time': 'Asia/Dubai',
    'Arabic Standard Time': 'Asia/Baghdad',
    'Argentina Standard Time': 'America/Buenos_Aires',
    'Astrakhan Standard Time': 'Europe/Astrakhan',
    'Atlantic Standard Time': 'America/Halifax',
    'Aus Central W. Standard Time': 'Australia/Eucla',
    'Azerbaijan Standard Time': 'Asia/Baku',
    'Azores Standard Time': 'Atlantic/Azores',
    'Bahia Standard Time': 'America/Bahia',
    'Bangladesh Standard Time': 'Asia/Dhaka',
    'Belarus Standard Time': 'Europe/Minsk',
    'Bougainville Standard Time': 'Pacific/Bougainville',
    'Canada Central Standard Time': 'America/Regina',
    'Cape Verde Standard Time': 'Atlantic/Cape_Verde',
    'Caucasus Standard Time': 'Asia/Yerevan',
    'Cen. Australia Standard Time': 'Australia/Adelaide',
    'Central America Standard Time': 'America/Guatemala',
    'Central Asia Standard Time': 'Asia/Almaty',
    'Central Brazilian Standard Time': 'America/Cuiaba',
    'Central Europe Standard Time': 'Europe/Budapest',
    'Central European Standard Time': 'Europe/Warsaw',
    'Central Pacific Standard Time': 'Pacific/Guadalcanal',
    'Central Standard Time': 'America/Chicago',
    'Central Standard Time (Mexico)': 'America/Mexico_City',
    'Chatham Islands Standard Time': 'Pacific/Chatham',
    'China Standard Time': 'Asia/Shanghai',
    'Cuba Standard Time': 'America/Havana',
    'Dateline Standard Time': 'Etc/GMT+12',
    'E. Africa Standard Time': 'Africa/Nairobi',
    'E. Australia Standard Time': 'Australia/Brisbane',
    'E. Europe Standard Time': 'Europe/Chisinau',
    'E. South America Standard Time': 'America/Sao_Paulo',
    'Easter Island Standard Time': 'Pacific/Easter',
    'Eastern Standard Time': 'America/New_York',
    'Eastern Standard Time (Mexico)': 'America/Cancun',
    'Egypt Standard Time': 'Africa/Cairo',
    'Ekaterinburg Standard Time': 'Asia/Yekaterinburg',
    'FLE Standard Time': 'Europe/Kiev',
    'Fiji Standard Time': 'Pacific/Fiji',
    'GMT Standard Time': 'Europe/London',
    'GTB Standard Time': 'Europe/Bucharest',
    'Georgian Standard Time': 'Asia/Tbilisi',
    'Greenland Standard Time': 'America/Godthab',
    'Greenwich Standard Time': 'Atlantic/Reykjavik',
    'Haiti Standard Time': 'America/Port-au-Prince',
    'Hawaiian Standard Time': 'Pacific/Honolulu',
    'India Standard Time': 'Asia/Calcutta',
    'Iran Standard Time': 'Asia/Tehran',
    'Israel Standard Time': 'Asia/Jerusalem',
    'Jordan Standard Time': 'Asia/Amman',
    'Kaliningrad Standard Time': 'Europe/Kaliningrad',
    'Korea Standard Time': 'Asia/Seoul',
    'Libya Standard Time': 'Africa/Tripoli',
    'Line Islands Standard Time': 'Pacific/Kiritimati',
    'Lord Howe Standard Time': 'Australia/Lord_Howe',
    'Magadan Standard Time': 'Asia/Magadan',
    'Magallanes Standard Time': 'America/Punta_Arenas',
    'Marquesas Standard Time': 'Pacific/Marquesas',
    'Mauritius Standard Time': 'Indian/Mauritius',
    'Middle East Standard Time': 'Asia/Beirut',
    'Montevideo Standard Time': 'America/Montevideo',
    'Morocco Standard Time': 'Africa/Casablanca',
    'Mountain Standard Time': 'America/Denver',
    'Mountain Standard Time (Mexico)': 'America/Chihuahua',
    'Myanmar Standard Time': 'Asia/Rangoon',
    'N. Central Asia Standard Time': 'Asia/Novosibirsk',
    'Namibia Standard Time': 'Africa/Windhoek',
    'Nepal Standard Time': 'Asia/Katmandu',
    'New Zealand Standard Time': 'Pacific/Auckland',
    'Newfoundland Standard Time': 'America/St_Johns',
    'Norfolk Standard Time': 'Pacific/Norfolk',
    'North Asia East Standard Time': 'Asia/Irkutsk',
    'North Asia Standard Time': 'Asia/Krasnoyarsk',
    'North Korea Standard Time': 'Asia/Pyongyang',
    'Omsk Standard Time': 'Asia/Omsk',
    'Pacific SA Standard Time': 'America/Santiago',
    'Pacific Standard Time': 'America/Los_Angeles',
    'Pacific Standard Time (Mexico)': 'America/Tijuana',
    'Pakistan Standard Time': 'Asia/Karachi',
    'Paraguay Standard Time': 'America/Asuncion',
    'Romance Standard Time': 'Europe/Paris',
    'Russia Time Zone 10': 'Asia/Srednekolymsk',
    'Russia Time Zone 11': 'Asia/Kamchatka',
    'Russia Time Zone 3': 'Europe/Samara',
    'Russian Standard Time': 'Europe/Moscow',
    'SA Eastern Standard Time': 'America/Cayenne',
    'SA Pacific Standard Time': 'America/Bogota',
    'SA Western Standard Time': 'America/La_Paz',
    'SE Asia Standard Time': 'Asia/Bangkok',
    'Saint Pierre Standard Time': 'America/Miquelon',
    'Sakhalin Standard Time': 'Asia/Sakhalin',
    'Samoa Standard Time': 'Pacific/Apia',
    'Saratov Standard Time': 'Europe/Saratov',
    'Singapore Standard Time': 'Asia/Singapore',
    'South Africa Standard Time': 'Africa/Johannesburg',
    'Sri Lanka Standard Time': 'Asia/Colombo',
    'Syria Standard Time': 'Asia/Damascus',
    'Taipei Standard Time': 'Asia/Taipei',
    'Tasmania Standard Time': 'Australia/Hobart',
    'Tocantins Standard Time': 'America/Araguaina',
    'Tokyo Standard Time': 'Asia/Tokyo',
    'Tomsk Standard Time': 'Asia/Tomsk',
    'Tonga Standard Time': 'Pacific/Tongatapu',
    'Transbaikal Standard Time': 'Asia/Chita',
    'Turkey Standard Time': 'Europe/Istanbul',
    'Turks And Caicos Standard Time': 'America/Grand_Turk',
    'US Eastern Standard Time': 'America/Indianapolis',
    'US Mountain Standard Time': 'America/Phoenix',
    'UTC': 'Etc/GMT',
    'UTC+12': 'Etc/GMT-12',
    'UTC+13': 'Etc/GMT-13',
    'UTC-02': 'Etc/GMT+2',
    'UTC-08': 'Etc/GMT+8',
    'UTC-09': 'Etc/GMT+9',
    'UTC-11': 'Etc/GMT+11',
    'Ulaanbaatar Standard Time': 'Asia/Ulaanbaatar',
    'Venezuela Standard Time': 'America/Caracas',
    'Vladivostok Standard Time': 'Asia/Vladivostok',
    'W. Australia Standard Time': 'Australia/Perth',
    'W. Central Africa Standard Time': 'Africa/Lagos',
    'W. Europe Standard Time': 'Europe/Berlin',
    'W. Mongolia Standard Time': 'Asia/Hovd',
    'West Asia Standard Time': 'Asia/Tashkent',
    'West Bank Standard Time': 'Asia/Hebron',
    'West Pacific Standard Time': 'Pacific/Port_Moresby',
    'Yakutsk Standard Time': 'Asia/Yakutsk'})
def __virtual__():
    '''
    Only load on Windows minions that have both pytz and tzutil available.
    '''
    # Lazy lambdas preserve the original short-circuit order: later checks
    # are only evaluated if the earlier ones pass.
    prerequisites = (
        (lambda: __utils__['platform.is_windows'](),
         "Module win_timezone: Not on Windows client"),
        (lambda: HAS_PYTZ,
         "Module win_timezone: pytz not found"),
        (lambda: __utils__['path.which']('tzutil'),
         "Module win_timezone: tzutil not found"),
    )
    for satisfied, reason in prerequisites:
        if not satisfied():
            return False, reason
    return __virtualname__
def get_zone():
    '''
    Get the current timezone in Unix format (e.g. America/Denver).

    Returns:
        str: The Unix timezone name, or ``'Unknown'`` if the Windows name
            reported by tzutil has no mapping.

    Raises:
        CommandExecutionError: If ``tzutil /g`` fails or prints nothing.

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_zone
    '''
    result = __salt__['cmd.run_all'](['tzutil', '/g'], python_shell=False)
    if result['retcode'] or not result['stdout']:
        raise CommandExecutionError(
            'tzutil encountered an error getting timezone', info=result)
    # tzutil reports the Windows name; translate to the Unix name.
    return mapper.get_unix(result['stdout'].lower(), 'Unknown')
def get_offset():
    '''
    Get the current numeric timezone offset from UTC (e.g. -0700).

    Returns:
        str: Offset from UTC in ``±HHMM`` form.

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_offset
    '''
    # localize-vs-normalize background:
    # http://craigglennie.com/programming/python/2013/07/21/working-with-timezones-using-Python-and-pytz-localize-vs-normalize/
    zone = pytz.timezone(get_zone())
    now_utc = pytz.utc.localize(datetime.utcnow())
    # normalize() fixes up the DST flag after the arithmetic of astimezone().
    return zone.normalize(now_utc.astimezone(zone)).strftime('%z')
def get_zonecode():
    '''
    Get current timezone (i.e. PST, MDT, etc)

    Returns:
        str: An abbreviated timezone code

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_zonecode
    '''
    tz_object = pytz.timezone(get_zone())
    # NOTE(review): this localizes *UTC* wall-clock time as if it were local
    # wall-clock time, so the abbreviation can be wrong near a DST boundary.
    # Confirm whether astimezone() (as used in get_offset()) was intended.
    loc_time = tz_object.localize(datetime.utcnow())
    return loc_time.tzname()
def set_zone(timezone):
    '''
    Set the system timezone using tzutil.

    Args:
        timezone (str): A valid timezone name, in Windows or Unix format.

    Returns:
        bool: ``True`` if the new zone is verified, otherwise ``False``

    Raises:
        CommandExecutionError: If an unknown timezone is passed or tzutil
            fails.

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_zone 'America/Denver'
    '''
    # Accept either naming convention; tzutil itself wants the Windows name.
    if timezone.lower() in mapper.win_to_unix:
        win_zone = timezone
    elif timezone.lower() in mapper.unix_to_win:
        win_zone = mapper.get_win(timezone)
    else:
        raise CommandExecutionError(
            'Invalid timezone passed: {0}'.format(timezone))

    result = __salt__['cmd.run_all'](
        ['tzutil', '/s', win_zone], python_shell=False)
    if result['retcode']:
        raise CommandExecutionError(
            'tzutil encountered an error setting timezone: {0}'.format(timezone),
            info=result)

    # Confirm the change actually took effect.
    return zone_compare(timezone)
def zone_compare(timezone):
    '''
    Compare the given timezone with the machine's current timezone. Mostly
    useful for running state checks.

    Args:
        timezone (str):
            The timezone to compare, in Windows or Unix format. Can be any
            of the values returned by the ``timezone.list`` function.

    Returns:
        bool: ``True`` if they match, otherwise ``False``

    Raises:
        CommandExecutionError: If an unknown timezone is passed.

    Example:

    .. code-block:: bash

        salt '*' timezone.zone_compare 'America/Denver'
    '''
    key = timezone.lower()
    # Normalize the input to the Windows-style name first.
    if key in mapper.win_to_unix:
        check_zone = timezone
    elif key in mapper.unix_to_win:
        check_zone = mapper.get_win(timezone)
    else:
        raise CommandExecutionError(
            'Invalid timezone passed: {0}'.format(timezone))

    # Compare in Unix-name space, same as get_zone() reports.
    return get_zone() == mapper.get_unix(check_zone, 'Unknown')
def list(unix_style=True):
    '''
    Return the timezones this module supports, in either Unix or Windows
    format.

    .. versionadded:: 2018.3.3

    Args:
        unix_style (bool):
            ``True`` returns Unix-style timezones. ``False`` returns
            Windows-style timezones. Default is ``True``

    Returns:
        list: A list of supported timezones

    CLI Example:

    .. code-block:: bash

        # Unix-style timezones
        salt '*' timezone.list

        # Windows-style timezones
        salt '*' timezone.list unix_style=False
    '''
    # NOTE(review): the name shadows the ``list`` builtin, but it is part of
    # the module's public CLI surface and must stay.
    return mapper.list_unix() if unix_style else mapper.list_win()
def get_hwclock():
    '''
    Get the current hardware clock setting (UTC or localtime).

    .. note::
        The hardware clock is always local time on Windows, so this always
        returns "localtime".

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_hwclock
    '''
    # Windows keeps the hardware clock in local time; nothing to query.
    return 'localtime'
def set_hwclock(clock):
    '''
    Set the hardware clock to either UTC or localtime.

    .. note::
        The hardware clock is always local time on Windows, so this is a
        no-op that always returns ``False``.

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_hwclock UTC
    '''
    # Unsupported on Windows; the hardware clock is always localtime.
    return False
|
en
| 0.530024
|
# -*- coding: utf-8 -*- Module for managing timezone on Windows systems. # Import Python libs # Import Salt libs # Import 3rd party libs # Define the module's virtual name Only load on windows Get current timezone (i.e. America/Denver) Returns: str: Timezone in unix format Raises: CommandExecutionError: If timezone could not be gathered CLI Example: .. code-block:: bash salt '*' timezone.get_zone Get current numeric timezone offset from UTC (i.e. -0700) Returns: str: Offset from UTC CLI Example: .. code-block:: bash salt '*' timezone.get_offset # http://craigglennie.com/programming/python/2013/07/21/working-with-timezones-using-Python-and-pytz-localize-vs-normalize/ Get current timezone (i.e. PST, MDT, etc) Returns: str: An abbreviated timezone code CLI Example: .. code-block:: bash salt '*' timezone.get_zonecode Sets the timezone using the tzutil. Args: timezone (str): A valid timezone Returns: bool: ``True`` if successful, otherwise ``False`` Raises: CommandExecutionError: If invalid timezone is passed CLI Example: .. code-block:: bash salt '*' timezone.set_zone 'America/Denver' # if it's one of the key's just use it # if it's one of the values, use the key # Raise error because it's neither key nor value # Set the value Compares the given timezone with the machine timezone. Mostly useful for running state checks. Args: timezone (str): The timezone to compare. This can be in Windows or Unix format. Can be any of the values returned by the ``timezone.list`` function Returns: bool: ``True`` if they match, otherwise ``False`` Example: .. code-block:: bash salt '*' timezone.zone_compare 'America/Denver' # if it's one of the key's just use it # if it's one of the values, use the key # Raise error because it's neither key nor value Return a list of Timezones that this module supports. These can be in either Unix or Windows format. .. versionadded:: 2018.3.3 Args: unix_style (bool): ``True`` returns Unix-style timezones. ``False`` returns Windows-style timezones. 
Default is ``True`` Returns: list: A list of supported timezones CLI Example: .. code-block:: bash # Unix-style timezones salt '*' timezone.list # Windows-style timezones salt '*' timezone.list unix_style=False Get current hardware clock setting (UTC or localtime) .. note:: The hardware clock is always local time on Windows so this will always return "localtime" CLI Example: .. code-block:: bash salt '*' timezone.get_hwclock # The hardware clock is always localtime on Windows Sets the hardware clock to be either UTC or localtime .. note:: The hardware clock is always local time on Windows so this will always return ``False`` CLI Example: .. code-block:: bash salt '*' timezone.set_hwclock UTC # The hardware clock is always localtime on Windows
| 2.571921
| 3
|
mmaction/core/evaluation/recall.py
|
why-know/YF-OpenLib-mmaction2
| 9
|
6627678
|
<reponame>why-know/YF-OpenLib-mmaction2
import numpy as np
import torch
from mmaction.utils import import_module_error_func
# Use mmdet's IoU computation when available; otherwise install a stub that
# raises a helpful "install mmdet" error as soon as it is called.
try:
    from mmdet.core import bbox_overlaps
except (ImportError, ModuleNotFoundError):
    @import_module_error_func('mmdet')
    def bbox_overlaps(*args, **kwargs):
        pass
def _recalls(all_ious, proposal_nums, thrs):
img_num = all_ious.shape[0]
total_gt_num = sum([ious.shape[0] for ious in all_ious])
ious_ = np.zeros((proposal_nums.size, total_gt_num), dtype=np.float32)
for k, proposal_num in enumerate(proposal_nums):
tmp_ious = np.zeros(0)
for i in range(img_num):
ious = all_ious[i][:, :proposal_num].copy()
gt_ious = np.zeros(ious.shape[0])
if ious.size == 0:
tmp_ious = np.hstack((tmp_ious, gt_ious))
continue
for j in range(ious.shape[0]):
gt_max_overlaps = ious.argmax(axis=1)
max_ious = ious[np.arange(0, ious.shape[0]), gt_max_overlaps]
gt_idx = max_ious.argmax()
gt_ious[j] = max_ious[gt_idx]
box_idx = gt_max_overlaps[gt_idx]
ious[gt_idx, :] = -1
ious[:, box_idx] = -1
tmp_ious = np.hstack((tmp_ious, gt_ious))
ious_[k, :] = tmp_ious
ious_ = np.fliplr(np.sort(ious_, axis=1))
recalls = np.zeros((proposal_nums.size, thrs.size))
for i, thr in enumerate(thrs):
recalls[:, i] = (ious_ >= thr).sum(axis=1) / float(total_gt_num)
return recalls
def set_recall_param(proposal_nums, iou_thrs):
    """Normalize ``proposal_nums`` and ``iou_thrs`` to 1-d numpy arrays.

    NOTE(review): ``proposal_nums=None`` is passed through unchanged and
    will break downstream indexing in ``eval_recalls`` — confirm callers
    always supply it.
    """
    if isinstance(proposal_nums, int):
        nums = np.array([proposal_nums])
    elif isinstance(proposal_nums, list):
        nums = np.array(proposal_nums)
    else:
        nums = proposal_nums

    if iou_thrs is None:
        thrs = np.array([0.5])
    elif isinstance(iou_thrs, float):
        thrs = np.array([iou_thrs])
    elif isinstance(iou_thrs, list):
        thrs = np.array(iou_thrs)
    else:
        thrs = iou_thrs
    return nums, thrs
def eval_recalls(gts, proposals, proposal_nums=None, iou_thrs=None):
    """Calculate recalls.

    Args:
        gts (list or ndarray): per-image gt boxes, each of shape (n, 4).
        proposals (list or ndarray): per-image proposals of shape (k, 4) or
            (k, 5); a 5th column is treated as a score used for ranking.
        proposal_nums (int or list of int or ndarray): top N proposals.
        iou_thrs (float or list or ndarray): iou thresholds.

    Returns:
        ndarray: recalls of different ious and proposal nums
    """
    num_images = len(gts)
    assert num_images == len(proposals)
    proposal_nums, iou_thrs = set_recall_param(proposal_nums, iou_thrs)

    all_ious = []
    for idx in range(num_images):
        candidates = proposals[idx]
        if candidates.ndim == 2 and candidates.shape[1] == 5:
            # Rank proposals by score (descending) before truncation.
            order = np.argsort(candidates[:, 4])[::-1]
            candidates = candidates[order, :]
        top_n = min(candidates.shape[0], proposal_nums[-1])
        gt = gts[idx]
        if gt is None or gt.shape[0] == 0:
            # No gts: keep an empty IoU matrix for this image.
            ious = np.zeros((0, candidates.shape[0]), dtype=np.float32)
        else:
            ious = bbox_overlaps(
                torch.tensor(gt),
                torch.tensor(candidates[:top_n, :4]))
            ious = ious.data.numpy()
        all_ious.append(ious)

    all_ious = np.array(all_ious)
    return _recalls(all_ious, proposal_nums, iou_thrs)
|
import numpy as np
import torch
from mmaction.utils import import_module_error_func
# Use mmdet's IoU computation when available; otherwise install a stub that
# raises a helpful "install mmdet" error as soon as it is called.
try:
    from mmdet.core import bbox_overlaps
except (ImportError, ModuleNotFoundError):
    @import_module_error_func('mmdet')
    def bbox_overlaps(*args, **kwargs):
        pass
def _recalls(all_ious, proposal_nums, thrs):
img_num = all_ious.shape[0]
total_gt_num = sum([ious.shape[0] for ious in all_ious])
ious_ = np.zeros((proposal_nums.size, total_gt_num), dtype=np.float32)
for k, proposal_num in enumerate(proposal_nums):
tmp_ious = np.zeros(0)
for i in range(img_num):
ious = all_ious[i][:, :proposal_num].copy()
gt_ious = np.zeros(ious.shape[0])
if ious.size == 0:
tmp_ious = np.hstack((tmp_ious, gt_ious))
continue
for j in range(ious.shape[0]):
gt_max_overlaps = ious.argmax(axis=1)
max_ious = ious[np.arange(0, ious.shape[0]), gt_max_overlaps]
gt_idx = max_ious.argmax()
gt_ious[j] = max_ious[gt_idx]
box_idx = gt_max_overlaps[gt_idx]
ious[gt_idx, :] = -1
ious[:, box_idx] = -1
tmp_ious = np.hstack((tmp_ious, gt_ious))
ious_[k, :] = tmp_ious
ious_ = np.fliplr(np.sort(ious_, axis=1))
recalls = np.zeros((proposal_nums.size, thrs.size))
for i, thr in enumerate(thrs):
recalls[:, i] = (ious_ >= thr).sum(axis=1) / float(total_gt_num)
return recalls
def set_recall_param(proposal_nums, iou_thrs):
    """Normalize ``proposal_nums`` and ``iou_thrs`` to 1-d numpy arrays.

    NOTE(review): ``proposal_nums=None`` is passed through unchanged and
    will break downstream indexing in ``eval_recalls`` — confirm callers
    always supply it.
    """
    if isinstance(proposal_nums, int):
        nums = np.array([proposal_nums])
    elif isinstance(proposal_nums, list):
        nums = np.array(proposal_nums)
    else:
        nums = proposal_nums

    if iou_thrs is None:
        thrs = np.array([0.5])
    elif isinstance(iou_thrs, float):
        thrs = np.array([iou_thrs])
    elif isinstance(iou_thrs, list):
        thrs = np.array(iou_thrs)
    else:
        thrs = iou_thrs
    return nums, thrs
def eval_recalls(gts, proposals, proposal_nums=None, iou_thrs=None):
    """Calculate recalls.

    Args:
        gts (list or ndarray): per-image gt boxes, each of shape (n, 4).
        proposals (list or ndarray): per-image proposals of shape (k, 4) or
            (k, 5); a 5th column is treated as a score used for ranking.
        proposal_nums (int or list of int or ndarray): top N proposals.
        iou_thrs (float or list or ndarray): iou thresholds.

    Returns:
        ndarray: recalls of different ious and proposal nums
    """
    num_images = len(gts)
    assert num_images == len(proposals)
    proposal_nums, iou_thrs = set_recall_param(proposal_nums, iou_thrs)

    all_ious = []
    for idx in range(num_images):
        candidates = proposals[idx]
        if candidates.ndim == 2 and candidates.shape[1] == 5:
            # Rank proposals by score (descending) before truncation.
            order = np.argsort(candidates[:, 4])[::-1]
            candidates = candidates[order, :]
        top_n = min(candidates.shape[0], proposal_nums[-1])
        gt = gts[idx]
        if gt is None or gt.shape[0] == 0:
            # No gts: keep an empty IoU matrix for this image.
            ious = np.zeros((0, candidates.shape[0]), dtype=np.float32)
        else:
            ious = bbox_overlaps(
                torch.tensor(gt),
                torch.tensor(candidates[:top_n, :4]))
            ious = ious.data.numpy()
        all_ious.append(ious)

    all_ious = np.array(all_ious)
    return _recalls(all_ious, proposal_nums, iou_thrs)
|
en
| 0.587847
|
Check proposal_nums and iou_thrs and set correct format. Calculate recalls. Args: gts(list or ndarray): a list of arrays of shape (n, 4) proposals(list or ndarray): a list of arrays of shape (k, 4) or (k, 5) proposal_nums(int or list of int or ndarray): top N proposals thrs(float or list or ndarray): iou thresholds Returns: ndarray: recalls of different ious and proposal nums
| 1.833391
| 2
|
tests/gclient_test.py
|
newtalk/depot_tools
| 0
|
6627679
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for gclient.py.
See gclient_smoketest.py for integration tests.
"""
import Queue
import copy
import logging
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import gclient
import gclient_utils
import gclient_scm
from testing_support import trial_dir
def write(filename, content):
  """Writes |content| to |filename|, creating parent directories as needed."""
  path = os.path.abspath(filename)
  parent = os.path.dirname(path)
  if not os.path.isdir(parent):
    os.makedirs(parent)
  with open(path, 'w') as fh:
    fh.write(content)
class SCMMock(object):
  """Stand-in for gclient's SCM wrapper.

  Records each processed dependency as a (name, url) pair on the owning
  test's queue instead of touching a real checkout.
  """

  def __init__(self, unit_test, name, url):
    self.unit_test = unit_test
    self.name = name
    self.url = url

  def RunCommand(self, command, options, args, file_list):
    # The tests always drive RunOnDeps('None', ...); anything else is a bug.
    self.unit_test.assertEquals('None', command)
    self.unit_test.processed.put((self.name, self.url))

  def FullUrlForRelativeUrl(self, url):
    # Relative deps are resolved against this mock's own url.
    return self.url + url

  # pylint: disable=no-self-use
  def DoesRemoteURLMatch(self, _):
    return True

  def GetActualRemoteURL(self, _):
    return self.url
class GclientTest(trial_dir.TestCase):
  def setUp(self):
    # Run each test from inside the trial directory and route all SCM
    # creation through SCMMock so no real checkout ever happens.
    super(GclientTest, self).setUp()
    self.processed = Queue.Queue()
    self.previous_dir = os.getcwd()
    os.chdir(self.root_dir)
    # Manual mocks.
    self._old_createscm = gclient.Dependency.CreateSCM
    gclient.Dependency.CreateSCM = self._createscm
    self._old_sys_stdout = sys.stdout
    sys.stdout = gclient.gclient_utils.MakeFileAutoFlush(sys.stdout)
    sys.stdout = gclient.gclient_utils.MakeFileAnnotated(sys.stdout)
  def tearDown(self):
    # Every queued (name, url) pair must have been consumed by the test.
    self.assertEquals([], self._get_processed())
    # Undo the manual mocks from setUp() and restore the working directory.
    gclient.Dependency.CreateSCM = self._old_createscm
    sys.stdout = self._old_sys_stdout
    os.chdir(self.previous_dir)
    super(GclientTest, self).tearDown()
  def _createscm(self, parsed_url, root_dir, name, out_fh=None, out_cb=None):
    # Replacement for gclient.Dependency.CreateSCM: sanity-check the url and
    # checkout root, then hand back a mock instead of a real SCM wrapper.
    self.assertTrue(parsed_url.startswith('svn://example.com/'), parsed_url)
    self.assertTrue(root_dir.startswith(self.root_dir), root_dir)
    return SCMMock(self, name, parsed_url)
  def testDependencies(self):
    # Serial processing: a single job.
    self._dependencies('1')

  def testDependenciesJobs(self):
    # Heavily parallel processing.
    self._dependencies('1000')
def _dependencies(self, jobs):
"""Verifies that dependencies are processed in the right order.
e.g. if there is a dependency 'src' and another 'src/third_party/bar', that
bar isn't fetched until 'src' is done.
Args:
|jobs| is the number of parallel jobs simulated.
"""
parser = gclient.OptionParser()
options, args = parser.parse_args(['--jobs', jobs])
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo" },\n'
' { "name": "bar", "url": "svn://example.com/bar" },\n'
' { "name": "bar/empty", "url": "svn://example.com/bar_empty" },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "foo/dir1": "/dir1",\n'
# This one will depend on dir1/dir2 in bar.
' "foo/dir1/dir2/dir3": "/dir1/dir2/dir3",\n'
' "foo/dir1/dir2/dir3/dir4": "/dir1/dir2/dir3/dir4",\n'
'}')
write(
os.path.join('bar', 'DEPS'),
'deps = {\n'
# There is two foo/dir1/dir2. This one is fetched as bar/dir1/dir2.
' "foo/dir1/dir2": "/dir1/dir2",\n'
'}')
write(
os.path.join('bar/empty', 'DEPS'),
'deps = {\n'
'}')
obj = gclient.GClient.LoadCurrentConfig(options)
self._check_requirements(obj.dependencies[0], {})
self._check_requirements(obj.dependencies[1], {})
obj.RunOnDeps('None', args)
actual = self._get_processed()
first_3 = [
('bar', 'svn://example.com/bar'),
('bar/empty', 'svn://example.com/bar_empty'),
('foo', 'svn://example.com/foo'),
]
if jobs != 1:
# We don't care of the ordering of these items except that bar must be
# before bar/empty.
self.assertTrue(
actual.index(('bar', 'svn://example.com/bar')) <
actual.index(('bar/empty', 'svn://example.com/bar_empty')))
self.assertEquals(first_3, sorted(actual[0:3]))
else:
self.assertEquals(first_3, actual[0:3])
self.assertEquals(
[
('foo/dir1', 'svn://example.com/foo/dir1'),
('foo/dir1/dir2', 'svn://example.com/bar/dir1/dir2'),
('foo/dir1/dir2/dir3', 'svn://example.com/foo/dir1/dir2/dir3'),
('foo/dir1/dir2/dir3/dir4',
'svn://example.com/foo/dir1/dir2/dir3/dir4'),
],
actual[3:])
self.assertEquals(3, len(obj.dependencies))
self.assertEquals('foo', obj.dependencies[0].name)
self.assertEquals('bar', obj.dependencies[1].name)
self.assertEquals('bar/empty', obj.dependencies[2].name)
self._check_requirements(
obj.dependencies[0],
{
'foo/dir1': ['bar', 'bar/empty', 'foo'],
'foo/dir1/dir2/dir3':
['bar', 'bar/empty', 'foo', 'foo/dir1', 'foo/dir1/dir2'],
'foo/dir1/dir2/dir3/dir4':
[ 'bar', 'bar/empty', 'foo', 'foo/dir1', 'foo/dir1/dir2',
'foo/dir1/dir2/dir3'],
})
self._check_requirements(
obj.dependencies[1],
{
'foo/dir1/dir2': ['bar', 'bar/empty', 'foo', 'foo/dir1'],
})
self._check_requirements(
obj.dependencies[2],
{})
self._check_requirements(
obj,
{
'foo': [],
'bar': [],
'bar/empty': ['bar'],
})
def _check_requirements(self, solution, expected):
for dependency in solution.dependencies:
e = expected.pop(dependency.name)
a = sorted(dependency.requirements)
self.assertEquals(e, a, (dependency.name, e, a))
self.assertEquals({}, expected)
def _get_processed(self):
"""Retrieves the item in the order they were processed."""
items = []
try:
while True:
items.append(self.processed.get_nowait())
except Queue.Empty:
pass
return items
  def testAutofix(self):
    # Invalid urls causes pain when specifying requirements. Make sure it's
    # auto-fixed.
    url = 'proto://host/path/@revision'
    d = gclient.Dependency(
        None, 'name', url, url, None, None, None,
        None, '', True, False, None, True)
    # Dependency must strip the '/' that precedes the '@revision' suffix.
    self.assertEquals('proto://host/path@revision', d.url)
def testStr(self):
parser = gclient.OptionParser()
options, _ = parser.parse_args([])
obj = gclient.GClient('foo', options)
obj.add_dependencies_and_close(
[
gclient.Dependency(
obj, 'foo', 'raw_url', 'url', None, None, None, None, 'DEPS', True,
False, None, True),
gclient.Dependency(
obj, 'bar', 'raw_url', 'url', None, None, None, None, 'DEPS', True,
False, None, True),
],
[])
obj.dependencies[0].add_dependencies_and_close(
[
gclient.Dependency(
obj.dependencies[0], 'foo/dir1', 'raw_url', 'url', None, None, None,
None, 'DEPS', True, False, None, True),
],
[])
# Make sure __str__() works fine.
# pylint: disable=protected-access
obj.dependencies[0]._file_list.append('foo')
str_obj = str(obj)
self.assertEquals(263, len(str_obj), '%d\n%s' % (len(str_obj), str_obj))
def testHooks(self):
topdir = self.root_dir
gclient_fn = os.path.join(topdir, '.gclient')
fh = open(gclient_fn, 'w')
print >> fh, 'solutions = [{"name":"top","url":"svn://example.com/top"}]'
fh.close()
subdir_fn = os.path.join(topdir, 'top')
os.mkdir(subdir_fn)
deps_fn = os.path.join(subdir_fn, 'DEPS')
fh = open(deps_fn, 'w')
hooks = [{'pattern':'.', 'action':['cmd1', 'arg1', 'arg2']}]
print >> fh, 'hooks = %s' % repr(hooks)
fh.close()
fh = open(os.path.join(subdir_fn, 'fake.txt'), 'w')
print >> fh, 'bogus content'
fh.close()
os.chdir(topdir)
parser = gclient.OptionParser()
options, _ = parser.parse_args([])
options.force = True
client = gclient.GClient.LoadCurrentConfig(options)
work_queue = gclient_utils.ExecutionQueue(options.jobs, None, False)
for s in client.dependencies:
work_queue.enqueue(s)
work_queue.flush({}, None, [], options=options, patch_refs={})
self.assertEqual(
[h.action for h in client.GetHooks(options)],
[tuple(x['action']) for x in hooks])
def testCustomHooks(self):
topdir = self.root_dir
gclient_fn = os.path.join(topdir, '.gclient')
fh = open(gclient_fn, 'w')
extra_hooks = [{'name': 'append', 'pattern':'.', 'action':['supercmd']}]
print >> fh, ('solutions = [{"name":"top","url":"svn://example.com/top",'
'"custom_hooks": %s},' ) % repr(extra_hooks + [{'name': 'skip'}])
print >> fh, '{"name":"bottom","url":"svn://example.com/bottom"}]'
fh.close()
subdir_fn = os.path.join(topdir, 'top')
os.mkdir(subdir_fn)
deps_fn = os.path.join(subdir_fn, 'DEPS')
fh = open(deps_fn, 'w')
hooks = [{'pattern':'.', 'action':['cmd1', 'arg1', 'arg2']}]
hooks.append({'pattern':'.', 'action':['cmd2', 'arg1', 'arg2']})
skip_hooks = [
{'name': 'skip', 'pattern':'.', 'action':['cmd3', 'arg1', 'arg2']}]
skip_hooks.append(
{'name': 'skip', 'pattern':'.', 'action':['cmd4', 'arg1', 'arg2']})
print >> fh, 'hooks = %s' % repr(hooks + skip_hooks)
fh.close()
# Make sure the custom hooks for that project don't affect the next one.
subdir_fn = os.path.join(topdir, 'bottom')
os.mkdir(subdir_fn)
deps_fn = os.path.join(subdir_fn, 'DEPS')
fh = open(deps_fn, 'w')
sub_hooks = [{'pattern':'.', 'action':['response1', 'yes1', 'yes2']}]
sub_hooks.append(
{'name': 'skip', 'pattern':'.', 'action':['response2', 'yes', 'sir']})
print >> fh, 'hooks = %s' % repr(sub_hooks)
fh.close()
fh = open(os.path.join(subdir_fn, 'fake.txt'), 'w')
print >> fh, 'bogus content'
fh.close()
os.chdir(topdir)
parser = gclient.OptionParser()
options, _ = parser.parse_args([])
options.force = True
client = gclient.GClient.LoadCurrentConfig(options)
work_queue = gclient_utils.ExecutionQueue(options.jobs, None, False)
for s in client.dependencies:
work_queue.enqueue(s)
work_queue.flush({}, None, [], options=options, patch_refs={})
self.assertEqual(
[h.action for h in client.GetHooks(options)],
[tuple(x['action']) for x in hooks + extra_hooks + sub_hooks])
def testTargetOS(self):
"""Verifies that specifying a target_os pulls in all relevant dependencies.
The target_os variable allows specifying the name of an additional OS which
should be considered when selecting dependencies from a DEPS' deps_os. The
value will be appended to the _enforced_os tuple.
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo",\n'
' "url": "svn://example.com/foo",\n'
' }]\n'
'target_os = ["baz"]')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "foo/dir1": "/dir1",'
'}\n'
'deps_os = {\n'
' "unix": { "foo/dir2": "/dir2", },\n'
' "baz": { "foo/dir3": "/dir3", },\n'
'}')
parser = gclient.OptionParser()
options, _ = parser.parse_args(['--jobs', '1'])
options.deps_os = "unix"
obj = gclient.GClient.LoadCurrentConfig(options)
self.assertEqual(['baz', 'unix'], sorted(obj.enforced_os))
def testTargetOsWithTargetOsOnly(self):
"""Verifies that specifying a target_os and target_os_only pulls in only
the relevant dependencies.
The target_os variable allows specifying the name of an additional OS which
should be considered when selecting dependencies from a DEPS' deps_os. With
target_os_only also set, the _enforced_os tuple will be set to only the
target_os value.
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo",\n'
' "url": "svn://example.com/foo",\n'
' }]\n'
'target_os = ["baz"]\n'
'target_os_only = True')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "foo/dir1": "/dir1",'
'}\n'
'deps_os = {\n'
' "unix": { "foo/dir2": "/dir2", },\n'
' "baz": { "foo/dir3": "/dir3", },\n'
'}')
parser = gclient.OptionParser()
options, _ = parser.parse_args(['--jobs', '1'])
options.deps_os = "unix"
obj = gclient.GClient.LoadCurrentConfig(options)
self.assertEqual(['baz'], sorted(obj.enforced_os))
  def testTargetOsOnlyWithoutTargetOs(self):
    """Verifies that specifying target_os_only without target_os raises
    an exception.
    """
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo",\n'
        '    "url": "svn://example.com/foo",\n'
        '  }]\n'
        'target_os_only = True')
    write(
        os.path.join('foo', 'DEPS'),
        'deps = {\n'
        '  "foo/dir1": "/dir1",'
        '}\n'
        'deps_os = {\n'
        '  "unix": { "foo/dir2": "/dir2", },\n'
        '}')
    parser = gclient.OptionParser()
    options, _ = parser.parse_args(['--jobs', '1'])
    options.deps_os = "unix"
    # Loading the config must fail: target_os_only is meaningless without a
    # target_os list to restrict to.
    exception_raised = False
    try:
      gclient.GClient.LoadCurrentConfig(options)
    except gclient_utils.Error:
      exception_raised = True
    self.assertTrue(exception_raised)
  def testTargetOsInDepsFile(self):
    """Verifies that specifying a target_os value in a DEPS file pulls in all
    relevant dependencies.

    The target_os variable in a DEPS file allows specifying the name of an
    additional OS which should be considered when selecting dependencies from a
    DEPS' deps_os. The value will be appended to the _enforced_os tuple.
    """
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo",\n'
        '    "url": "svn://example.com/foo",\n'
        '  },\n'
        '  { "name": "bar",\n'
        '    "url": "svn://example.com/bar",\n'
        '  }]\n')
    write(
        os.path.join('foo', 'DEPS'),
        'target_os = ["baz"]\n'
        'deps_os = {\n'
        '  "unix": { "foo/unix": "/unix", },\n'
        '  "baz": { "foo/baz": "/baz", },\n'
        '  "jaz": { "foo/jaz": "/jaz", },\n'
        '}')
    write(
        os.path.join('bar', 'DEPS'),
        'deps_os = {\n'
        '  "unix": { "bar/unix": "/unix", },\n'
        '  "baz": { "bar/baz": "/baz", },\n'
        '  "jaz": { "bar/jaz": "/jaz", },\n'
        '}')
    parser = gclient.OptionParser()
    options, _ = parser.parse_args(['--jobs', '1'])
    options.deps_os = 'unix'
    obj = gclient.GClient.LoadCurrentConfig(options)
    obj.RunOnDeps('None', [])
    self.assertEqual(['unix'], sorted(obj.enforced_os))
    # foo declared target_os = ["baz"], so it pulls both 'unix' and 'baz'
    # deps; bar declared no target_os, so only 'unix' deps are pulled for it.
    self.assertEquals(
        [
          ('bar', 'svn://example.com/bar'),
          ('bar/unix', 'svn://example.com/bar/unix'),
          ('foo', 'svn://example.com/foo'),
          ('foo/baz', 'svn://example.com/foo/baz'),
          ('foo/unix', 'svn://example.com/foo/unix'),
        ],
        sorted(self._get_processed()))
  def testTargetOsForHooksInDepsFile(self):
    """Verifies that specifying a target_os value in a DEPS file runs the right
    entries in hooks_os.
    """
    write(
        'DEPS',
        'hooks = [\n'
        '  {\n'
        '    "name": "a",\n'
        '    "pattern": ".",\n'
        '    "action": [ "python", "do_a" ],\n'
        '  },\n'
        ']\n'
        '\n'
        'hooks_os = {\n'
        '  "blorp": ['
        '  {\n'
        '    "name": "b",\n'
        '    "pattern": ".",\n'
        '    "action": [ "python", "do_b" ],\n'
        '  },\n'
        '  ],\n'
        '}\n')
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": ".",\n'
        '    "url": "svn://example.com/",\n'
        '  }]\n')
    # Test for an OS not in hooks_os: only the unconditional hook 'a' runs.
    parser = gclient.OptionParser()
    options, args = parser.parse_args(['--jobs', '1'])
    options.deps_os = 'zippy'
    obj = gclient.GClient.LoadCurrentConfig(options)
    obj.RunOnDeps('None', args)
    self.assertEqual(['zippy'], sorted(obj.enforced_os))
    all_hooks = [h.action for h in obj.GetHooks(options)]
    self.assertEquals(
        [('.', 'svn://example.com/'),],
        sorted(self._get_processed()))
    self.assertEquals(all_hooks,
                      [('python', 'do_a')])
    # Test for OS that has extra hooks in hooks_os: 'b' is appended after 'a'.
    parser = gclient.OptionParser()
    options, args = parser.parse_args(['--jobs', '1'])
    options.deps_os = 'blorp'
    obj = gclient.GClient.LoadCurrentConfig(options)
    obj.RunOnDeps('None', args)
    self.assertEqual(['blorp'], sorted(obj.enforced_os))
    all_hooks = [h.action for h in obj.GetHooks(options)]
    self.assertEquals(
        [('.', 'svn://example.com/'),],
        sorted(self._get_processed()))
    self.assertEquals(all_hooks,
                      [('python', 'do_a'),
                       ('python', 'do_b')])
  def testUpdateWithOsDeps(self):
    """Verifies that complicated deps_os constructs result in the
    correct data also with multiple operating systems. Also see
    testDepsOsOverrideDepsInDepsFile."""
    test_data = [
      # Tuples of deps, deps_os, os_list and expected_deps.
      (
        # OS with no overrides at all.
        {'foo': 'default_foo'},
        {'os1': { 'foo': None } },
        ['os2'],
        {'foo': 'default_foo'}
        ),
      (
        # One OS wants to add a module.
        {'foo': 'default_foo'},
        {'os1': { 'bar': 'os1_bar' }},
        ['os1'],
        {'foo': 'default_foo',
         'bar': {'should_process': True, 'url': 'os1_bar'}}
        ),
      (
        # One OS wants to add a module. One doesn't care.
        {'foo': 'default_foo'},
        {'os1': { 'bar': 'os1_bar' }},
        ['os1', 'os2'],
        {'foo': 'default_foo',
         'bar': {'should_process': True, 'url': 'os1_bar'}}
        ),
      (
        # Two OSes want to add a module with the same definition.
        {'foo': 'default_foo'},
        {'os1': { 'bar': 'os12_bar' },
         'os2': { 'bar': 'os12_bar' }},
        ['os1', 'os2'],
        {'foo': 'default_foo',
         'bar': {'should_process': True, 'url': 'os12_bar'}}
        ),
      (
        # One OS doesn't need module, one OS wants the default.
        {'foo': 'default_foo'},
        {'os1': { 'foo': None },
         'os2': {}},
        ['os1', 'os2'],
        {'foo': 'default_foo'}
        ),
      (
        # OS doesn't need module.
        {'foo': 'default_foo'},
        {'os1': { 'foo': None } },
        ['os1'],
        {'foo': 'default_foo'}
        ),
      (
        # No-op override. Regression test for http://crbug.com/735418 .
        {'foo': 'default_foo'},
        {'os1': { 'foo': 'default_foo' } },
        [],
        {'foo': {'should_process': True, 'url': 'default_foo'}}
        ),
      ]
    for deps, deps_os, target_os_list, expected_deps in test_data:
      orig_deps = copy.deepcopy(deps)
      result = gclient.Dependency.MergeWithOsDeps(
          deps, deps_os, target_os_list, False)
      self.assertEqual(result, expected_deps)
      # MergeWithOsDeps must not mutate its 'deps' argument.
      self.assertEqual(deps, orig_deps)
  def testUpdateWithOsDepsInvalid(self):
    """Verifies that conflicting deps_os overrides of a 'deps' entry raise."""
    test_data = [
      # Tuples of deps, deps_os, os_list.
      (
        # OS wants a different version of module.
        {'foo': 'default_foo'},
        {'os1': { 'foo': 'os1_foo'} },
        ['os1'],
        ),
      (
        # One OS doesn't need module, another OS wants a special version.
        {'foo': 'default_foo'},
        {'os1': { 'foo': None },
         'os2': { 'foo': 'os2_foo'}},
        ['os1', 'os2'],
        ),
      ]
    for deps, deps_os, target_os_list in test_data:
      with self.assertRaises(gclient_utils.Error):
        gclient.Dependency.MergeWithOsDeps(deps, deps_os, target_os_list, False)
  def testLateOverride(self):
    """Verifies expected behavior of LateOverride."""
    url = "<EMAIL>:dart-lang/spark.git"
    d = gclient.Dependency(None, 'name', 'raw_url', 'url',
                           None, None, None, None, '', True, False, None, True)
    # With no custom_deps on the dependency, LateOverride returns the URL
    # unchanged.
    late_url = d.LateOverride(url)
    self.assertEquals(url, late_url)
  def testDepsOsOverrideDepsInDepsFile(self):
    """Verifies that a 'deps_os' path cannot override a 'deps' path. Also
    see testUpdateWithOsDeps above.
    """
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo",\n'
        '    "url": "svn://example.com/foo",\n'
        '  },]\n')
    write(
        os.path.join('foo', 'DEPS'),
        'target_os = ["baz"]\n'
        'deps = {\n'
        '  "foo/src": "/src",\n' # This path is to be overridden by similar path
                                 # in deps_os['unix'].
        '}\n'
        'deps_os = {\n'
        '  "unix": { "foo/unix": "/unix",'
        '            "foo/src": "/src_unix"},\n'
        '  "baz": { "foo/baz": "/baz",\n'
        '           "foo/src": None},\n'
        '  "jaz": { "foo/jaz": "/jaz", },\n'
        '}')
    parser = gclient.OptionParser()
    options, _ = parser.parse_args(['--jobs', '1'])
    options.deps_os = 'unix'
    obj = gclient.GClient.LoadCurrentConfig(options)
    # The conflicting 'foo/src' override must abort the run.
    with self.assertRaises(gclient_utils.Error):
      obj.RunOnDeps('None', [])
    self.assertEqual(['unix'], sorted(obj.enforced_os))
    # Only the top-level solution was processed before the error fired.
    self.assertEquals(
        [
          ('foo', 'svn://example.com/foo'),
        ],
        sorted(self._get_processed()))
  def testRecursionOverride(self):
    """Verifies gclient respects the |recursion| var syntax.

    We check several things here:
    - |recursion| = 3 sets recursion on the foo dep to exactly 3
      (we pull /fizz, but not /fuzz)
    - pulling foo/bar at recursion level 1 (in .gclient) is overriden by
      a later pull of foo/bar at recursion level 2 (in the dep tree)
    """
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo", "url": "svn://example.com/foo" },\n'
        '  { "name": "foo/bar", "url": "svn://example.com/bar" },\n'
        ']')
    write(
        os.path.join('foo', 'DEPS'),
        'deps = {\n'
        '  "bar": "/bar",\n'
        '}\n'
        'recursion = 3')
    write(
        os.path.join('bar', 'DEPS'),
        'deps = {\n'
        '  "baz": "/baz",\n'
        '}')
    write(
        os.path.join('baz', 'DEPS'),
        'deps = {\n'
        '  "fizz": "/fizz",\n'
        '}')
    write(
        os.path.join('fizz', 'DEPS'),
        'deps = {\n'
        '  "fuzz": "/fuzz",\n'
        '}')
    options, _ = gclient.OptionParser().parse_args([])
    obj = gclient.GClient.LoadCurrentConfig(options)
    obj.RunOnDeps('None', [])
    # fizz is at depth 3 and is pulled; fuzz would be depth 4 and is not.
    self.assertEquals(
        [
          ('foo', 'svn://example.com/foo'),
          ('foo/bar', 'svn://example.com/bar'),
          ('bar', 'svn://example.com/foo/bar'),
          ('baz', 'svn://example.com/foo/bar/baz'),
          ('fizz', 'svn://example.com/foo/bar/baz/fizz'),
        ],
        self._get_processed())
  def testRecursedepsOverride(self):
    """Verifies gclient respects the |recursedeps| var syntax.

    This is what we mean to check here:
    - |recursedeps| = [...] on 2 levels means we pull exactly 3 deps
      (up to /fizz, but not /fuzz)
    - pulling foo/bar with no recursion (in .gclient) is overriden by
      a later pull of foo/bar with recursion (in the dep tree)
    - pulling foo/tar with no recursion (in .gclient) is not recursively
      pulled (taz is left out)
    """
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo", "url": "svn://example.com/foo" },\n'
        '  { "name": "foo/bar", "url": "svn://example.com/bar" },\n'
        '  { "name": "foo/tar", "url": "svn://example.com/tar" },\n'
        ']')
    write(
        os.path.join('foo', 'DEPS'),
        'deps = {\n'
        '  "bar": "/bar",\n'
        '}\n'
        'recursedeps = ["bar"]')
    write(
        os.path.join('bar', 'DEPS'),
        'deps = {\n'
        '  "baz": "/baz",\n'
        '}\n'
        'recursedeps = ["baz"]')
    write(
        os.path.join('baz', 'DEPS'),
        'deps = {\n'
        '  "fizz": "/fizz",\n'
        '}')
    write(
        os.path.join('fizz', 'DEPS'),
        'deps = {\n'
        '  "fuzz": "/fuzz",\n'
        '}')
    write(
        os.path.join('tar', 'DEPS'),
        'deps = {\n'
        '  "taz": "/taz",\n'
        '}')
    options, _ = gclient.OptionParser().parse_args([])
    obj = gclient.GClient.LoadCurrentConfig(options)
    obj.RunOnDeps('None', [])
    # Neither fuzz (beyond the recursedeps chain) nor taz (under foo/tar,
    # which is not in any recursedeps list) shows up.
    self.assertEquals(
        [
          ('bar', 'svn://example.com/foo/bar'),
          ('baz', 'svn://example.com/foo/bar/baz'),
          ('fizz', 'svn://example.com/foo/bar/baz/fizz'),
          ('foo', 'svn://example.com/foo'),
          ('foo/bar', 'svn://example.com/bar'),
          ('foo/tar', 'svn://example.com/tar'),
        ],
        sorted(self._get_processed()))
  def testRecursedepsOverrideWithRelativePaths(self):
    """Verifies gclient respects |recursedeps| with relative paths."""
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo", "url": "svn://example.com/foo" },\n'
        ']')
    write(
        os.path.join('foo', 'DEPS'),
        'use_relative_paths = True\n'
        'deps = {\n'
        '  "bar": "/bar",\n'
        '}\n'
        'recursedeps = ["bar"]')
    # Because of use_relative_paths, bar's DEPS lives under foo/bar.
    write(
        os.path.join('foo/bar', 'DEPS'),
        'deps = {\n'
        '  "baz": "/baz",\n'
        '}')
    write(
        os.path.join('baz', 'DEPS'),
        'deps = {\n'
        '  "fizz": "/fizz",\n'
        '}')
    options, _ = gclient.OptionParser().parse_args([])
    obj = gclient.GClient.LoadCurrentConfig(options)
    obj.RunOnDeps('None', [])
    self.assertEquals(
        [
          ('foo', 'svn://example.com/foo'),
          ('foo/bar', 'svn://example.com/foo/bar'),
          ('foo/baz', 'svn://example.com/foo/bar/baz'),
        ],
        self._get_processed())
  def testRelativeRecursion(self):
    """Verifies that nested use_relative_paths is always respected."""
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo", "url": "svn://example.com/foo" },\n'
        ']')
    write(
        os.path.join('foo', 'DEPS'),
        'use_relative_paths = True\n'
        'deps = {\n'
        '  "bar": "/bar",\n'
        '}\n'
        'recursedeps = ["bar"]')
    # bar also sets use_relative_paths, so baz ends up under foo/bar/baz.
    write(
        os.path.join('foo/bar', 'DEPS'),
        'use_relative_paths = True\n'
        'deps = {\n'
        '  "baz": "/baz",\n'
        '}')
    write(
        os.path.join('baz', 'DEPS'),
        'deps = {\n'
        '  "fizz": "/fizz",\n'
        '}')
    options, _ = gclient.OptionParser().parse_args([])
    obj = gclient.GClient.LoadCurrentConfig(options)
    obj.RunOnDeps('None', [])
    self.assertEquals(
        [
          ('foo', 'svn://example.com/foo'),
          ('foo/bar', 'svn://example.com/foo/bar'),
          ('foo/bar/baz', 'svn://example.com/foo/bar/baz'),
        ],
        self._get_processed())
  def testRecursionOverridesRecursedeps(self):
    """Verifies gclient respects |recursion| over |recursedeps|.

    |recursion| is set in a top-level DEPS file.  That value is meant
    to affect how many subdeps are parsed via recursion.

    |recursedeps| is set in each DEPS file to control whether or not
    to recurse into the immediate next subdep.

    This test verifies that if both syntaxes are mixed in a DEPS file,
    we disable |recursedeps| support and only obey |recursion|.

    Since this setting is evaluated per DEPS file, recursed DEPS
    files will each be re-evaluated according to the per DEPS rules.
    So a DEPS that only contains |recursedeps| could then override any
    previous |recursion| setting.  There is extra processing to ensure
    this does not happen.

    For this test to work correctly, we need to use a DEPS chain that
    only contains recursion controls in the top DEPS file.

    In foo, |recursion| and |recursedeps| are specified.  When we see
    |recursion|, we stop trying to use |recursedeps|.

    There are 2 constructions of DEPS here that are key to this test:

    (1) In foo, if we used |recursedeps| instead of |recursion|, we
        would also pull in bar.  Since bar's DEPS doesn't contain any
        recursion statements, we would stop processing at bar.

    (2) In fizz, if we used |recursedeps| at all, we should pull in
        fuzz.

    We expect to keep going past bar (satisfying 1) and we don't
    expect to pull in fuzz (satisfying 2).
    """
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo", "url": "svn://example.com/foo" },\n'
        '  { "name": "foo/bar", "url": "svn://example.com/bar" },\n'
        ']')
    # foo mixes both controls; |recursion| wins and |recursedeps| is ignored.
    write(
        os.path.join('foo', 'DEPS'),
        'deps = {\n'
        '  "bar": "/bar",\n'
        '}\n'
        'recursion = 3\n'
        'recursedeps = ["bar"]')
    write(
        os.path.join('bar', 'DEPS'),
        'deps = {\n'
        '  "baz": "/baz",\n'
        '}')
    write(
        os.path.join('baz', 'DEPS'),
        'deps = {\n'
        '  "fizz": "/fizz",\n'
        '}')
    write(
        os.path.join('fizz', 'DEPS'),
        'deps = {\n'
        '  "fuzz": "/fuzz",\n'
        '}\n'
        'recursedeps = ["fuzz"]')
    write(
        os.path.join('fuzz', 'DEPS'),
        'deps = {\n'
        '  "tar": "/tar",\n'
        '}')
    options, _ = gclient.OptionParser().parse_args([])
    obj = gclient.GClient.LoadCurrentConfig(options)
    obj.RunOnDeps('None', [])
    self.assertEquals(
        [
          ('foo', 'svn://example.com/foo'),
          ('foo/bar', 'svn://example.com/bar'),
          ('bar', 'svn://example.com/foo/bar'),
          # Deps after this would have been skipped if we were obeying
          # |recursedeps|.
          ('baz', 'svn://example.com/foo/bar/baz'),
          ('fizz', 'svn://example.com/foo/bar/baz/fizz'),
          # And this dep would have been picked up if we were obeying
          # |recursedeps|.
          # 'svn://example.com/foo/bar/baz/fuzz',
        ],
        self._get_processed())
  def testRecursedepsAltfile(self):
    """Verifies gclient respects the |recursedeps| var syntax with overridden
    target DEPS file.

    This is what we mean to check here:
    - Naming an alternate DEPS file in recursedeps pulls from that one.
    """
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo", "url": "svn://example.com/foo" },\n'
        ']')
    write(
        os.path.join('foo', 'DEPS'),
        'deps = {\n'
        '  "bar": "/bar",\n'
        '}\n'
        'recursedeps = [("bar", "DEPS.alt")]')
    # bar's regular DEPS is deliberately invalid: reading it would fail the
    # run, proving the alternate file is the one that gets parsed.
    write(os.path.join('bar', 'DEPS'), 'ERROR ERROR ERROR')
    write(
        os.path.join('bar', 'DEPS.alt'),
        'deps = {\n'
        '  "baz": "/baz",\n'
        '}')
    options, _ = gclient.OptionParser().parse_args([])
    obj = gclient.GClient.LoadCurrentConfig(options)
    obj.RunOnDeps('None', [])
    self.assertEquals(
        [
          ('foo', 'svn://example.com/foo'),
          ('bar', 'svn://example.com/foo/bar'),
          ('baz', 'svn://example.com/foo/bar/baz'),
        ],
        self._get_processed())
  def testGitDeps(self):
    """Verifies gclient respects a .DEPS.git deps file.

    Along the way, we also test that if both DEPS and .DEPS.git are present,
    that gclient does not read the DEPS file.  This will reliably catch bugs
    where gclient is always hitting the wrong file (DEPS).
    """
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo", "url": "svn://example.com/foo",\n'
        '    "deps_file" : ".DEPS.git",\n'
        '  },\n'
        ']')
    write(
        os.path.join('foo', '.DEPS.git'),
        'deps = {\n'
        '  "bar": "/bar",\n'
        '}')
    # Decoy DEPS: its "baz" entry must NOT appear in the processed list.
    write(
        os.path.join('foo', 'DEPS'),
        'deps = {\n'
        '  "baz": "/baz",\n'
        '}')
    options, _ = gclient.OptionParser().parse_args([])
    obj = gclient.GClient.LoadCurrentConfig(options)
    obj.RunOnDeps('None', [])
    self.assertEquals(
        [
          ('foo', 'svn://example.com/foo'),
          ('bar', 'svn://example.com/foo/bar'),
        ],
        self._get_processed())
  def testGitDepsFallback(self):
    """Verifies gclient respects fallback to DEPS upon missing deps file."""
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo", "url": "svn://example.com/foo",\n'
        '    "deps_file" : ".DEPS.git",\n'
        '  },\n'
        ']')
    # No .DEPS.git is written, so gclient must fall back to plain DEPS.
    write(
        os.path.join('foo', 'DEPS'),
        'deps = {\n'
        '  "bar": "/bar",\n'
        '}')
    options, _ = gclient.OptionParser().parse_args([])
    obj = gclient.GClient.LoadCurrentConfig(options)
    obj.RunOnDeps('None', [])
    self.assertEquals(
        [
          ('foo', 'svn://example.com/foo'),
          ('bar', 'svn://example.com/foo/bar'),
        ],
        self._get_processed())
  def testDepsFromNotAllowedHostsUnspecified(self):
    """Verifies gclient works fine with DEPS without allowed_hosts."""
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo", "url": "svn://example.com/foo",\n'
        '    "deps_file" : ".DEPS.git",\n'
        '  },\n'
        ']')
    write(
        os.path.join('foo', 'DEPS'),
        'deps = {\n'
        '  "bar": "/bar",\n'
        '}')
    options, _ = gclient.OptionParser().parse_args([])
    obj = gclient.GClient.LoadCurrentConfig(options)
    obj.RunOnDeps('None', [])
    dep = obj.dependencies[0]
    # No allowed_hosts at all means nothing is flagged.
    self.assertEquals([], dep.findDepsFromNotAllowedHosts())
    self.assertEquals(frozenset(), dep.allowed_hosts)
    # Drain the processed queue so tearDown's empty-queue assertion holds.
    self._get_processed()
  def testDepsFromNotAllowedHostsOK(self):
    """Verifies gclient works fine with DEPS with proper allowed_hosts."""
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo", "url": "svn://example.com/foo",\n'
        '    "deps_file" : ".DEPS.git",\n'
        '  },\n'
        ']')
    write(
        os.path.join('foo', '.DEPS.git'),
        'allowed_hosts = ["example.com"]\n'
        'deps = {\n'
        '  "bar": "svn://example.com/bar",\n'
        '}')
    options, _ = gclient.OptionParser().parse_args([])
    obj = gclient.GClient.LoadCurrentConfig(options)
    obj.RunOnDeps('None', [])
    dep = obj.dependencies[0]
    # The single dep's host is allowed, so nothing is flagged.
    self.assertEquals([], dep.findDepsFromNotAllowedHosts())
    self.assertEquals(frozenset(['example.com']), dep.allowed_hosts)
    # Drain the processed queue so tearDown's empty-queue assertion holds.
    self._get_processed()
  def testDepsFromNotAllowedHostsBad(self):
    """Verifies gclient flags deps whose host is not in allowed_hosts."""
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo", "url": "svn://example.com/foo",\n'
        '    "deps_file" : ".DEPS.git",\n'
        '  },\n'
        ']')
    write(
        os.path.join('foo', '.DEPS.git'),
        'allowed_hosts = ["other.com"]\n'
        'deps = {\n'
        '  "bar": "svn://example.com/bar",\n'
        '}')
    options, _ = gclient.OptionParser().parse_args([])
    obj = gclient.GClient.LoadCurrentConfig(options)
    obj.RunOnDeps('None', [])
    dep = obj.dependencies[0]
    self.assertEquals(frozenset(['other.com']), dep.allowed_hosts)
    # 'bar' is hosted on example.com, which is not allowed, so it is flagged.
    self.assertEquals([dep.dependencies[0]], dep.findDepsFromNotAllowedHosts())
    # Drain the processed queue so tearDown's empty-queue assertion holds.
    self._get_processed()
  def testDepsParseFailureWithEmptyAllowedHosts(self):
    """Verifies gclient fails with defined but empty allowed_hosts."""
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo", "url": "svn://example.com/foo",\n'
        '    "deps_file" : ".DEPS.git",\n'
        '  },\n'
        ']')
    write(
        os.path.join('foo', 'DEPS'),
        'allowed_hosts = []\n'
        'deps = {\n'
        '  "bar": "/bar",\n'
        '}')
    options, _ = gclient.OptionParser().parse_args([])
    obj = gclient.GClient.LoadCurrentConfig(options)
    try:
      obj.RunOnDeps('None', [])
      self.fail()
    except gclient_utils.Error, e:
      self.assertIn('allowed_hosts must be', str(e))
    finally:
      # Drain the processed queue so tearDown's empty-queue assertion holds.
      self._get_processed()
  def testDepsParseFailureWithNonIterableAllowedHosts(self):
    """Verifies gclient fails with defined but non-iterable allowed_hosts."""
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo", "url": "svn://example.com/foo",\n'
        '    "deps_file" : ".DEPS.git",\n'
        '  },\n'
        ']')
    write(
        os.path.join('foo', 'DEPS'),
        'allowed_hosts = None\n'
        'deps = {\n'
        '  "bar": "/bar",\n'
        '}')
    options, _ = gclient.OptionParser().parse_args([])
    obj = gclient.GClient.LoadCurrentConfig(options)
    try:
      obj.RunOnDeps('None', [])
      self.fail()
    except gclient_utils.Error, e:
      self.assertIn('allowed_hosts must be', str(e))
    finally:
      # Drain the processed queue so tearDown's empty-queue assertion holds.
      self._get_processed()
  def testCreatesCipdDependencies(self):
    """Verifies that a 'cipd' dep_type entry in DEPS yields a CipdDependency
    whose URL points at the CIPD server with the resolved package version."""
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo", "url": "svn://example.com/foo",\n'
        '    "deps_file" : ".DEPS.git",\n'
        '  },\n'
        ']')
    write(
        os.path.join('foo', 'DEPS'),
        'vars = {\n'
        '  "lemur_version": "version:1234",\n'
        '}\n'
        'deps = {\n'
        '  "bar": {\n'
        '    "packages": [{\n'
        '      "package": "lemur",\n'
        '      "version": Var("lemur_version"),\n'
        '    }],\n'
        '    "dep_type": "cipd",\n'
        '  }\n'
        '}')
    options, _ = gclient.OptionParser().parse_args([])
    options.validate_syntax = True
    obj = gclient.GClient.LoadCurrentConfig(options)
    self.assertEquals(1, len(obj.dependencies))
    sol = obj.dependencies[0]
    sol._condition = 'some_condition'
    sol.ParseDepsFile()
    self.assertEquals(1, len(sol.dependencies))
    dep = sol.dependencies[0]
    self.assertIsInstance(dep, gclient.CipdDependency)
    # Var("lemur_version") must be expanded into the CIPD URL.
    self.assertEquals(
        'https://chrome-infra-packages.appspot.com/lemur@version:1234',
        dep.url)
  def testSameDirAllowMultipleCipdDeps(self):
    """Verifies gclient allow multiple cipd deps under same directory."""
    parser = gclient.OptionParser()
    options, _ = parser.parse_args([])
    obj = gclient.GClient('foo', options)
    cipd_root = gclient_scm.CipdRoot(
        os.path.join(self.root_dir, 'dir1'), 'https://example.com')
    obj.add_dependencies_and_close(
      [
        gclient.Dependency(
            obj, 'foo', 'raw_url', 'url', None, None, None, None, 'DEPS', True,
            False, None, True),
      ],
      [])
    # Two CIPD packages installed under the same directory 'foo'.
    obj.dependencies[0].add_dependencies_and_close(
      [
        gclient.CipdDependency(obj.dependencies[0], 'foo',
                               {'package': 'foo_package',
                                'version': 'foo_version'},
                               cipd_root, None, True, False,
                               'fake_condition', True),
        gclient.CipdDependency(obj.dependencies[0], 'foo',
                               {'package': 'bar_package',
                                'version': 'bar_version'},
                               cipd_root, None, True, False,
                               'fake_condition', True),
      ],
      [])
    dep0 = obj.dependencies[0].dependencies[0]
    dep1 = obj.dependencies[0].dependencies[1]
    self.assertEquals('https://example.com/foo_package@foo_version', dep0.url)
    self.assertEquals('https://example.com/bar_package@bar_version', dep1.url)
if __name__ == '__main__':
  # Wrap stdout/stderr so output is flushed immediately and annotated with
  # the originating thread, which keeps parallel-job logs readable.
  sys.stdout = gclient_utils.MakeFileAutoFlush(sys.stdout)
  sys.stdout = gclient_utils.MakeFileAnnotated(sys.stdout, include_zero=True)
  sys.stderr = gclient_utils.MakeFileAutoFlush(sys.stderr)
  sys.stderr = gclient_utils.MakeFileAnnotated(sys.stderr, include_zero=True)
  # Each '-v' on the command line raises the log verbosity, capped at DEBUG.
  logging.basicConfig(
      level=[logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG][
        min(sys.argv.count('-v'), 3)],
      format='%(relativeCreated)4d %(levelname)5s %(module)13s('
             '%(lineno)d) %(message)s')
  unittest.main()
# ---- concatenation artifact: a second copy of this file follows ----
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for gclient.py.
See gclient_smoketest.py for integration tests.
"""
import Queue
import copy
import logging
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import gclient
import gclient_utils
import gclient_scm
from testing_support import trial_dir
def write(filename, content):
  """Creates |filename| holding |content|, making parent dirs as needed."""
  path = os.path.abspath(filename)
  parent = os.path.dirname(path)
  if not os.path.isdir(parent):
    os.makedirs(parent)
  f = open(path, 'w')
  try:
    f.write(content)
  finally:
    f.close()
class SCMMock(object):
  """Stand-in for a gclient SCM wrapper.

  Records each processed dependency as a (name, url) pair on the owning
  test's |processed| queue instead of touching any real checkout.
  """

  def __init__(self, unit_test, name, url):
    self.unit_test = unit_test
    self.name = name
    self.url = url

  def RunCommand(self, command, options, args, file_list):
    # The tests only ever issue the no-op 'None' command.
    self.unit_test.assertEquals('None', command)
    self.unit_test.processed.put((self.name, self.url))

  def FullUrlForRelativeUrl(self, url):
    """Resolves a relative dep URL against this checkout's URL."""
    return '%s%s' % (self.url, url)

  # pylint: disable=no-self-use
  def DoesRemoteURLMatch(self, _):
    return True

  def GetActualRemoteURL(self, _):
    return self.url
class GclientTest(trial_dir.TestCase):
  def setUp(self):
    super(GclientTest, self).setUp()
    # Queue of (name, url) pairs recorded by SCMMock as deps are processed.
    self.processed = Queue.Queue()
    self.previous_dir = os.getcwd()
    os.chdir(self.root_dir)
    # Manual mocks: route all SCM creation through SCMMock for the duration
    # of the test; restored in tearDown.
    self._old_createscm = gclient.Dependency.CreateSCM
    gclient.Dependency.CreateSCM = self._createscm
    self._old_sys_stdout = sys.stdout
    sys.stdout = gclient.gclient_utils.MakeFileAutoFlush(sys.stdout)
    sys.stdout = gclient.gclient_utils.MakeFileAnnotated(sys.stdout)
  def tearDown(self):
    # Every test must have drained self.processed; leftovers are a failure.
    self.assertEquals([], self._get_processed())
    gclient.Dependency.CreateSCM = self._old_createscm
    sys.stdout = self._old_sys_stdout
    os.chdir(self.previous_dir)
    super(GclientTest, self).tearDown()
  def _createscm(self, parsed_url, root_dir, name, out_fh=None, out_cb=None):
    """Replacement for gclient.Dependency.CreateSCM; returns an SCMMock."""
    self.assertTrue(parsed_url.startswith('svn://example.com/'), parsed_url)
    self.assertTrue(root_dir.startswith(self.root_dir), root_dir)
    return SCMMock(self, name, parsed_url)
  def testDependencies(self):
    # Serial variant: a single job forces strictly deterministic ordering.
    self._dependencies('1')
  def testDependenciesJobs(self):
    # Parallel variant: many jobs, only partial ordering can be asserted.
    self._dependencies('1000')
  def _dependencies(self, jobs):
    """Verifies that dependencies are processed in the right order.

    e.g. if there is a dependency 'src' and another 'src/third_party/bar', that
    bar isn't fetched until 'src' is done.

    Args:
      |jobs| is the number of parallel jobs simulated.
    """
    parser = gclient.OptionParser()
    options, args = parser.parse_args(['--jobs', jobs])
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo", "url": "svn://example.com/foo" },\n'
        '  { "name": "bar", "url": "svn://example.com/bar" },\n'
        '  { "name": "bar/empty", "url": "svn://example.com/bar_empty" },\n'
        ']')
    write(
        os.path.join('foo', 'DEPS'),
        'deps = {\n'
        '  "foo/dir1": "/dir1",\n'
        # This one will depend on dir1/dir2 in bar.
        '  "foo/dir1/dir2/dir3": "/dir1/dir2/dir3",\n'
        '  "foo/dir1/dir2/dir3/dir4": "/dir1/dir2/dir3/dir4",\n'
        '}')
    write(
        os.path.join('bar', 'DEPS'),
        'deps = {\n'
        # There is two foo/dir1/dir2. This one is fetched as bar/dir1/dir2.
        '  "foo/dir1/dir2": "/dir1/dir2",\n'
        '}')
    write(
        os.path.join('bar/empty', 'DEPS'),
        'deps = {\n'
        '}')
    obj = gclient.GClient.LoadCurrentConfig(options)
    self._check_requirements(obj.dependencies[0], {})
    self._check_requirements(obj.dependencies[1], {})
    obj.RunOnDeps('None', args)
    actual = self._get_processed()
    first_3 = [
        ('bar', 'svn://example.com/bar'),
        ('bar/empty', 'svn://example.com/bar_empty'),
        ('foo', 'svn://example.com/foo'),
    ]
    if jobs != 1:
      # We don't care of the ordering of these items except that bar must be
      # before bar/empty.
      self.assertTrue(
          actual.index(('bar', 'svn://example.com/bar')) <
          actual.index(('bar/empty', 'svn://example.com/bar_empty')))
      self.assertEquals(first_3, sorted(actual[0:3]))
    else:
      self.assertEquals(first_3, actual[0:3])
    # The nested deps always come after the three top-level solutions, in
    # dependency order.
    self.assertEquals(
        [
          ('foo/dir1', 'svn://example.com/foo/dir1'),
          ('foo/dir1/dir2', 'svn://example.com/bar/dir1/dir2'),
          ('foo/dir1/dir2/dir3', 'svn://example.com/foo/dir1/dir2/dir3'),
          ('foo/dir1/dir2/dir3/dir4',
              'svn://example.com/foo/dir1/dir2/dir3/dir4'),
        ],
        actual[3:])
    self.assertEquals(3, len(obj.dependencies))
    self.assertEquals('foo', obj.dependencies[0].name)
    self.assertEquals('bar', obj.dependencies[1].name)
    self.assertEquals('bar/empty', obj.dependencies[2].name)
    self._check_requirements(
        obj.dependencies[0],
        {
          'foo/dir1': ['bar', 'bar/empty', 'foo'],
          'foo/dir1/dir2/dir3':
              ['bar', 'bar/empty', 'foo', 'foo/dir1', 'foo/dir1/dir2'],
          'foo/dir1/dir2/dir3/dir4':
              [ 'bar', 'bar/empty', 'foo', 'foo/dir1', 'foo/dir1/dir2',
                'foo/dir1/dir2/dir3'],
        })
    self._check_requirements(
        obj.dependencies[1],
        {
          'foo/dir1/dir2': ['bar', 'bar/empty', 'foo', 'foo/dir1'],
        })
    self._check_requirements(
        obj.dependencies[2],
        {})
    self._check_requirements(
        obj,
        {
          'foo': [],
          'bar': [],
          'bar/empty': ['bar'],
        })
  def _check_requirements(self, solution, expected):
    """Asserts each direct dep of |solution| has exactly the |expected|
    requirements (mapping of dep name -> sorted list of prerequisite names),
    and that no expected entry is left unmatched."""
    for dependency in solution.dependencies:
      e = expected.pop(dependency.name)
      a = sorted(dependency.requirements)
      self.assertEquals(e, a, (dependency.name, e, a))
    self.assertEquals({}, expected)
  def _get_processed(self):
    """Retrieves the item in the order they were processed."""
    items = []
    try:
      # Drain the queue without blocking; Empty marks the end.
      while True:
        items.append(self.processed.get_nowait())
    except Queue.Empty:
      pass
    return items
  def testAutofix(self):
    # Invalid urls causes pain when specifying requirements. Make sure it's
    # auto-fixed.
    url = 'proto://host/path/@revision'
    d = gclient.Dependency(
        None, 'name', url, url, None, None, None,
        None, '', True, False, None, True)
    # The trailing '/' before '@revision' must be stripped.
    self.assertEquals('proto://host/path@revision', d.url)
  def testStr(self):
    """Verifies __str__() on a small dependency tree produces stable output."""
    parser = gclient.OptionParser()
    options, _ = parser.parse_args([])
    obj = gclient.GClient('foo', options)
    obj.add_dependencies_and_close(
        [
          gclient.Dependency(
            obj, 'foo', 'raw_url', 'url', None, None, None, None, 'DEPS', True,
            False, None, True),
          gclient.Dependency(
            obj, 'bar', 'raw_url', 'url', None, None, None, None, 'DEPS', True,
            False, None, True),
        ],
        [])
    obj.dependencies[0].add_dependencies_and_close(
        [
          gclient.Dependency(
            obj.dependencies[0], 'foo/dir1', 'raw_url', 'url', None, None, None,
            None, 'DEPS', True, False, None, True),
        ],
        [])
    # Make sure __str__() works fine.
    # pylint: disable=protected-access
    obj.dependencies[0]._file_list.append('foo')
    str_obj = str(obj)
    # The exact length pins the string format; update if the format changes.
    self.assertEquals(263, len(str_obj), '%d\n%s' % (len(str_obj), str_obj))
  def testHooks(self):
    """Verifies DEPS hooks are collected with their actions intact."""
    topdir = self.root_dir
    gclient_fn = os.path.join(topdir, '.gclient')
    fh = open(gclient_fn, 'w')
    print >> fh, 'solutions = [{"name":"top","url":"svn://example.com/top"}]'
    fh.close()
    subdir_fn = os.path.join(topdir, 'top')
    os.mkdir(subdir_fn)
    deps_fn = os.path.join(subdir_fn, 'DEPS')
    fh = open(deps_fn, 'w')
    hooks = [{'pattern':'.', 'action':['cmd1', 'arg1', 'arg2']}]
    print >> fh, 'hooks = %s' % repr(hooks)
    fh.close()
    # A file must exist in the checkout for the '.' pattern to match.
    fh = open(os.path.join(subdir_fn, 'fake.txt'), 'w')
    print >> fh, 'bogus content'
    fh.close()
    os.chdir(topdir)
    parser = gclient.OptionParser()
    options, _ = parser.parse_args([])
    options.force = True
    client = gclient.GClient.LoadCurrentConfig(options)
    work_queue = gclient_utils.ExecutionQueue(options.jobs, None, False)
    for s in client.dependencies:
      work_queue.enqueue(s)
    work_queue.flush({}, None, [], options=options, patch_refs={})
    self.assertEqual(
        [h.action for h in client.GetHooks(options)],
        [tuple(x['action']) for x in hooks])
  def testCustomHooks(self):
    """Verifies custom_hooks append new hooks and suppress named ones, without
    leaking into sibling solutions."""
    topdir = self.root_dir
    gclient_fn = os.path.join(topdir, '.gclient')
    fh = open(gclient_fn, 'w')
    extra_hooks = [{'name': 'append', 'pattern':'.', 'action':['supercmd']}]
    print >> fh, ('solutions = [{"name":"top","url":"svn://example.com/top",'
        '"custom_hooks": %s},' ) % repr(extra_hooks + [{'name': 'skip'}])
    print >> fh, '{"name":"bottom","url":"svn://example.com/bottom"}]'
    fh.close()
    subdir_fn = os.path.join(topdir, 'top')
    os.mkdir(subdir_fn)
    deps_fn = os.path.join(subdir_fn, 'DEPS')
    fh = open(deps_fn, 'w')
    hooks = [{'pattern':'.', 'action':['cmd1', 'arg1', 'arg2']}]
    hooks.append({'pattern':'.', 'action':['cmd2', 'arg1', 'arg2']})
    # Hooks named 'skip' are suppressed by the custom_hooks entry above.
    skip_hooks = [
        {'name': 'skip', 'pattern':'.', 'action':['cmd3', 'arg1', 'arg2']}]
    skip_hooks.append(
        {'name': 'skip', 'pattern':'.', 'action':['cmd4', 'arg1', 'arg2']})
    print >> fh, 'hooks = %s' % repr(hooks + skip_hooks)
    fh.close()
    # Make sure the custom hooks for that project don't affect the next one.
    subdir_fn = os.path.join(topdir, 'bottom')
    os.mkdir(subdir_fn)
    deps_fn = os.path.join(subdir_fn, 'DEPS')
    fh = open(deps_fn, 'w')
    sub_hooks = [{'pattern':'.', 'action':['response1', 'yes1', 'yes2']}]
    sub_hooks.append(
        {'name': 'skip', 'pattern':'.', 'action':['response2', 'yes', 'sir']})
    print >> fh, 'hooks = %s' % repr(sub_hooks)
    fh.close()
    fh = open(os.path.join(subdir_fn, 'fake.txt'), 'w')
    print >> fh, 'bogus content'
    fh.close()
    os.chdir(topdir)
    parser = gclient.OptionParser()
    options, _ = parser.parse_args([])
    options.force = True
    client = gclient.GClient.LoadCurrentConfig(options)
    work_queue = gclient_utils.ExecutionQueue(options.jobs, None, False)
    for s in client.dependencies:
      work_queue.enqueue(s)
    work_queue.flush({}, None, [], options=options, patch_refs={})
    self.assertEqual(
        [h.action for h in client.GetHooks(options)],
        [tuple(x['action']) for x in hooks + extra_hooks + sub_hooks])
  def testTargetOS(self):
    """Verifies that specifying a target_os pulls in all relevant dependencies.

    The target_os variable allows specifying the name of an additional OS which
    should be considered when selecting dependencies from a DEPS' deps_os. The
    value will be appended to the _enforced_os tuple.
    """
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo",\n'
        '    "url": "svn://example.com/foo",\n'
        '  }]\n'
        'target_os = ["baz"]')
    write(
        os.path.join('foo', 'DEPS'),
        'deps = {\n'
        '  "foo/dir1": "/dir1",'
        '}\n'
        'deps_os = {\n'
        '  "unix": { "foo/dir2": "/dir2", },\n'
        '  "baz": { "foo/dir3": "/dir3", },\n'
        '}')
    parser = gclient.OptionParser()
    options, _ = parser.parse_args(['--jobs', '1'])
    options.deps_os = "unix"
    obj = gclient.GClient.LoadCurrentConfig(options)
    # target_os 'baz' is appended to the enforced list alongside 'unix'.
    self.assertEqual(['baz', 'unix'], sorted(obj.enforced_os))
  def testTargetOsWithTargetOsOnly(self):
    """Verifies that specifying a target_os and target_os_only pulls in only
    the relevant dependencies.

    The target_os variable allows specifying the name of an additional OS which
    should be considered when selecting dependencies from a DEPS' deps_os. With
    target_os_only also set, the _enforced_os tuple will be set to only the
    target_os value.
    """
    write(
        '.gclient',
        'solutions = [\n'
        '  { "name": "foo",\n'
        '    "url": "svn://example.com/foo",\n'
        '  }]\n'
        'target_os = ["baz"]\n'
        'target_os_only = True')
    write(
        os.path.join('foo', 'DEPS'),
        'deps = {\n'
        '  "foo/dir1": "/dir1",'
        '}\n'
        'deps_os = {\n'
        '  "unix": { "foo/dir2": "/dir2", },\n'
        '  "baz": { "foo/dir3": "/dir3", },\n'
        '}')
    parser = gclient.OptionParser()
    options, _ = parser.parse_args(['--jobs', '1'])
    options.deps_os = "unix"
    obj = gclient.GClient.LoadCurrentConfig(options)
    # 'unix' from options.deps_os is dropped: target_os_only restricts the
    # enforced OS list to the explicit target_os entries only.
    self.assertEqual(['baz'], sorted(obj.enforced_os))
def testTargetOsOnlyWithoutTargetOs(self):
"""Verifies that specifying a target_os_only without target_os_only raises
an exception.
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo",\n'
' "url": "svn://example.com/foo",\n'
' }]\n'
'target_os_only = True')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "foo/dir1": "/dir1",'
'}\n'
'deps_os = {\n'
' "unix": { "foo/dir2": "/dir2", },\n'
'}')
parser = gclient.OptionParser()
options, _ = parser.parse_args(['--jobs', '1'])
options.deps_os = "unix"
exception_raised = False
try:
gclient.GClient.LoadCurrentConfig(options)
except gclient_utils.Error:
exception_raised = True
self.assertTrue(exception_raised)
def testTargetOsInDepsFile(self):
"""Verifies that specifying a target_os value in a DEPS file pulls in all
relevant dependencies.
The target_os variable in a DEPS file allows specifying the name of an
additional OS which should be considered when selecting dependencies from a
DEPS' deps_os. The value will be appended to the _enforced_os tuple.
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo",\n'
' "url": "svn://example.com/foo",\n'
' },\n'
' { "name": "bar",\n'
' "url": "svn://example.com/bar",\n'
' }]\n')
write(
os.path.join('foo', 'DEPS'),
'target_os = ["baz"]\n'
'deps_os = {\n'
' "unix": { "foo/unix": "/unix", },\n'
' "baz": { "foo/baz": "/baz", },\n'
' "jaz": { "foo/jaz": "/jaz", },\n'
'}')
write(
os.path.join('bar', 'DEPS'),
'deps_os = {\n'
' "unix": { "bar/unix": "/unix", },\n'
' "baz": { "bar/baz": "/baz", },\n'
' "jaz": { "bar/jaz": "/jaz", },\n'
'}')
parser = gclient.OptionParser()
options, _ = parser.parse_args(['--jobs', '1'])
options.deps_os = 'unix'
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEqual(['unix'], sorted(obj.enforced_os))
self.assertEquals(
[
('bar', 'svn://example.com/bar'),
('bar/unix', 'svn://example.com/bar/unix'),
('foo', 'svn://example.com/foo'),
('foo/baz', 'svn://example.com/foo/baz'),
('foo/unix', 'svn://example.com/foo/unix'),
],
sorted(self._get_processed()))
def testTargetOsForHooksInDepsFile(self):
"""Verifies that specifying a target_os value in a DEPS file runs the right
entries in hooks_os.
"""
write(
'DEPS',
'hooks = [\n'
' {\n'
' "name": "a",\n'
' "pattern": ".",\n'
' "action": [ "python", "do_a" ],\n'
' },\n'
']\n'
'\n'
'hooks_os = {\n'
' "blorp": ['
' {\n'
' "name": "b",\n'
' "pattern": ".",\n'
' "action": [ "python", "do_b" ],\n'
' },\n'
' ],\n'
'}\n')
write(
'.gclient',
'solutions = [\n'
' { "name": ".",\n'
' "url": "svn://example.com/",\n'
' }]\n')
# Test for an OS not in hooks_os.
parser = gclient.OptionParser()
options, args = parser.parse_args(['--jobs', '1'])
options.deps_os = 'zippy'
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', args)
self.assertEqual(['zippy'], sorted(obj.enforced_os))
all_hooks = [h.action for h in obj.GetHooks(options)]
self.assertEquals(
[('.', 'svn://example.com/'),],
sorted(self._get_processed()))
self.assertEquals(all_hooks,
[('python', 'do_a')])
# Test for OS that has extra hooks in hooks_os.
parser = gclient.OptionParser()
options, args = parser.parse_args(['--jobs', '1'])
options.deps_os = 'blorp'
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', args)
self.assertEqual(['blorp'], sorted(obj.enforced_os))
all_hooks = [h.action for h in obj.GetHooks(options)]
self.assertEquals(
[('.', 'svn://example.com/'),],
sorted(self._get_processed()))
self.assertEquals(all_hooks,
[('python', 'do_a'),
('python', 'do_b')])
  def testUpdateWithOsDeps(self):
    """Verifies that complicated deps_os constructs result in the
    correct data also with multple operating systems. Also see
    testDepsOsOverrideDepsInDepsFile.

    Each case feeds MergeWithOsDeps(deps, deps_os, os_list, False) and
    checks both the merged result and that the input dict is untouched.
    """
    test_data = [
        # Tuples of deps, deps_os, os_list and expected_deps.
        (
            # OS with no overrides at all.
            {'foo': 'default_foo'},
            {'os1': { 'foo': None } },
            ['os2'],
            {'foo': 'default_foo'}
            ),
        (
            # One OS wants to add a module.
            {'foo': 'default_foo'},
            {'os1': { 'bar': 'os1_bar' }},
            ['os1'],
            {'foo': 'default_foo',
             'bar': {'should_process': True, 'url': 'os1_bar'}}
            ),
        (
            # One OS wants to add a module. One doesn't care.
            {'foo': 'default_foo'},
            {'os1': { 'bar': 'os1_bar' }},
            ['os1', 'os2'],
            {'foo': 'default_foo',
             'bar': {'should_process': True, 'url': 'os1_bar'}}
            ),
        (
            # Two OSes want to add a module with the same definition.
            {'foo': 'default_foo'},
            {'os1': { 'bar': 'os12_bar' },
             'os2': { 'bar': 'os12_bar' }},
            ['os1', 'os2'],
            {'foo': 'default_foo',
             'bar': {'should_process': True, 'url': 'os12_bar'}}
            ),
        (
            # One OS doesn't need module, one OS wants the default.
            {'foo': 'default_foo'},
            {'os1': { 'foo': None },
             'os2': {}},
            ['os1', 'os2'],
            {'foo': 'default_foo'}
            ),
        (
            # OS doesn't need module.
            {'foo': 'default_foo'},
            {'os1': { 'foo': None } },
            ['os1'],
            {'foo': 'default_foo'}
            ),
        (
            # No-op override. Regression test for http://crbug.com/735418 .
            {'foo': 'default_foo'},
            {'os1': { 'foo': 'default_foo' } },
            [],
            {'foo': {'should_process': True, 'url': 'default_foo'}}
            ),
        ]
    for deps, deps_os, target_os_list, expected_deps in test_data:
      # Snapshot the input: MergeWithOsDeps must not mutate its arguments.
      orig_deps = copy.deepcopy(deps)
      result = gclient.Dependency.MergeWithOsDeps(
          deps, deps_os, target_os_list, False)
      self.assertEqual(result, expected_deps)
      self.assertEqual(deps, orig_deps)
def testUpdateWithOsDepsInvalid(self):
test_data = [
# Tuples of deps, deps_os, os_list.
(
# OS wants a different version of module.
{'foo': 'default_foo'},
{'os1': { 'foo': 'os1_foo'} },
['os1'],
),
(
# One OS doesn't need module, another OS wants a special version.
{'foo': 'default_foo'},
{'os1': { 'foo': None },
'os2': { 'foo': 'os2_foo'}},
['os1', 'os2'],
),
]
for deps, deps_os, target_os_list in test_data:
with self.assertRaises(gclient_utils.Error):
gclient.Dependency.MergeWithOsDeps(deps, deps_os, target_os_list, False)
def testLateOverride(self):
"""Verifies expected behavior of LateOverride."""
url = "<EMAIL>:dart-lang/spark.git"
d = gclient.Dependency(None, 'name', 'raw_url', 'url',
None, None, None, None, '', True, False, None, True)
late_url = d.LateOverride(url)
self.assertEquals(url, late_url)
def testDepsOsOverrideDepsInDepsFile(self):
"""Verifies that a 'deps_os' path cannot override a 'deps' path. Also
see testUpdateWithOsDeps above.
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo",\n'
' "url": "svn://example.com/foo",\n'
' },]\n')
write(
os.path.join('foo', 'DEPS'),
'target_os = ["baz"]\n'
'deps = {\n'
' "foo/src": "/src",\n' # This path is to be overridden by similar path
# in deps_os['unix'].
'}\n'
'deps_os = {\n'
' "unix": { "foo/unix": "/unix",'
' "foo/src": "/src_unix"},\n'
' "baz": { "foo/baz": "/baz",\n'
' "foo/src": None},\n'
' "jaz": { "foo/jaz": "/jaz", },\n'
'}')
parser = gclient.OptionParser()
options, _ = parser.parse_args(['--jobs', '1'])
options.deps_os = 'unix'
obj = gclient.GClient.LoadCurrentConfig(options)
with self.assertRaises(gclient_utils.Error):
obj.RunOnDeps('None', [])
self.assertEqual(['unix'], sorted(obj.enforced_os))
self.assertEquals(
[
('foo', 'svn://example.com/foo'),
],
sorted(self._get_processed()))
def testRecursionOverride(self):
"""Verifies gclient respects the |recursion| var syntax.
We check several things here:
- |recursion| = 3 sets recursion on the foo dep to exactly 3
(we pull /fizz, but not /fuzz)
- pulling foo/bar at recursion level 1 (in .gclient) is overriden by
a later pull of foo/bar at recursion level 2 (in the dep tree)
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo" },\n'
' { "name": "foo/bar", "url": "svn://example.com/bar" },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "bar": "/bar",\n'
'}\n'
'recursion = 3')
write(
os.path.join('bar', 'DEPS'),
'deps = {\n'
' "baz": "/baz",\n'
'}')
write(
os.path.join('baz', 'DEPS'),
'deps = {\n'
' "fizz": "/fizz",\n'
'}')
write(
os.path.join('fizz', 'DEPS'),
'deps = {\n'
' "fuzz": "/fuzz",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
('foo', 'svn://example.com/foo'),
('foo/bar', 'svn://example.com/bar'),
('bar', 'svn://example.com/foo/bar'),
('baz', 'svn://example.com/foo/bar/baz'),
('fizz', 'svn://example.com/foo/bar/baz/fizz'),
],
self._get_processed())
def testRecursedepsOverride(self):
"""Verifies gclient respects the |recursedeps| var syntax.
This is what we mean to check here:
- |recursedeps| = [...] on 2 levels means we pull exactly 3 deps
(up to /fizz, but not /fuzz)
- pulling foo/bar with no recursion (in .gclient) is overriden by
a later pull of foo/bar with recursion (in the dep tree)
- pulling foo/tar with no recursion (in .gclient) is no recursively
pulled (taz is left out)
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo" },\n'
' { "name": "foo/bar", "url": "svn://example.com/bar" },\n'
' { "name": "foo/tar", "url": "svn://example.com/tar" },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "bar": "/bar",\n'
'}\n'
'recursedeps = ["bar"]')
write(
os.path.join('bar', 'DEPS'),
'deps = {\n'
' "baz": "/baz",\n'
'}\n'
'recursedeps = ["baz"]')
write(
os.path.join('baz', 'DEPS'),
'deps = {\n'
' "fizz": "/fizz",\n'
'}')
write(
os.path.join('fizz', 'DEPS'),
'deps = {\n'
' "fuzz": "/fuzz",\n'
'}')
write(
os.path.join('tar', 'DEPS'),
'deps = {\n'
' "taz": "/taz",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
('bar', 'svn://example.com/foo/bar'),
('baz', 'svn://example.com/foo/bar/baz'),
('fizz', 'svn://example.com/foo/bar/baz/fizz'),
('foo', 'svn://example.com/foo'),
('foo/bar', 'svn://example.com/bar'),
('foo/tar', 'svn://example.com/tar'),
],
sorted(self._get_processed()))
def testRecursedepsOverrideWithRelativePaths(self):
"""Verifies gclient respects |recursedeps| with relative paths."""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo" },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'use_relative_paths = True\n'
'deps = {\n'
' "bar": "/bar",\n'
'}\n'
'recursedeps = ["bar"]')
write(
os.path.join('foo/bar', 'DEPS'),
'deps = {\n'
' "baz": "/baz",\n'
'}')
write(
os.path.join('baz', 'DEPS'),
'deps = {\n'
' "fizz": "/fizz",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
('foo', 'svn://example.com/foo'),
('foo/bar', 'svn://example.com/foo/bar'),
('foo/baz', 'svn://example.com/foo/bar/baz'),
],
self._get_processed())
def testRelativeRecursion(self):
"""Verifies that nested use_relative_paths is always respected."""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo" },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'use_relative_paths = True\n'
'deps = {\n'
' "bar": "/bar",\n'
'}\n'
'recursedeps = ["bar"]')
write(
os.path.join('foo/bar', 'DEPS'),
'use_relative_paths = True\n'
'deps = {\n'
' "baz": "/baz",\n'
'}')
write(
os.path.join('baz', 'DEPS'),
'deps = {\n'
' "fizz": "/fizz",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
('foo', 'svn://example.com/foo'),
('foo/bar', 'svn://example.com/foo/bar'),
('foo/bar/baz', 'svn://example.com/foo/bar/baz'),
],
self._get_processed())
def testRecursionOverridesRecursedeps(self):
"""Verifies gclient respects |recursion| over |recursedeps|.
|recursion| is set in a top-level DEPS file. That value is meant
to affect how many subdeps are parsed via recursion.
|recursedeps| is set in each DEPS file to control whether or not
to recurse into the immediate next subdep.
This test verifies that if both syntaxes are mixed in a DEPS file,
we disable |recursedeps| support and only obey |recursion|.
Since this setting is evaluated per DEPS file, recursed DEPS
files will each be re-evaluated according to the per DEPS rules.
So a DEPS that only contains |recursedeps| could then override any
previous |recursion| setting. There is extra processing to ensure
this does not happen.
For this test to work correctly, we need to use a DEPS chain that
only contains recursion controls in the top DEPS file.
In foo, |recursion| and |recursedeps| are specified. When we see
|recursion|, we stop trying to use |recursedeps|.
There are 2 constructions of DEPS here that are key to this test:
(1) In foo, if we used |recursedeps| instead of |recursion|, we
would also pull in bar. Since bar's DEPS doesn't contain any
recursion statements, we would stop processing at bar.
(2) In fizz, if we used |recursedeps| at all, we should pull in
fuzz.
We expect to keep going past bar (satisfying 1) and we don't
expect to pull in fuzz (satisfying 2).
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo" },\n'
' { "name": "foo/bar", "url": "svn://example.com/bar" },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "bar": "/bar",\n'
'}\n'
'recursion = 3\n'
'recursedeps = ["bar"]')
write(
os.path.join('bar', 'DEPS'),
'deps = {\n'
' "baz": "/baz",\n'
'}')
write(
os.path.join('baz', 'DEPS'),
'deps = {\n'
' "fizz": "/fizz",\n'
'}')
write(
os.path.join('fizz', 'DEPS'),
'deps = {\n'
' "fuzz": "/fuzz",\n'
'}\n'
'recursedeps = ["fuzz"]')
write(
os.path.join('fuzz', 'DEPS'),
'deps = {\n'
' "tar": "/tar",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
('foo', 'svn://example.com/foo'),
('foo/bar', 'svn://example.com/bar'),
('bar', 'svn://example.com/foo/bar'),
# Deps after this would have been skipped if we were obeying
# |recursedeps|.
('baz', 'svn://example.com/foo/bar/baz'),
('fizz', 'svn://example.com/foo/bar/baz/fizz'),
# And this dep would have been picked up if we were obeying
# |recursedeps|.
# 'svn://example.com/foo/bar/baz/fuzz',
],
self._get_processed())
def testRecursedepsAltfile(self):
"""Verifies gclient respects the |recursedeps| var syntax with overridden
target DEPS file.
This is what we mean to check here:
- Naming an alternate DEPS file in recursedeps pulls from that one.
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo" },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "bar": "/bar",\n'
'}\n'
'recursedeps = [("bar", "DEPS.alt")]')
write(os.path.join('bar', 'DEPS'), 'ERROR ERROR ERROR')
write(
os.path.join('bar', 'DEPS.alt'),
'deps = {\n'
' "baz": "/baz",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
('foo', 'svn://example.com/foo'),
('bar', 'svn://example.com/foo/bar'),
('baz', 'svn://example.com/foo/bar/baz'),
],
self._get_processed())
def testGitDeps(self):
"""Verifies gclient respects a .DEPS.git deps file.
Along the way, we also test that if both DEPS and .DEPS.git are present,
that gclient does not read the DEPS file. This will reliably catch bugs
where gclient is always hitting the wrong file (DEPS).
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo",\n'
' "deps_file" : ".DEPS.git",\n'
' },\n'
']')
write(
os.path.join('foo', '.DEPS.git'),
'deps = {\n'
' "bar": "/bar",\n'
'}')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "baz": "/baz",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
('foo', 'svn://example.com/foo'),
('bar', 'svn://example.com/foo/bar'),
],
self._get_processed())
def testGitDepsFallback(self):
"""Verifies gclient respects fallback to DEPS upon missing deps file."""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo",\n'
' "deps_file" : ".DEPS.git",\n'
' },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "bar": "/bar",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEquals(
[
('foo', 'svn://example.com/foo'),
('bar', 'svn://example.com/foo/bar'),
],
self._get_processed())
def testDepsFromNotAllowedHostsUnspecified(self):
"""Verifies gclient works fine with DEPS without allowed_hosts."""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo",\n'
' "deps_file" : ".DEPS.git",\n'
' },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "bar": "/bar",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
dep = obj.dependencies[0]
self.assertEquals([], dep.findDepsFromNotAllowedHosts())
self.assertEquals(frozenset(), dep.allowed_hosts)
self._get_processed()
def testDepsFromNotAllowedHostsOK(self):
"""Verifies gclient works fine with DEPS with proper allowed_hosts."""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo",\n'
' "deps_file" : ".DEPS.git",\n'
' },\n'
']')
write(
os.path.join('foo', '.DEPS.git'),
'allowed_hosts = ["example.com"]\n'
'deps = {\n'
' "bar": "svn://example.com/bar",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
dep = obj.dependencies[0]
self.assertEquals([], dep.findDepsFromNotAllowedHosts())
self.assertEquals(frozenset(['example.com']), dep.allowed_hosts)
self._get_processed()
def testDepsFromNotAllowedHostsBad(self):
"""Verifies gclient works fine with DEPS with proper allowed_hosts."""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo",\n'
' "deps_file" : ".DEPS.git",\n'
' },\n'
']')
write(
os.path.join('foo', '.DEPS.git'),
'allowed_hosts = ["other.com"]\n'
'deps = {\n'
' "bar": "svn://example.com/bar",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
dep = obj.dependencies[0]
self.assertEquals(frozenset(['other.com']), dep.allowed_hosts)
self.assertEquals([dep.dependencies[0]], dep.findDepsFromNotAllowedHosts())
self._get_processed()
def testDepsParseFailureWithEmptyAllowedHosts(self):
"""Verifies gclient fails with defined but empty allowed_hosts."""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo",\n'
' "deps_file" : ".DEPS.git",\n'
' },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'allowed_hosts = []\n'
'deps = {\n'
' "bar": "/bar",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
try:
obj.RunOnDeps('None', [])
self.fail()
except gclient_utils.Error, e:
self.assertIn('allowed_hosts must be', str(e))
finally:
self._get_processed()
def testDepsParseFailureWithNonIterableAllowedHosts(self):
"""Verifies gclient fails with defined but non-iterable allowed_hosts."""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo",\n'
' "deps_file" : ".DEPS.git",\n'
' },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'allowed_hosts = None\n'
'deps = {\n'
' "bar": "/bar",\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
obj = gclient.GClient.LoadCurrentConfig(options)
try:
obj.RunOnDeps('None', [])
self.fail()
except gclient_utils.Error, e:
self.assertIn('allowed_hosts must be', str(e))
finally:
self._get_processed()
def testCreatesCipdDependencies(self):
"""Verifies something."""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo",\n'
' "deps_file" : ".DEPS.git",\n'
' },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'vars = {\n'
' "lemur_version": "version:1234",\n'
'}\n'
'deps = {\n'
' "bar": {\n'
' "packages": [{\n'
' "package": "lemur",\n'
' "version": Var("lemur_version"),\n'
' }],\n'
' "dep_type": "cipd",\n'
' }\n'
'}')
options, _ = gclient.OptionParser().parse_args([])
options.validate_syntax = True
obj = gclient.GClient.LoadCurrentConfig(options)
self.assertEquals(1, len(obj.dependencies))
sol = obj.dependencies[0]
sol._condition = 'some_condition'
sol.ParseDepsFile()
self.assertEquals(1, len(sol.dependencies))
dep = sol.dependencies[0]
self.assertIsInstance(dep, gclient.CipdDependency)
self.assertEquals(
'https://chrome-infra-packages.appspot.com/lemur@version:1234',
dep.url)
def testSameDirAllowMultipleCipdDeps(self):
"""Verifies gclient allow multiple cipd deps under same directory."""
parser = gclient.OptionParser()
options, _ = parser.parse_args([])
obj = gclient.GClient('foo', options)
cipd_root = gclient_scm.CipdRoot(
os.path.join(self.root_dir, 'dir1'), 'https://example.com')
obj.add_dependencies_and_close(
[
gclient.Dependency(
obj, 'foo', 'raw_url', 'url', None, None, None, None, 'DEPS', True,
False, None, True),
],
[])
obj.dependencies[0].add_dependencies_and_close(
[
gclient.CipdDependency(obj.dependencies[0], 'foo',
{'package': 'foo_package',
'version': 'foo_version'},
cipd_root, None, True, False,
'fake_condition', True),
gclient.CipdDependency(obj.dependencies[0], 'foo',
{'package': 'bar_package',
'version': 'bar_version'},
cipd_root, None, True, False,
'fake_condition', True),
],
[])
dep0 = obj.dependencies[0].dependencies[0]
dep1 = obj.dependencies[0].dependencies[1]
self.assertEquals('https://example.com/foo_package@foo_version', dep0.url)
self.assertEquals('https://example.com/bar_package@bar_version', dep1.url)
if __name__ == '__main__':
  # Auto-flush and annotate stdout/stderr so output from parallel jobs stays
  # readable when interleaved.
  sys.stdout = gclient_utils.MakeFileAutoFlush(sys.stdout)
  sys.stdout = gclient_utils.MakeFileAnnotated(sys.stdout, include_zero=True)
  sys.stderr = gclient_utils.MakeFileAutoFlush(sys.stderr)
  sys.stderr = gclient_utils.MakeFileAnnotated(sys.stderr, include_zero=True)
  # Log level scales with repeated -v flags on the command line, capped at
  # DEBUG (three or more -v).
  logging.basicConfig(
      level=[logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG][
          min(sys.argv.count('-v'), 3)],
      format='%(relativeCreated)4d %(levelname)5s %(module)13s('
             '%(lineno)d) %(message)s')
  unittest.main()
|
en
| 0.865705
|
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. Unit tests for gclient.py. See gclient_smoketest.py for integration tests. Writes the content of a file and create the directories as needed. # pylint: disable=no-self-use # Manual mocks. Verifies that dependencies are processed in the right order. e.g. if there is a dependency 'src' and another 'src/third_party/bar', that bar isn't fetched until 'src' is done. Args: |jobs| is the number of parallel jobs simulated. # This one will depend on dir1/dir2 in bar. # There is two foo/dir1/dir2. This one is fetched as bar/dir1/dir2. # We don't care of the ordering of these items except that bar must be # before bar/empty. Retrieves the item in the order they were processed. # Invalid urls causes pain when specifying requirements. Make sure it's # auto-fixed. # Make sure __str__() works fine. # pylint: disable=protected-access # Make sure the custom hooks for that project don't affect the next one. Verifies that specifying a target_os pulls in all relevant dependencies. The target_os variable allows specifying the name of an additional OS which should be considered when selecting dependencies from a DEPS' deps_os. The value will be appended to the _enforced_os tuple. Verifies that specifying a target_os and target_os_only pulls in only the relevant dependencies. The target_os variable allows specifying the name of an additional OS which should be considered when selecting dependencies from a DEPS' deps_os. With target_os_only also set, the _enforced_os tuple will be set to only the target_os value. Verifies that specifying a target_os_only without target_os_only raises an exception. Verifies that specifying a target_os value in a DEPS file pulls in all relevant dependencies. 
The target_os variable in a DEPS file allows specifying the name of an additional OS which should be considered when selecting dependencies from a DEPS' deps_os. The value will be appended to the _enforced_os tuple. Verifies that specifying a target_os value in a DEPS file runs the right entries in hooks_os. # Test for an OS not in hooks_os. # Test for OS that has extra hooks in hooks_os. Verifies that complicated deps_os constructs result in the correct data also with multple operating systems. Also see testDepsOsOverrideDepsInDepsFile. # Tuples of deps, deps_os, os_list and expected_deps. # OS with no overrides at all. # One OS wants to add a module. # One OS wants to add a module. One doesn't care. # Two OSes want to add a module with the same definition. # One OS doesn't need module, one OS wants the default. # OS doesn't need module. # No-op override. Regression test for http://crbug.com/735418 . # Tuples of deps, deps_os, os_list. # OS wants a different version of module. # One OS doesn't need module, another OS wants a special version. Verifies expected behavior of LateOverride. Verifies that a 'deps_os' path cannot override a 'deps' path. Also see testUpdateWithOsDeps above. # This path is to be overridden by similar path # in deps_os['unix']. Verifies gclient respects the |recursion| var syntax. We check several things here: - |recursion| = 3 sets recursion on the foo dep to exactly 3 (we pull /fizz, but not /fuzz) - pulling foo/bar at recursion level 1 (in .gclient) is overriden by a later pull of foo/bar at recursion level 2 (in the dep tree) Verifies gclient respects the |recursedeps| var syntax. This is what we mean to check here: - |recursedeps| = [...] 
on 2 levels means we pull exactly 3 deps (up to /fizz, but not /fuzz) - pulling foo/bar with no recursion (in .gclient) is overriden by a later pull of foo/bar with recursion (in the dep tree) - pulling foo/tar with no recursion (in .gclient) is no recursively pulled (taz is left out) Verifies gclient respects |recursedeps| with relative paths. Verifies that nested use_relative_paths is always respected. Verifies gclient respects |recursion| over |recursedeps|. |recursion| is set in a top-level DEPS file. That value is meant to affect how many subdeps are parsed via recursion. |recursedeps| is set in each DEPS file to control whether or not to recurse into the immediate next subdep. This test verifies that if both syntaxes are mixed in a DEPS file, we disable |recursedeps| support and only obey |recursion|. Since this setting is evaluated per DEPS file, recursed DEPS files will each be re-evaluated according to the per DEPS rules. So a DEPS that only contains |recursedeps| could then override any previous |recursion| setting. There is extra processing to ensure this does not happen. For this test to work correctly, we need to use a DEPS chain that only contains recursion controls in the top DEPS file. In foo, |recursion| and |recursedeps| are specified. When we see |recursion|, we stop trying to use |recursedeps|. There are 2 constructions of DEPS here that are key to this test: (1) In foo, if we used |recursedeps| instead of |recursion|, we would also pull in bar. Since bar's DEPS doesn't contain any recursion statements, we would stop processing at bar. (2) In fizz, if we used |recursedeps| at all, we should pull in fuzz. We expect to keep going past bar (satisfying 1) and we don't expect to pull in fuzz (satisfying 2). # Deps after this would have been skipped if we were obeying # |recursedeps|. # And this dep would have been picked up if we were obeying # |recursedeps|. 
# 'svn://example.com/foo/bar/baz/fuzz', Verifies gclient respects the |recursedeps| var syntax with overridden target DEPS file. This is what we mean to check here: - Naming an alternate DEPS file in recursedeps pulls from that one. Verifies gclient respects a .DEPS.git deps file. Along the way, we also test that if both DEPS and .DEPS.git are present, that gclient does not read the DEPS file. This will reliably catch bugs where gclient is always hitting the wrong file (DEPS). Verifies gclient respects fallback to DEPS upon missing deps file. Verifies gclient works fine with DEPS without allowed_hosts. Verifies gclient works fine with DEPS with proper allowed_hosts. Verifies gclient works fine with DEPS with proper allowed_hosts. Verifies gclient fails with defined but empty allowed_hosts. Verifies gclient fails with defined but non-iterable allowed_hosts. Verifies something. Verifies gclient allow multiple cipd deps under same directory.
| 2.242146
| 2
|
tests/test_bktree.py
|
agarwalutkarsh554/imagededup
| 4,100
|
6627680
|
<gh_stars>1000+
from collections import OrderedDict
from imagededup.methods.hashing import Hashing
from imagededup.handlers.search.bktree import BKTree, BkTreeNode
# Test BkTreeNode
def initialize_for_bktree():
    """Build the shared fixture: an ordered hash dict and the distance fn."""
    names = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
    values = ['9', 'D', 'A', 'F', '2', '6', '7', 'E']
    return OrderedDict(zip(names, values)), Hashing.hamming_distance
def test_bktreenode_correct_initialization():
    """A fresh BkTreeNode carries its constructor args and has no children."""
    node = BkTreeNode('test_node', '1aef', None)
    assert node.node_name == 'test_node'
    assert node.node_value == '1aef'
    assert node.parent_name is None
    assert len(node.children) == 0
# test BKTree class
def test_insert_tree():
    """A second inserted node becomes the root's child with root as parent."""
    _, dist_func = initialize_for_bktree()
    tree = BKTree({'a': '9', 'b': 'D'}, dist_func)
    assert tree.ROOT == 'a'
    assert 'b' in tree.dict_all['a'].children
    assert tree.dict_all['b'].parent_name == 'a'
def test_insert_tree_collision():
    """A node whose root distance is already taken hangs off that child."""
    _, dist_func = initialize_for_bktree()
    # OrderedDict guarantees 'a' ends up as the root of the tree.
    fixture = OrderedDict([('a', '9'), ('b', 'D'), ('c', '8')])
    tree = BKTree(fixture, dist_func)
    assert tree.ROOT == 'a'
    assert len(tree.dict_all[tree.ROOT].children) == 1
    assert 'c' in tree.dict_all['b'].children
def test_insert_tree_different_nodes():
    """Nodes at distinct distances from the root all become root children."""
    _, dist_func = initialize_for_bktree()
    # OrderedDict guarantees 'a' ends up as the root of the tree.
    fixture = OrderedDict([('a', '9'), ('b', 'D'), ('c', 'F')])
    tree = BKTree(fixture, dist_func)
    assert tree.ROOT == 'a'
    root_children = tree.dict_all[tree.ROOT].children
    assert len(root_children) == 2
    assert {'b', 'c'} <= set(root_children)
def test_insert_tree_check_distance():
    """Distances recorded in the root's children map match expectations."""
    _, dist_func = initialize_for_bktree()
    # OrderedDict guarantees 'a' ends up as the root of the tree.
    fixture = OrderedDict([('a', '9'), ('b', 'D'), ('c', 'F')])
    tree = BKTree(fixture, dist_func)
    assert tree.ROOT == 'a'
    root = tree.dict_all[tree.ROOT]
    assert root.children['b'] == 1
    assert root.children['c'] == 2
def test_construct_tree():
    """The full fixture tree has the expected root, leaves and edges."""
    hash_dict, dist_func = initialize_for_bktree()
    tree = BKTree(hash_dict, dist_func)
    # Root of the complete tree.
    assert tree.ROOT == 'a'
    # Leaf nodes are exactly the nodes without children.
    leaves = {name for name, node in tree.dict_all.items()
              if not node.children}
    assert leaves == {'b', 'd', 'f', 'h'}
    # The root fans out to four children.
    assert len(tree.dict_all[tree.ROOT].children) == 4
    # 'd' hangs off 'c' at distance 2.
    assert tree.dict_all['c'].children['d'] == 2
def test_search():
    """A tolerance-2 search of the fixture tree yields five retrievals."""
    hash_dict, dist_func = initialize_for_bktree()
    tree = BKTree(hash_dict, dist_func)
    hits = tree.search('5', tol=2)
    assert len(hits) == 5
def test_search_correctness():
# Input a tree and send a search query, check whether correct retrievals are returned
hash_dict, dist_func = initialize_for_bktree()
bk = BKTree(hash_dict, dist_func)
query = '5'
valid_retrievals = bk.search(query, tol=2)
assert set([i[0] for i in valid_retrievals]) == set(['a', 'f', 'g', 'd', 'b'])
def test_search_zero_tolerance():
# Input a tree and send a search query, check whether zero retrievals are returned for zero tolerance
hash_dict, dist_func = initialize_for_bktree()
bk = BKTree(hash_dict, dist_func)
query = '5'
valid_retrievals = bk.search(query, tol=0)
assert len(valid_retrievals) == 0
def test_search_dist():
# Input a tree and send a search query, check whether correct distance for a retrieval is returned
hash_dict, dist_func = initialize_for_bktree()
bk = BKTree(hash_dict, dist_func)
query = '5'
valid_retrievals = bk.search(query, tol=2)
assert [i for i in valid_retrievals if i[0] == 'a'][0][1] == 2
def test_get_next_candidates_valid():
# Give a partial tree as input and check that for a query, expected candidates and validity flag are obtained
hash_dict, dist_func = initialize_for_bktree()
bk = BKTree(hash_dict, dist_func)
assert bk.ROOT == 'a'
query = '5'
candidates, validity, dist = bk._get_next_candidates(
query, bk.dict_all[bk.ROOT], tolerance=2
)
candidates = set(candidates)
assert candidates <= set(['b', 'c', 'e', 'f'])
assert validity
def test_get_next_candidates_invalid():
# Give a tree as input and check that for a query, validity flag is 0
hash_dict, dist_func = initialize_for_bktree()
bk = BKTree(hash_dict, dist_func)
assert bk.ROOT == 'a'
query = '5'
_, validity, _ = bk._get_next_candidates(query, bk.dict_all[bk.ROOT], tolerance=1)
assert not validity
def test_tolerance_affects_retrievals():
# Give a partial tree as input and check that for a query, increased tolerance gives more retrievals as expected for
# the input tree
hash_dict, dist_func = initialize_for_bktree()
bk = BKTree(hash_dict, dist_func)
assert bk.ROOT == 'a'
query = '5'
candidates, _, _ = bk._get_next_candidates(query, bk.dict_all[bk.ROOT], tolerance=1)
low_tolerance_candidate_len = len(candidates)
candidates, _, _ = bk._get_next_candidates(query, bk.dict_all[bk.ROOT], tolerance=2)
high_tolerance_candidate_len = len(candidates)
assert high_tolerance_candidate_len > low_tolerance_candidate_len
|
from collections import OrderedDict
from imagededup.methods.hashing import Hashing
from imagededup.handlers.search.bktree import BKTree, BkTreeNode
# Test BkTreeNode
def initialize_for_bktree():
    """Return a deterministic (hash_dict, distance_function) pair for tree tests."""
    hashes = OrderedDict(
        {'a': '9', 'b': 'D', 'c': 'A', 'd': 'F', 'e': '2', 'f': '6', 'g': '7', 'h': 'E'}
    )
    return hashes, Hashing.hamming_distance


def test_bktreenode_correct_initialization():
    """A fresh BkTreeNode stores its fields and starts with no children."""
    node = BkTreeNode('test_node', '1aef', None)
    assert node.node_name == 'test_node'
    assert node.node_value == '1aef'
    assert node.parent_name is None
    assert not node.children


# BKTree class behaviour


def test_insert_tree():
    """A second node becomes the root's child and records the root as parent."""
    _, dist_func = initialize_for_bktree()
    tree = BKTree({'a': '9', 'b': 'D'}, dist_func)
    assert tree.ROOT == 'a'
    assert 'b' in tree.dict_all['a'].children
    assert tree.dict_all['b'].parent_name == 'a'


def test_insert_tree_collision():
    """A node at an already-used distance is pushed down to the colliding child."""
    _, dist_func = initialize_for_bktree()
    hashes = OrderedDict({'a': '9', 'b': 'D', 'c': '8'})  # 'a' inserted first -> root
    tree = BKTree(hashes, dist_func)
    assert tree.ROOT == 'a'
    assert len(tree.dict_all[tree.ROOT].children) == 1
    assert 'c' in tree.dict_all['b'].children


def test_insert_tree_different_nodes():
    """Nodes at distinct distances from the root all become direct root children."""
    _, dist_func = initialize_for_bktree()
    hashes = OrderedDict({'a': '9', 'b': 'D', 'c': 'F'})  # 'a' inserted first -> root
    tree = BKTree(hashes, dist_func)
    assert tree.ROOT == 'a'
    root_children = tree.dict_all[tree.ROOT].children
    assert len(root_children) == 2
    assert {'b', 'c'} <= set(root_children.keys())


def test_insert_tree_check_distance():
    """The root's children mapping records the distance to each child."""
    _, dist_func = initialize_for_bktree()
    hashes = OrderedDict({'a': '9', 'b': 'D', 'c': 'F'})  # 'a' inserted first -> root
    tree = BKTree(hashes, dist_func)
    assert tree.ROOT == 'a'
    root_children = tree.dict_all[tree.ROOT].children
    assert root_children['b'] == 1
    assert root_children['c'] == 2


def test_construct_tree():
    """Build the full tree and verify root, leaf set, fan-out and one link."""
    hashes, dist_func = initialize_for_bktree()
    tree = BKTree(hashes, dist_func)
    assert tree.ROOT == 'a'
    # Leaf nodes are exactly the ones without children.
    leaves = {name for name, node in tree.dict_all.items() if not node.children}
    assert leaves == {'b', 'd', 'f', 'h'}
    # The root fans out to four children.
    assert len(tree.dict_all[tree.ROOT].children) == 4
    # 'd' hangs off 'c' at distance 2.
    assert tree.dict_all['c'].children['d'] == 2


def test_search():
    """A tolerance-2 search for '5' returns five retrievals."""
    hashes, dist_func = initialize_for_bktree()
    tree = BKTree(hashes, dist_func)
    matches = tree.search('5', tol=2)
    assert len(matches) == 5


def test_search_correctness():
    """A tolerance-2 search for '5' retrieves exactly the expected nodes."""
    hashes, dist_func = initialize_for_bktree()
    tree = BKTree(hashes, dist_func)
    matches = tree.search('5', tol=2)
    assert {name for name, _ in matches} == {'a', 'f', 'g', 'd', 'b'}


def test_search_zero_tolerance():
    """Zero tolerance yields no retrievals for this query."""
    hashes, dist_func = initialize_for_bktree()
    tree = BKTree(hashes, dist_func)
    assert not tree.search('5', tol=0)


def test_search_dist():
    """The retrieval for root 'a' is reported at distance 2 from the query."""
    hashes, dist_func = initialize_for_bktree()
    tree = BKTree(hashes, dist_func)
    matches = tree.search('5', tol=2)
    distance_of_a = next(dist for name, dist in matches if name == 'a')
    assert distance_of_a == 2


def test_get_next_candidates_valid():
    """Expanding the root for query '5' yields plausible candidates and validity."""
    hashes, dist_func = initialize_for_bktree()
    tree = BKTree(hashes, dist_func)
    assert tree.ROOT == 'a'
    candidates, validity, _ = tree._get_next_candidates(
        '5', tree.dict_all[tree.ROOT], tolerance=2
    )
    assert set(candidates) <= {'b', 'c', 'e', 'f'}
    assert validity


def test_get_next_candidates_invalid():
    """With tolerance 1 the root itself is not a valid retrieval for query '5'."""
    hashes, dist_func = initialize_for_bktree()
    tree = BKTree(hashes, dist_func)
    assert tree.ROOT == 'a'
    _, validity, _ = tree._get_next_candidates(
        '5', tree.dict_all[tree.ROOT], tolerance=1
    )
    assert not validity


def test_tolerance_affects_retrievals():
    """Raising the tolerance widens the candidate set returned from the root."""
    hashes, dist_func = initialize_for_bktree()
    tree = BKTree(hashes, dist_func)
    assert tree.ROOT == 'a'
    root_node = tree.dict_all[tree.ROOT]
    narrow, _, _ = tree._get_next_candidates('5', root_node, tolerance=1)
    wide, _, _ = tree._get_next_candidates('5', root_node, tolerance=2)
    assert len(wide) > len(narrow)
|
en
| 0.932692
|
# Test BkTreeNode # test BKTree class # initialize root node and add 1 new node, check it goes as root's child and has it's parent as root # initialize root node, add 1 new node and enter another node with same distance from root, check it goes not as # root's child but the other node's child # to guarantee that 'a' is the root of the tree # initialize root node, add 1 new node and enter another node with different distance from root, check it goes as # root's child and not as the other node's child # to guarantee that 'a' is the root of the tree # initialize root node, add 1 new node and enter another node with different distance from root, check that the # distance recorded in the root's children dictionary is as expected # to guarantee that 'a' is the root of the tree # Input a complete tree and check for each node the children and parents # check root # check that expected leaf nodes have no children (they're actually leaf nodes) # check that root node ('a') has 4 children # check that 'c' has 'd' as it's child at distance 2 # Input a tree and send a search query, check whether correct number of retrievals are returned # Input a tree and send a search query, check whether correct retrievals are returned # Input a tree and send a search query, check whether zero retrievals are returned for zero tolerance # Input a tree and send a search query, check whether correct distance for a retrieval is returned # Give a partial tree as input and check that for a query, expected candidates and validity flag are obtained # Give a tree as input and check that for a query, validity flag is 0 # Give a partial tree as input and check that for a query, increased tolerance gives more retrievals as expected for # the input tree
| 3.255184
| 3
|
src/bandersnatch_storage_plugins/filesystem.py
|
turinggirl/bandersnatch
| 0
|
6627681
|
<filename>src/bandersnatch_storage_plugins/filesystem.py
import contextlib
import filecmp
import hashlib
import logging
import os
import pathlib
import shutil
import tempfile
from typing import IO, Any, Dict, Generator, List, Optional, Type, Union
import filelock
from bandersnatch.storage import PATH_TYPES, StoragePlugin
logger = logging.getLogger("bandersnatch")
class FilesystemStorage(StoragePlugin):
    """Bandersnatch storage plugin that reads and writes the local filesystem."""

    name = "filesystem"
    PATH_BACKEND: Type[pathlib.Path] = pathlib.Path

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def get_lock(self, path: Optional[str] = None) -> filelock.FileLock:
        """
        Retrieve the appropriate `FileLock` backend for this storage plugin

        :param str path: The path to use for locking; defaults to the mirror's
            flock path.
        :return: A `FileLock` backend for obtaining locks
        :rtype: filelock.FileLock
        """
        if path is None:
            path = self.mirror_base_path.joinpath(self.flock_path).as_posix()
        logger.debug(f"Retrieving FileLock instance @ {path}")
        return filelock.FileLock(path)

    def walk(self, root: PATH_TYPES, dirs: bool = True) -> List[pathlib.Path]:
        """Recursively list every path under **root**.

        :param root: Directory to walk.
        :param dirs: When True, include directories themselves in the result.
        :return: All paths found below ``root``.
        """
        if not isinstance(root, pathlib.Path):
            root = pathlib.Path(str(root))
        results: List[pathlib.Path] = []
        for pth in root.iterdir():
            if pth.is_dir():
                if dirs:
                    results.append(pth)
                # ``iterdir`` already yields paths prefixed with their parent,
                # so append recursive results as-is.  (The previous
                # ``pth / subpath`` join only worked for absolute roots, where
                # joining with an absolute path discards the left operand.)
                for subpath in self.walk(pth, dirs=dirs):
                    results.append(subpath)
            else:
                results.append(pth)
        return results

    def find(self, root: PATH_TYPES, dirs: bool = True) -> str:
        """A test helper simulating 'find'.

        Iterates over directories and filenames, given as relative paths to the
        root.
        """
        results = self.walk(root, dirs=dirs)
        results.sort()
        return "\n".join(str(result.relative_to(root)) for result in results)

    @contextlib.contextmanager
    def rewrite(
        self, filepath: PATH_TYPES, mode: str = "w", **kw: Any
    ) -> Generator[IO, None, None]:
        """Rewrite an existing file atomically to avoid programs running in
        parallel to have race conditions while reading.

        :param filepath: Destination file to (re)write.
        :param mode: Mode the temporary file is opened with.
        """
        # TODO: Account for alternative backends
        if isinstance(filepath, str):
            base_dir = os.path.dirname(filepath)
            filename = os.path.basename(filepath)
        else:
            base_dir = str(filepath.parent)
            filename = filepath.name
        # Change naming format to be more friendly with distributed POSIX
        # filesystems like GlusterFS that hash based on filename
        # GlusterFS ignore '.' at the start of filenames and this avoid rehashing
        # (Bug fix: the prefix previously did not interpolate ``filename``,
        # leaving the computed basename unused.)
        with tempfile.NamedTemporaryFile(
            mode=mode, prefix=f".{filename}.", delete=False, dir=base_dir, **kw
        ) as f:
            filepath_tmp = f.name
            yield f
        if not self.exists(filepath_tmp):
            # Allow our clients to remove the file in case it doesn't want it to be
            # put in place actually but also doesn't want to error out.
            return
        os.chmod(filepath_tmp, 0o100644)
        logger.debug(
            f"Writing temporary file {filepath_tmp} to target destination: {filepath!s}"
        )
        self.copy_file(filepath_tmp, filepath)
        logger.debug(f"Deleting temporary file: {filepath_tmp}")
        self.delete_file(filepath_tmp)

    @contextlib.contextmanager
    def update_safe(self, filename: str, **kw: Any) -> Generator[IO, None, None]:
        """Rewrite a file atomically.

        Clients are allowed to delete the tmpfile to signal that they don't
        want to have it updated.
        """
        with tempfile.NamedTemporaryFile(
            dir=os.path.dirname(filename),
            delete=False,
            prefix=f"{os.path.basename(filename)}.",
            **kw,
        ) as tf:
            if self.exists(filename):
                # Carry over the permission bits of the file being replaced.
                os.chmod(tf.name, os.stat(filename).st_mode & 0o7777)
            tf.has_changed = False  # type: ignore
            yield tf
        if not self.exists(tf.name):
            return
        filename_tmp = tf.name
        if self.exists(filename) and self.compare_files(filename, filename_tmp):
            logger.debug(f"File not changed...deleting temporary file: {filename_tmp}")
            os.unlink(filename_tmp)
        else:
            logger.debug(f"Modifying destination: {filename!s} with: {filename_tmp}")
            self.copy_file(filename_tmp, filename)
            logger.debug(f"Deleting temporary file: {filename_tmp}")
            self.delete_file(filename_tmp)

    def compare_files(self, file1: PATH_TYPES, file2: PATH_TYPES) -> bool:
        """Compare two files, returning true if they are the same and False if not."""
        return filecmp.cmp(str(file1), str(file2), shallow=False)

    def copy_file(self, source: PATH_TYPES, dest: PATH_TYPES) -> None:
        """Copy a file from **source** to **dest**

        :raises FileNotFoundError: If **source** does not exist.
        """
        if not self.exists(source):
            raise FileNotFoundError(source)
        shutil.copy(source, dest)
        return

    def write_file(self, path: PATH_TYPES, contents: Union[str, bytes]) -> None:
        """Write data to the provided path.  A ``str`` is written as text and
        ``bytes`` are written in binary mode."""
        if not isinstance(path, pathlib.Path):
            path = pathlib.Path(path)
        if isinstance(contents, str):
            path.write_text(contents)
        else:
            path.write_bytes(contents)

    @contextlib.contextmanager
    def open_file(  # noqa
        self,
        path: PATH_TYPES,
        text: bool = True,
        encoding: str = "utf-8",
        errors: Optional[str] = None,
    ) -> Generator[IO, None, None]:
        """Yield a file context to iterate over.  If **text** is True the file
        is opened in text mode ("r"), otherwise in binary mode ("rb")."""
        mode = "r" if text else "rb"
        kwargs: Dict[str, str] = {}
        if text:
            kwargs["encoding"] = encoding
            if errors is not None:
                kwargs["errors"] = errors
        with open(path, mode=mode, **kwargs) as fh:  # type: ignore
            yield fh

    def read_file(
        self,
        path: PATH_TYPES,
        text: bool = True,
        encoding: str = "utf-8",
        errors: Optional[str] = None,
    ) -> Union[str, bytes]:
        """Return the contents of the requested file, either as a bytestring or a
        unicode string depending on whether **text** is True.  ``errors`` is
        forwarded to ``open`` in text mode (it was previously ignored)."""
        with self.open_file(path, text=text, encoding=encoding, errors=errors) as fh:
            contents = fh.read()
        return contents

    def delete_file(self, path: PATH_TYPES, dry_run: bool = False) -> int:
        """Delete the provided path.

        :param dry_run: When True only log the action without deleting.
        :return: 0 on success.
        """
        if not isinstance(path, pathlib.Path):
            path = pathlib.Path(path)
        log_prefix = "[DRY RUN] " if dry_run else ""
        logger.info(f"{log_prefix}Removing file: {path!s}")
        if not dry_run:
            path.unlink()
        return 0

    def mkdir(
        self, path: PATH_TYPES, exist_ok: bool = False, parents: bool = False
    ) -> None:
        """Create the provided directory"""
        if not isinstance(path, pathlib.Path):
            path = pathlib.Path(path)
        return path.mkdir(exist_ok=exist_ok, parents=parents)

    def rmdir(
        self,
        path: PATH_TYPES,
        recurse: bool = False,
        force: bool = False,
        ignore_errors: bool = False,
        dry_run: bool = False,
    ) -> int:
        """Remove the directory. If recurse is True, allow removing empty children.
        If force is true, remove contents destructively."""
        if not isinstance(path, pathlib.Path):
            path = pathlib.Path(path)
        log_prefix = "[DRY RUN] " if dry_run else ""
        if force:
            logger.info(f"{log_prefix}Forcing removal of files under {path!s}")
            if not dry_run:
                shutil.rmtree(path, ignore_errors=ignore_errors)
            return 0
        if recurse:
            for subdir in path.iterdir():
                if not subdir.is_dir():
                    continue
                logger.info(f"{log_prefix}Removing directory: {subdir!s}")
                if not dry_run:
                    rc = self.rmdir(
                        subdir,
                        recurse=recurse,
                        force=force,
                        ignore_errors=ignore_errors,
                    )
                    if rc != 0:
                        return rc
        logger.info(f"{log_prefix}Removing directory: {path!s}")
        if not dry_run:
            path.rmdir()
        return 0

    def exists(self, path: PATH_TYPES) -> bool:
        """Check whether the provided path exists"""
        if not isinstance(path, pathlib.Path):
            path = pathlib.Path(path)
        return path.exists()

    def is_dir(self, path: PATH_TYPES) -> bool:
        """Check whether the provided path is a directory."""
        if not isinstance(path, pathlib.Path):
            path = pathlib.Path(path)
        return path.is_dir()

    def is_file(self, path: PATH_TYPES) -> bool:
        """Check whether the provided path is a file."""
        if not isinstance(path, pathlib.Path):
            path = pathlib.Path(path)
        return path.is_file()

    def get_hash(self, path: PATH_TYPES, function: str = "sha256") -> str:
        """Hash the contents of **path** with the named ``hashlib`` algorithm.

        Reads in 128 KB chunks so large files are never loaded whole.  (The
        previous implementation debug-logged ``path.read_bytes()`` — reading the
        entire file into memory — and recomputed the digest after storing it.)
        """
        h = getattr(hashlib, function)()
        if not isinstance(path, pathlib.Path):
            path = pathlib.Path(path)
        logger.debug(
            f"Opening {path.as_posix()} in binary mode for hash calculation..."
        )
        with open(path.absolute().as_posix(), "rb") as f:
            for chunk in iter(lambda: f.read(128 * 1024), b""):
                h.update(chunk)
        digest = h.hexdigest()
        logger.debug(f"Calculated digest: {digest!s}")
        return digest
|
<filename>src/bandersnatch_storage_plugins/filesystem.py
import contextlib
import filecmp
import hashlib
import logging
import os
import pathlib
import shutil
import tempfile
from typing import IO, Any, Dict, Generator, List, Optional, Type, Union
import filelock
from bandersnatch.storage import PATH_TYPES, StoragePlugin
logger = logging.getLogger("bandersnatch")
class FilesystemStorage(StoragePlugin):
    """Bandersnatch storage plugin that reads and writes the local filesystem."""

    name = "filesystem"
    PATH_BACKEND: Type[pathlib.Path] = pathlib.Path

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def get_lock(self, path: Optional[str] = None) -> filelock.FileLock:
        """
        Retrieve the appropriate `FileLock` backend for this storage plugin

        :param str path: The path to use for locking; defaults to the mirror's
            flock path.
        :return: A `FileLock` backend for obtaining locks
        :rtype: filelock.FileLock
        """
        if path is None:
            path = self.mirror_base_path.joinpath(self.flock_path).as_posix()
        logger.debug(f"Retrieving FileLock instance @ {path}")
        return filelock.FileLock(path)

    def walk(self, root: PATH_TYPES, dirs: bool = True) -> List[pathlib.Path]:
        """Recursively list every path under **root**.

        :param root: Directory to walk.
        :param dirs: When True, include directories themselves in the result.
        :return: All paths found below ``root``.
        """
        if not isinstance(root, pathlib.Path):
            root = pathlib.Path(str(root))
        results: List[pathlib.Path] = []
        for pth in root.iterdir():
            if pth.is_dir():
                if dirs:
                    results.append(pth)
                # ``iterdir`` already yields paths prefixed with their parent,
                # so append recursive results as-is.  (The previous
                # ``pth / subpath`` join only worked for absolute roots, where
                # joining with an absolute path discards the left operand.)
                for subpath in self.walk(pth, dirs=dirs):
                    results.append(subpath)
            else:
                results.append(pth)
        return results

    def find(self, root: PATH_TYPES, dirs: bool = True) -> str:
        """A test helper simulating 'find'.

        Iterates over directories and filenames, given as relative paths to the
        root.
        """
        results = self.walk(root, dirs=dirs)
        results.sort()
        return "\n".join(str(result.relative_to(root)) for result in results)

    @contextlib.contextmanager
    def rewrite(
        self, filepath: PATH_TYPES, mode: str = "w", **kw: Any
    ) -> Generator[IO, None, None]:
        """Rewrite an existing file atomically to avoid programs running in
        parallel to have race conditions while reading.

        :param filepath: Destination file to (re)write.
        :param mode: Mode the temporary file is opened with.
        """
        # TODO: Account for alternative backends
        if isinstance(filepath, str):
            base_dir = os.path.dirname(filepath)
            filename = os.path.basename(filepath)
        else:
            base_dir = str(filepath.parent)
            filename = filepath.name
        # Change naming format to be more friendly with distributed POSIX
        # filesystems like GlusterFS that hash based on filename
        # GlusterFS ignore '.' at the start of filenames and this avoid rehashing
        # (Bug fix: the prefix previously did not interpolate ``filename``,
        # leaving the computed basename unused.)
        with tempfile.NamedTemporaryFile(
            mode=mode, prefix=f".{filename}.", delete=False, dir=base_dir, **kw
        ) as f:
            filepath_tmp = f.name
            yield f
        if not self.exists(filepath_tmp):
            # Allow our clients to remove the file in case it doesn't want it to be
            # put in place actually but also doesn't want to error out.
            return
        os.chmod(filepath_tmp, 0o100644)
        logger.debug(
            f"Writing temporary file {filepath_tmp} to target destination: {filepath!s}"
        )
        self.copy_file(filepath_tmp, filepath)
        logger.debug(f"Deleting temporary file: {filepath_tmp}")
        self.delete_file(filepath_tmp)

    @contextlib.contextmanager
    def update_safe(self, filename: str, **kw: Any) -> Generator[IO, None, None]:
        """Rewrite a file atomically.

        Clients are allowed to delete the tmpfile to signal that they don't
        want to have it updated.
        """
        with tempfile.NamedTemporaryFile(
            dir=os.path.dirname(filename),
            delete=False,
            prefix=f"{os.path.basename(filename)}.",
            **kw,
        ) as tf:
            if self.exists(filename):
                # Carry over the permission bits of the file being replaced.
                os.chmod(tf.name, os.stat(filename).st_mode & 0o7777)
            tf.has_changed = False  # type: ignore
            yield tf
        if not self.exists(tf.name):
            return
        filename_tmp = tf.name
        if self.exists(filename) and self.compare_files(filename, filename_tmp):
            logger.debug(f"File not changed...deleting temporary file: {filename_tmp}")
            os.unlink(filename_tmp)
        else:
            logger.debug(f"Modifying destination: {filename!s} with: {filename_tmp}")
            self.copy_file(filename_tmp, filename)
            logger.debug(f"Deleting temporary file: {filename_tmp}")
            self.delete_file(filename_tmp)

    def compare_files(self, file1: PATH_TYPES, file2: PATH_TYPES) -> bool:
        """Compare two files, returning true if they are the same and False if not."""
        return filecmp.cmp(str(file1), str(file2), shallow=False)

    def copy_file(self, source: PATH_TYPES, dest: PATH_TYPES) -> None:
        """Copy a file from **source** to **dest**

        :raises FileNotFoundError: If **source** does not exist.
        """
        if not self.exists(source):
            raise FileNotFoundError(source)
        shutil.copy(source, dest)
        return

    def write_file(self, path: PATH_TYPES, contents: Union[str, bytes]) -> None:
        """Write data to the provided path.  A ``str`` is written as text and
        ``bytes`` are written in binary mode."""
        if not isinstance(path, pathlib.Path):
            path = pathlib.Path(path)
        if isinstance(contents, str):
            path.write_text(contents)
        else:
            path.write_bytes(contents)

    @contextlib.contextmanager
    def open_file(  # noqa
        self,
        path: PATH_TYPES,
        text: bool = True,
        encoding: str = "utf-8",
        errors: Optional[str] = None,
    ) -> Generator[IO, None, None]:
        """Yield a file context to iterate over.  If **text** is True the file
        is opened in text mode ("r"), otherwise in binary mode ("rb")."""
        mode = "r" if text else "rb"
        kwargs: Dict[str, str] = {}
        if text:
            kwargs["encoding"] = encoding
            if errors is not None:
                kwargs["errors"] = errors
        with open(path, mode=mode, **kwargs) as fh:  # type: ignore
            yield fh

    def read_file(
        self,
        path: PATH_TYPES,
        text: bool = True,
        encoding: str = "utf-8",
        errors: Optional[str] = None,
    ) -> Union[str, bytes]:
        """Return the contents of the requested file, either as a bytestring or a
        unicode string depending on whether **text** is True.  ``errors`` is
        forwarded to ``open`` in text mode (it was previously ignored)."""
        with self.open_file(path, text=text, encoding=encoding, errors=errors) as fh:
            contents = fh.read()
        return contents

    def delete_file(self, path: PATH_TYPES, dry_run: bool = False) -> int:
        """Delete the provided path.

        :param dry_run: When True only log the action without deleting.
        :return: 0 on success.
        """
        if not isinstance(path, pathlib.Path):
            path = pathlib.Path(path)
        log_prefix = "[DRY RUN] " if dry_run else ""
        logger.info(f"{log_prefix}Removing file: {path!s}")
        if not dry_run:
            path.unlink()
        return 0

    def mkdir(
        self, path: PATH_TYPES, exist_ok: bool = False, parents: bool = False
    ) -> None:
        """Create the provided directory"""
        if not isinstance(path, pathlib.Path):
            path = pathlib.Path(path)
        return path.mkdir(exist_ok=exist_ok, parents=parents)

    def rmdir(
        self,
        path: PATH_TYPES,
        recurse: bool = False,
        force: bool = False,
        ignore_errors: bool = False,
        dry_run: bool = False,
    ) -> int:
        """Remove the directory. If recurse is True, allow removing empty children.
        If force is true, remove contents destructively."""
        if not isinstance(path, pathlib.Path):
            path = pathlib.Path(path)
        log_prefix = "[DRY RUN] " if dry_run else ""
        if force:
            logger.info(f"{log_prefix}Forcing removal of files under {path!s}")
            if not dry_run:
                shutil.rmtree(path, ignore_errors=ignore_errors)
            return 0
        if recurse:
            for subdir in path.iterdir():
                if not subdir.is_dir():
                    continue
                logger.info(f"{log_prefix}Removing directory: {subdir!s}")
                if not dry_run:
                    rc = self.rmdir(
                        subdir,
                        recurse=recurse,
                        force=force,
                        ignore_errors=ignore_errors,
                    )
                    if rc != 0:
                        return rc
        logger.info(f"{log_prefix}Removing directory: {path!s}")
        if not dry_run:
            path.rmdir()
        return 0

    def exists(self, path: PATH_TYPES) -> bool:
        """Check whether the provided path exists"""
        if not isinstance(path, pathlib.Path):
            path = pathlib.Path(path)
        return path.exists()

    def is_dir(self, path: PATH_TYPES) -> bool:
        """Check whether the provided path is a directory."""
        if not isinstance(path, pathlib.Path):
            path = pathlib.Path(path)
        return path.is_dir()

    def is_file(self, path: PATH_TYPES) -> bool:
        """Check whether the provided path is a file."""
        if not isinstance(path, pathlib.Path):
            path = pathlib.Path(path)
        return path.is_file()

    def get_hash(self, path: PATH_TYPES, function: str = "sha256") -> str:
        """Hash the contents of **path** with the named ``hashlib`` algorithm.

        Reads in 128 KB chunks so large files are never loaded whole.  (The
        previous implementation debug-logged ``path.read_bytes()`` — reading the
        entire file into memory — and recomputed the digest after storing it.)
        """
        h = getattr(hashlib, function)()
        if not isinstance(path, pathlib.Path):
            path = pathlib.Path(path)
        logger.debug(
            f"Opening {path.as_posix()} in binary mode for hash calculation..."
        )
        with open(path.absolute().as_posix(), "rb") as f:
            for chunk in iter(lambda: f.read(128 * 1024), b""):
                h.update(chunk)
        digest = h.hexdigest()
        logger.debug(f"Calculated digest: {digest!s}")
        return digest
|
en
| 0.857198
|
Retrieve the appropriate `FileLock` backend for this storage plugin :param str path: The path to use for locking :return: A `FileLock` backend for obtaining locks :rtype: SwiftFileLock A test helper simulating 'find'. Iterates over directories and filenames, given as relative paths to the root. Rewrite an existing file atomically to avoid programs running in parallel to have race conditions while reading. # TODO: Account for alternative backends # Change naming format to be more friendly with distributed POSIX # filesystems like GlusterFS that hash based on filename # GlusterFS ignore '.' at the start of filenames and this avoid rehashing # Allow our clients to remove the file in case it doesn't want it to be # put in place actually but also doesn't want to error out. Rewrite a file atomically. Clients are allowed to delete the tmpfile to signal that they don't want to have it updated. # type: ignore Compare two files, returning true if they are the same and False if not. Copy a file from **source** to **dest** Write data to the provided path. If **contents** is a string, the file will be opened and written in "r" + "utf-8" mode, if bytes are supplied it will be accessed using "rb" mode (i.e. binary write). # noqa Yield a file context to iterate over. If text is true, open the file with 'rb' mode specified. # type: ignore Return the contents of the requested file, either a a bytestring or a unicode string depending on whether **text** is True Delete the provided path, recursively if necessary. Create the provided directory Remove the directory. If recurse is True, allow removing empty children. If force is true, remove contents destructively. Check whether the provided path exists Check whether the provided path is a directory. Check whether the provided path is a file.
| 2.392945
| 2
|
Problemset/qing-wa-tiao-tai-jie-wen-ti-lcof/qing-wa-tiao-tai-jie-wen-ti-lcof.py
|
worldwonderer/algorithm
| 1
|
6627682
|
# @Title: 青蛙跳台阶问题 (青蛙跳台阶问题 LCOF)
# @Author: 18015528893
# @Date: 2020-09-28 21:53:14
# @Runtime: 36 ms
# @Memory: 13.1 MB
class Solution:
    def numWays(self, n: int) -> int:
        """Count the ways a frog can climb ``n`` steps taking 1 or 2 at a time.

        This is Fibonacci: ways(n) = ways(n-1) + ways(n-2), with ways(0) = 1.
        The result is reported modulo 1000000007 as the problem requires.

        :param n: Number of steps (n >= 0).
        :return: Number of distinct climbs, mod 1000000007.
        """
        MOD = 1000000007
        # a = ways(i), b = ways(i + 1); reduce every step so intermediate
        # values stay bounded instead of growing into huge integers.
        a, b = 1, 1
        for _ in range(n):
            a, b = b, (a + b) % MOD
        return a % MOD
|
# @Title: 青蛙跳台阶问题 (青蛙跳台阶问题 LCOF)
# @Author: 18015528893
# @Date: 2020-09-28 21:53:14
# @Runtime: 36 ms
# @Memory: 13.1 MB
class Solution:
    def numWays(self, n: int) -> int:
        """Count the ways a frog can climb ``n`` steps taking 1 or 2 at a time.

        This is Fibonacci: ways(n) = ways(n-1) + ways(n-2), with ways(0) = 1.
        The result is reported modulo 1000000007 as the problem requires.

        :param n: Number of steps (n >= 0).
        :return: Number of distinct climbs, mod 1000000007.
        """
        MOD = 1000000007
        # a = ways(i), b = ways(i + 1); reduce every step so intermediate
        # values stay bounded instead of growing into huge integers.
        a, b = 1, 1
        for _ in range(n):
            a, b = b, (a + b) % MOD
        return a % MOD
|
en
| 0.19918
|
# @Title: 青蛙跳台阶问题 (青蛙跳台阶问题 LCOF) # @Author: 18015528893 # @Date: 2020-09-28 21:53:14 # @Runtime: 36 ms # @Memory: 13.1 MB
| 2.908117
| 3
|
tests/test_minifier.py
|
chinghwayu/mimesis
| 2,619
|
6627683
|
<filename>tests/test_minifier.py<gh_stars>1000+
import pytest
from minifier import Minimizer, human_repr
def test_human_repr():
    """human_repr formats byte counts in B, KB and MB."""
    cases = [
        (0, "0.0B"),
        (1, "1.0B"),
        (100.1, "100.1B"),
        (1024, "1.0KB"),
        (1024 * 100.1, "100.1KB"),
        (1024 ** 2, "1.0MB"),
        (1024 ** 2 * 100.1, "100.1MB"),
    ]
    for raw, pretty in cases:
        assert human_repr(raw) == pretty


def test_human_repr_cant_handle_gigabytes():
    """Known limitation: a gigabyte-sized value falls off the unit table."""
    assert human_repr(1024 ** 3) == "1.0"


def test_minimizer_minifies_file(tmp_path):
    """Running the minimizer rewrites a pretty-printed JSON file compactly."""
    target = tmp_path / "spam.json"
    target.write_text('{\n "spam": [\n "eggs"\n ]\n}')
    Minimizer(files=(target,)).run()
    assert target.read_text() == '{"spam":["eggs"]}'


@pytest.fixture
def disable_colorama_codes(monkeypatch):
    """Blank out colorama escape codes so captured output is plain text."""
    from colorama import Fore, Style

    monkeypatch.setattr(Style, "RESET_ALL", "")
    for attr in vars(Fore):
        if attr == attr.upper():
            monkeypatch.setattr(Fore, attr, "")


@pytest.mark.usefixtures(disable_colorama_codes.__name__)
def test_minimizer_reports_to_stdout(capsys, tmp_path):
    """The minimizer prints a per-file line and a total-summary line."""
    target = tmp_path / "spam.json"
    target.write_text("{\n}")
    Minimizer(files=(target,)).run()
    out_lines = capsys.readouterr().out.split("\n")
    assert out_lines[0].strip().endswith("3.0B -> 2.0B")
    assert out_lines[2] == "Total: 3.0B -> 2.0B. Compressed: 1.0B"
|
<filename>tests/test_minifier.py<gh_stars>1000+
import pytest
from minifier import Minimizer, human_repr
def test_human_repr():
    """human_repr formats byte counts in B, KB and MB."""
    cases = [
        (0, "0.0B"),
        (1, "1.0B"),
        (100.1, "100.1B"),
        (1024, "1.0KB"),
        (1024 * 100.1, "100.1KB"),
        (1024 ** 2, "1.0MB"),
        (1024 ** 2 * 100.1, "100.1MB"),
    ]
    for raw, pretty in cases:
        assert human_repr(raw) == pretty


def test_human_repr_cant_handle_gigabytes():
    """Known limitation: a gigabyte-sized value falls off the unit table."""
    assert human_repr(1024 ** 3) == "1.0"


def test_minimizer_minifies_file(tmp_path):
    """Running the minimizer rewrites a pretty-printed JSON file compactly."""
    target = tmp_path / "spam.json"
    target.write_text('{\n "spam": [\n "eggs"\n ]\n}')
    Minimizer(files=(target,)).run()
    assert target.read_text() == '{"spam":["eggs"]}'


@pytest.fixture
def disable_colorama_codes(monkeypatch):
    """Blank out colorama escape codes so captured output is plain text."""
    from colorama import Fore, Style

    monkeypatch.setattr(Style, "RESET_ALL", "")
    for attr in vars(Fore):
        if attr == attr.upper():
            monkeypatch.setattr(Fore, attr, "")


@pytest.mark.usefixtures(disable_colorama_codes.__name__)
def test_minimizer_reports_to_stdout(capsys, tmp_path):
    """The minimizer prints a per-file line and a total-summary line."""
    target = tmp_path / "spam.json"
    target.write_text("{\n}")
    Minimizer(files=(target,)).run()
    out_lines = capsys.readouterr().out.split("\n")
    assert out_lines[0].strip().endswith("3.0B -> 2.0B")
    assert out_lines[2] == "Total: 3.0B -> 2.0B. Compressed: 1.0B"
|
none
| 1
| 2.277275
| 2
|
|
ML/datasets/reviewsData/linecounter.py
|
JamesG3/Review-Based_Amazon_SearchEngine
| 0
|
6627684
|
<gh_stars>0
# NOTE: Python 2 script (print statements, integer division).
# Debug helper: dump the first ~100 lines of the reviews dataset to stdout.
file = open("item_dedup.json",'r')
counter = 0
for line in file:
    print line
    # Stop after roughly 100 lines so a huge file isn't fully printed.
    if counter == 100:
        break
    counter += 1
    # Progress heartbeat (one line per million records) for full-file scans;
    # unreachable while the `break` at 100 lines above is in place.
    if counter%1000000 == 0:
        print counter/1000000
# Final count of lines processed before the break.
print counter
file.close()
|
# NOTE: Python 2 script (print statements, integer division).
# Debug helper: dump the first ~100 lines of the reviews dataset to stdout.
file = open("item_dedup.json",'r')
counter = 0
for line in file:
    print line
    # Stop after roughly 100 lines so a huge file isn't fully printed.
    if counter == 100:
        break
    counter += 1
    # Progress heartbeat (one line per million records) for full-file scans;
    # unreachable while the `break` at 100 lines above is in place.
    if counter%1000000 == 0:
        print counter/1000000
# Final count of lines processed before the break.
print counter
file.close()
|
none
| 1
| 2.706863
| 3
|
|
main-node/model/model_abs.py
|
Valavanca/benchmark
| 0
|
6627685
|
from abc import ABC, abstractmethod
class Model(ABC):
    """Abstract interface that every model implementation must satisfy."""

    @abstractmethod
    def build_model(self):
        """Construct the underlying model."""

    @abstractmethod
    def validate_model(self):
        """Check that the built model is usable."""

    @abstractmethod
    def predict_solution(self):
        """Produce a candidate solution from the model."""

    @abstractmethod
    def validate_solution(self):
        """Check the predicted solution."""

    @abstractmethod
    def get_result(self):
        """Return the final result."""
|
from abc import ABC, abstractmethod
class Model(ABC):
@abstractmethod
def build_model(self): pass
@abstractmethod
def validate_model(self): pass
@abstractmethod
def predict_solution(self): pass
@abstractmethod
def validate_solution(self): pass
@abstractmethod
def get_result(self): pass
|
none
| 1
| 2.998837
| 3
|
|
automateYT/__init__.py
|
umutambyi-gad/automateYT
| 2
|
6627686
|
"""
automateYT is lightweight library for automating to download youtube videos, subtitles (if available) and playlist.
"""
__title__ = "automateYT"
__author__ = "<NAME>"
__copyright__ = "Copyright 2021 by <NAME>"
__license__ = 'MIT'
__version__ = '1.0.0'
__all__ = ['Timing', 'Automate']
from automateYT.__main__ import Automate
from automateYT.__main__ import Timing
|
"""
automateYT is lightweight library for automating to download youtube videos, subtitles (if available) and playlist.
"""
__title__ = "automateYT"
__author__ = "<NAME>"
__copyright__ = "Copyright 2021 by <NAME>"
__license__ = 'MIT'
__version__ = '1.0.0'
__all__ = ['Timing', 'Automate']
from automateYT.__main__ import Automate
from automateYT.__main__ import Timing
|
en
| 0.725917
|
automateYT is lightweight library for automating to download youtube videos, subtitles (if available) and playlist.
| 1.200423
| 1
|
problem0176.py
|
kmarcini/Project-Euler-Python
| 0
|
6627687
|
<gh_stars>0
###########################
#
# #176 Right-angled triangles that share a cathetus - Project Euler
# https://projecteuler.net/problem=176
#
# Code by <NAME>
#
###########################
|
###########################
#
# #176 Right-angled triangles that share a cathetus - Project Euler
# https://projecteuler.net/problem=176
#
# Code by <NAME>
#
###########################
|
de
| 0.287971
|
########################### # # #176 Right-angled triangles that share a cathetus - Project Euler # https://projecteuler.net/problem=176 # # Code by <NAME> # ###########################
| 1.591863
| 2
|
Assets/ScriptsExternos/flana_math.py
|
AlberLC/rocket-training
| 0
|
6627688
|
import math
class Vector2:
def __init__(self, x, y):
self.x = x
self.y = y
def __mul__(self, other) -> (float, 'Vector2'):
if other is None:
other = Vector2(1, 0)
if type(other) is Vector2:
return self.x * other.x + self.y * other.y
else:
return Vector2(self.x * other, self.y * other)
def __rmul__(self, other) -> (float, 'Vector2'):
return self * other
def __str__(self):
return f'({self.x}, {self.y})'
def __sub__(self, other) -> 'Vector2':
return Vector2(self.x - other.x, self.y - other.y)
@property
def inverse(self) -> 'Vector2':
return self * -1
@property
def module(self) -> float:
return math.sqrt(self.x ** 2 + self.y ** 2)
@property
def normalized(self) -> 'Vector2':
module = self.module
return Vector2(self.x / module, self.y / module)
def angle_deg_with(self, other: 'Vector2' = None) -> float:
return math.degrees(self.angle_rad_with(other))
def angle_rad_with(self, other: 'Vector2' = None) -> float:
if other is None:
other = Vector2(1, 0)
angle = math.acos(self * other / (self.module * other.module))
if self.y < 0:
angle = 2 * math.pi - angle
return angle
def direccion_a(self, other) -> 'Vector2':
return Vector2(other.x - self.x, other.y - self.y)
def distancia_a(self, other) -> float:
return (other - self).module
def rotate_deg(self, angle):
try:
rads = math.radians(angle)
module = self.module
x = self.x / module
y = self.y / module
except ZeroDivisionError:
return Vector2(0, 0)
return Vector2(x * math.cos(rads) - y * math.sin(rads), x * math.sin(rads) + y * math.cos(rads)) * 20
def rotation_deg(self) -> float:
return math.degrees(self.rotation_rad())
def rotation_rad(self) -> float:
rotation = self.angle_rad_with()
return rotation
def validate_angle_deg_with(self, other: 'Vector2', max_angle) -> bool:
angle = self.angle_deg_with(other)
return angle <= max_angle or 360 - angle <= max_angle
|
import math
class Vector2:
def __init__(self, x, y):
self.x = x
self.y = y
def __mul__(self, other) -> (float, 'Vector2'):
if other is None:
other = Vector2(1, 0)
if type(other) is Vector2:
return self.x * other.x + self.y * other.y
else:
return Vector2(self.x * other, self.y * other)
def __rmul__(self, other) -> (float, 'Vector2'):
return self * other
def __str__(self):
return f'({self.x}, {self.y})'
def __sub__(self, other) -> 'Vector2':
return Vector2(self.x - other.x, self.y - other.y)
@property
def inverse(self) -> 'Vector2':
return self * -1
@property
def module(self) -> float:
return math.sqrt(self.x ** 2 + self.y ** 2)
@property
def normalized(self) -> 'Vector2':
module = self.module
return Vector2(self.x / module, self.y / module)
def angle_deg_with(self, other: 'Vector2' = None) -> float:
return math.degrees(self.angle_rad_with(other))
def angle_rad_with(self, other: 'Vector2' = None) -> float:
if other is None:
other = Vector2(1, 0)
angle = math.acos(self * other / (self.module * other.module))
if self.y < 0:
angle = 2 * math.pi - angle
return angle
def direccion_a(self, other) -> 'Vector2':
return Vector2(other.x - self.x, other.y - self.y)
def distancia_a(self, other) -> float:
return (other - self).module
def rotate_deg(self, angle):
try:
rads = math.radians(angle)
module = self.module
x = self.x / module
y = self.y / module
except ZeroDivisionError:
return Vector2(0, 0)
return Vector2(x * math.cos(rads) - y * math.sin(rads), x * math.sin(rads) + y * math.cos(rads)) * 20
def rotation_deg(self) -> float:
return math.degrees(self.rotation_rad())
def rotation_rad(self) -> float:
rotation = self.angle_rad_with()
return rotation
def validate_angle_deg_with(self, other: 'Vector2', max_angle) -> bool:
angle = self.angle_deg_with(other)
return angle <= max_angle or 360 - angle <= max_angle
|
none
| 1
| 3.626343
| 4
|
|
core/libs/view_helpers/__init__.py
|
near-feign/pcapdb
| 244
|
6627689
|
def format_errors(errors):
"""Format serializer errors to conform to our messaging format. (ie, sending a list of
messages or a single message under 'success', 'info', 'warning', or 'failure').
:param errors: An error dictionary as produced by rest_framework serializers.
:returns: A list of messages."""
out_errors = []
for key in errors:
for msg in errors[key]:
if key != 'non_field_errors':
out_errors.append('{}: {}'.format(key, msg))
else:
out_errors.append(msg)
return out_errors
|
def format_errors(errors):
"""Format serializer errors to conform to our messaging format. (ie, sending a list of
messages or a single message under 'success', 'info', 'warning', or 'failure').
:param errors: An error dictionary as produced by rest_framework serializers.
:returns: A list of messages."""
out_errors = []
for key in errors:
for msg in errors[key]:
if key != 'non_field_errors':
out_errors.append('{}: {}'.format(key, msg))
else:
out_errors.append(msg)
return out_errors
|
en
| 0.55546
|
Format serializer errors to conform to our messaging format. (ie, sending a list of messages or a single message under 'success', 'info', 'warning', or 'failure'). :param errors: An error dictionary as produced by rest_framework serializers. :returns: A list of messages.
| 3.079606
| 3
|
python/GlobalMemoryInterface.py
|
KastnerRG/tinker
| 7
|
6627690
|
<filename>python/GlobalMemoryInterface.py
# ----------------------------------------------------------------------
# Copyright (c) 2016, The Regents of the University of California All
# rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of The Regents of the University of California
# nor the names of its contributors may be used to endorse or
# promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL REGENTS OF THE
# UNIVERSITY OF CALIFORNIA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
# ----------------------------------------------------------------------
import xml.etree.ElementTree as ET
import DDR, QDR
import Tinker, GroupMemoryInterface
import sys
from Interface import *
class GlobalMemoryInterface(Interface):
_C_INTERFACE_KEYS = set(["interfaces", "type"])
_C_INTERFACE_TYPES = set(["DMA"])
_C_INTERFACE_ROLES = set(["primary", "secondary"])
_C_INTERFACE_SIZE_RANGE = (1<<17, 1<<64)
def __init__(self, desc):
"""Construct a generic Interface Object
Arguments:
desc -- a dictionary object containing a description of this
interface
"""
super(GlobalMemoryInterface,self).__init__(desc)
@classmethod
def parse(cls, desc):
"""
Parse the description of this IP object from an dictionary
return a defaultdictionary built from the key-value pairs.
Arguments:
e -- An element tree element containing the description of this
object
"""
d = super(GlobalMemoryInterface,cls).parse(desc)
d["interfaces"] = cls.parse_interfaces(desc)
d["quantity"] = cls.parse_quantity(desc)
for i in d["interfaces"]:
d[i] = GroupMemoryInterface.GroupMemoryInterface(desc[i], i)
return d
@classmethod
def validate(cls, d):
"""
Validate the parameters that describe the intrinsic settings of
this Interface
Arguments:
d -- A Description object, containing the parsed user description
of a custom board
"""
super(GlobalMemoryInterface,cls).validate(d)
cls.check_interfaces(d)
cls.check_quantity(d)
cls.check_roles(d)
@classmethod
def check_roles(cls, d):
cls.check_interfaces(d)
ifs = d["interfaces"]
pid = None
spec = (d[ifs[0]].get("role") != None)
for i in ifs:
r = d[i].get("role")
if(r is None and spec is True):
Tinker.key_error("role",str(d[i]))
elif(r != None and spec is False):
print "In description:"
Tinker.print_description(d)
sys.exit("Roles must be specified for all Memory Interfaces, or none of them.")
elif(r != None and r not in cls._C_INTERFACE_ROLES):
Tinker.value_error_map("role", str(r), str(cls._C_INTERFACE_ROLES),
Tinker.tostr_dict())
elif(r != None and r == "primary" and pid != None):
print "In description:"
Tinker.print_description(d)
sys.exit("Error! Two primary interfaces \"%s\" and \"%s\" found."
% (pid, i))
elif(r == "primary"):
pid = i
def implement(self, b):
"""
Implement the Interface described by this object using the Board
object describing the IP available on this board.
Arguments:
d -- A Description object, containing the parsed user description
of a custom board
"""
self.validate(self)
for i in self["interfaces"]:
self[i].implement(b["memory"])
self.__configure()
def __configure(self):
"""
Fill in any missing defaults in a high level description used to
configure this object
Arguments:
d -- A Description object, containing the possibly incomplete
parsed user description of a custom board
"""
base = 0
size = 0
for i in self["interfaces"]:
self[i].check_size(self[i])
sz = self[i]["size"]
size += sz
rem = 0
if(base % sz != 0):
rem = sz - (base % sz)
base += rem
self[i].set_base_address(base)
base += sz
self["size"] = size
# Default to min frequency to meet timing
min_freq = None
min_id = None
size = 0
# TODO: Names for Memory Interfaces (must be less than 32 char)
# Must have at least 128 KB of memory
for i in self["interfaces"]:
self[i].check_frequency(self[i])
f = self[i]["freq_mhz"]
if(min_freq is None or f < min_freq):
min_freq = f
min_id = i
n = 0
for i in self["interfaces"]:
if(i == min_id):
self['primary'] = i
self[i].set_role("primary")
self[i].set_config_addr(0x18)
else:
self[i].set_role("secondary")
self[i].set_config_addr(0x100 + n * 0x18)
n +=1
#TODO: Configuration address
def verify(self):
"""
Verify that this object can implement the high level description
Arguments:
d -- A Description object, containing the complete description
of a the IP configuration
"""
self.check_interfaces(self)
self.check_quantity(self)
self.check_roles(self)
self.check_size(self)
@classmethod
def check_size(cls, d):
sz = d.get("size")
sz_min = cls._C_INTERFACE_SIZE_RANGE[0]
sz_max = cls._C_INTERFACE_SIZE_RANGE[1]
if(sz is None):
Tinker.key_error("size", Tinker.tostr_dict(d))
if(not Tinker.is_in_range(sz, sz_min, sz_max)):
Tinker.value_error_map("size", str(hex(sz)),
"Range(0x%x, 0x%x)" % (sz_min, sz_max),
Tinker.tostr_dict(d))
@classmethod
def parse_quantity(cls, desc):
ifs = cls.parse_interfaces(desc)
return len(ifs)
@classmethod
def parse_interfaces(cls, desc):
ifs = parse_list(desc, "interfaces")
if(ifs == []):
print "In description:"
Tinker.print_description(d)
sys.exit("Error! A Global Memory must have more than one interface!")
for i in ifs:
if(ifs.count(i) > 1):
sys.exit("Error! Interface \"%s\" was not unique in list %s"
% (i, str(ifs)))
parse_dict(desc, i)
return ifs
@classmethod
def check_quantity(cls, d):
super(GlobalMemoryInterface,cls).check_quantity(d)
cls.check_interfaces(d)
ifs = cls.parse_interfaces(d)
q = parse_int(d, "quantity")
if(q != len(ifs)):
Tinker.value_error_map("quantity",str(q),str(ifs),
Tinker.tostr_dict(d))
@classmethod
def check_interfaces(cls,d):
ifs = parse_list(d, "interfaces")
if(ifs == []):
print "In description:"
Tinker.print_description(d)
sys.exit("Error! A Global Memory must have more than one interface!")
for i in ifs:
if(ifs.count(i) > 1):
sys.exit("Error! Interface \"%s\" was not unique in list %s"
% (i, str(ifs)))
parse_dict(d,i)
d[i].validate(d[i])
def get_macros(self, version, verbose):
l = []
for i in self["interfaces"]:
l += self[i].get_macros(version, verbose)
return l
def get_pin_elements(self, version, verbose):
l = []
for i in self["interfaces"]:
l += self[i].get_pin_elements(version, verbose)
return l
def get_global_mem_elements(self, version, verbose):
l = []
for i in self["interfaces"]:
l += [self[i].get_global_mem_element(version, verbose)]
return l
def get_interface_elements(self, version, verbose):
pid = self["primary"]
# TODO: Check primary
return self[pid].get_interface_elements(version, verbose)
# TODO: Else, Error
def get_host_elements(self, version, verbose):
return []
|
<filename>python/GlobalMemoryInterface.py
# ----------------------------------------------------------------------
# Copyright (c) 2016, The Regents of the University of California All
# rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of The Regents of the University of California
# nor the names of its contributors may be used to endorse or
# promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL REGENTS OF THE
# UNIVERSITY OF CALIFORNIA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
# ----------------------------------------------------------------------
import xml.etree.ElementTree as ET
import DDR, QDR
import Tinker, GroupMemoryInterface
import sys
from Interface import *
class GlobalMemoryInterface(Interface):
_C_INTERFACE_KEYS = set(["interfaces", "type"])
_C_INTERFACE_TYPES = set(["DMA"])
_C_INTERFACE_ROLES = set(["primary", "secondary"])
_C_INTERFACE_SIZE_RANGE = (1<<17, 1<<64)
def __init__(self, desc):
"""Construct a generic Interface Object
Arguments:
desc -- a dictionary object containing a description of this
interface
"""
super(GlobalMemoryInterface,self).__init__(desc)
@classmethod
def parse(cls, desc):
"""
Parse the description of this IP object from an dictionary
return a defaultdictionary built from the key-value pairs.
Arguments:
e -- An element tree element containing the description of this
object
"""
d = super(GlobalMemoryInterface,cls).parse(desc)
d["interfaces"] = cls.parse_interfaces(desc)
d["quantity"] = cls.parse_quantity(desc)
for i in d["interfaces"]:
d[i] = GroupMemoryInterface.GroupMemoryInterface(desc[i], i)
return d
@classmethod
def validate(cls, d):
"""
Validate the parameters that describe the intrinsic settings of
this Interface
Arguments:
d -- A Description object, containing the parsed user description
of a custom board
"""
super(GlobalMemoryInterface,cls).validate(d)
cls.check_interfaces(d)
cls.check_quantity(d)
cls.check_roles(d)
@classmethod
def check_roles(cls, d):
cls.check_interfaces(d)
ifs = d["interfaces"]
pid = None
spec = (d[ifs[0]].get("role") != None)
for i in ifs:
r = d[i].get("role")
if(r is None and spec is True):
Tinker.key_error("role",str(d[i]))
elif(r != None and spec is False):
print "In description:"
Tinker.print_description(d)
sys.exit("Roles must be specified for all Memory Interfaces, or none of them.")
elif(r != None and r not in cls._C_INTERFACE_ROLES):
Tinker.value_error_map("role", str(r), str(cls._C_INTERFACE_ROLES),
Tinker.tostr_dict())
elif(r != None and r == "primary" and pid != None):
print "In description:"
Tinker.print_description(d)
sys.exit("Error! Two primary interfaces \"%s\" and \"%s\" found."
% (pid, i))
elif(r == "primary"):
pid = i
def implement(self, b):
"""
Implement the Interface described by this object using the Board
object describing the IP available on this board.
Arguments:
d -- A Description object, containing the parsed user description
of a custom board
"""
self.validate(self)
for i in self["interfaces"]:
self[i].implement(b["memory"])
self.__configure()
def __configure(self):
"""
Fill in any missing defaults in a high level description used to
configure this object
Arguments:
d -- A Description object, containing the possibly incomplete
parsed user description of a custom board
"""
base = 0
size = 0
for i in self["interfaces"]:
self[i].check_size(self[i])
sz = self[i]["size"]
size += sz
rem = 0
if(base % sz != 0):
rem = sz - (base % sz)
base += rem
self[i].set_base_address(base)
base += sz
self["size"] = size
# Default to min frequency to meet timing
min_freq = None
min_id = None
size = 0
# TODO: Names for Memory Interfaces (must be less than 32 char)
# Must have at least 128 KB of memory
for i in self["interfaces"]:
self[i].check_frequency(self[i])
f = self[i]["freq_mhz"]
if(min_freq is None or f < min_freq):
min_freq = f
min_id = i
n = 0
for i in self["interfaces"]:
if(i == min_id):
self['primary'] = i
self[i].set_role("primary")
self[i].set_config_addr(0x18)
else:
self[i].set_role("secondary")
self[i].set_config_addr(0x100 + n * 0x18)
n +=1
#TODO: Configuration address
def verify(self):
"""
Verify that this object can implement the high level description
Arguments:
d -- A Description object, containing the complete description
of a the IP configuration
"""
self.check_interfaces(self)
self.check_quantity(self)
self.check_roles(self)
self.check_size(self)
@classmethod
def check_size(cls, d):
sz = d.get("size")
sz_min = cls._C_INTERFACE_SIZE_RANGE[0]
sz_max = cls._C_INTERFACE_SIZE_RANGE[1]
if(sz is None):
Tinker.key_error("size", Tinker.tostr_dict(d))
if(not Tinker.is_in_range(sz, sz_min, sz_max)):
Tinker.value_error_map("size", str(hex(sz)),
"Range(0x%x, 0x%x)" % (sz_min, sz_max),
Tinker.tostr_dict(d))
@classmethod
def parse_quantity(cls, desc):
ifs = cls.parse_interfaces(desc)
return len(ifs)
@classmethod
def parse_interfaces(cls, desc):
ifs = parse_list(desc, "interfaces")
if(ifs == []):
print "In description:"
Tinker.print_description(d)
sys.exit("Error! A Global Memory must have more than one interface!")
for i in ifs:
if(ifs.count(i) > 1):
sys.exit("Error! Interface \"%s\" was not unique in list %s"
% (i, str(ifs)))
parse_dict(desc, i)
return ifs
@classmethod
def check_quantity(cls, d):
super(GlobalMemoryInterface,cls).check_quantity(d)
cls.check_interfaces(d)
ifs = cls.parse_interfaces(d)
q = parse_int(d, "quantity")
if(q != len(ifs)):
Tinker.value_error_map("quantity",str(q),str(ifs),
Tinker.tostr_dict(d))
@classmethod
def check_interfaces(cls,d):
ifs = parse_list(d, "interfaces")
if(ifs == []):
print "In description:"
Tinker.print_description(d)
sys.exit("Error! A Global Memory must have more than one interface!")
for i in ifs:
if(ifs.count(i) > 1):
sys.exit("Error! Interface \"%s\" was not unique in list %s"
% (i, str(ifs)))
parse_dict(d,i)
d[i].validate(d[i])
def get_macros(self, version, verbose):
l = []
for i in self["interfaces"]:
l += self[i].get_macros(version, verbose)
return l
def get_pin_elements(self, version, verbose):
l = []
for i in self["interfaces"]:
l += self[i].get_pin_elements(version, verbose)
return l
def get_global_mem_elements(self, version, verbose):
l = []
for i in self["interfaces"]:
l += [self[i].get_global_mem_element(version, verbose)]
return l
def get_interface_elements(self, version, verbose):
pid = self["primary"]
# TODO: Check primary
return self[pid].get_interface_elements(version, verbose)
# TODO: Else, Error
def get_host_elements(self, version, verbose):
return []
|
en
| 0.684863
|
# ---------------------------------------------------------------------- # Copyright (c) 2016, The Regents of the University of California All # rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # * Neither the name of The Regents of the University of California # nor the names of its contributors may be used to endorse or # promote products derived from this software without specific # prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL REGENTS OF THE # UNIVERSITY OF CALIFORNIA BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS # OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR # TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH # DAMAGE. # ---------------------------------------------------------------------- Construct a generic Interface Object Arguments: desc -- a dictionary object containing a description of this interface Parse the description of this IP object from an dictionary return a defaultdictionary built from the key-value pairs. 
Arguments: e -- An element tree element containing the description of this object Validate the parameters that describe the intrinsic settings of this Interface Arguments: d -- A Description object, containing the parsed user description of a custom board Implement the Interface described by this object using the Board object describing the IP available on this board. Arguments: d -- A Description object, containing the parsed user description of a custom board Fill in any missing defaults in a high level description used to configure this object Arguments: d -- A Description object, containing the possibly incomplete parsed user description of a custom board # Default to min frequency to meet timing # TODO: Names for Memory Interfaces (must be less than 32 char) # Must have at least 128 KB of memory #TODO: Configuration address Verify that this object can implement the high level description Arguments: d -- A Description object, containing the complete description of a the IP configuration # TODO: Check primary # TODO: Else, Error
| 1.331892
| 1
|
cottonformation/res/elasticloadbalancing.py
|
MacHu-GWU/cottonformation-project
| 5
|
6627691
|
<filename>cottonformation/res/elasticloadbalancing.py
# -*- coding: utf-8 -*-
"""
This module
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
@attr.s
class PropLoadBalancerAccessLoggingPolicy(Property):
"""
AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer.AccessLoggingPolicy"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html
Property Document:
- ``rp_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html#cfn-elb-accessloggingpolicy-enabled
- ``rp_S3BucketName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html#cfn-elb-accessloggingpolicy-s3bucketname
- ``p_EmitInterval``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html#cfn-elb-accessloggingpolicy-emitinterval
- ``p_S3BucketPrefix``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html#cfn-elb-accessloggingpolicy-s3bucketprefix
"""
AWS_OBJECT_TYPE = "AWS::ElasticLoadBalancing::LoadBalancer.AccessLoggingPolicy"
rp_Enabled: bool = attr.ib(
default=None,
validator=attr.validators.instance_of(bool),
metadata={AttrMeta.PROPERTY_NAME: "Enabled"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html#cfn-elb-accessloggingpolicy-enabled"""
rp_S3BucketName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "S3BucketName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html#cfn-elb-accessloggingpolicy-s3bucketname"""
p_EmitInterval: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "EmitInterval"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html#cfn-elb-accessloggingpolicy-emitinterval"""
p_S3BucketPrefix: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "S3BucketPrefix"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html#cfn-elb-accessloggingpolicy-s3bucketprefix"""
@attr.s
class PropLoadBalancerHealthCheck(Property):
"""
AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer.HealthCheck"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html
Property Document:
- ``rp_HealthyThreshold``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-healthythreshold
- ``rp_Interval``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-interval
- ``rp_Target``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-target
- ``rp_Timeout``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-timeout
- ``rp_UnhealthyThreshold``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-unhealthythreshold
"""
AWS_OBJECT_TYPE = "AWS::ElasticLoadBalancing::LoadBalancer.HealthCheck"
rp_HealthyThreshold: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "HealthyThreshold"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-healthythreshold"""
rp_Interval: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Interval"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-interval"""
rp_Target: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Target"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-target"""
rp_Timeout: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Timeout"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-timeout"""
rp_UnhealthyThreshold: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "UnhealthyThreshold"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-unhealthythreshold"""
@attr.s
class PropLoadBalancerConnectionSettings(Property):
"""
AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer.ConnectionSettings"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-connectionsettings.html
Property Document:
- ``rp_IdleTimeout``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-connectionsettings.html#cfn-elb-connectionsettings-idletimeout
"""
AWS_OBJECT_TYPE = "AWS::ElasticLoadBalancing::LoadBalancer.ConnectionSettings"
rp_IdleTimeout: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "IdleTimeout"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-connectionsettings.html#cfn-elb-connectionsettings-idletimeout"""
@attr.s
class PropLoadBalancerConnectionDrainingPolicy(Property):
    """
    AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer.ConnectionDrainingPolicy"

    Note: fields prefixed ``rp_`` are required (non-optional validator);
    fields prefixed ``p_`` are optional and default to ``None``. The
    ``AttrMeta.PROPERTY_NAME`` metadata maps each attribute to its
    CloudFormation property key.

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-connectiondrainingpolicy.html
    Property Document:
    - ``rp_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-connectiondrainingpolicy.html#cfn-elb-connectiondrainingpolicy-enabled
    - ``p_Timeout``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-connectiondrainingpolicy.html#cfn-elb-connectiondrainingpolicy-timeout
    """
    AWS_OBJECT_TYPE = "AWS::ElasticLoadBalancing::LoadBalancer.ConnectionDrainingPolicy"
    # Required: validated as bool; serialized under "Enabled".
    rp_Enabled: bool = attr.ib(
        default=None,
        validator=attr.validators.instance_of(bool),
        metadata={AttrMeta.PROPERTY_NAME: "Enabled"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-connectiondrainingpolicy.html#cfn-elb-connectiondrainingpolicy-enabled"""
    # Optional: int or None; serialized under "Timeout".
    p_Timeout: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "Timeout"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-connectiondrainingpolicy.html#cfn-elb-connectiondrainingpolicy-timeout"""
@attr.s
class PropLoadBalancerAppCookieStickinessPolicy(Property):
    """
    AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer.AppCookieStickinessPolicy"

    Note: both fields are required (``rp_`` prefix, non-optional validators).
    The ``AttrMeta.PROPERTY_NAME`` metadata maps each attribute to its
    CloudFormation property key.

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-AppCookieStickinessPolicy.html
    Property Document:
    - ``rp_CookieName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-AppCookieStickinessPolicy.html#cfn-elb-appcookiestickinesspolicy-cookiename
    - ``rp_PolicyName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-AppCookieStickinessPolicy.html#cfn-elb-appcookiestickinesspolicy-policyname
    """
    AWS_OBJECT_TYPE = "AWS::ElasticLoadBalancing::LoadBalancer.AppCookieStickinessPolicy"
    # Required: accepts a plain string or an intrinsic-function value
    # (TypeCheck.intrinsic_str_type); serialized under "CookieName".
    rp_CookieName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "CookieName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-AppCookieStickinessPolicy.html#cfn-elb-appcookiestickinesspolicy-cookiename"""
    # Required: serialized under "PolicyName".
    rp_PolicyName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "PolicyName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-AppCookieStickinessPolicy.html#cfn-elb-appcookiestickinesspolicy-policyname"""
@attr.s
class PropLoadBalancerLBCookieStickinessPolicy(Property):
    """
    AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer.LBCookieStickinessPolicy"

    Note: both fields are optional (``p_`` prefix) and default to ``None``.
    The ``AttrMeta.PROPERTY_NAME`` metadata maps each attribute to its
    CloudFormation property key.

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-LBCookieStickinessPolicy.html
    Property Document:
    - ``p_CookieExpirationPeriod``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-LBCookieStickinessPolicy.html#cfn-elb-lbcookiestickinesspolicy-cookieexpirationperiod
    - ``p_PolicyName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-LBCookieStickinessPolicy.html#cfn-elb-lbcookiestickinesspolicy-policyname
    """
    AWS_OBJECT_TYPE = "AWS::ElasticLoadBalancing::LoadBalancer.LBCookieStickinessPolicy"
    # Optional: string/intrinsic value or None; serialized under
    # "CookieExpirationPeriod".
    p_CookieExpirationPeriod: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "CookieExpirationPeriod"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-LBCookieStickinessPolicy.html#cfn-elb-lbcookiestickinesspolicy-cookieexpirationperiod"""
    # Optional: serialized under "PolicyName".
    p_PolicyName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "PolicyName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-LBCookieStickinessPolicy.html#cfn-elb-lbcookiestickinesspolicy-policyname"""
@attr.s
class PropLoadBalancerListeners(Property):
    """
    AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer.Listeners"

    One listener entry for a Classic Load Balancer. Fields prefixed ``rp_``
    are required (non-optional validators); fields prefixed ``p_`` are
    optional and default to ``None``. The ``AttrMeta.PROPERTY_NAME``
    metadata maps each attribute to its CloudFormation property key.

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html
    Property Document:
    - ``rp_InstancePort``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-instanceport
    - ``rp_LoadBalancerPort``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-loadbalancerport
    - ``rp_Protocol``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-protocol
    - ``p_InstanceProtocol``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-instanceprotocol
    - ``p_PolicyNames``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-policynames
    - ``p_SSLCertificateId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-sslcertificateid
    """
    AWS_OBJECT_TYPE = "AWS::ElasticLoadBalancing::LoadBalancer.Listeners"
    # Required: back-end instance port; serialized under "InstancePort".
    rp_InstancePort: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "InstancePort"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-instanceport"""
    # Required: front-end port; serialized under "LoadBalancerPort".
    rp_LoadBalancerPort: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "LoadBalancerPort"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-loadbalancerport"""
    # Required: front-end protocol; serialized under "Protocol".
    rp_Protocol: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Protocol"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-protocol"""
    # Optional: back-end protocol; serialized under "InstanceProtocol".
    p_InstanceProtocol: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "InstanceProtocol"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-instanceprotocol"""
    # Optional: list of policy-name strings; serialized under "PolicyNames".
    p_PolicyNames: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "PolicyNames"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-policynames"""
    # Optional: certificate identifier; serialized under "SSLCertificateId".
    p_SSLCertificateId: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "SSLCertificateId"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-sslcertificateid"""
@attr.s
class PropLoadBalancerPolicies(Property):
    """
    AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer.Policies"

    A custom load-balancer policy. Fields prefixed ``rp_`` are required
    (non-optional validators); fields prefixed ``p_`` are optional and
    default to ``None``. The ``AttrMeta.PROPERTY_NAME`` metadata maps each
    attribute to its CloudFormation property key.

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html
    Property Document:
    - ``rp_Attributes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-attributes
    - ``rp_PolicyName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-policyname
    - ``rp_PolicyType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-policytype
    - ``p_InstancePorts``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-instanceports
    - ``p_LoadBalancerPorts``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-loadbalancerports
    """
    AWS_OBJECT_TYPE = "AWS::ElasticLoadBalancing::LoadBalancer.Policies"
    # Required: list of attribute dicts (kept as raw dicts, not Prop objects);
    # serialized under "Attributes".
    rp_Attributes: typing.List[dict] = attr.ib(
        default=None,
        validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(dict), iterable_validator=attr.validators.instance_of(list)),
        metadata={AttrMeta.PROPERTY_NAME: "Attributes"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-attributes"""
    # Required: serialized under "PolicyName".
    rp_PolicyName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "PolicyName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-policyname"""
    # Required: serialized under "PolicyType".
    rp_PolicyType: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "PolicyType"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-policytype"""
    # Optional: list of port strings; serialized under "InstancePorts".
    p_InstancePorts: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "InstancePorts"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-instanceports"""
    # Optional: list of port strings; serialized under "LoadBalancerPorts".
    p_LoadBalancerPorts: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "LoadBalancerPorts"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-loadbalancerports"""
#--- Resource declaration ---
@attr.s
class LoadBalancer(Resource):
    """
    AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer"

    Declarative model of a Classic Load Balancer resource. Fields prefixed
    ``rp_`` are required (non-optional validators); fields prefixed ``p_``
    are optional and default to ``None``. Where a ``converter`` is given
    (``from_dict`` / ``from_list``), plain ``dict``/``list`` input is
    coerced into the corresponding ``Prop*`` objects at construction time.
    The ``rv_*`` properties build ``Fn::GetAtt`` references (``GetAtt``)
    to this resource's runtime return values.

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html
    Property Document:
    - ``rp_Listeners``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-listeners
    - ``p_AccessLoggingPolicy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-accessloggingpolicy
    - ``p_AppCookieStickinessPolicy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-appcookiestickinesspolicy
    - ``p_AvailabilityZones``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-availabilityzones
    - ``p_ConnectionDrainingPolicy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-connectiondrainingpolicy
    - ``p_ConnectionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-connectionsettings
    - ``p_CrossZone``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-crosszone
    - ``p_HealthCheck``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-healthcheck
    - ``p_Instances``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-instances
    - ``p_LBCookieStickinessPolicy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-lbcookiestickinesspolicy
    - ``p_LoadBalancerName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-elbname
    - ``p_Policies``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-policies
    - ``p_Scheme``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-scheme
    - ``p_SecurityGroups``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-securitygroups
    - ``p_Subnets``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-subnets
    - ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-elasticloadbalancing-loadbalancer-tags
    """
    AWS_OBJECT_TYPE = "AWS::ElasticLoadBalancing::LoadBalancer"
    # Required: listener definitions; plain dicts are converted to
    # PropLoadBalancerListeners by the from_list converter.
    rp_Listeners: typing.List[typing.Union['PropLoadBalancerListeners', dict]] = attr.ib(
        default=None,
        converter=PropLoadBalancerListeners.from_list,
        validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropLoadBalancerListeners), iterable_validator=attr.validators.instance_of(list)),
        metadata={AttrMeta.PROPERTY_NAME: "Listeners"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-listeners"""
    # Optional: access-log settings; dict input is converted via from_dict.
    p_AccessLoggingPolicy: typing.Union['PropLoadBalancerAccessLoggingPolicy', dict] = attr.ib(
        default=None,
        converter=PropLoadBalancerAccessLoggingPolicy.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropLoadBalancerAccessLoggingPolicy)),
        metadata={AttrMeta.PROPERTY_NAME: "AccessLoggingPolicy"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-accessloggingpolicy"""
    # Optional: app-cookie stickiness policies; list of dicts is converted
    # via from_list.
    p_AppCookieStickinessPolicy: typing.List[typing.Union['PropLoadBalancerAppCookieStickinessPolicy', dict]] = attr.ib(
        default=None,
        converter=PropLoadBalancerAppCookieStickinessPolicy.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropLoadBalancerAppCookieStickinessPolicy), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "AppCookieStickinessPolicy"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-appcookiestickinesspolicy"""
    # Optional: list of AZ name strings.
    p_AvailabilityZones: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "AvailabilityZones"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-availabilityzones"""
    # Optional: connection-draining settings; dict converted via from_dict.
    p_ConnectionDrainingPolicy: typing.Union['PropLoadBalancerConnectionDrainingPolicy', dict] = attr.ib(
        default=None,
        converter=PropLoadBalancerConnectionDrainingPolicy.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropLoadBalancerConnectionDrainingPolicy)),
        metadata={AttrMeta.PROPERTY_NAME: "ConnectionDrainingPolicy"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-connectiondrainingpolicy"""
    # Optional: idle-timeout settings; dict converted via from_dict.
    p_ConnectionSettings: typing.Union['PropLoadBalancerConnectionSettings', dict] = attr.ib(
        default=None,
        converter=PropLoadBalancerConnectionSettings.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropLoadBalancerConnectionSettings)),
        metadata={AttrMeta.PROPERTY_NAME: "ConnectionSettings"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-connectionsettings"""
    # Optional: plain bool flag.
    p_CrossZone: bool = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(bool)),
        metadata={AttrMeta.PROPERTY_NAME: "CrossZone"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-crosszone"""
    # Optional: health-check settings; dict converted via from_dict.
    p_HealthCheck: typing.Union['PropLoadBalancerHealthCheck', dict] = attr.ib(
        default=None,
        converter=PropLoadBalancerHealthCheck.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropLoadBalancerHealthCheck)),
        metadata={AttrMeta.PROPERTY_NAME: "HealthCheck"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-healthcheck"""
    # Optional: list of instance-id strings.
    p_Instances: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "Instances"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-instances"""
    # Optional: LB-cookie stickiness policies; list of dicts converted via
    # from_list.
    p_LBCookieStickinessPolicy: typing.List[typing.Union['PropLoadBalancerLBCookieStickinessPolicy', dict]] = attr.ib(
        default=None,
        converter=PropLoadBalancerLBCookieStickinessPolicy.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropLoadBalancerLBCookieStickinessPolicy), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "LBCookieStickinessPolicy"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-lbcookiestickinesspolicy"""
    # Optional: explicit load-balancer name.
    p_LoadBalancerName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "LoadBalancerName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-elbname"""
    # Optional: custom policies; list of dicts converted via from_list.
    p_Policies: typing.List[typing.Union['PropLoadBalancerPolicies', dict]] = attr.ib(
        default=None,
        converter=PropLoadBalancerPolicies.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropLoadBalancerPolicies), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "Policies"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-policies"""
    # Optional: scheme string (see linked AWS doc for valid values).
    p_Scheme: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Scheme"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-scheme"""
    # Optional: list of security-group id strings.
    p_SecurityGroups: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "SecurityGroups"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-securitygroups"""
    # Optional: list of subnet-id strings.
    p_Subnets: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "Subnets"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-subnets"""
    # Optional: resource tags; list of dicts converted to Tag via from_list.
    p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib(
        default=None,
        converter=Tag.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "Tags"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-elasticloadbalancing-loadbalancer-tags"""
    # The rv_* properties below each return a GetAtt reference to one of
    # this resource's CloudFormation return values.
    @property
    def rv_CanonicalHostedZoneName(self) -> GetAtt:
        """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#aws-properties-ec2-elb-return-values"""
        return GetAtt(resource=self, attr_name="CanonicalHostedZoneName")
    @property
    def rv_CanonicalHostedZoneNameID(self) -> GetAtt:
        """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#aws-properties-ec2-elb-return-values"""
        return GetAtt(resource=self, attr_name="CanonicalHostedZoneNameID")
    @property
    def rv_DNSName(self) -> GetAtt:
        """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#aws-properties-ec2-elb-return-values"""
        return GetAtt(resource=self, attr_name="DNSName")
    @property
    def rv_SourceSecurityGroupGroupName(self) -> GetAtt:
        """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#aws-properties-ec2-elb-return-values"""
        return GetAtt(resource=self, attr_name="SourceSecurityGroup.GroupName")
    @property
    def rv_SourceSecurityGroupOwnerAlias(self) -> GetAtt:
        """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#aws-properties-ec2-elb-return-values"""
        return GetAtt(resource=self, attr_name="SourceSecurityGroup.OwnerAlias")
|
<filename>cottonformation/res/elasticloadbalancing.py
# -*- coding: utf-8 -*-
"""
This module
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
@attr.s
class PropLoadBalancerAccessLoggingPolicy(Property):
    """
    AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer.AccessLoggingPolicy"

    Note: fields prefixed ``rp_`` are required (non-optional validator);
    fields prefixed ``p_`` are optional and default to ``None``. The
    ``AttrMeta.PROPERTY_NAME`` metadata maps each attribute to its
    CloudFormation property key.

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html
    Property Document:
    - ``rp_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html#cfn-elb-accessloggingpolicy-enabled
    - ``rp_S3BucketName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html#cfn-elb-accessloggingpolicy-s3bucketname
    - ``p_EmitInterval``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html#cfn-elb-accessloggingpolicy-emitinterval
    - ``p_S3BucketPrefix``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html#cfn-elb-accessloggingpolicy-s3bucketprefix
    """
    AWS_OBJECT_TYPE = "AWS::ElasticLoadBalancing::LoadBalancer.AccessLoggingPolicy"
    # Required: validated as bool; serialized under "Enabled".
    rp_Enabled: bool = attr.ib(
        default=None,
        validator=attr.validators.instance_of(bool),
        metadata={AttrMeta.PROPERTY_NAME: "Enabled"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html#cfn-elb-accessloggingpolicy-enabled"""
    # Required: target S3 bucket name; serialized under "S3BucketName".
    rp_S3BucketName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "S3BucketName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html#cfn-elb-accessloggingpolicy-s3bucketname"""
    # Optional: int or None; serialized under "EmitInterval".
    p_EmitInterval: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "EmitInterval"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html#cfn-elb-accessloggingpolicy-emitinterval"""
    # Optional: key prefix within the bucket; serialized under "S3BucketPrefix".
    p_S3BucketPrefix: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "S3BucketPrefix"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html#cfn-elb-accessloggingpolicy-s3bucketprefix"""
@attr.s
class PropLoadBalancerHealthCheck(Property):
    """
    AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer.HealthCheck"

    Health-check configuration for a Classic Load Balancer. All fields are
    required (``rp_`` prefix, non-optional validators). Values are string
    or intrinsic-function values (``TypeCheck.intrinsic_str_type``); the
    ``AttrMeta.PROPERTY_NAME`` metadata maps each attribute to its
    CloudFormation property key.

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html
    Property Document:
    - ``rp_HealthyThreshold``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-healthythreshold
    - ``rp_Interval``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-interval
    - ``rp_Target``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-target
    - ``rp_Timeout``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-timeout
    - ``rp_UnhealthyThreshold``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-unhealthythreshold
    """
    AWS_OBJECT_TYPE = "AWS::ElasticLoadBalancing::LoadBalancer.HealthCheck"
    # Required: serialized under "HealthyThreshold".
    rp_HealthyThreshold: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "HealthyThreshold"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-healthythreshold"""
    # Required: serialized under "Interval".
    rp_Interval: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Interval"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-interval"""
    # Required: health-check target; serialized under "Target".
    rp_Target: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Target"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-target"""
    # Required: serialized under "Timeout".
    rp_Timeout: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Timeout"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-timeout"""
    # Required: serialized under "UnhealthyThreshold".
    rp_UnhealthyThreshold: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "UnhealthyThreshold"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-unhealthythreshold"""
@attr.s
class PropLoadBalancerConnectionSettings(Property):
    """
    AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer.ConnectionSettings"

    Note: fields prefixed ``rp_`` are required (non-optional validator).
    The ``AttrMeta.PROPERTY_NAME`` metadata maps each attribute to its
    CloudFormation property key.

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-connectionsettings.html
    Property Document:
    - ``rp_IdleTimeout``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-connectionsettings.html#cfn-elb-connectionsettings-idletimeout
    """
    AWS_OBJECT_TYPE = "AWS::ElasticLoadBalancing::LoadBalancer.ConnectionSettings"
    # Required: validated as int; serialized under "IdleTimeout".
    rp_IdleTimeout: int = attr.ib(
        default=None,
        validator=attr.validators.instance_of(int),
        metadata={AttrMeta.PROPERTY_NAME: "IdleTimeout"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-connectionsettings.html#cfn-elb-connectionsettings-idletimeout"""
@attr.s
class PropLoadBalancerConnectionDrainingPolicy(Property):
    """
    AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer.ConnectionDrainingPolicy"

    Note: fields prefixed ``rp_`` are required (non-optional validator);
    fields prefixed ``p_`` are optional and default to ``None``. The
    ``AttrMeta.PROPERTY_NAME`` metadata maps each attribute to its
    CloudFormation property key.

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-connectiondrainingpolicy.html
    Property Document:
    - ``rp_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-connectiondrainingpolicy.html#cfn-elb-connectiondrainingpolicy-enabled
    - ``p_Timeout``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-connectiondrainingpolicy.html#cfn-elb-connectiondrainingpolicy-timeout
    """
    AWS_OBJECT_TYPE = "AWS::ElasticLoadBalancing::LoadBalancer.ConnectionDrainingPolicy"
    # Required: validated as bool; serialized under "Enabled".
    rp_Enabled: bool = attr.ib(
        default=None,
        validator=attr.validators.instance_of(bool),
        metadata={AttrMeta.PROPERTY_NAME: "Enabled"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-connectiondrainingpolicy.html#cfn-elb-connectiondrainingpolicy-enabled"""
    # Optional: int or None; serialized under "Timeout".
    p_Timeout: int = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(int)),
        metadata={AttrMeta.PROPERTY_NAME: "Timeout"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-connectiondrainingpolicy.html#cfn-elb-connectiondrainingpolicy-timeout"""
@attr.s
class PropLoadBalancerAppCookieStickinessPolicy(Property):
    """
    AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer.AppCookieStickinessPolicy"

    Note: both fields are required (``rp_`` prefix, non-optional validators).
    The ``AttrMeta.PROPERTY_NAME`` metadata maps each attribute to its
    CloudFormation property key.

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-AppCookieStickinessPolicy.html
    Property Document:
    - ``rp_CookieName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-AppCookieStickinessPolicy.html#cfn-elb-appcookiestickinesspolicy-cookiename
    - ``rp_PolicyName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-AppCookieStickinessPolicy.html#cfn-elb-appcookiestickinesspolicy-policyname
    """
    AWS_OBJECT_TYPE = "AWS::ElasticLoadBalancing::LoadBalancer.AppCookieStickinessPolicy"
    # Required: serialized under "CookieName".
    rp_CookieName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "CookieName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-AppCookieStickinessPolicy.html#cfn-elb-appcookiestickinesspolicy-cookiename"""
    # Required: serialized under "PolicyName".
    rp_PolicyName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "PolicyName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-AppCookieStickinessPolicy.html#cfn-elb-appcookiestickinesspolicy-policyname"""
@attr.s
class PropLoadBalancerLBCookieStickinessPolicy(Property):
    """
    AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer.LBCookieStickinessPolicy"

    Note: both fields are optional (``p_`` prefix) and default to ``None``.
    The ``AttrMeta.PROPERTY_NAME`` metadata maps each attribute to its
    CloudFormation property key.

    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-LBCookieStickinessPolicy.html
    Property Document:
    - ``p_CookieExpirationPeriod``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-LBCookieStickinessPolicy.html#cfn-elb-lbcookiestickinesspolicy-cookieexpirationperiod
    - ``p_PolicyName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-LBCookieStickinessPolicy.html#cfn-elb-lbcookiestickinesspolicy-policyname
    """
    AWS_OBJECT_TYPE = "AWS::ElasticLoadBalancing::LoadBalancer.LBCookieStickinessPolicy"
    # Optional: serialized under "CookieExpirationPeriod".
    p_CookieExpirationPeriod: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "CookieExpirationPeriod"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-LBCookieStickinessPolicy.html#cfn-elb-lbcookiestickinesspolicy-cookieexpirationperiod"""
    # Optional: serialized under "PolicyName".
    p_PolicyName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "PolicyName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-LBCookieStickinessPolicy.html#cfn-elb-lbcookiestickinesspolicy-policyname"""
@attr.s
class PropLoadBalancerListeners(Property):
    """
    AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer.Listeners"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html
    Property Document:
    - ``rp_InstancePort``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-instanceport
    - ``rp_LoadBalancerPort``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-loadbalancerport
    - ``rp_Protocol``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-protocol
    - ``p_InstanceProtocol``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-instanceprotocol
    - ``p_PolicyNames``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-policynames
    - ``p_SSLCertificateId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-sslcertificateid

    ``rp_`` attributes are required (non-optional validator rejects None);
    ``p_`` attributes are optional.
    """
    AWS_OBJECT_TYPE = "AWS::ElasticLoadBalancing::LoadBalancer.Listeners"
    # Required "InstancePort": string-like (intrinsic str type accepts
    # CloudFormation intrinsic function objects as well as plain strings).
    rp_InstancePort: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "InstancePort"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-instanceport"""
    # Required "LoadBalancerPort".
    rp_LoadBalancerPort: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "LoadBalancerPort"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-loadbalancerport"""
    # Required "Protocol".
    rp_Protocol: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "Protocol"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-protocol"""
    # Optional "InstanceProtocol".
    p_InstanceProtocol: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "InstanceProtocol"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-instanceprotocol"""
    # Optional "PolicyNames": must be a list; every member is validated as a
    # string-like via deep_iterable.
    p_PolicyNames: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "PolicyNames"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-policynames"""
    # Optional "SSLCertificateId".
    p_SSLCertificateId: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "SSLCertificateId"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-sslcertificateid"""
@attr.s
class PropLoadBalancerPolicies(Property):
    """
    AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer.Policies"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html
    Property Document:
    - ``rp_Attributes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-attributes
    - ``rp_PolicyName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-policyname
    - ``rp_PolicyType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-policytype
    - ``p_InstancePorts``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-instanceports
    - ``p_LoadBalancerPorts``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-loadbalancerports

    ``rp_`` attributes are required (non-optional validator rejects None);
    ``p_`` attributes are optional.
    """
    AWS_OBJECT_TYPE = "AWS::ElasticLoadBalancing::LoadBalancer.Policies"
    # Required "Attributes": a list of plain dicts (no typed Property class
    # is generated for policy attributes, hence instance_of(dict)).
    rp_Attributes: typing.List[dict] = attr.ib(
        default=None,
        validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(dict), iterable_validator=attr.validators.instance_of(list)),
        metadata={AttrMeta.PROPERTY_NAME: "Attributes"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-attributes"""
    # Required "PolicyName".
    rp_PolicyName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "PolicyName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-policyname"""
    # Required "PolicyType".
    rp_PolicyType: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
        metadata={AttrMeta.PROPERTY_NAME: "PolicyType"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-policytype"""
    # Optional "InstancePorts": list of string-likes.
    p_InstancePorts: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "InstancePorts"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-instanceports"""
    # Optional "LoadBalancerPorts": list of string-likes.
    p_LoadBalancerPorts: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "LoadBalancerPorts"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-loadbalancerports"""
#--- Resource declaration ---
@attr.s
class LoadBalancer(Resource):
    """
    AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer"
    Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html
    Property Document:
    - ``rp_Listeners``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-listeners
    - ``p_AccessLoggingPolicy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-accessloggingpolicy
    - ``p_AppCookieStickinessPolicy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-appcookiestickinesspolicy
    - ``p_AvailabilityZones``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-availabilityzones
    - ``p_ConnectionDrainingPolicy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-connectiondrainingpolicy
    - ``p_ConnectionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-connectionsettings
    - ``p_CrossZone``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-crosszone
    - ``p_HealthCheck``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-healthcheck
    - ``p_Instances``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-instances
    - ``p_LBCookieStickinessPolicy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-lbcookiestickinesspolicy
    - ``p_LoadBalancerName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-elbname
    - ``p_Policies``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-policies
    - ``p_Scheme``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-scheme
    - ``p_SecurityGroups``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-securitygroups
    - ``p_Subnets``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-elasticloadbalancing-loadbalancer-subnets
    - ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-elasticloadbalancing-loadbalancer-tags

    Conventions: ``rp_`` attributes are required (non-optional validator
    rejects None); ``p_`` attributes are optional. Fields of a nested
    Property type use a ``converter`` (``from_dict`` / ``from_list``) so
    callers may pass plain dicts, which are coerced before validation.
    ``rv_*`` properties build ``GetAtt`` references to the resource's
    runtime return values.
    """
    AWS_OBJECT_TYPE = "AWS::ElasticLoadBalancing::LoadBalancer"
    # Required "Listeners": list of PropLoadBalancerListeners; plain dicts
    # are converted by from_list before the deep_iterable validator runs.
    rp_Listeners: typing.List[typing.Union['PropLoadBalancerListeners', dict]] = attr.ib(
        default=None,
        converter=PropLoadBalancerListeners.from_list,
        validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropLoadBalancerListeners), iterable_validator=attr.validators.instance_of(list)),
        metadata={AttrMeta.PROPERTY_NAME: "Listeners"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-listeners"""
    # Optional "AccessLoggingPolicy": single nested property, dict accepted.
    p_AccessLoggingPolicy: typing.Union['PropLoadBalancerAccessLoggingPolicy', dict] = attr.ib(
        default=None,
        converter=PropLoadBalancerAccessLoggingPolicy.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropLoadBalancerAccessLoggingPolicy)),
        metadata={AttrMeta.PROPERTY_NAME: "AccessLoggingPolicy"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-accessloggingpolicy"""
    # Optional "AppCookieStickinessPolicy": list of nested properties.
    p_AppCookieStickinessPolicy: typing.List[typing.Union['PropLoadBalancerAppCookieStickinessPolicy', dict]] = attr.ib(
        default=None,
        converter=PropLoadBalancerAppCookieStickinessPolicy.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropLoadBalancerAppCookieStickinessPolicy), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "AppCookieStickinessPolicy"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-appcookiestickinesspolicy"""
    # Optional "AvailabilityZones": list of string-likes.
    p_AvailabilityZones: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "AvailabilityZones"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-availabilityzones"""
    # Optional "ConnectionDrainingPolicy": single nested property.
    p_ConnectionDrainingPolicy: typing.Union['PropLoadBalancerConnectionDrainingPolicy', dict] = attr.ib(
        default=None,
        converter=PropLoadBalancerConnectionDrainingPolicy.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropLoadBalancerConnectionDrainingPolicy)),
        metadata={AttrMeta.PROPERTY_NAME: "ConnectionDrainingPolicy"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-connectiondrainingpolicy"""
    # Optional "ConnectionSettings": single nested property.
    p_ConnectionSettings: typing.Union['PropLoadBalancerConnectionSettings', dict] = attr.ib(
        default=None,
        converter=PropLoadBalancerConnectionSettings.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropLoadBalancerConnectionSettings)),
        metadata={AttrMeta.PROPERTY_NAME: "ConnectionSettings"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-connectionsettings"""
    # Optional "CrossZone": plain bool (no intrinsic-function wrapper here).
    p_CrossZone: bool = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(bool)),
        metadata={AttrMeta.PROPERTY_NAME: "CrossZone"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-crosszone"""
    # Optional "HealthCheck": single nested property.
    p_HealthCheck: typing.Union['PropLoadBalancerHealthCheck', dict] = attr.ib(
        default=None,
        converter=PropLoadBalancerHealthCheck.from_dict,
        validator=attr.validators.optional(attr.validators.instance_of(PropLoadBalancerHealthCheck)),
        metadata={AttrMeta.PROPERTY_NAME: "HealthCheck"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-healthcheck"""
    # Optional "Instances": list of string-likes (instance identifiers per
    # the linked CloudFormation doc).
    p_Instances: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "Instances"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-instances"""
    # Optional "LBCookieStickinessPolicy": list of nested properties.
    p_LBCookieStickinessPolicy: typing.List[typing.Union['PropLoadBalancerLBCookieStickinessPolicy', dict]] = attr.ib(
        default=None,
        converter=PropLoadBalancerLBCookieStickinessPolicy.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropLoadBalancerLBCookieStickinessPolicy), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "LBCookieStickinessPolicy"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-lbcookiestickinesspolicy"""
    # Optional "LoadBalancerName".
    p_LoadBalancerName: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "LoadBalancerName"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-elbname"""
    # Optional "Policies": list of nested properties.
    p_Policies: typing.List[typing.Union['PropLoadBalancerPolicies', dict]] = attr.ib(
        default=None,
        converter=PropLoadBalancerPolicies.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(PropLoadBalancerPolicies), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "Policies"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-policies"""
    # Optional "Scheme".
    p_Scheme: TypeHint.intrinsic_str = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
        metadata={AttrMeta.PROPERTY_NAME: "Scheme"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-scheme"""
    # Optional "SecurityGroups": list of string-likes.
    p_SecurityGroups: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "SecurityGroups"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-securitygroups"""
    # Optional "Subnets": list of string-likes.
    p_Subnets: typing.List[TypeHint.intrinsic_str] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "Subnets"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-subnets"""
    # Optional "Tags": list of Tag objects; plain dicts converted via
    # Tag.from_list.
    p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib(
        default=None,
        converter=Tag.from_list,
        validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))),
        metadata={AttrMeta.PROPERTY_NAME: "Tags"},
    )
    """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-elasticloadbalancing-loadbalancer-tags"""

    # --- Runtime return-value accessors (each builds a GetAtt reference) ---
    @property
    def rv_CanonicalHostedZoneName(self) -> GetAtt:
        """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#aws-properties-ec2-elb-return-values"""
        return GetAtt(resource=self, attr_name="CanonicalHostedZoneName")
    @property
    def rv_CanonicalHostedZoneNameID(self) -> GetAtt:
        """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#aws-properties-ec2-elb-return-values"""
        return GetAtt(resource=self, attr_name="CanonicalHostedZoneNameID")
    @property
    def rv_DNSName(self) -> GetAtt:
        """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#aws-properties-ec2-elb-return-values"""
        return GetAtt(resource=self, attr_name="DNSName")
    @property
    def rv_SourceSecurityGroupGroupName(self) -> GetAtt:
        """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#aws-properties-ec2-elb-return-values"""
        # Dotted attr_name: CloudFormation exposes this as a nested attribute.
        return GetAtt(resource=self, attr_name="SourceSecurityGroup.GroupName")
    @property
    def rv_SourceSecurityGroupOwnerAlias(self) -> GetAtt:
        """Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#aws-properties-ec2-elb-return-values"""
        return GetAtt(resource=self, attr_name="SourceSecurityGroup.OwnerAlias")
|
en
| 0.611198
|
# -*- coding: utf-8 -*- This module #--- Property declaration --- AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer.AccessLoggingPolicy" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html Property Document: - ``rp_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html#cfn-elb-accessloggingpolicy-enabled - ``rp_S3BucketName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html#cfn-elb-accessloggingpolicy-s3bucketname - ``p_EmitInterval``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html#cfn-elb-accessloggingpolicy-emitinterval - ``p_S3BucketPrefix``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html#cfn-elb-accessloggingpolicy-s3bucketprefix Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html#cfn-elb-accessloggingpolicy-enabled Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html#cfn-elb-accessloggingpolicy-s3bucketname Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html#cfn-elb-accessloggingpolicy-emitinterval Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-accessloggingpolicy.html#cfn-elb-accessloggingpolicy-s3bucketprefix AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer.HealthCheck" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html Property Document: - ``rp_HealthyThreshold``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-healthythreshold - 
``rp_Interval``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-interval - ``rp_Target``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-target - ``rp_Timeout``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-timeout - ``rp_UnhealthyThreshold``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-unhealthythreshold Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-healthythreshold Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-interval Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-target Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-timeout Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-health-check.html#cfn-elb-healthcheck-unhealthythreshold AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer.ConnectionSettings" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-connectionsettings.html Property Document: - ``rp_IdleTimeout``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-connectionsettings.html#cfn-elb-connectionsettings-idletimeout Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-connectionsettings.html#cfn-elb-connectionsettings-idletimeout AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer.ConnectionDrainingPolicy" Resource Document: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-connectiondrainingpolicy.html Property Document: - ``rp_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-connectiondrainingpolicy.html#cfn-elb-connectiondrainingpolicy-enabled - ``p_Timeout``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-connectiondrainingpolicy.html#cfn-elb-connectiondrainingpolicy-timeout Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-connectiondrainingpolicy.html#cfn-elb-connectiondrainingpolicy-enabled Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-connectiondrainingpolicy.html#cfn-elb-connectiondrainingpolicy-timeout AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer.AppCookieStickinessPolicy" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-AppCookieStickinessPolicy.html Property Document: - ``rp_CookieName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-AppCookieStickinessPolicy.html#cfn-elb-appcookiestickinesspolicy-cookiename - ``rp_PolicyName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-AppCookieStickinessPolicy.html#cfn-elb-appcookiestickinesspolicy-policyname Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-AppCookieStickinessPolicy.html#cfn-elb-appcookiestickinesspolicy-cookiename Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-AppCookieStickinessPolicy.html#cfn-elb-appcookiestickinesspolicy-policyname AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer.LBCookieStickinessPolicy" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-LBCookieStickinessPolicy.html Property Document: - 
``p_CookieExpirationPeriod``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-LBCookieStickinessPolicy.html#cfn-elb-lbcookiestickinesspolicy-cookieexpirationperiod - ``p_PolicyName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-LBCookieStickinessPolicy.html#cfn-elb-lbcookiestickinesspolicy-policyname Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-LBCookieStickinessPolicy.html#cfn-elb-lbcookiestickinesspolicy-cookieexpirationperiod Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-LBCookieStickinessPolicy.html#cfn-elb-lbcookiestickinesspolicy-policyname AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer.Listeners" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html Property Document: - ``rp_InstancePort``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-instanceport - ``rp_LoadBalancerPort``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-loadbalancerport - ``rp_Protocol``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-protocol - ``p_InstanceProtocol``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-instanceprotocol - ``p_PolicyNames``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-policynames - ``p_SSLCertificateId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-sslcertificateid Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-instanceport Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-loadbalancerport Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-protocol Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-instanceprotocol Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-policynames Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-listener.html#cfn-ec2-elb-listener-sslcertificateid AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer.Policies" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html Property Document: - ``rp_Attributes``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-attributes - ``rp_PolicyName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-policyname - ``rp_PolicyType``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-policytype - ``p_InstancePorts``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-instanceports - ``p_LoadBalancerPorts``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-loadbalancerports Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-attributes Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-policyname Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-policytype Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-instanceports Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb-policy.html#cfn-ec2-elb-policy-loadbalancerports #--- Resource declaration --- AWS Object Type = "AWS::ElasticLoadBalancing::LoadBalancer" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html Property Document: - ``rp_Listeners``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-listeners - ``p_AccessLoggingPolicy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-accessloggingpolicy - ``p_AppCookieStickinessPolicy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-appcookiestickinesspolicy - ``p_AvailabilityZones``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-availabilityzones - ``p_ConnectionDrainingPolicy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-connectiondrainingpolicy - ``p_ConnectionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-connectionsettings - ``p_CrossZone``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-crosszone - ``p_HealthCheck``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-healthcheck - ``p_Instances``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-instances - ``p_LBCookieStickinessPolicy``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-lbcookiestickinesspolicy - ``p_LoadBalancerName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-elbname - ``p_Policies``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-policies - ``p_Scheme``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-scheme - ``p_SecurityGroups``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-securitygroups - ``p_Subnets``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-subnets - ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-elasticloadbalancing-loadbalancer-tags Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-listeners Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-accessloggingpolicy Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-appcookiestickinesspolicy Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-availabilityzones Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-connectiondrainingpolicy Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-connectionsettings Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-crosszone Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-healthcheck Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-instances Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-lbcookiestickinesspolicy Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-elbname Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-policies Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-scheme Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-securitygroups Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-ec2-elb-subnets Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#cfn-elasticloadbalancing-loadbalancer-tags Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#aws-properties-ec2-elb-return-values Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#aws-properties-ec2-elb-return-values Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#aws-properties-ec2-elb-return-values Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#aws-properties-ec2-elb-return-values Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html#aws-properties-ec2-elb-return-values
| 1.934069
| 2
|
agagd/agagd_core/models.py
|
leeschumacher/agagd
| 0
|
6627692
|
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# Feel free to rename the models, but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'
# into your database.
from django.db import models
class Member(models.Model):
    """One member row (legacy ``members`` table).

    Mostly free-form text columns carried over from the old schema; the
    "field type is a guess" comments are from the original inspectdb run.
    """
    class Meta:
        db_table = u'members'
        verbose_name = u'member'
        verbose_name_plural = u'members'
    member_id = models.AutoField(primary_key=True)
    legacy_id = models.TextField(blank=True) # This field type is a guess.
    full_name = models.CharField(max_length=255, blank=True)
    given_names = models.CharField(max_length=255, blank=True)
    family_name = models.CharField(max_length=255, blank=True)
    join_date = models.DateField(null=True, blank=True)
    city = models.CharField(max_length=255, blank=True)
    state = models.CharField(max_length=255, blank=True)
    region = models.CharField(max_length=255, blank=True)
    country = models.CharField(max_length=255)
    chapter = models.CharField(max_length=100, blank=True)
    chapter_id = models.TextField(blank=True) # This field type is a guess.
    occupation = models.CharField(max_length=100, blank=True)
    citizen = models.TextField() # This field type is a guess.
    password = models.CharField(max_length=255, blank=True)
    last_changed = models.DateTimeField(null=True, blank=True)
    def __unicode__(self):
        """Display as " full_name (member_id) " (note the surrounding spaces)."""
        return u" %s (%s) " % (self.full_name, self.member_id, )
class Chapter(models.Model):
    """Lookup table: 4-character chapter code -> chapter description."""
    chapter_code = models.CharField(max_length=4, primary_key=True, db_column=u'Chapter_Code') # x.
    chapter_descr = models.CharField(max_length=50, db_column=u'Chapter_Descr') # x.
    class Meta:
        db_table = u'chapter'
class Chapters(models.Model):
    """Chapter detail/contact record (legacy ``chapters`` table).

    Distinct from :class:`Chapter`; this table holds the descriptive and
    contact information. The primary-key column name is misleading (see TODO).
    """
    # TODO this is not member_id? seems more like a normal pk for ChapterInfo
    member_id = models.CharField(max_length=255, primary_key=True) # This field type is a guess.
    name = models.CharField(max_length=255, blank=True)
    legacy_status = models.CharField(max_length=1, blank=True)
    code = models.CharField(max_length=4, blank=True)
    contact = models.CharField(max_length=255, blank=True)
    rep_id = models.TextField(blank=True) # This field type is a guess.
    url = models.CharField(max_length=255, blank=True)
    meeting_city = models.CharField(max_length=255, blank=True)
    contact_html = models.TextField(blank=True)
    contact_text = models.TextField(blank=True)
    meeting_text = models.TextField(blank=True)
    size = models.CharField(max_length=255, blank=True)
    events = models.TextField(blank=True)
    comments = models.TextField(blank=True)
    fees = models.CharField(max_length=255, blank=True)
    display = models.TextField() # This field type is a guess.
    class Meta:
        db_table = u'chapters'
class CommentsAuthors(models.Model):
    """Row from the legacy ``comments_authors`` table: a person identified
    by id/PIN with name and country."""
    id = models.CharField(max_length=12, primary_key=True, db_column=u'Id') # x.
    last_name = models.CharField(max_length=50, db_column=u'Last_Name') # x.
    first_name = models.CharField(max_length=50, db_column=u'First_Name') # x.
    country = models.CharField(max_length=3, db_column=u'Country') # x.
    pin = models.TextField(db_column=u'PIN') # x. This field type is a guess.
    class Meta:
        db_table = u'comments_authors'
class Country(models.Model):
    """Lookup table: 2-letter country code -> description and flag code."""
    country_code = models.CharField(max_length=2, primary_key=True, db_column=u'Country_Code') # x.
    country_descr = models.CharField(max_length=50, db_column=u'Country_Descr') # x.
    country_flag = models.CharField(max_length=4, db_column=u'Country_Flag', blank=True) # x.
    class Meta:
        db_table = u'country'
class Tournament(models.Model):
    """A tournament (legacy ``tournaments`` table), keyed by its code string."""
    tournament_code = models.CharField(max_length=20, primary_key=True, db_column=u'Tournament_Code')
    description = models.TextField(db_column='Tournament_Descr')
    tournament_date = models.DateField(db_column=u'Tournament_Date')
    # elab_date: date of the ratings "elaboration" run -- presumably when this
    # tournament's results were processed; confirm against the rating pipeline.
    elab_date = models.DateField(db_column=u'Elab_Date')
    city = models.CharField(max_length=30, db_column=u'City')
    state = models.CharField(max_length=2, db_column=u'State_Code', blank=True)
    rounds = models.IntegerField(db_column='Rounds')
    total_players = models.IntegerField(db_column='Total_Players')
    wall_list = models.TextField(db_column='Wallist')
    def __str__(self):
        """Verbose form: "<code> - on <date> with <N> players"."""
        return "%s - on %s with %d players" % (self.tournament_code, self.tournament_date, self.total_players)
    def __unicode__(self):
        """Short form: the description, truncated to 40 chars (37 + "...");
        falls back to the primary key when the description is empty."""
        if self.description:
            if len(self.description) > 40:
                return u'%s...' % self.description[0:37]
            return u'%s' % self.description
        else:
            return u'%s' % self.pk
    class Meta:
        db_table= u'tournaments'
        verbose_name = u'tournament'
        verbose_name_plural = u'tournaments'
class Game(models.Model):
    """One recorded game between two members in a tournament round.

    ``result`` holds a single-character color code; :meth:`winner` resolves
    it against ``color_1``/``color_2`` to pick the winning player.
    """
    game_id = models.AutoField(primary_key=True, db_column=u'Game_ID') # x. This field type is a guess.
    game_date = models.DateField(db_column=u'Game_Date') # x.
    round = models.TextField(db_column=u'Round') # x. This field type is a guess.
    color_1 = models.CharField(max_length=1, db_column=u'Color_1') # x.
    rank_1 = models.CharField(max_length=3, db_column=u'Rank_1') # x.
    color_2 = models.CharField(max_length=1, db_column=u'Color_2') # x.
    rank_2 = models.CharField(max_length=3, db_column=u'Rank_2') # x.
    handicap = models.IntegerField(db_column=u'Handicap') # x. This field type is a guess.
    komi = models.FloatField(db_column=u'Komi') # x. This field type is a guess.
    result = models.CharField(max_length=1, db_column=u'Result') # x. color code of the winner (matched in winner())
    sgf_code = models.CharField(max_length=26, db_column=u'Sgf_Code', blank=True) # x.
    online = models.TextField(db_column=u'Online', blank=True) # x. This field type is a guess.
    exclude = models.TextField(db_column=u'Exclude', blank=True) # x. This field type is a guess.
    rated = models.TextField(db_column=u'Rated', blank=True) # x. This field type is a guess.
    elab_date = models.DateField(db_column=u'Elab_Date') # x.
    tournament_code = models.ForeignKey(Tournament, related_name='games_in_tourney', db_column=u'Tournament_Code') # .
    pin_player_1 = models.ForeignKey(Member, db_column=u'Pin_Player_1', related_name='games_as_p1')
    pin_player_2 = models.ForeignKey(Member, db_column=u'Pin_Player_2', related_name='games_as_p2')
    class Meta:
        db_table = u'games'
        verbose_name = u'game'
        verbose_name_plural = u'games'
    def __unicode__(self):
        return u"Tournament %s Round %s, %s vs %s" % (self.tournament_code,
            self.round, self.pin_player_1, self.pin_player_2)
    def __str__(self):
        # Python 2 style: delegate to __unicode__.
        return str(self.__unicode__())
    def player_other_than(self, one_player):
        """ returns the member of the other player. """
        return self.pin_player_2 if (one_player == self.pin_player_1) else self.pin_player_1
    def winner(self):
        """Return the Member whose color code equals ``result``.

        Raises ValueError when ``result`` matches neither player's color
        (i.e. no winner can be determined from this row).
        """
        if self.result == self.color_1:
            return self.pin_player_1
        if self.result == self.color_2:
            return self.pin_player_2
        raise ValueError
    def won_by(self, p1):
        """True when ``p1`` is the winner per :meth:`winner` (may raise ValueError)."""
        return self.winner() == p1
class Rating(models.Model):
    """A player's rating as of a tournament elaboration (legacy ``ratings``).

    NOTE(review): ``pin_player`` is declared as the primary key, but the
    table also keys on tournament -- looks like a composite (player,
    tournament) key that Django cannot express; confirm before relying on
    uniqueness of ``pin_player``.
    """
    pin_player = models.ForeignKey(Member, db_column=u'Pin_Player', related_name='ratings_set', primary_key=True)
    tournament = models.ForeignKey(Tournament, db_column=u'Tournament_Code', related_name='ratings_set')
    rating = models.FloatField(db_column=u'Rating') # x. This field type is a guess.
    sigma = models.FloatField(db_column=u'Sigma') # x. This field type is a guess. presumably rating uncertainty -- confirm
    elab_date = models.DateField(db_column=u'Elab_Date')
    class Meta:
        db_table = u'ratings'
class MembersRegions(models.Model):
    """Lookup table: region id -> region name and its states string."""
    region_id = models.CharField(max_length=255, primary_key=True) # This field type is a guess.
    region = models.CharField(max_length=255, blank=True)
    states = models.CharField(max_length=255, blank=True)
    class Meta:
        db_table = u'members_regions'
class Membership(models.Model):
    """Lookup table: membership type code -> human-readable type name."""
    mtype = models.CharField(max_length=8, primary_key=True, db_column=u'MType') # x.
    membership_type = models.CharField(max_length=35, db_column=u'Membership_Type') # x.
    class Meta:
        db_table = u'membership'
|
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# Feel free to rename the models, but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'
# into your database.
from django.db import models
class Member(models.Model):
    """One member row (legacy ``members`` table).

    Mostly free-form text columns carried over from the old schema; the
    "field type is a guess" comments are from the original inspectdb run.
    """
    class Meta:
        db_table = u'members'
        verbose_name = u'member'
        verbose_name_plural = u'members'
    member_id = models.AutoField(primary_key=True)
    legacy_id = models.TextField(blank=True) # This field type is a guess.
    full_name = models.CharField(max_length=255, blank=True)
    given_names = models.CharField(max_length=255, blank=True)
    family_name = models.CharField(max_length=255, blank=True)
    join_date = models.DateField(null=True, blank=True)
    city = models.CharField(max_length=255, blank=True)
    state = models.CharField(max_length=255, blank=True)
    region = models.CharField(max_length=255, blank=True)
    country = models.CharField(max_length=255)
    chapter = models.CharField(max_length=100, blank=True)
    chapter_id = models.TextField(blank=True) # This field type is a guess.
    occupation = models.CharField(max_length=100, blank=True)
    citizen = models.TextField() # This field type is a guess.
    password = models.CharField(max_length=255, blank=True)
    last_changed = models.DateTimeField(null=True, blank=True)
    def __unicode__(self):
        """Display as " full_name (member_id) " (note the surrounding spaces)."""
        return u" %s (%s) " % (self.full_name, self.member_id, )
class Chapter(models.Model):
    """Lookup table: 4-character chapter code -> chapter description."""
    chapter_code = models.CharField(max_length=4, primary_key=True, db_column=u'Chapter_Code') # x.
    chapter_descr = models.CharField(max_length=50, db_column=u'Chapter_Descr') # x.
    class Meta:
        db_table = u'chapter'
class Chapters(models.Model):
    """Chapter detail/contact record (legacy ``chapters`` table).

    Distinct from :class:`Chapter`; this table holds the descriptive and
    contact information. The primary-key column name is misleading (see TODO).
    """
    # TODO this is not member_id? seems more like a normal pk for ChapterInfo
    member_id = models.CharField(max_length=255, primary_key=True) # This field type is a guess.
    name = models.CharField(max_length=255, blank=True)
    legacy_status = models.CharField(max_length=1, blank=True)
    code = models.CharField(max_length=4, blank=True)
    contact = models.CharField(max_length=255, blank=True)
    rep_id = models.TextField(blank=True) # This field type is a guess.
    url = models.CharField(max_length=255, blank=True)
    meeting_city = models.CharField(max_length=255, blank=True)
    contact_html = models.TextField(blank=True)
    contact_text = models.TextField(blank=True)
    meeting_text = models.TextField(blank=True)
    size = models.CharField(max_length=255, blank=True)
    events = models.TextField(blank=True)
    comments = models.TextField(blank=True)
    fees = models.CharField(max_length=255, blank=True)
    display = models.TextField() # This field type is a guess.
    class Meta:
        db_table = u'chapters'
class CommentsAuthors(models.Model):
    """Row from the legacy ``comments_authors`` table: a person identified
    by id/PIN with name and country."""
    id = models.CharField(max_length=12, primary_key=True, db_column=u'Id') # x.
    last_name = models.CharField(max_length=50, db_column=u'Last_Name') # x.
    first_name = models.CharField(max_length=50, db_column=u'First_Name') # x.
    country = models.CharField(max_length=3, db_column=u'Country') # x.
    pin = models.TextField(db_column=u'PIN') # x. This field type is a guess.
    class Meta:
        db_table = u'comments_authors'
class Country(models.Model):
    """Lookup table: 2-letter country code -> description and flag code."""
    country_code = models.CharField(max_length=2, primary_key=True, db_column=u'Country_Code') # x.
    country_descr = models.CharField(max_length=50, db_column=u'Country_Descr') # x.
    country_flag = models.CharField(max_length=4, db_column=u'Country_Flag', blank=True) # x.
    class Meta:
        db_table = u'country'
class Tournament(models.Model):
    """A tournament (legacy ``tournaments`` table), keyed by its code string."""
    tournament_code = models.CharField(max_length=20, primary_key=True, db_column=u'Tournament_Code')
    description = models.TextField(db_column='Tournament_Descr')
    tournament_date = models.DateField(db_column=u'Tournament_Date')
    # elab_date: date of the ratings "elaboration" run -- presumably when this
    # tournament's results were processed; confirm against the rating pipeline.
    elab_date = models.DateField(db_column=u'Elab_Date')
    city = models.CharField(max_length=30, db_column=u'City')
    state = models.CharField(max_length=2, db_column=u'State_Code', blank=True)
    rounds = models.IntegerField(db_column='Rounds')
    total_players = models.IntegerField(db_column='Total_Players')
    wall_list = models.TextField(db_column='Wallist')
    def __str__(self):
        """Verbose form: "<code> - on <date> with <N> players"."""
        return "%s - on %s with %d players" % (self.tournament_code, self.tournament_date, self.total_players)
    def __unicode__(self):
        """Short form: the description, truncated to 40 chars (37 + "...");
        falls back to the primary key when the description is empty."""
        if self.description:
            if len(self.description) > 40:
                return u'%s...' % self.description[0:37]
            return u'%s' % self.description
        else:
            return u'%s' % self.pk
    class Meta:
        db_table= u'tournaments'
        verbose_name = u'tournament'
        verbose_name_plural = u'tournaments'
class Game(models.Model):
    """One recorded game between two members in a tournament round.

    ``result`` holds a single-character color code; :meth:`winner` resolves
    it against ``color_1``/``color_2`` to pick the winning player.
    """
    game_id = models.AutoField(primary_key=True, db_column=u'Game_ID') # x. This field type is a guess.
    game_date = models.DateField(db_column=u'Game_Date') # x.
    round = models.TextField(db_column=u'Round') # x. This field type is a guess.
    color_1 = models.CharField(max_length=1, db_column=u'Color_1') # x.
    rank_1 = models.CharField(max_length=3, db_column=u'Rank_1') # x.
    color_2 = models.CharField(max_length=1, db_column=u'Color_2') # x.
    rank_2 = models.CharField(max_length=3, db_column=u'Rank_2') # x.
    handicap = models.IntegerField(db_column=u'Handicap') # x. This field type is a guess.
    komi = models.FloatField(db_column=u'Komi') # x. This field type is a guess.
    result = models.CharField(max_length=1, db_column=u'Result') # x. color code of the winner (matched in winner())
    sgf_code = models.CharField(max_length=26, db_column=u'Sgf_Code', blank=True) # x.
    online = models.TextField(db_column=u'Online', blank=True) # x. This field type is a guess.
    exclude = models.TextField(db_column=u'Exclude', blank=True) # x. This field type is a guess.
    rated = models.TextField(db_column=u'Rated', blank=True) # x. This field type is a guess.
    elab_date = models.DateField(db_column=u'Elab_Date') # x.
    tournament_code = models.ForeignKey(Tournament, related_name='games_in_tourney', db_column=u'Tournament_Code') # .
    pin_player_1 = models.ForeignKey(Member, db_column=u'Pin_Player_1', related_name='games_as_p1')
    pin_player_2 = models.ForeignKey(Member, db_column=u'Pin_Player_2', related_name='games_as_p2')
    class Meta:
        db_table = u'games'
        verbose_name = u'game'
        verbose_name_plural = u'games'
    def __unicode__(self):
        return u"Tournament %s Round %s, %s vs %s" % (self.tournament_code,
            self.round, self.pin_player_1, self.pin_player_2)
    def __str__(self):
        # Python 2 style: delegate to __unicode__.
        return str(self.__unicode__())
    def player_other_than(self, one_player):
        """ returns the member of the other player. """
        return self.pin_player_2 if (one_player == self.pin_player_1) else self.pin_player_1
    def winner(self):
        """Return the Member whose color code equals ``result``.

        Raises ValueError when ``result`` matches neither player's color
        (i.e. no winner can be determined from this row).
        """
        if self.result == self.color_1:
            return self.pin_player_1
        if self.result == self.color_2:
            return self.pin_player_2
        raise ValueError
    def won_by(self, p1):
        """True when ``p1`` is the winner per :meth:`winner` (may raise ValueError)."""
        return self.winner() == p1
class Rating(models.Model):
    """A player's rating as of a tournament elaboration (legacy ``ratings``).

    NOTE(review): ``pin_player`` is declared as the primary key, but the
    table also keys on tournament -- looks like a composite (player,
    tournament) key that Django cannot express; confirm before relying on
    uniqueness of ``pin_player``.
    """
    pin_player = models.ForeignKey(Member, db_column=u'Pin_Player', related_name='ratings_set', primary_key=True)
    tournament = models.ForeignKey(Tournament, db_column=u'Tournament_Code', related_name='ratings_set')
    rating = models.FloatField(db_column=u'Rating') # x. This field type is a guess.
    sigma = models.FloatField(db_column=u'Sigma') # x. This field type is a guess. presumably rating uncertainty -- confirm
    elab_date = models.DateField(db_column=u'Elab_Date')
    class Meta:
        db_table = u'ratings'
class MembersRegions(models.Model):
    """Lookup table: region id -> region name and its states string."""
    region_id = models.CharField(max_length=255, primary_key=True) # This field type is a guess.
    region = models.CharField(max_length=255, blank=True)
    states = models.CharField(max_length=255, blank=True)
    class Meta:
        db_table = u'members_regions'
class Membership(models.Model):
    """Lookup table: membership type code -> human-readable type name."""
    mtype = models.CharField(max_length=8, primary_key=True, db_column=u'MType') # x.
    membership_type = models.CharField(max_length=35, db_column=u'Membership_Type') # x.
    class Meta:
        db_table = u'membership'
|
en
| 0.942578
|
# This is an auto-generated Django model module. # You'll have to do the following manually to clean this up: # * Rearrange models' order # * Make sure each model has one field with primary_key=True # Feel free to rename the models, but don't rename db_table values or field names. # # Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]' # into your database. # This field type is a guess. # This field type is a guess. # This field type is a guess. # x. # x. # TODO this is not member_id? seems more like a normal pk for ChapterInfo # This field type is a guess. # This field type is a guess. # This field type is a guess. # x. # x. # x. # x. # x. This field type is a guess. # x. # x. # x. # x. This field type is a guess. # x. # x. This field type is a guess. # x. # x. # x. # x. # x. This field type is a guess. # x. This field type is a guess. # x. # x. # x. This field type is a guess. # x. This field type is a guess. # x. This field type is a guess. # x. # . returns the member of the other player. # x. This field type is a guess. # x. This field type is a guess. # This field type is a guess. # x. # x.
| 2.279807
| 2
|
knack/output.py
|
srinivas32/knack
| 0
|
6627693
|
<gh_stars>0
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import errno
import json
import traceback
import sys
from collections import OrderedDict
from six import StringIO, text_type, u, string_types
from .util import CLIError, CommandResultItem, CtxTypeError
from .events import EVENT_INVOKER_POST_PARSE_ARGS, EVENT_PARSER_GLOBAL_CREATE
from .log import get_logger
logger = get_logger(__name__)
def _decode_str(output):
if not isinstance(output, text_type):
output = u(str(output))
return output
class _ComplexEncoder(json.JSONEncoder):
def default(self, o): # pylint: disable=method-hidden
if isinstance(o, bytes) and not isinstance(o, str):
return o.decode()
return json.JSONEncoder.default(self, o)
def format_json(obj):
result = obj.result
# OrderedDict.__dict__ is always '{}', to persist the data, convert to dict first.
input_dict = dict(result) if hasattr(result, '__dict__') else result
return json.dumps(input_dict, ensure_ascii=False, indent=2, sort_keys=True, cls=_ComplexEncoder,
separators=(',', ': ')) + '\n'
def format_json_color(obj):
from pygments import highlight, lexers, formatters
return highlight(format_json(obj), lexers.JsonLexer(), formatters.TerminalFormatter()) # pylint: disable=no-member
def format_yaml(obj):
import yaml
try:
return yaml.safe_dump(obj.result, default_flow_style=False, allow_unicode=True)
except yaml.representer.RepresenterError:
# yaml.safe_dump fails when obj.result is an OrderedDict. knack's --query implementation converts the result to an OrderedDict. https://github.com/microsoft/knack/blob/af674bfea793ff42ae31a381a21478bae4b71d7f/knack/query.py#L46. # pylint: disable=line-too-long
return yaml.safe_dump(json.loads(json.dumps(obj.result)), default_flow_style=False, allow_unicode=True)
def format_yaml_color(obj):
from pygments import highlight, lexers, formatters
return highlight(format_yaml(obj), lexers.YamlLexer(), formatters.TerminalFormatter()) # pylint: disable=no-member
def format_none(_):
return ""
def format_table(obj):
result = obj.result
try:
if obj.table_transformer and not obj.is_query_active:
if isinstance(obj.table_transformer, str):
from jmespath import compile as compile_jmes, Options
result = compile_jmes(obj.table_transformer).search(result, Options(OrderedDict))
else:
result = obj.table_transformer(result)
result_list = result if isinstance(result, list) else [result]
should_sort_keys = not obj.is_query_active and not obj.table_transformer
to = _TableOutput(should_sort_keys)
return to.dump(result_list)
except:
logger.debug(traceback.format_exc())
raise CLIError("Table output unavailable. "
"Use the --query option to specify an appropriate query. "
"Use --debug for more info.")
def format_tsv(obj):
result = obj.result
result_list = result if isinstance(result, list) else [result]
return _TsvOutput.dump(result_list)
class OutputProducer(object):
ARG_DEST = '_output_format'
_FORMAT_DICT = {
'json': format_json,
'jsonc': format_json_color,
'yaml': format_yaml,
'yamlc': format_yaml_color,
'table': format_table,
'tsv': format_tsv,
'none': format_none,
}
@staticmethod
def on_global_arguments(cli_ctx, **kwargs):
arg_group = kwargs.get('arg_group')
arg_group.add_argument('--output', '-o', dest=OutputProducer.ARG_DEST,
choices=list(OutputProducer._FORMAT_DICT),
default=cli_ctx.config.get('core', 'output', fallback='json'),
help='Output format',
type=str.lower)
@staticmethod
def handle_output_argument(cli_ctx, **kwargs):
args = kwargs.get('args')
# Set the output type for this invocation
cli_ctx.invocation.data['output'] = getattr(args, OutputProducer.ARG_DEST)
def __init__(self, cli_ctx=None):
""" Manages the production of output from the result of a command invocation
:param cli_ctx: CLI Context
:type cli_ctx: knack.cli.CLI
"""
from .cli import CLI
if cli_ctx is not None and not isinstance(cli_ctx, CLI):
raise CtxTypeError(cli_ctx)
self.cli_ctx = cli_ctx
self.cli_ctx.register_event(EVENT_PARSER_GLOBAL_CREATE, OutputProducer.on_global_arguments)
self.cli_ctx.register_event(EVENT_INVOKER_POST_PARSE_ARGS, OutputProducer.handle_output_argument)
def out(self, obj, formatter=None, out_file=None): # pylint: disable=no-self-use
""" Produces the output using the command result.
The method does not return a result as the output is written straight to the output file.
:param obj: The command result
:type obj: knack.util.CommandResultItem
:param formatter: The formatter we should use for the command result
:type formatter: function
:param out_file: The file to write output to
:type out_file: file-like object
"""
if not isinstance(obj, CommandResultItem):
raise TypeError('Expected {} got {}'.format(CommandResultItem.__name__, type(obj)))
output = formatter(obj)
try:
print(output, file=out_file, end='')
except IOError as ex:
if ex.errno == errno.EPIPE:
pass
else:
raise
except UnicodeEncodeError:
logger.warning("Unable to encode the output with %s encoding. Unsupported characters are discarded.",
out_file.encoding)
print(output.encode('ascii', 'ignore').decode('utf-8', 'ignore'),
file=out_file, end='')
def get_formatter(self, format_type): # pylint: disable=no-self-use
# remove color if stdout is not a tty
if not sys.stdout.isatty() and format_type == 'jsonc':
return OutputProducer._FORMAT_DICT['json']
if not sys.stdout.isatty() and format_type == 'yamlc':
return OutputProducer._FORMAT_DICT['yaml']
return OutputProducer._FORMAT_DICT[format_type]
class _TableOutput(object): # pylint: disable=too-few-public-methods
SKIP_KEYS = ['id', 'type', 'etag']
def __init__(self, should_sort_keys=False):
self.should_sort_keys = should_sort_keys
@staticmethod
def _capitalize_first_char(x):
return x[0].upper() + x[1:] if x else x
def _auto_table_item(self, item):
new_entry = OrderedDict()
try:
keys = sorted(item) if self.should_sort_keys and isinstance(item, dict) else item.keys()
for k in keys:
if k in _TableOutput.SKIP_KEYS:
continue
if item[k] is not None and not isinstance(item[k], (list, dict, set)):
new_entry[_TableOutput._capitalize_first_char(k)] = item[k]
except AttributeError:
# handles odd cases where a string/bool/etc. is returned
if isinstance(item, list):
for col, val in enumerate(item):
new_entry['Column{}'.format(col + 1)] = val
else:
new_entry['Result'] = item
return new_entry
def _auto_table(self, result):
if isinstance(result, list):
new_result = []
for item in result:
new_result.append(self._auto_table_item(item))
return new_result
return self._auto_table_item(result)
def dump(self, data):
from tabulate import tabulate
table_data = self._auto_table(data)
table_str = tabulate(table_data, headers="keys", tablefmt="simple",
disable_numparse=True) if table_data else ''
if table_str == '\n':
raise ValueError('Unable to extract fields for table.')
return table_str + '\n'
class _TsvOutput(object): # pylint: disable=too-few-public-methods
@staticmethod
def _dump_obj(data, stream):
if isinstance(data, list):
stream.write(str(len(data)))
elif isinstance(data, dict):
# We need to print something to avoid mismatching
# number of columns if the value is None for some instances
# and a dictionary value in other...
stream.write('')
else:
to_write = data if isinstance(data, string_types) else str(data)
stream.write(to_write)
@staticmethod
def _dump_row(data, stream):
separator = ''
if isinstance(data, (dict, list)):
if isinstance(data, OrderedDict):
values = data.values()
elif isinstance(data, dict):
values = [value for _, value in sorted(data.items())]
else:
values = data
# Iterate through the items either sorted by key value (if dict) or in the order
# they were added (in the cases of an ordered dict) in order to make the output
# stable
for value in values:
stream.write(separator)
_TsvOutput._dump_obj(value, stream)
separator = '\t'
elif isinstance(data, list):
for value in data:
stream.write(separator)
_TsvOutput._dump_obj(value, stream)
separator = '\t'
elif isinstance(data, bool):
_TsvOutput._dump_obj(str(data).lower(), stream)
else:
_TsvOutput._dump_obj(data, stream)
stream.write('\n')
@staticmethod
def dump(data):
io = StringIO()
for item in data:
_TsvOutput._dump_row(item, io)
result = io.getvalue()
io.close()
return result
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import errno
import json
import traceback
import sys
from collections import OrderedDict
from six import StringIO, text_type, u, string_types
from .util import CLIError, CommandResultItem, CtxTypeError
from .events import EVENT_INVOKER_POST_PARSE_ARGS, EVENT_PARSER_GLOBAL_CREATE
from .log import get_logger
logger = get_logger(__name__)
def _decode_str(output):
if not isinstance(output, text_type):
output = u(str(output))
return output
class _ComplexEncoder(json.JSONEncoder):
def default(self, o): # pylint: disable=method-hidden
if isinstance(o, bytes) and not isinstance(o, str):
return o.decode()
return json.JSONEncoder.default(self, o)
def format_json(obj):
result = obj.result
# OrderedDict.__dict__ is always '{}', to persist the data, convert to dict first.
input_dict = dict(result) if hasattr(result, '__dict__') else result
return json.dumps(input_dict, ensure_ascii=False, indent=2, sort_keys=True, cls=_ComplexEncoder,
separators=(',', ': ')) + '\n'
def format_json_color(obj):
from pygments import highlight, lexers, formatters
return highlight(format_json(obj), lexers.JsonLexer(), formatters.TerminalFormatter()) # pylint: disable=no-member
def format_yaml(obj):
import yaml
try:
return yaml.safe_dump(obj.result, default_flow_style=False, allow_unicode=True)
except yaml.representer.RepresenterError:
# yaml.safe_dump fails when obj.result is an OrderedDict. knack's --query implementation converts the result to an OrderedDict. https://github.com/microsoft/knack/blob/af674bfea793ff42ae31a381a21478bae4b71d7f/knack/query.py#L46. # pylint: disable=line-too-long
return yaml.safe_dump(json.loads(json.dumps(obj.result)), default_flow_style=False, allow_unicode=True)
def format_yaml_color(obj):
from pygments import highlight, lexers, formatters
return highlight(format_yaml(obj), lexers.YamlLexer(), formatters.TerminalFormatter()) # pylint: disable=no-member
def format_none(_):
return ""
def format_table(obj):
result = obj.result
try:
if obj.table_transformer and not obj.is_query_active:
if isinstance(obj.table_transformer, str):
from jmespath import compile as compile_jmes, Options
result = compile_jmes(obj.table_transformer).search(result, Options(OrderedDict))
else:
result = obj.table_transformer(result)
result_list = result if isinstance(result, list) else [result]
should_sort_keys = not obj.is_query_active and not obj.table_transformer
to = _TableOutput(should_sort_keys)
return to.dump(result_list)
except:
logger.debug(traceback.format_exc())
raise CLIError("Table output unavailable. "
"Use the --query option to specify an appropriate query. "
"Use --debug for more info.")
def format_tsv(obj):
result = obj.result
result_list = result if isinstance(result, list) else [result]
return _TsvOutput.dump(result_list)
class OutputProducer(object):
ARG_DEST = '_output_format'
_FORMAT_DICT = {
'json': format_json,
'jsonc': format_json_color,
'yaml': format_yaml,
'yamlc': format_yaml_color,
'table': format_table,
'tsv': format_tsv,
'none': format_none,
}
@staticmethod
def on_global_arguments(cli_ctx, **kwargs):
arg_group = kwargs.get('arg_group')
arg_group.add_argument('--output', '-o', dest=OutputProducer.ARG_DEST,
choices=list(OutputProducer._FORMAT_DICT),
default=cli_ctx.config.get('core', 'output', fallback='json'),
help='Output format',
type=str.lower)
@staticmethod
def handle_output_argument(cli_ctx, **kwargs):
args = kwargs.get('args')
# Set the output type for this invocation
cli_ctx.invocation.data['output'] = getattr(args, OutputProducer.ARG_DEST)
def __init__(self, cli_ctx=None):
""" Manages the production of output from the result of a command invocation
:param cli_ctx: CLI Context
:type cli_ctx: knack.cli.CLI
"""
from .cli import CLI
if cli_ctx is not None and not isinstance(cli_ctx, CLI):
raise CtxTypeError(cli_ctx)
self.cli_ctx = cli_ctx
self.cli_ctx.register_event(EVENT_PARSER_GLOBAL_CREATE, OutputProducer.on_global_arguments)
self.cli_ctx.register_event(EVENT_INVOKER_POST_PARSE_ARGS, OutputProducer.handle_output_argument)
def out(self, obj, formatter=None, out_file=None): # pylint: disable=no-self-use
""" Produces the output using the command result.
The method does not return a result as the output is written straight to the output file.
:param obj: The command result
:type obj: knack.util.CommandResultItem
:param formatter: The formatter we should use for the command result
:type formatter: function
:param out_file: The file to write output to
:type out_file: file-like object
"""
if not isinstance(obj, CommandResultItem):
raise TypeError('Expected {} got {}'.format(CommandResultItem.__name__, type(obj)))
output = formatter(obj)
try:
print(output, file=out_file, end='')
except IOError as ex:
if ex.errno == errno.EPIPE:
pass
else:
raise
except UnicodeEncodeError:
logger.warning("Unable to encode the output with %s encoding. Unsupported characters are discarded.",
out_file.encoding)
print(output.encode('ascii', 'ignore').decode('utf-8', 'ignore'),
file=out_file, end='')
def get_formatter(self, format_type): # pylint: disable=no-self-use
# remove color if stdout is not a tty
if not sys.stdout.isatty() and format_type == 'jsonc':
return OutputProducer._FORMAT_DICT['json']
if not sys.stdout.isatty() and format_type == 'yamlc':
return OutputProducer._FORMAT_DICT['yaml']
return OutputProducer._FORMAT_DICT[format_type]
class _TableOutput(object): # pylint: disable=too-few-public-methods
SKIP_KEYS = ['id', 'type', 'etag']
def __init__(self, should_sort_keys=False):
self.should_sort_keys = should_sort_keys
@staticmethod
def _capitalize_first_char(x):
return x[0].upper() + x[1:] if x else x
def _auto_table_item(self, item):
new_entry = OrderedDict()
try:
keys = sorted(item) if self.should_sort_keys and isinstance(item, dict) else item.keys()
for k in keys:
if k in _TableOutput.SKIP_KEYS:
continue
if item[k] is not None and not isinstance(item[k], (list, dict, set)):
new_entry[_TableOutput._capitalize_first_char(k)] = item[k]
except AttributeError:
# handles odd cases where a string/bool/etc. is returned
if isinstance(item, list):
for col, val in enumerate(item):
new_entry['Column{}'.format(col + 1)] = val
else:
new_entry['Result'] = item
return new_entry
def _auto_table(self, result):
if isinstance(result, list):
new_result = []
for item in result:
new_result.append(self._auto_table_item(item))
return new_result
return self._auto_table_item(result)
def dump(self, data):
from tabulate import tabulate
table_data = self._auto_table(data)
table_str = tabulate(table_data, headers="keys", tablefmt="simple",
disable_numparse=True) if table_data else ''
if table_str == '\n':
raise ValueError('Unable to extract fields for table.')
return table_str + '\n'
class _TsvOutput(object): # pylint: disable=too-few-public-methods
@staticmethod
def _dump_obj(data, stream):
if isinstance(data, list):
stream.write(str(len(data)))
elif isinstance(data, dict):
# We need to print something to avoid mismatching
# number of columns if the value is None for some instances
# and a dictionary value in other...
stream.write('')
else:
to_write = data if isinstance(data, string_types) else str(data)
stream.write(to_write)
@staticmethod
def _dump_row(data, stream):
separator = ''
if isinstance(data, (dict, list)):
if isinstance(data, OrderedDict):
values = data.values()
elif isinstance(data, dict):
values = [value for _, value in sorted(data.items())]
else:
values = data
# Iterate through the items either sorted by key value (if dict) or in the order
# they were added (in the cases of an ordered dict) in order to make the output
# stable
for value in values:
stream.write(separator)
_TsvOutput._dump_obj(value, stream)
separator = '\t'
elif isinstance(data, list):
for value in data:
stream.write(separator)
_TsvOutput._dump_obj(value, stream)
separator = '\t'
elif isinstance(data, bool):
_TsvOutput._dump_obj(str(data).lower(), stream)
else:
_TsvOutput._dump_obj(data, stream)
stream.write('\n')
@staticmethod
def dump(data):
io = StringIO()
for item in data:
_TsvOutput._dump_row(item, io)
result = io.getvalue()
io.close()
return result
|
en
| 0.675009
|
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # pylint: disable=method-hidden # OrderedDict.__dict__ is always '{}', to persist the data, convert to dict first. # pylint: disable=no-member # yaml.safe_dump fails when obj.result is an OrderedDict. knack's --query implementation converts the result to an OrderedDict. https://github.com/microsoft/knack/blob/af674bfea793ff42ae31a381a21478bae4b71d7f/knack/query.py#L46. # pylint: disable=line-too-long # pylint: disable=no-member # Set the output type for this invocation Manages the production of output from the result of a command invocation :param cli_ctx: CLI Context :type cli_ctx: knack.cli.CLI # pylint: disable=no-self-use Produces the output using the command result. The method does not return a result as the output is written straight to the output file. :param obj: The command result :type obj: knack.util.CommandResultItem :param formatter: The formatter we should use for the command result :type formatter: function :param out_file: The file to write output to :type out_file: file-like object # pylint: disable=no-self-use # remove color if stdout is not a tty # pylint: disable=too-few-public-methods # handles odd cases where a string/bool/etc. is returned # pylint: disable=too-few-public-methods # We need to print something to avoid mismatching # number of columns if the value is None for some instances # and a dictionary value in other... # Iterate through the items either sorted by key value (if dict) or in the order # they were added (in the cases of an ordered dict) in order to make the output # stable
| 1.986711
| 2
|
setup.py
|
jeffreylovitz/RLTest
| 0
|
6627694
|
<filename>setup.py
from setuptools import setup, find_packages
setup(
name='RLTest',
version='0.2.1',
description="Redis Labs Test Framework, allow to run tests on redis and modules on a variety of environments.",
packages=find_packages(),
install_requires=[
'redis>=3.0.0',
'redis-py-cluster>=2.1.0',
'psutil',
'distro>=1.4.0'
],
entry_points='''
[console_scripts]
RLTest=RLTest.__main__:main
'''
)
|
<filename>setup.py
from setuptools import setup, find_packages
setup(
name='RLTest',
version='0.2.1',
description="Redis Labs Test Framework, allow to run tests on redis and modules on a variety of environments.",
packages=find_packages(),
install_requires=[
'redis>=3.0.0',
'redis-py-cluster>=2.1.0',
'psutil',
'distro>=1.4.0'
],
entry_points='''
[console_scripts]
RLTest=RLTest.__main__:main
'''
)
|
en
| 0.238164
|
[console_scripts] RLTest=RLTest.__main__:main
| 1.320928
| 1
|
old_ARS_2018/tests/odometry_test.py
|
Brechard/Robot-Simulator
| 0
|
6627695
|
import unittest
import math
from bot import odometry as od
__author__ = '<NAME>'
class TestOdometry(unittest.TestCase):
def test_normal_dist(self):
tmp = od.prob_normal_dist(0, 1)
self.assertEqual(round(tmp, 10), 0.3989422804)
tmp = od.prob_normal_dist(1, 1)
self.assertEqual(round(tmp, 10), 0.2419707245)
tmp = od.prob_normal_dist(-1, 1)
self.assertEqual(round(tmp, 10), 0.2419707245)
tmp = od.prob_normal_dist(3, 1)
self.assertEqual(round(tmp, 10), 0.0044318484)
tmp = od.prob_normal_dist(6, 1)
self.assertEqual(round(tmp, 10), 0.0000000061)
tmp = od.prob_normal_dist(5, 5)
self.assertEqual(round(tmp, 10), 0.0483941449)
def test_traing_dist(self):
tmp = od.prob_triang_dist(0, 1)
self.assertEqual(round(tmp, 10), 0.4082482905)
tmp = od.prob_triang_dist(1, 1)
self.assertEqual(round(tmp, 10), 0.2415816238)
tmp = od.prob_triang_dist(-1, 1)
self.assertEqual(round(tmp, 10), 0.2415816238)
tmp = od.prob_triang_dist(3, 1)
self.assertEqual(tmp, 0)
tmp = od.prob_triang_dist(5, 5)
self.assertEqual(round(tmp, 10), 0.0483163248)
def test_atan2(self):
tmp = od.atan2(0, 0)
self.assertEqual(tmp, 0)
tmp = od.atan2(1, 1)
self.assertEqual(round(tmp, 10), 0.7853981634)
tmp = od.atan2(1, -1)
self.assertEqual(round(tmp, 10), 2.3561944902)
tmp = od.atan2(-1, 1)
self.assertEqual(round(tmp, 10), -0.7853981634)
tmp = od.atan2(-1, -1)
self.assertEqual(round(tmp, 10), -2.3561944902)
tmp = od.atan2(1, 0)
self.assertEqual(round(tmp, 10), 1.5707963268)
tmp = od.atan2(0, 1)
self.assertEqual(tmp, 0)
tmp = od.atan2(-1, 0)
self.assertEqual(round(tmp, 10), -1.5707963268)
tmp = od.atan2(0, -1)
self.assertEqual(round(tmp, 10), 3.1415926536)
def test_delta_trans(self):
tmp = od.delta_trans(1, 1, 1, 1)
self.assertEqual(tmp, 0)
tmp = od.delta_trans(1, 1, 1, 3)
self.assertEqual(tmp, 2)
tmp = od.delta_trans(1, 1, 2, 2)
self.assertEqual(round(tmp, 10), 1.4142135624)
tmp = od.delta_trans(1, 1, -1, -1)
self.assertEqual(round(tmp, 10), 2.8284271247)
def test_probabilities(self):
odometry = od.Odometry()
# set noise parameters
odometry.set_noise_params( [.1, .1, .1, .1] )
pos_t0 = (0, 0, 0) # current position
pos_t1 = (1, 0, 0) # position after move
u_t = [(0, 0, 0), (1, 0, 0)] # measured positions before and after move (from encoder data)
# get probability
prob = odometry.get_prob(pos_t0, pos_t1, u_t)
self.assertEqual(round(prob, 10), 63.4936359342)
# set another prob func
odometry.set_prob_func(od.prob_triang_dist)
# get probability
prob = odometry.get_prob(pos_t0, pos_t1, u_t)
self.assertEqual(round(prob, 10), 68.0413817440)
def test_sampling(self):
odometry = od.Odometry()
# set noise parameters
odometry.set_noise_params( [.1, .1, .1, .1] )
pos_t0 = (0, 0, 0) # current position
u_t = [(0, 0, 0), (1, 0, 0)] # measured positions before and after move (from encoder data)
# get position
pose = odometry.sample_motion_model(u_t, pos_t0)
self.assertTrue( math.fabs(pose[0] - u_t[1][0]) < .4 )
self.assertTrue( math.fabs(pose[1]) < .4 )
self.assertTrue( math.fabs(pose[2]) < 25 )
# set another prob func
odometry.set_sample_func(od.sample_triang_dist)
# get position
pose = odometry.sample_motion_model(u_t, pos_t0)
self.assertTrue( math.fabs(pose[0] - u_t[1][0]) < .4 )
self.assertTrue( math.fabs(pose[1]) < .4 )
self.assertTrue( math.fabs(pose[2]) < 25 )
if __name__ == '__main__':
unittest.main()
|
import unittest
import math
from bot import odometry as od
__author__ = '<NAME>'
class TestOdometry(unittest.TestCase):
def test_normal_dist(self):
tmp = od.prob_normal_dist(0, 1)
self.assertEqual(round(tmp, 10), 0.3989422804)
tmp = od.prob_normal_dist(1, 1)
self.assertEqual(round(tmp, 10), 0.2419707245)
tmp = od.prob_normal_dist(-1, 1)
self.assertEqual(round(tmp, 10), 0.2419707245)
tmp = od.prob_normal_dist(3, 1)
self.assertEqual(round(tmp, 10), 0.0044318484)
tmp = od.prob_normal_dist(6, 1)
self.assertEqual(round(tmp, 10), 0.0000000061)
tmp = od.prob_normal_dist(5, 5)
self.assertEqual(round(tmp, 10), 0.0483941449)
def test_traing_dist(self):
tmp = od.prob_triang_dist(0, 1)
self.assertEqual(round(tmp, 10), 0.4082482905)
tmp = od.prob_triang_dist(1, 1)
self.assertEqual(round(tmp, 10), 0.2415816238)
tmp = od.prob_triang_dist(-1, 1)
self.assertEqual(round(tmp, 10), 0.2415816238)
tmp = od.prob_triang_dist(3, 1)
self.assertEqual(tmp, 0)
tmp = od.prob_triang_dist(5, 5)
self.assertEqual(round(tmp, 10), 0.0483163248)
def test_atan2(self):
tmp = od.atan2(0, 0)
self.assertEqual(tmp, 0)
tmp = od.atan2(1, 1)
self.assertEqual(round(tmp, 10), 0.7853981634)
tmp = od.atan2(1, -1)
self.assertEqual(round(tmp, 10), 2.3561944902)
tmp = od.atan2(-1, 1)
self.assertEqual(round(tmp, 10), -0.7853981634)
tmp = od.atan2(-1, -1)
self.assertEqual(round(tmp, 10), -2.3561944902)
tmp = od.atan2(1, 0)
self.assertEqual(round(tmp, 10), 1.5707963268)
tmp = od.atan2(0, 1)
self.assertEqual(tmp, 0)
tmp = od.atan2(-1, 0)
self.assertEqual(round(tmp, 10), -1.5707963268)
tmp = od.atan2(0, -1)
self.assertEqual(round(tmp, 10), 3.1415926536)
def test_delta_trans(self):
tmp = od.delta_trans(1, 1, 1, 1)
self.assertEqual(tmp, 0)
tmp = od.delta_trans(1, 1, 1, 3)
self.assertEqual(tmp, 2)
tmp = od.delta_trans(1, 1, 2, 2)
self.assertEqual(round(tmp, 10), 1.4142135624)
tmp = od.delta_trans(1, 1, -1, -1)
self.assertEqual(round(tmp, 10), 2.8284271247)
def test_probabilities(self):
odometry = od.Odometry()
# set noise parameters
odometry.set_noise_params( [.1, .1, .1, .1] )
pos_t0 = (0, 0, 0) # current position
pos_t1 = (1, 0, 0) # position after move
u_t = [(0, 0, 0), (1, 0, 0)] # measured positions before and after move (from encoder data)
# get probability
prob = odometry.get_prob(pos_t0, pos_t1, u_t)
self.assertEqual(round(prob, 10), 63.4936359342)
# set another prob func
odometry.set_prob_func(od.prob_triang_dist)
# get probability
prob = odometry.get_prob(pos_t0, pos_t1, u_t)
self.assertEqual(round(prob, 10), 68.0413817440)
def test_sampling(self):
odometry = od.Odometry()
# set noise parameters
odometry.set_noise_params( [.1, .1, .1, .1] )
pos_t0 = (0, 0, 0) # current position
u_t = [(0, 0, 0), (1, 0, 0)] # measured positions before and after move (from encoder data)
# get position
pose = odometry.sample_motion_model(u_t, pos_t0)
self.assertTrue( math.fabs(pose[0] - u_t[1][0]) < .4 )
self.assertTrue( math.fabs(pose[1]) < .4 )
self.assertTrue( math.fabs(pose[2]) < 25 )
# set another prob func
odometry.set_sample_func(od.sample_triang_dist)
# get position
pose = odometry.sample_motion_model(u_t, pos_t0)
self.assertTrue( math.fabs(pose[0] - u_t[1][0]) < .4 )
self.assertTrue( math.fabs(pose[1]) < .4 )
self.assertTrue( math.fabs(pose[2]) < 25 )
if __name__ == '__main__':
unittest.main()
|
en
| 0.778583
|
# set noise parameters # current position # position after move # measured positions before and after move (from encoder data) # get probability # set another prob func # get probability # set noise parameters # current position # measured positions before and after move (from encoder data) # get position # set another prob func # get position
| 2.87761
| 3
|
test/functional/tachacoin_pos.py
|
tachacoin/tachacoin
| 0
|
6627696
|
<filename>test/functional/tachacoin_pos.py
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.blocktools import *
from test_framework.address import *
from test_framework.key import ECKey
import io
import struct
class TachacoinPOSTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [[]]
self.tip = None
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def bootstrap_p2p(self):
"""Add a P2P connection to the node.
Helper to connect and wait for version handshake."""
self.nodes[0].add_p2p_connection(P2PDataStore())
# We need to wait for the initial getheaders from the peer before we
# start populating our blockstore. If we don't, then we may run ahead
# to the next subtest before we receive the getheaders. We'd then send
# an INV for the next block and receive two getheaders - one for the
# IBD and one for the INV. We'd respond to both and could get
# unexpectedly disconnected if the DoS score for that error is 50.
self.nodes[0].p2p.wait_for_getheaders(timeout=5)
def reconnect_p2p(self):
"""Tear down and bootstrap the P2P connection to the node.
The node gets disconnected several times in this test. This helper
method reconnects the p2p and restarts the network thread."""
self.nodes[0].disconnect_p2ps()
self.bootstrap_p2p()
def sync_blocks(self, blocks, success=True, reject_code=None, reject_reason=None, force_send=False, reconnect=False, timeout=5):
"""Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.
Call with success = False if the tip shouldn't advance to the most recent block."""
self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_reason=reject_reason, force_send=force_send, timeout=timeout, expect_disconnect=reconnect)
if reconnect:
self.reconnect_p2p()
def _remove_from_staking_prevouts(self, block):
for j in range(len(self.staking_prevouts)):
prevout = self.staking_prevouts[j]
if prevout[0].serialize() == block.prevoutStake.serialize():
self.staking_prevouts.pop(j)
break
    def run_test(self):
        """Exercise proof-of-stake block acceptance rules over P2P.

        Builds a spendable UTXO set, then submits 22 differently-malformed
        PoS blocks and verifies that none of them advance the tip, and
        finally submits one well-formed PoS block and verifies acceptance.
        """
        self.node = self.nodes[0]
        # Deterministic private key (hash of a zero word) so runs are reproducible.
        # NOTE(review): 239 is presumably the testnet/regtest WIF version byte — confirm.
        privkey = byte_to_base58(hash256(struct.pack('<I', 0)), 239)
        self.node.importprivkey(privkey)
        self.bootstrap_p2p()
        # returns a test case that asserts that the current tip was accepted
        # First generate some blocks so we have some spendable coins
        block_hashes = self.node.generatetoaddress(100, "qSrM9K6FMhZ29Vkp8Rdk8Jp66bbfpjFETq")
        # Extend the chain with locally built PoW blocks so the early coinbases mature.
        for i in range(COINBASE_MATURITY):
            self.tip = create_block(int(self.node.getbestblockhash(), 16), create_coinbase(self.node.getblockcount()+1), int(time.time()))
            self.tip.solve()
            self.sync_blocks([self.tip], success=True)
        # Create some fresh outputs; the block mined below leaves them immature.
        for _ in range(10):
            self.node.sendtoaddress("qSrM9K6FMhZ29Vkp8Rdk8Jp66bbfpjFETq", 1000)
        block_hashes += self.node.generatetoaddress(1, "qSrM9K6FMhZ29Vkp8Rdk8Jp66bbfpjFETq")
        blocks = []
        for block_hash in block_hashes:
            blocks.append(self.node.getblock(block_hash))
        # These are our staking txs
        self.staking_prevouts = []
        self.bad_vout_staking_prevouts = []
        self.bad_txid_staking_prevouts = []
        self.unconfirmed_staking_prevouts = []
        for unspent in self.node.listunspent():
            # Find the block containing this output to record its block time.
            for block in blocks:
                if unspent['txid'] in block['tx']:
                    tx_block_time = block['time']
                    break
            else:
                # Every unspent output must come from one of the blocks above.
                assert(False)
            if unspent['confirmations'] > COINBASE_MATURITY:
                self.staking_prevouts.append((COutPoint(int(unspent['txid'], 16), unspent['vout']), int(unspent['amount'])*COIN, tx_block_time))
                self.bad_vout_staking_prevouts.append((COutPoint(int(unspent['txid'], 16), 0xff), int(unspent['amount'])*COIN, tx_block_time))
                self.bad_txid_staking_prevouts.append((COutPoint(int(unspent['txid'], 16)+1, unspent['vout']), int(unspent['amount'])*COIN, tx_block_time))
            if unspent['confirmations'] < COINBASE_MATURITY:
                self.unconfirmed_staking_prevouts.append((COutPoint(int(unspent['txid'], 16), unspent['vout']), int(unspent['amount'])*COIN, tx_block_time))
        # First let 25 seconds pass so that we do not submit blocks directly after the last one
        #time.sleep(100)
        # Tip height must not move until the final valid block at the end.
        block_count = self.node.getblockcount()
        # 1 A block that does not have the correct timestamp mask
        t = int(time.time()) | 1
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts, nTime=t)
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, force_send=True, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 2 A block that with a too high reward
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts, outNValue=30006)
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 3 A block with an incorrect block sig
        bad_key = ECKey()
        bad_key.set(hash256(b'horse staple battery'), False)
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.sign_block(bad_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=False, force_send=True)
        self._remove_from_staking_prevouts(self.tip)
        # 4 A block that stakes with txs with too few confirmations
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.unconfirmed_staking_prevouts)
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True, force_send=True)
        self._remove_from_staking_prevouts(self.tip)
        # 5 A block that with a coinbase reward
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.vtx[0].vout[0].nValue = 1
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 6 A block that with no vout in the coinbase
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.vtx[0].vout = []
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 7 A block way into the future
        t = (int(time.time())+100) & 0xfffffff0
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts, nTime=t)
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, force_send=True)
        self._remove_from_staking_prevouts(self.tip)
        # 8 No vout in the staking tx
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.vtx[1].vout = []
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 9 Unsigned coinstake.
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts, signStakeTx=False)
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False)
        self._remove_from_staking_prevouts(self.tip)
        # 10 A block without a coinstake tx.
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.vtx.pop(-1)
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 11 A block without a coinbase.
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.vtx.pop(0)
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 12 A block where the coinbase has no outputs
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.vtx[0].vout = []
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 13 A block where the coinstake has no outputs
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.vtx[1].vout.pop(-1)
        self.tip.vtx[1].vout.pop(-1)
        # Re-sign the mutilated coinstake through the wallet so only the
        # "no outputs" property is wrong, not the signature itself.
        stake_tx_signed_raw_hex = self.node.signrawtransactionwithwallet(bytes_to_hex_str(self.tip.vtx[1].serialize()))['hex']
        f = io.BytesIO(hex_str_to_bytes(stake_tx_signed_raw_hex))
        self.tip.vtx[1] = CTransaction()
        self.tip.vtx[1].deserialize(f)
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 14 A block with an incorrect hashStateRoot
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.hashStateRoot = 0xe
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 15 A block with an incorrect hashUTXORoot
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.hashUTXORoot = 0xe
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 16 A block with an a signature on wrong header data
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.sign_block(block_sig_key)
        # Change the header after signing so the signature no longer matches.
        self.tip.nNonce = 0xfffe
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, force_send=True)
        self._remove_from_staking_prevouts(self.tip)
        # 17 A block with where the pubkey of the second output of the coinstake has been modified after block signing
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        scriptPubKey = self.tip.vtx[1].vout[1].scriptPubKey
        # Modify a byte of the pubkey
        self.tip.vtx[1].vout[1].scriptPubKey = scriptPubKey[0:20] + bytes.fromhex(hex(ord(scriptPubKey[20:21])+1)[2:4]) + scriptPubKey[21:]
        # The length must be unchanged — only the byte value differs.
        assert_equal(len(scriptPubKey), len(self.tip.vtx[1].vout[1].scriptPubKey))
        stake_tx_signed_raw_hex = self.node.signrawtransactionwithwallet(bytes_to_hex_str(self.tip.vtx[1].serialize()))['hex']
        f = io.BytesIO(hex_str_to_bytes(stake_tx_signed_raw_hex))
        self.tip.vtx[1] = CTransaction()
        self.tip.vtx[1].deserialize(f)
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 18. A block in the past
        t = (int(time.time())-700) & 0xfffffff0
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts, nTime=t)
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, force_send=True)
        self._remove_from_staking_prevouts(self.tip)
        # 19. A block with too many coinbase vouts
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.vtx[0].vout.append(CTxOut(0, CScript([OP_TRUE])))
        self.tip.vtx[0].rehash()
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 20. A block where the coinstake's vin is not the prevout specified in the block
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts, coinStakePrevout=self.staking_prevouts[-1][0])
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 21. A block that stakes with valid txs but invalid vouts
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.bad_vout_staking_prevouts)
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=False, force_send=True)
        self._remove_from_staking_prevouts(self.tip)
        # 22. A block that stakes with txs that do not exist
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.bad_txid_staking_prevouts)
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True, force_send=True)
        self._remove_from_staking_prevouts(self.tip)
        # Make sure for certain that no blocks were accepted. (This is also to make sure that no segfaults ocurred)
        assert_equal(self.node.getblockcount(), block_count)
        # And at last, make sure that a valid pos block is accepted
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=True)
        assert_equal(self.node.getblockcount(), block_count+1)
    def create_unsigned_pos_block(self, staking_prevouts, nTime=None, outNValue=10002, signStakeTx=True, bestBlockHash=None, coinStakePrevout=None):
        """Build a proof-of-stake block on top of bestBlockHash.

        The block header is NOT yet block-signed; callers sign it with the
        returned key via block.sign_block().

        Args:
            staking_prevouts: list of (COutPoint, amount, block_time) tuples
                used to search for a stake kernel solution.
            nTime: block time; defaults to now+15 masked to a 16s boundary.
            outNValue: value (in coins) of each of the two coinstake outputs.
            signStakeTx: if True, sign the coinstake via the node wallet.
            bestBlockHash: hash (hex) to build on; defaults to the node tip.
            coinStakePrevout: override the outpoint spent by the coinstake
                (used to craft a coinstake that mismatches block.prevoutStake).

        Returns:
            (block, block_sig_key) on success, or None if no prevout solved
            the stake kernel.
        """
        if not nTime:
            current_time = int(time.time()) + 15
            # Mask to the timestamp granularity expected by the consensus rules.
            nTime = current_time & 0xfffffff0
        if not bestBlockHash:
            bestBlockHash = self.node.getbestblockhash()
            block_height = self.node.getblockcount()
        else:
            block_height = self.node.getblock(bestBlockHash)['height']
        parent_block_stake_modifier = int(self.node.getblock(bestBlockHash)['modifier'], 16)
        parent_block_raw_hex = self.node.getblock(bestBlockHash, False)
        f = io.BytesIO(hex_str_to_bytes(parent_block_raw_hex))
        parent_block = CBlock()
        parent_block.deserialize(f)
        # PoS blocks carry an empty coinbase; the reward goes in the coinstake.
        coinbase = create_coinbase(block_height+1)
        coinbase.vout[0].nValue = 0
        coinbase.vout[0].scriptPubKey = b""
        coinbase.rehash()
        block = create_block(int(bestBlockHash, 16), coinbase, nTime)
        block.hashPrevBlock = int(bestBlockHash, 16)
        if not block.solve_stake(parent_block_stake_modifier, staking_prevouts):
            return None
        # create a new private key used for block signing.
        block_sig_key = ECKey()
        block_sig_key.set(hash256(struct.pack('<I', 0)), False)
        pubkey = block_sig_key.get_pubkey().get_bytes()
        scriptPubKey = CScript([pubkey, OP_CHECKSIG])
        stake_tx_unsigned = CTransaction()
        if not coinStakePrevout:
            coinStakePrevout = block.prevoutStake
        stake_tx_unsigned.vin.append(CTxIn(coinStakePrevout))
        # First vout of a coinstake is the empty marker output.
        stake_tx_unsigned.vout.append(CTxOut())
        stake_tx_unsigned.vout.append(CTxOut(int(outNValue*COIN), scriptPubKey))
        stake_tx_unsigned.vout.append(CTxOut(int(outNValue*COIN), scriptPubKey))
        if signStakeTx:
            stake_tx_signed_raw_hex = self.node.signrawtransactionwithwallet(bytes_to_hex_str(stake_tx_unsigned.serialize()))['hex']
            f = io.BytesIO(hex_str_to_bytes(stake_tx_signed_raw_hex))
            stake_tx_signed = CTransaction()
            stake_tx_signed.deserialize(f)
            block.vtx.append(stake_tx_signed)
        else:
            block.vtx.append(stake_tx_unsigned)
        block.hashMerkleRoot = block.calc_merkle_root()
        return (block, block_sig_key)
# Script entry point: run the functional test directly.
if __name__ == '__main__':
    TachacoinPOSTest().main()
|
<filename>test/functional/tachacoin_pos.py
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.blocktools import *
from test_framework.address import *
from test_framework.key import ECKey
import io
import struct
class TachacoinPOSTest(BitcoinTestFramework):
    """Functional test for proof-of-stake block acceptance rules.

    Builds a spendable UTXO set on a single node, then feeds it a series of
    malformed PoS blocks over P2P and checks that each one is rejected,
    finishing with one valid PoS block that must be accepted.
    """

    def set_test_params(self):
        # Single node, fresh chain, no extra command-line arguments.
        self.num_nodes = 1
        self.setup_clean_chain = True
        self.extra_args = [[]]
        self.tip = None

    def skip_test_if_missing_module(self):
        # The test signs staking transactions through the node wallet.
        self.skip_if_no_wallet()

    def bootstrap_p2p(self):
        """Add a P2P connection to the node.

        Helper to connect and wait for version handshake."""
        self.nodes[0].add_p2p_connection(P2PDataStore())
        # We need to wait for the initial getheaders from the peer before we
        # start populating our blockstore. If we don't, then we may run ahead
        # to the next subtest before we receive the getheaders. We'd then send
        # an INV for the next block and receive two getheaders - one for the
        # IBD and one for the INV. We'd respond to both and could get
        # unexpectedly disconnected if the DoS score for that error is 50.
        self.nodes[0].p2p.wait_for_getheaders(timeout=5)

    def reconnect_p2p(self):
        """Tear down and bootstrap the P2P connection to the node.

        The node gets disconnected several times in this test. This helper
        method reconnects the p2p and restarts the network thread."""
        self.nodes[0].disconnect_p2ps()
        self.bootstrap_p2p()

    def sync_blocks(self, blocks, success=True, reject_code=None, reject_reason=None, force_send=False, reconnect=False, timeout=5):
        """Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.

        Call with success = False if the tip shouldn't advance to the most
        recent block.  NOTE(review): reject_code is accepted but unused here.
        """
        self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_reason=reject_reason, force_send=force_send, timeout=timeout, expect_disconnect=reconnect)
        if reconnect:
            self.reconnect_p2p()

    def _remove_from_staking_prevouts(self, block):
        # Drop the prevout this block staked with so later blocks do not try
        # to reuse the same kernel outpoint.
        for j in range(len(self.staking_prevouts)):
            prevout = self.staking_prevouts[j]
            if prevout[0].serialize() == block.prevoutStake.serialize():
                self.staking_prevouts.pop(j)
                break

    def run_test(self):
        """Submit 22 malformed PoS blocks (all must be rejected) then one valid block."""
        self.node = self.nodes[0]
        # Deterministic private key (hash of a zero word) so runs are reproducible.
        # NOTE(review): 239 is presumably the testnet/regtest WIF version byte — confirm.
        privkey = byte_to_base58(hash256(struct.pack('<I', 0)), 239)
        self.node.importprivkey(privkey)
        self.bootstrap_p2p()
        # returns a test case that asserts that the current tip was accepted
        # First generate some blocks so we have some spendable coins
        block_hashes = self.node.generatetoaddress(100, "qSrM9K6FMhZ29Vkp8Rdk8Jp66bbfpjFETq")
        # Extend the chain with locally built PoW blocks so the early coinbases mature.
        for i in range(COINBASE_MATURITY):
            self.tip = create_block(int(self.node.getbestblockhash(), 16), create_coinbase(self.node.getblockcount()+1), int(time.time()))
            self.tip.solve()
            self.sync_blocks([self.tip], success=True)
        # Create some fresh outputs; the block mined below leaves them immature.
        for _ in range(10):
            self.node.sendtoaddress("qSrM9K6FMhZ29Vkp8Rdk8Jp66bbfpjFETq", 1000)
        block_hashes += self.node.generatetoaddress(1, "qSrM9K6FMhZ29Vkp8Rdk8Jp66bbfpjFETq")
        blocks = []
        for block_hash in block_hashes:
            blocks.append(self.node.getblock(block_hash))
        # These are our staking txs
        self.staking_prevouts = []
        self.bad_vout_staking_prevouts = []
        self.bad_txid_staking_prevouts = []
        self.unconfirmed_staking_prevouts = []
        for unspent in self.node.listunspent():
            # Find the block containing this output to record its block time.
            for block in blocks:
                if unspent['txid'] in block['tx']:
                    tx_block_time = block['time']
                    break
            else:
                # Every unspent output must come from one of the blocks above.
                assert(False)
            if unspent['confirmations'] > COINBASE_MATURITY:
                self.staking_prevouts.append((COutPoint(int(unspent['txid'], 16), unspent['vout']), int(unspent['amount'])*COIN, tx_block_time))
                self.bad_vout_staking_prevouts.append((COutPoint(int(unspent['txid'], 16), 0xff), int(unspent['amount'])*COIN, tx_block_time))
                self.bad_txid_staking_prevouts.append((COutPoint(int(unspent['txid'], 16)+1, unspent['vout']), int(unspent['amount'])*COIN, tx_block_time))
            if unspent['confirmations'] < COINBASE_MATURITY:
                self.unconfirmed_staking_prevouts.append((COutPoint(int(unspent['txid'], 16), unspent['vout']), int(unspent['amount'])*COIN, tx_block_time))
        # First let 25 seconds pass so that we do not submit blocks directly after the last one
        #time.sleep(100)
        # Tip height must not move until the final valid block at the end.
        block_count = self.node.getblockcount()
        # 1 A block that does not have the correct timestamp mask
        t = int(time.time()) | 1
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts, nTime=t)
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, force_send=True, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 2 A block that with a too high reward
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts, outNValue=30006)
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 3 A block with an incorrect block sig
        bad_key = ECKey()
        bad_key.set(hash256(b'horse staple battery'), False)
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.sign_block(bad_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=False, force_send=True)
        self._remove_from_staking_prevouts(self.tip)
        # 4 A block that stakes with txs with too few confirmations
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.unconfirmed_staking_prevouts)
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True, force_send=True)
        self._remove_from_staking_prevouts(self.tip)
        # 5 A block that with a coinbase reward
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.vtx[0].vout[0].nValue = 1
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 6 A block that with no vout in the coinbase
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.vtx[0].vout = []
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 7 A block way into the future
        t = (int(time.time())+100) & 0xfffffff0
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts, nTime=t)
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, force_send=True)
        self._remove_from_staking_prevouts(self.tip)
        # 8 No vout in the staking tx
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.vtx[1].vout = []
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 9 Unsigned coinstake.
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts, signStakeTx=False)
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False)
        self._remove_from_staking_prevouts(self.tip)
        # 10 A block without a coinstake tx.
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.vtx.pop(-1)
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 11 A block without a coinbase.
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.vtx.pop(0)
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 12 A block where the coinbase has no outputs
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.vtx[0].vout = []
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 13 A block where the coinstake has no outputs
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.vtx[1].vout.pop(-1)
        self.tip.vtx[1].vout.pop(-1)
        # Re-sign the mutilated coinstake through the wallet so only the
        # "no outputs" property is wrong, not the signature itself.
        stake_tx_signed_raw_hex = self.node.signrawtransactionwithwallet(bytes_to_hex_str(self.tip.vtx[1].serialize()))['hex']
        f = io.BytesIO(hex_str_to_bytes(stake_tx_signed_raw_hex))
        self.tip.vtx[1] = CTransaction()
        self.tip.vtx[1].deserialize(f)
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 14 A block with an incorrect hashStateRoot
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.hashStateRoot = 0xe
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 15 A block with an incorrect hashUTXORoot
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.hashUTXORoot = 0xe
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 16 A block with an a signature on wrong header data
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.sign_block(block_sig_key)
        # Change the header after signing so the signature no longer matches.
        self.tip.nNonce = 0xfffe
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, force_send=True)
        self._remove_from_staking_prevouts(self.tip)
        # 17 A block with where the pubkey of the second output of the coinstake has been modified after block signing
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        scriptPubKey = self.tip.vtx[1].vout[1].scriptPubKey
        # Modify a byte of the pubkey
        self.tip.vtx[1].vout[1].scriptPubKey = scriptPubKey[0:20] + bytes.fromhex(hex(ord(scriptPubKey[20:21])+1)[2:4]) + scriptPubKey[21:]
        # The length must be unchanged — only the byte value differs.
        assert_equal(len(scriptPubKey), len(self.tip.vtx[1].vout[1].scriptPubKey))
        stake_tx_signed_raw_hex = self.node.signrawtransactionwithwallet(bytes_to_hex_str(self.tip.vtx[1].serialize()))['hex']
        f = io.BytesIO(hex_str_to_bytes(stake_tx_signed_raw_hex))
        self.tip.vtx[1] = CTransaction()
        self.tip.vtx[1].deserialize(f)
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 18. A block in the past
        t = (int(time.time())-700) & 0xfffffff0
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts, nTime=t)
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, force_send=True)
        self._remove_from_staking_prevouts(self.tip)
        # 19. A block with too many coinbase vouts
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.vtx[0].vout.append(CTxOut(0, CScript([OP_TRUE])))
        self.tip.vtx[0].rehash()
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 20. A block where the coinstake's vin is not the prevout specified in the block
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts, coinStakePrevout=self.staking_prevouts[-1][0])
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        self._remove_from_staking_prevouts(self.tip)
        # 21. A block that stakes with valid txs but invalid vouts
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.bad_vout_staking_prevouts)
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=False, force_send=True)
        self._remove_from_staking_prevouts(self.tip)
        # 22. A block that stakes with txs that do not exist
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.bad_txid_staking_prevouts)
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=False, reconnect=True, force_send=True)
        self._remove_from_staking_prevouts(self.tip)
        # Make sure for certain that no blocks were accepted. (This is also to make sure that no segfaults ocurred)
        assert_equal(self.node.getblockcount(), block_count)
        # And at last, make sure that a valid pos block is accepted
        (self.tip, block_sig_key) = self.create_unsigned_pos_block(self.staking_prevouts)
        self.tip.sign_block(block_sig_key)
        self.tip.rehash()
        self.sync_blocks([self.tip], success=True)
        assert_equal(self.node.getblockcount(), block_count+1)

    def create_unsigned_pos_block(self, staking_prevouts, nTime=None, outNValue=10002, signStakeTx=True, bestBlockHash=None, coinStakePrevout=None):
        """Build a proof-of-stake block on top of bestBlockHash.

        The block header is NOT yet block-signed; callers sign it with the
        returned key via block.sign_block().

        Args:
            staking_prevouts: list of (COutPoint, amount, block_time) tuples
                used to search for a stake kernel solution.
            nTime: block time; defaults to now+15 masked to a 16s boundary.
            outNValue: value (in coins) of each of the two coinstake outputs.
            signStakeTx: if True, sign the coinstake via the node wallet.
            bestBlockHash: hash (hex) to build on; defaults to the node tip.
            coinStakePrevout: override the outpoint spent by the coinstake
                (used to craft a coinstake that mismatches block.prevoutStake).

        Returns:
            (block, block_sig_key) on success, or None if no prevout solved
            the stake kernel.
        """
        if not nTime:
            current_time = int(time.time()) + 15
            # Mask to the timestamp granularity expected by the consensus rules.
            nTime = current_time & 0xfffffff0
        if not bestBlockHash:
            bestBlockHash = self.node.getbestblockhash()
            block_height = self.node.getblockcount()
        else:
            block_height = self.node.getblock(bestBlockHash)['height']
        parent_block_stake_modifier = int(self.node.getblock(bestBlockHash)['modifier'], 16)
        parent_block_raw_hex = self.node.getblock(bestBlockHash, False)
        f = io.BytesIO(hex_str_to_bytes(parent_block_raw_hex))
        parent_block = CBlock()
        parent_block.deserialize(f)
        # PoS blocks carry an empty coinbase; the reward goes in the coinstake.
        coinbase = create_coinbase(block_height+1)
        coinbase.vout[0].nValue = 0
        coinbase.vout[0].scriptPubKey = b""
        coinbase.rehash()
        block = create_block(int(bestBlockHash, 16), coinbase, nTime)
        block.hashPrevBlock = int(bestBlockHash, 16)
        if not block.solve_stake(parent_block_stake_modifier, staking_prevouts):
            return None
        # create a new private key used for block signing.
        block_sig_key = ECKey()
        block_sig_key.set(hash256(struct.pack('<I', 0)), False)
        pubkey = block_sig_key.get_pubkey().get_bytes()
        scriptPubKey = CScript([pubkey, OP_CHECKSIG])
        stake_tx_unsigned = CTransaction()
        if not coinStakePrevout:
            coinStakePrevout = block.prevoutStake
        stake_tx_unsigned.vin.append(CTxIn(coinStakePrevout))
        # First vout of a coinstake is the empty marker output.
        stake_tx_unsigned.vout.append(CTxOut())
        stake_tx_unsigned.vout.append(CTxOut(int(outNValue*COIN), scriptPubKey))
        stake_tx_unsigned.vout.append(CTxOut(int(outNValue*COIN), scriptPubKey))
        if signStakeTx:
            stake_tx_signed_raw_hex = self.node.signrawtransactionwithwallet(bytes_to_hex_str(stake_tx_unsigned.serialize()))['hex']
            f = io.BytesIO(hex_str_to_bytes(stake_tx_signed_raw_hex))
            stake_tx_signed = CTransaction()
            stake_tx_signed.deserialize(f)
            block.vtx.append(stake_tx_signed)
        else:
            block.vtx.append(stake_tx_unsigned)
        block.hashMerkleRoot = block.calc_merkle_root()
        return (block, block_sig_key)
# Script entry point: run the functional test directly.
if __name__ == '__main__':
    TachacoinPOSTest().main()
|
en
| 0.907918
|
#!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. Add a P2P connection to the node. Helper to connect and wait for version handshake. # We need to wait for the initial getheaders from the peer before we # start populating our blockstore. If we don't, then we may run ahead # to the next subtest before we receive the getheaders. We'd then send # an INV for the next block and receive two getheaders - one for the # IBD and one for the INV. We'd respond to both and could get # unexpectedly disconnected if the DoS score for that error is 50. Tear down and bootstrap the P2P connection to the node. The node gets disconnected several times in this test. This helper method reconnects the p2p and restarts the network thread. Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block. Call with success = False if the tip shouldn't advance to the most recent block. # returns a test case that asserts that the current tip was accepted # First generate some blocks so we have some spendable coins # These are our staking txs # First let 25 seconds pass so that we do not submit blocks directly after the last one #time.sleep(100) # 1 A block that does not have the correct timestamp mask # 2 A block that with a too high reward # 3 A block with an incorrect block sig # 4 A block that stakes with txs with too few confirmations # 5 A block that with a coinbase reward # 6 A block that with no vout in the coinbase # 7 A block way into the future # 8 No vout in the staking tx # 9 Unsigned coinstake. # 10 A block without a coinstake tx. # 11 A block without a coinbase. 
# 12 A block where the coinbase has no outputs # 13 A block where the coinstake has no outputs # 14 A block with an incorrect hashStateRoot # 15 A block with an incorrect hashUTXORoot # 16 A block with an a signature on wrong header data # 17 A block with where the pubkey of the second output of the coinstake has been modified after block signing # Modify a byte of the pubkey # 18. A block in the past # 19. A block with too many coinbase vouts # 20. A block where the coinstake's vin is not the prevout specified in the block # 21. A block that stakes with valid txs but invalid vouts # 22. A block that stakes with txs that do not exist # Make sure for certain that no blocks were accepted. (This is also to make sure that no segfaults ocurred) # And at last, make sure that a valid pos block is accepted # create a new private key used for block signing.
| 2.068384
| 2
|
stayclean-2019-august/display.py
|
foobarbazblarg/stayclean
| 1
|
6627697
|
<reponame>foobarbazblarg/stayclean
#!/usr/bin/python
# TODO: issues with new oauth2 stuff. Keep using older version of Python for now.
# #!/usr/bin/env python
from participantCollection import ParticipantCollection
import re
import datetime
import pyperclip
# Edit Me!
# Total number of days in the challenge month (update each month).
currentMonthTotalDays = 31

# Calendar bookkeeping derived from today's date.
currentMonthIndex = datetime.date.today().month
currentMonthPenultimateDayIndex = currentMonthTotalDays - 1
currentMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[currentMonthIndex]
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[nextMonthIndex]
currentDayOfMonthIndex = datetime.date.today().day
# TODO: testing...
# currentDayOfMonthIndex = 31
# Ordinal name of today's day-of-month, used in the post templates.
currentDayOfMonthName = {1: 'first', 2: 'second', 3: 'third', 4: 'fourth', 5: 'fifth', 6: 'sixth', 7: 'seventh', 8: 'eighth', 9: 'ninth', 10: 'tenth', 11: 'eleventh', 12: 'twelfth', 13: 'thirteenth', 14: 'fourteenth', 15: 'fifteenth', 16: 'sixteenth', 17: 'seventeenth', 18: 'eighteenth', 19: 'nineteenth', 20: 'twentieth', 21: 'twenty-first', 22: 'twenty-second', 23: 'twenty-third', 24: 'twenty-fourth', 25: 'twenty-fifth', 26: 'twenty-sixth', 27: 'twenty-seventh', 28: 'twenty-eighth', 29: 'twenty-ninth', 30: 'thirtieth', 31: 'thirty-first'}[currentDayOfMonthIndex]
currentDayOfWeekName = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}[datetime.date.today().weekday()]

# Participant tally used by all template functions below.
participants = ParticipantCollection()
numberStillIn = participants.sizeOfParticipantsWhoAreStillIn()
initialNumber = participants.size()
# NOTE(review): this file runs under Python 2 (print statements below), so
# `100 * numberStillIn / initialNumber` is integer division — confirm intended.
percentStillIn = int(round(100 * numberStillIn / initialNumber, 0))
# print "There are currently **" + str(numberStillIn) + " out of " + str(initialNumber) +"** original participants. That's **" + str(int(round(100*numberStillIn/initialNumber,0))) + "%** Here is the list of participants still with the challenge:\n"
def stringToPrintLegacy():
    """Return the legacy status blurb followed by the remaining participants.

    Participants who have never checked in are suffixed with " ~".
    """
    blurb = "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    # Plain literal substitution; the placeholders contain no regex metacharacters.
    blurb = blurb.replace('NUMBER_STILL_IN', str(numberStillIn))
    blurb = blurb.replace('INITIAL_NUMBER', str(initialNumber))
    blurb = blurb.replace('PERCENT_STILL_IN', str(percentStillIn))
    for participant in participants.participantsWhoAreStillIn():
        entry = "/u/" + participant.name
        if not participant.hasCheckedIn:
            entry += " ~"
        blurb += entry + "\n\n"
    return blurb
def templateForParticipants():
    """Return one "/u/name" paragraph per remaining participant.

    Participants who have never checked in are suffixed with " ~".
    """
    entries = []
    for participant in participants.participantsWhoAreStillIn():
        entry = "/u/" + participant.name
        if not participant.hasCheckedIn:
            entry += " ~"
        entries.append(entry + "\n\n")
    return "".join(entries)
def templateForParticipantsOnFinalDay():
    """Return the final-day participant listing.

    Splits the remaining participants into those who have checked in
    (safe) and those who have not (flagged with " ~" and a warning).
    """
    parts = []
    parts.append("These participants have checked in at least once in the last 15 days:\n")
    parts.append("\n")
    for participant in participants.participantsWhoAreStillInAndHaveCheckedIn():
        parts.append("/u/" + participant.name + "\n")
    parts.append("\n")
    parts.append("These participants have not reported a relapse, so they are still in the running, but **if they do not check in by the end of today, they will be removed from the list, and will not be considered victorious**:\n")
    parts.append("\n")
    for participant in participants.participantsWhoAreStillInAndHaveNotCheckedIn():
        parts.append("/u/" + participant.name + " ~\n")
    parts.append("\n")
    return "".join(parts)
def templateFor1():
    """Render the day-1 post: rules intro plus the full signup roster."""
    print '1\n\n'
    answer = ""
    # NOTE(review): these two banner prints look like leftover debug output;
    # the top-level script already prints its own banners around the final
    # string -- confirm before removing.
    print "============================================================="
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. ~~We will no longer be accepting new signups.~~ Good news! We will be be accepting late signups for the next 3 days. If you forgot to sign up for the CURRENT_MONTH_NAME challenge, just leave a \"sign me up\" comment below, and I'll add you. Best of luck to everyone here!\n"
    answer += "\n"
    answer += "Here's how this thing works:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- ~~We will not be accepting any new participants~~, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "Here are our **INITIAL_NUMBER** original participants:\n\n"
    answer += templateForParticipants()
    print "============================================================="
    return answer
def templateFor2():
    """Render the day-2 post: second day of the 3-day late-signup grace period."""
    print '2\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. This is the second day of our 3 day late-signup grace period. If you forgot to sign up for the CURRENT_MONTH_NAME challenge, just leave a \"sign me up\" comment below, and I'll add you.\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- ~~We will not be accepting any new participants~~, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    return answer
def templateFor3():
    """Render the day-3 post: final day of the late-signup grace period."""
    print '3\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. This is the last day of our 3 day late-signup grace period. If you forgot to sign up for the CURRENT_MONTH_NAME challenge, just leave a \"sign me up\" comment below, and I'll add you. After today, further signup requests will be silently ignored.\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- ~~We will not be accepting any new participants~~, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    return answer
def templateFor4():
    """Render the day-4 post: the late-signup grace period has just closed."""
    print '4\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. Our 3 day late-signup grace period is now over. If you forgot to sign up, it's too late for CURRENT_MONTH_NAME, but feel free to leave comments here anyway, and we'll see you in NEXT_MONTH_NAME.\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    return answer
def templateFor5to9():
    """Render the routine post used on days 5 through 9."""
    print '5 to 9\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    return answer
def templateFor10to14():
    """Render the days-10-14 post, which adds the check-in countdown warning."""
    print '10 to 14\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
    answer += "\n"
    # Days remaining before the day-15 purge of never-checked-in names.
    answer += "**THE COUNTDOWN: Attention everyone!** You have " + str(15 - currentDayOfMonthIndex) + " days to make an update comment (if you haven't already) to be counted as an active participant! **Otherwise your name will be REMOVED from the list** on CURRENT_MONTH_INDEX/15!!\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    return answer
def templateFor15():
    """Render the day-15 post: last chance to check in before the purge."""
    print '15\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
    answer += "\n"
    answer += "**THIS IS YOUR LAST DAY TO CHECK IN** (if you haven't already) **BEFORE YOUR NAME IS REMOVED FROM THE LIST!** Check in by posting a brief comment.\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    return answer
def templateFor16toPenultimate():
    """Render the post used from day 16 through the month's second-to-last day."""
    print '16 to penultimate\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
    answer += "\n"
    answer += "If you think you should still be on this list but aren't, you probably got removed in the great purge of CURRENT_MONTH_NAME 15th because you never checked in. However, if you let me know you're still with it I will re-add you.\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads since CURRENT_MONTH_NAME 15. If it is still there by CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    return answer
def templateForUltimate():
    """Render the final-day post, with the roster split by check-in status."""
    print 'Ultimate\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the last day of the Stay Clean CURRENT_MONTH_NAME challenge. This is it, folks, the day we've been waiting for... the final day of the challenge. I'll be making a congratulatory post tomorrow to honor the victors. I'm really proud of everyone who signed up for this challenge. Quitting porn is difficult, especially in an era where porn is always as close as a few keystrokes, and triggers are absolutely everywhere. Everybody who gave it their best shot deserves to take a minute right now to feel good about themselves.\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    # TODO: need to do the part where it lists the checked in and non-checked in participants separately.
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**.\n\n"
    answer += templateForParticipantsOnFinalDay()
    return answer
def templateToUse():
    """Pick the post template appropriate for today's day of the month."""
    if currentDayOfMonthIndex == 1:
        return templateFor1()
    elif currentDayOfMonthIndex == 2:
        return templateFor2()
    elif currentDayOfMonthIndex == 3:
        return templateFor3()
    elif currentDayOfMonthIndex == 4:
        return templateFor4()
    elif 5 <= currentDayOfMonthIndex <= 9:
        return templateFor5to9()
    elif 10 <= currentDayOfMonthIndex <= 14:
        return templateFor10to14()
    # Was a bare "if" that silently restarted the chain; harmless only
    # because every earlier branch returns. "elif" states the intent.
    elif currentDayOfMonthIndex == 15:
        return templateFor15()
    elif 16 <= currentDayOfMonthIndex <= currentMonthPenultimateDayIndex:
        return templateFor16toPenultimate()
    else:
        # Last day of the month (or anything past the penultimate index).
        return templateForUltimate()
def stringToPrint():
    """Select today's template and fill in every placeholder token."""
    # (placeholder, replacement) pairs, applied in order.
    substitutions = [
        ('NUMBER_STILL_IN', str(numberStillIn)),
        ('INITIAL_NUMBER', str(initialNumber)),
        ('PERCENT_STILL_IN', str(percentStillIn)),
        ('CURRENT_MONTH_INDEX', str(currentMonthIndex)),
        ('CURRENT_MONTH_TOTAL_DAYS', str(currentMonthTotalDays)),
        ('CURRENT_MONTH_PENULTIMATE_DAY_INDEX', str(currentMonthPenultimateDayIndex)),
        ('CURRENT_MONTH_NAME', currentMonthName),
        ('NEXT_MONTH_INDEX', str(nextMonthIndex)),
        ('NEXT_MONTH_NAME', nextMonthName),
        ('CURRENT_DAY_OF_MONTH_INDEX', str(currentDayOfMonthIndex)),
        ('CURRENT_DAY_OF_MONTH_NAME', currentDayOfMonthName),
        ('CURRENT_DAY_OF_WEEK_NAME', currentDayOfWeekName),
    ]
    answer = templateToUse()
    for placeholder, value in substitutions:
        answer = re.sub(placeholder, value, answer)
    return answer
# Build today's post, echo it between banners for eyeballing, and put it on
# the clipboard ready to paste into reddit.
outputString = stringToPrint()
banner = "============================================================="
print(banner)
print(outputString)
print(banner)
pyperclip.copy(outputString)
|
#!/usr/bin/python
# TODO: issues with new oauth2 stuff. Keep using older version of Python for now.
# #!/usr/bin/env python
from participantCollection import ParticipantCollection
import re
import datetime
import pyperclip
# Previously marked "# Edit Me!": the month length was hand-edited every
# month. Everything below is now derived from today's date instead.
_today = datetime.date.today()
currentMonthIndex = _today.month
# Last day of the current month: day 28 + 4 days always lands in the next
# month; back up one day from that month's first day.
currentMonthTotalDays = ((_today.replace(day=28) + datetime.timedelta(days=4)).replace(day=1) - datetime.timedelta(days=1)).day
currentMonthPenultimateDayIndex = currentMonthTotalDays - 1
# Single name table shared by the current- and next-month lookups (the
# original duplicated this dict).
monthNames = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}
currentMonthName = monthNames[currentMonthIndex]
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = monthNames[nextMonthIndex]
currentDayOfMonthIndex = _today.day
# TODO: testing...
# currentDayOfMonthIndex = 31
currentDayOfMonthName = {1: 'first', 2: 'second', 3: 'third', 4: 'fourth', 5: 'fifth', 6: 'sixth', 7: 'seventh', 8: 'eighth', 9: 'ninth', 10: 'tenth', 11: 'eleventh', 12: 'twelfth', 13: 'thirteenth', 14: 'fourteenth', 15: 'fifteenth', 16: 'sixteenth', 17: 'seventeenth', 18: 'eighteenth', 19: 'nineteenth', 20: 'twentieth', 21: 'twenty-first', 22: 'twenty-second', 23: 'twenty-third', 24: 'twenty-fourth', 25: 'twenty-fifth', 26: 'twenty-sixth', 27: 'twenty-seventh', 28: 'twenty-eighth', 29: 'twenty-ninth', 30: 'thirtieth', 31: 'thirty-first'}[currentDayOfMonthIndex]
# Today's weekday name, for the CURRENT_DAY_OF_WEEK_NAME placeholder.
currentDayOfWeekName = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}[datetime.date.today().weekday()]
# Roster statistics used by every template below.
participants = ParticipantCollection()
numberStillIn = participants.sizeOfParticipantsWhoAreStillIn()
initialNumber = participants.size()
# BUG FIX: under Python 2, 100 * int / int truncates before round() ever sees
# a fractional part (e.g. 7 of 9 showed as 77%, not 78%). Force float
# division so the percentage is genuinely rounded; guard against an empty
# roster instead of dividing by zero.
percentStillIn = int(round(100.0 * numberStillIn / initialNumber)) if initialNumber else 0
def stringToPrintLegacy():
    """Build the legacy status blurb plus the roster of remaining participants.

    Superseded by stringToPrint(); kept for reference. Fills the placeholder
    tokens from the module-level statistics, then appends the participant
    list via the shared templateForParticipants() helper (the loop was
    previously duplicated inline here).
    """
    answer = "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    answer = re.sub('NUMBER_STILL_IN', str(numberStillIn), answer)
    answer = re.sub('INITIAL_NUMBER', str(initialNumber), answer)
    answer = re.sub('PERCENT_STILL_IN', str(percentStillIn), answer)
    # Reuse the shared roster renderer instead of duplicating its loop.
    answer += templateForParticipants()
    return answer
def templateForParticipants():
    """Render the markdown roster of everyone still in the challenge.

    Each entry is "/u/<name>", with a trailing " ~" for anyone who has not
    yet checked in, and a blank line after every entry.
    """
    entries = []
    for member in participants.participantsWhoAreStillIn():
        marker = "" if member.hasCheckedIn else " ~"
        entries.append("/u/" + member.name + marker + "\n\n")
    return "".join(entries)
def templateForParticipantsOnFinalDay():
    """Render the final-day roster, split into checked-in and not-checked-in groups."""
    pieces = []
    pieces.append("These participants have checked in at least once in the last 15 days:\n")
    pieces.append("\n")
    for member in participants.participantsWhoAreStillInAndHaveCheckedIn():
        pieces.append("/u/" + member.name + "\n")
    pieces.append("\n")
    pieces.append("These participants have not reported a relapse, so they are still in the running, but **if they do not check in by the end of today, they will be removed from the list, and will not be considered victorious**:\n")
    pieces.append("\n")
    for member in participants.participantsWhoAreStillInAndHaveNotCheckedIn():
        pieces.append("/u/" + member.name + " ~\n")
    pieces.append("\n")
    return "".join(pieces)
def templateFor1():
    """Render the day-1 post: rules intro plus the full signup roster."""
    print '1\n\n'
    answer = ""
    # NOTE(review): these two banner prints look like leftover debug output;
    # the top-level script already prints its own banners around the final
    # string -- confirm before removing.
    print "============================================================="
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. ~~We will no longer be accepting new signups.~~ Good news! We will be be accepting late signups for the next 3 days. If you forgot to sign up for the CURRENT_MONTH_NAME challenge, just leave a \"sign me up\" comment below, and I'll add you. Best of luck to everyone here!\n"
    answer += "\n"
    answer += "Here's how this thing works:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- ~~We will not be accepting any new participants~~, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "Here are our **INITIAL_NUMBER** original participants:\n\n"
    answer += templateForParticipants()
    print "============================================================="
    return answer
def templateFor2():
    """Render the day-2 post: second day of the 3-day late-signup grace period."""
    print '2\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. This is the second day of our 3 day late-signup grace period. If you forgot to sign up for the CURRENT_MONTH_NAME challenge, just leave a \"sign me up\" comment below, and I'll add you.\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- ~~We will not be accepting any new participants~~, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    return answer
def templateFor3():
    """Render the day-3 post: final day of the late-signup grace period."""
    print '3\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. This is the last day of our 3 day late-signup grace period. If you forgot to sign up for the CURRENT_MONTH_NAME challenge, just leave a \"sign me up\" comment below, and I'll add you. After today, further signup requests will be silently ignored.\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- ~~We will not be accepting any new participants~~, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    return answer
def templateFor4():
    """Render the day-4 post: the late-signup grace period has just closed."""
    print '4\n\n'
    answer = ""
    answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. Our 3 day late-signup grace period is now over. If you forgot to sign up, it's too late for CURRENT_MONTH_NAME, but feel free to leave comments here anyway, and we'll see you in NEXT_MONTH_NAME.\n"
    answer += "\n"
    answer += "Guidelines:\n"
    answer += "\n"
    answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
    answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
    answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
    answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
    answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
    answer += "\n"
    answer += "Good luck!\n"
    answer += "\n"
    answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
    answer += "\n"
    answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    answer += templateForParticipants()
    return answer
def templateFor5to9():
print '5 to 9\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateFor10to14():
print '10 to 14\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "**THE COUNTDOWN: Attention everyone!** You have " + str(15 - currentDayOfMonthIndex) + " days to make an update comment (if you haven't already) to be counted as an active participant! **Otherwise your name will be REMOVED from the list** on CURRENT_MONTH_INDEX/15!!\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateFor15():
print '15\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "**THIS IS YOUR LAST DAY TO CHECK IN** (if you haven't already) **BEFORE YOUR NAME IS REMOVED FROM THE LIST!** Check in by posting a brief comment.\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateFor16toPenultimate():
print '16 to penultimate\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "If you think you should still be on this list but aren't, you probably got removed in the great purge of CURRENT_MONTH_NAME 15th because you never checked in. However, if you let me know you're still with it I will re-add you.\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads since CURRENT_MONTH_NAME 15. If it is still there by CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateForUltimate():
print 'Ultimate\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the last day of the Stay Clean CURRENT_MONTH_NAME challenge. This is it, folks, the day we've been waiting for... the final day of the challenge. I'll be making a congratulatory post tomorrow to honor the victors. I'm really proud of everyone who signed up for this challenge. Quitting porn is difficult, especially in an era where porn is always as close as a few keystrokes, and triggers are absolutely everywhere. Everybody who gave it their best shot deserves to take a minute right now to feel good about themselves.\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
# TODO: need to do the part where it lists the checked in and non-checked in participants separately.
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**.\n\n"
answer += templateForParticipantsOnFinalDay()
return answer
def templateToUse():
    """Return the rendered post template matching the module-global currentDayOfMonthIndex.

    Days 1-4 each have a bespoke template; days 5-9, 10-14, and
    16..penultimate share range templates; day 15 (purge day) and the
    final day of the month are special-cased.
    """
    if currentDayOfMonthIndex == 1:
        return templateFor1()
    elif currentDayOfMonthIndex == 2:
        return templateFor2()
    elif currentDayOfMonthIndex == 3:
        return templateFor3()
    elif currentDayOfMonthIndex == 4:
        return templateFor4()
    elif 5 <= currentDayOfMonthIndex <= 9:
        return templateFor5to9()
    elif 10 <= currentDayOfMonthIndex <= 14:
        return templateFor10to14()
    # Was a bare `if` in the original; behavior was unchanged only because every
    # earlier branch returns. Made `elif` for a single consistent dispatch chain.
    elif currentDayOfMonthIndex == 15:
        return templateFor15()
    elif 16 <= currentDayOfMonthIndex <= currentMonthPenultimateDayIndex:
        return templateFor16toPenultimate()
    else:
        return templateForUltimate()
def stringToPrint():
    """Render today's template and expand every placeholder token in it.

    Uses str.replace instead of re.sub: the placeholders are literal
    substrings, not patterns, and re.sub would misinterpret any backslash
    in a replacement value. The original substitution order is preserved
    (safe because no token here is a substring of a later one).
    """
    answer = templateToUse()
    # (placeholder, replacement) pairs in the original application order.
    substitutions = (
        ('NUMBER_STILL_IN', str(numberStillIn)),
        ('INITIAL_NUMBER', str(initialNumber)),
        ('PERCENT_STILL_IN', str(percentStillIn)),
        ('CURRENT_MONTH_INDEX', str(currentMonthIndex)),
        ('CURRENT_MONTH_TOTAL_DAYS', str(currentMonthTotalDays)),
        ('CURRENT_MONTH_PENULTIMATE_DAY_INDEX', str(currentMonthPenultimateDayIndex)),
        ('CURRENT_MONTH_NAME', currentMonthName),
        ('NEXT_MONTH_INDEX', str(nextMonthIndex)),
        ('NEXT_MONTH_NAME', nextMonthName),
        ('CURRENT_DAY_OF_MONTH_INDEX', str(currentDayOfMonthIndex)),
        ('CURRENT_DAY_OF_MONTH_NAME', currentDayOfMonthName),
        ('CURRENT_DAY_OF_WEEK_NAME', currentDayOfWeekName),
    )
    for placeholder, value in substitutions:
        answer = answer.replace(placeholder, value)
    return answer
# Script entry point: render today's post, echo it between separator rules,
# and place it on the system clipboard for pasting into Reddit.
outputString = stringToPrint()
print "============================================================="
print outputString
print "============================================================="
# pyperclip (third-party) copies the rendered post to the OS clipboard.
pyperclip.copy(outputString)
|
en
| 0.698422
|
#!/usr/bin/python # TODO: issues with new oauth2 stuff. Keep using older version of Python for now. # #!/usr/bin/env python # Edit Me! # TODO: testing... # currentDayOfMonthIndex = 31 # print "There are currently **" + str(numberStillIn) + " out of " + str(initialNumber) +"** original participants. That's **" + str(int(round(100*numberStillIn/initialNumber,0))) + "%** Here is the list of participants still with the challenge:\n" #gid=0).\n" #gid=0).\n" #gid=0).\n" #gid=0).\n" #gid=0).\n" #gid=0).\n" #gid=0).\n" #gid=0).\n" #gid=0).\n" # TODO: need to do the part where it lists the checked in and non-checked in participants separately.
| 2.486807
| 2
|
apps/classifier_grid/canvas.py
|
neuralbotnetworks/ncappzoo
| 968
|
6627698
|
<gh_stars>100-1000
import numpy as numpy
import cv2
class Canvas:
    """Composites a grid of thumbnail images, one enlarged "current" image,
    and rows of status text onto a single OpenCV BGR image.

    Layout (top to bottom): a top info bar (holds the large image centered,
    plus left/right text rows), the thumbnail grid, and a thin unused bottom
    bar. Images start dimmed ("undone") and are drawn at full opacity with a
    highlight rectangle as they are marked done.
    """

    # Minimum pixel heights for the info bars (used when 1%/10% of the
    # canvas height would be too small).
    BOTTOM_INFO_BAR_HEIGHT_MIN = 20
    TOP_INFO_BAR_HEIGHT_MIN = 150
    # 1-based text row slots within the top info bar; rows sharing a number
    # overwrite each other's background rectangle.
    FPS_TEXT_ROW = 2
    TIMER_TEXT_ROW = 1
    INFERENCE_LABEL_TEXT_ROW = 1
    PAUSE_TEXT_ROW = 1
    LOADING_TEXT_ROW = 1
    DONE_COUNT_TEXT_ROW = 2
    PRESS_ANY_KEY_ROW = 3
    TEXT_FONT = cv2.FONT_HERSHEY_SIMPLEX

    def __init__(self, canvas_width:int, canvas_height:int, images_down:int, images_across:int):
        """Create a black canvas and precompute all layout geometry.

        :param canvas_width: total canvas width in pixels
        :param canvas_height: total canvas height in pixels
        :param images_down: number of thumbnail rows in the grid
        :param images_across: number of thumbnail columns in the grid
        """
        self._images_down = images_down
        self._images_across = images_across
        self._grid_max_images = self._images_across * self._images_down
        self._grid_max_images_str = str(self._grid_max_images)
        # Text rendering parameters; bg height padded so text rows don't touch.
        self._text_scale = 1.0
        self._text_background_color = (40, 40, 40)
        self._text_color = (255, 255, 255)  # white text
        text_size = cv2.getTextSize("ZZ", Canvas.TEXT_FONT, self._text_scale, 1)[0]
        self._text_height = text_size[1]
        self._text_bg_height = self._text_height + 14
        # Total canvas dimensions.
        self._canvas_width = canvas_width
        self._canvas_height = canvas_height
        # Bottom bar: currently unused, kept thin (1% of height, min 20 px).
        self._bottom_bar_height = int(self._canvas_height * 0.01)
        if self._bottom_bar_height < Canvas.BOTTOM_INFO_BAR_HEIGHT_MIN:
            self._bottom_bar_height = Canvas.BOTTOM_INFO_BAR_HEIGHT_MIN
        self._bottom_bar_width = self._canvas_width
        # Top bar: 10% of height, min 150 px.
        self._top_bar_height = int(self._canvas_height * 0.1)
        if self._top_bar_height < Canvas.TOP_INFO_BAR_HEIGHT_MIN:
            self._top_bar_height = Canvas.TOP_INFO_BAR_HEIGHT_MIN
        self._top_bar_width = canvas_width
        # Top info bar rectangle.
        self._top_bar_left = 0
        self._top_bar_right = self._top_bar_left + self._top_bar_width
        self._top_bar_top = 0
        self._top_bar_bottom = self._top_bar_top + self._top_bar_height
        # Bottom info bar rectangle.
        self._bottom_bar_left = 0
        self._bottom_bar_right = self._bottom_bar_left + self._bottom_bar_width
        self._bottom_bar_top = self._canvas_height - self._bottom_bar_height
        self._bottom_bar_bottom = self._bottom_bar_top + self._bottom_bar_height
        # Grid dimensions: fills the space between the two bars.
        self._grid_top = 0 + self._top_bar_height
        max_grid_height = self._canvas_height - self._bottom_bar_height - self._top_bar_height
        max_grid_width = self._canvas_width
        self._grid_line_thickness = 1
        # Clear the whole canvas to start (black BGR image).
        self._canvas_image = numpy.zeros((self._canvas_height, self._canvas_width, 3), numpy.uint8)
        # Thumbnail cell size; -1 leaves room for the final grid line.
        self._image_width = int((max_grid_width-1)/self._images_across)
        self._image_height = int((max_grid_height-1)/self._images_down)
        self._grid_width = self._images_across * self._image_width
        self._grid_left = int((self._canvas_width - self._grid_width)/2)  # center horizontally
        self._grid_right = self._grid_left + self._grid_width
        self._grid_height = self._images_down * self._image_height
        self._grid_bottom = self._grid_top + self._grid_height
        # Enlarged "current image" box, centered in the top bar.
        self._large_image_width = 112
        self._large_image_height = 112
        self._large_image_left = int(canvas_width/2) - int(self._large_image_width/2)
        self._large_image_right = self._large_image_left + self._large_image_width
        self._large_image_top = 8
        self._large_image_bottom = self._large_image_top + self._large_image_height
        # Padding so top-bar text isn't flush against the window edge; the
        # left text column stops short of the large image.
        self._top_bar_text_left = self._top_bar_left + 10
        self._top_bar_text_top = self._top_bar_top + 10
        self._top_bar_text_left_width = (self._large_image_left - 10) - self._top_bar_text_left
        # Grid-line color (mid gray), stored as separate channels.
        self._grid_red = 128
        self._grid_green = 128
        self._grid_blue = 128
        self._draw_grid_lines()
        # Highlight rectangle color for completed images (white).
        self._done_red = 255
        self._done_green = 255
        self._done_blue = 255
        self._image_done_rect_thickness = 2
        self._grid_image_list = list()   # thumbnails resized to cell size
        self._large_image_list = list()  # same images resized for the large box
        self._draw_lines_large_to_grid = False
        # Blend factor for not-yet-done thumbnails (typo `_gird_` fixed).
        self._grid_undone_image_transparency = 0.6
        # Vertical offsets (pixels) of the three top-bar text rows.
        self._num_bar_top_text_rows = 3
        self._top_bar_text_row_tops = [None] * self._num_bar_top_text_rows
        self._top_bar_text_row_tops[0] = 12
        self._top_bar_text_row_tops[1] = self._top_bar_text_row_tops[0] + self._text_bg_height + 10
        self._top_bar_text_row_tops[2] = self._top_bar_text_row_tops[1] + self._text_bg_height + 10
        self._done_count = 0

    def load_images(self, image_list:list):
        """Resize and cache up to grid_max_images images, drawing each dimmed.

        Extra images beyond the grid capacity are silently ignored.
        """
        self._grid_image_list.clear()
        self._large_image_list.clear()
        transparency = self._grid_undone_image_transparency
        for image_index in range(0, len(image_list)):
            if image_index >= self._grid_max_images:
                break
            temp_large_image = cv2.resize(image_list[image_index], (self._large_image_width, self._large_image_height))
            self._large_image_list.append(temp_large_image)
            temp_image = cv2.resize(image_list[image_index], (self._image_width, self._image_height))
            self._grid_image_list.append(temp_image)
            self._draw_grid_image(image_index, transparency)
        return

    def reset_canvas(self):
        """Clear the canvas, redraw grid lines and dimmed images, zero the done count."""
        self._canvas_image = numpy.zeros((self._canvas_height, self._canvas_width, 3), numpy.uint8)
        self._draw_grid_lines()
        self._draw_undone_images()
        self._done_count = 0

    def _draw_undone_images(self):
        """Redraw every cached thumbnail at the dimmed (undone) transparency."""
        for image_index in range(0, len(self._grid_image_list)):
            if image_index >= self._grid_max_images:
                break
            self._draw_grid_image(image_index, self._grid_undone_image_transparency)

    def _draw_grid_image(self, image_index:int, transparency:float):
        """Alpha-blend thumbnail `image_index` into its grid cell.

        transparency is the weight of the existing canvas pixels (0.0 = draw
        the image fully opaque).
        """
        if image_index >= self._grid_max_images:
            return
        image_left, image_top, image_right, image_bottom = self._get_image_square(image_index)
        self._canvas_image[image_top:image_bottom, image_left:image_right] = \
            cv2.addWeighted(self._canvas_image[image_top:image_bottom, image_left:image_right], transparency,
                            self._grid_image_list[image_index], 1.0 - transparency, 0.0)

    def _draw_large_image(self, image_index:int, transparency:float):
        """Alpha-blend the enlarged copy of image `image_index` into the top-bar box."""
        if image_index >= self._grid_max_images:
            return
        self._canvas_image[self._large_image_top:self._large_image_bottom, self._large_image_left:self._large_image_right] = \
            cv2.addWeighted(self._canvas_image[self._large_image_top:self._large_image_bottom, self._large_image_left:self._large_image_right], transparency,
                            self._large_image_list[image_index], 1.0 - transparency, 0.0)

    def set_draw_lines(self, val:bool):
        """Enable/disable the line drawn from the large image to each done cell."""
        self._draw_lines_large_to_grid = val

    def _draw_grid_lines(self):
        """Draw the gray grid lines delimiting the thumbnail cells."""
        blue = self._grid_blue
        green = self._grid_green
        red = self._grid_red
        # Horizontal lines (rows + 1 of them).
        for line_index in range(0, self._images_down+1):
            line_y = self._grid_top + (line_index * self._image_height)
            cv2.line(self._canvas_image, (self._grid_left, line_y), (self._grid_right, line_y), (blue, green, red),
                     self._grid_line_thickness)
        # Vertical lines (columns + 1 of them).
        for line_index in range(0, self._images_across+1):
            line_x = self._grid_left + (line_index * self._image_width)
            cv2.line(self._canvas_image, (line_x, self._grid_top), (line_x, self._grid_top + ((self._images_down) * self._image_height)), (blue, green, red),
                     self._grid_line_thickness)

    def mark_image_done(self, image_index:int, label_text:str=None):
        """Mark image `image_index` complete: draw it opaque with a highlight box,
        show it in the large box, and update the label / done-count text.

        :param label_text: optional inference label to show in the top bar
        """
        self._done_count += 1
        if image_index >= self._grid_max_images:
            return
        self._draw_grid_image(image_index, 0.0)   # full opacity
        self._draw_large_image(image_index, 0.0)
        image_left, image_top, image_right, image_bottom = self._get_image_square(image_index)
        cv2.rectangle(self._canvas_image, (image_left, image_top), (image_right, image_bottom),
                      (self._done_blue, self._done_green, self._done_red), self._image_done_rect_thickness)
        if label_text is not None:
            self.draw_inference_label(label_text)
        self.draw_done_count()
        if self._draw_lines_large_to_grid:
            # Blue line from the cell center up to the bottom of the large image.
            cv2.line(self._canvas_image,
                     (image_left+int(self._image_width/2), image_top + int(self._image_height/2)),
                     (self._large_image_left + int(self._large_image_width/2), self._large_image_bottom), (255, 0, 0), 1)

    def _get_image_square(self, image_index:int):
        """Return (left, top, right, bottom) pixel bounds of grid cell `image_index`
        (row-major ordering)."""
        row = int(image_index / self._images_across)
        col = image_index - (row * self._images_across)
        image_left = self._grid_left + (self._image_width * col)
        image_top = self._grid_top + (self._image_height * row)
        image_right = image_left + self._image_width
        image_bottom = image_top + self._image_height
        return image_left, image_top, image_right, image_bottom

    def get_canvas_image(self):
        """Return the composited canvas (a numpy BGR image, not a copy)."""
        return self._canvas_image

    def show_loading(self):
        """Show the 'Loading Images...' message in the top bar."""
        self._put_text_top_bar_left("Loading Images...", Canvas.LOADING_TEXT_ROW)

    def clear_loading(self):
        """Erase the loading message by repainting its background rectangle."""
        left, top, right, bottom = self._get_top_bar_left_text_bg_rect(Canvas.LOADING_TEXT_ROW)
        cv2.rectangle(self._canvas_image, (left, top), (right, bottom),
                      self._text_background_color, -1)

    def pause_start(self):
        """Show the 'Paused...' message in the top bar."""
        self._put_text_top_bar_left("Paused...", Canvas.PAUSE_TEXT_ROW)

    def press_any_key(self):
        """Show the 'Press any key to continue...' prompt."""
        self._put_text_top_bar_left("Press any key to continue...", Canvas.PRESS_ANY_KEY_ROW)

    def press_quit_key(self):
        """Show the 'Press q to quit.' prompt."""
        self._put_text_top_bar_left("Press q to quit.", Canvas.PRESS_ANY_KEY_ROW)

    def show_device(self, device:str):
        """Show the inference device name on the right side of the top bar."""
        self._put_text_top_bar_right("Device: "+ device, Canvas.PRESS_ANY_KEY_ROW)

    def clear_press_any_key(self):
        """Erase the key prompt by repainting its background rectangle."""
        left, top, right, bottom = self._get_top_bar_left_text_bg_rect(Canvas.PRESS_ANY_KEY_ROW)
        cv2.rectangle(self._canvas_image, (left, top), (right, bottom),
                      self._text_background_color, -1)

    def pause_stop(self):
        """Erase the 'Paused...' message by repainting its background rectangle."""
        left, top, right, bottom = self._get_top_bar_left_text_bg_rect(Canvas.PAUSE_TEXT_ROW)
        cv2.rectangle(self._canvas_image, (left, top), (right, bottom),
                      self._text_background_color, -1)

    def draw_inference_label(self, label_text:str):
        """Show an inference result label on the left side of the top bar."""
        self._put_text_top_bar_left(label_text, Canvas.INFERENCE_LABEL_TEXT_ROW)

    def draw_done_count(self):
        """Show the 'Images: done/total' counter in the top bar."""
        draw_str = "Images: " + str(self._done_count) +"/" + self._grid_max_images_str
        self._put_text_top_bar_left(draw_str, Canvas.DONE_COUNT_TEXT_ROW)

    def hide_done_count(self):
        """Erase the done counter by repainting its background rectangle."""
        left, top, right, bottom = self._get_top_bar_left_text_bg_rect(Canvas.DONE_COUNT_TEXT_ROW)
        cv2.rectangle(self._canvas_image, (left, top), (right, bottom),
                      self._text_background_color, -1)

    def clear_top_bar(self):
        """Blank the entire top info bar (also erases the large image)."""
        clear_image = numpy.full((self._top_bar_height, self._top_bar_width, 3),
                                 (0, 0, 0), numpy.uint8)
        self._canvas_image[self._top_bar_top:self._top_bar_bottom, self._top_bar_left: self._top_bar_right] = clear_image

    def show_fps(self, fps:float):
        """Show the frames-per-second figure on the right side of the top bar."""
        fps_str = "FPS: %2.1f" % fps
        self._put_text_top_bar_right(fps_str, Canvas.FPS_TEXT_ROW)

    def hide_fps(self):
        """Erase the FPS text by repainting its background rectangle."""
        left, top, right, bottom = self._get_top_bar_right_text_bg_rect(Canvas.FPS_TEXT_ROW)
        cv2.rectangle(self._canvas_image, (left, top), (right, bottom),
                      self._text_background_color, -1)

    def show_timer(self, time:float):
        """Show the elapsed-time figure on the right side of the top bar."""
        time_str = "Elapsed: %3.1f" % time
        self._put_text_top_bar_right(time_str, Canvas.TIMER_TEXT_ROW)

    def hide_timer(self):
        """Erase the timer text by repainting its background rectangle."""
        left, top, right, bottom = self._get_top_bar_right_text_bg_rect(Canvas.TIMER_TEXT_ROW)
        cv2.rectangle(self._canvas_image, (left, top), (right, bottom),
                      self._text_background_color, -1)

    def _put_text_top_bar_right(self, text:str, text_row:int=1):
        """Paint the right-column background for `text_row` then right-align `text` in it."""
        left, top, right, bottom = self._get_top_bar_right_text_bg_rect(text_row)
        cv2.rectangle(self._canvas_image, (left, top), (right, bottom),
                      self._text_background_color, -1)
        text_top_index = text_row -1
        # text_left == -1 means "right-align against the canvas edge".
        self._put_text_on_canvas(text, -1, self._top_bar_text_row_tops[text_top_index], 0)

    def _put_text_top_bar_left(self, text:str, text_row:int=1):
        """Paint the left-column background for `text_row` then draw `text` in it."""
        left, top, right, bottom = self._get_top_bar_left_text_bg_rect(text_row)
        cv2.rectangle(self._canvas_image, (left, top), (right, bottom),
                      self._text_background_color, -1)
        text_top_index = text_row -1
        self._put_text_on_canvas(text, self._top_bar_text_left, self._top_bar_text_row_tops[text_top_index], 0)

    def _get_top_bar_right_text_leftmost(self):
        """Leftmost x of the right text column (just right of the large image)."""
        return self._large_image_right + 10

    def _get_top_bar_left_text_leftmost(self):
        """Leftmost x of the left text column."""
        return self._top_bar_text_left

    def _get_top_bar_right_text_bg_rect(self, text_row:int):
        """Return (left, top, right, bottom) of the right-column bg rect for `text_row`."""
        left = self._get_top_bar_right_text_leftmost()
        text_top_index = text_row - 1
        top = self._top_bar_text_row_tops[text_top_index] - 4
        right = self._canvas_width - 10
        bottom = top + self._text_bg_height
        return (left, top, right, bottom)

    def _get_top_bar_left_text_bg_rect(self, text_row:int):
        """Return (left, top, right, bottom) of the left-column bg rect for `text_row`."""
        left = self._get_top_bar_left_text_leftmost()
        text_top_index = text_row - 1
        top = self._top_bar_text_row_tops[text_top_index] - 4
        right = self._top_bar_text_left + self._top_bar_text_left_width
        bottom = top + self._text_bg_height
        return (left, top, right, bottom)

    def _put_text_on_canvas(self, text:str, text_left:int, text_top: int, text_min_width:int):
        """Draw `text` at (text_left, text_top); text_left == -1 right-aligns it.

        text_min_width is currently unused (kept for interface compatibility).
        """
        text_size = cv2.getTextSize(text, Canvas.TEXT_FONT, self._text_scale, 1)[0]
        text_width = text_size[0]
        text_height = text_size[1]
        if text_left == -1:
            display_image_width = self._canvas_image.shape[1]
            text_left = display_image_width - text_width - 10
        text_bottom = text_top + text_height
        # Use the shared class font (the original hard-coded the same constant here).
        cv2.putText(self._canvas_image, text, (text_left, text_bottom), Canvas.TEXT_FONT, self._text_scale,
                    self._text_color, 1)
|
import numpy as numpy
import cv2
class Canvas:
#IMAGES_ACROSS = 32
#IMAGES_DOWN = 12
BOTTOM_INFO_BAR_HEIGHT_MIN = 20
TOP_INFO_BAR_HEIGHT_MIN = 150
FPS_TEXT_ROW = 2
TIMER_TEXT_ROW = 1
INFERENCE_LABEL_TEXT_ROW = 1
PAUSE_TEXT_ROW = 1
LOADING_TEXT_ROW = 1
DONE_COUNT_TEXT_ROW = 2
PRESS_ANY_KEY_ROW = 3
TEXT_FONT = cv2.FONT_HERSHEY_SIMPLEX
def __init__(self, canvas_width:int, canvas_height:int, images_down:int, images_across:int):
self._images_down = images_down
self._images_across = images_across
self._grid_max_images = self._images_across * self._images_down
self._grid_max_images_str = str(self._grid_max_images)
self._text_scale = 1.0
self._text_background_color = (40, 40, 40)
self._text_color = (255, 255, 255) # white text
text_size = cv2.getTextSize("ZZ", Canvas.TEXT_FONT, self._text_scale, 1)[0]
self._text_height = text_size[1]
self._text_bg_height = self._text_height + 14
#total canvas dimensions
self._canvas_width = canvas_width
self._canvas_height = canvas_height
# for now no use for bottom bar
self._bottom_bar_height = int(self._canvas_height * 0.01)
if (self._bottom_bar_height < Canvas.BOTTOM_INFO_BAR_HEIGHT_MIN):
self._bottom_bar_height = Canvas.BOTTOM_INFO_BAR_HEIGHT_MIN
self._bottom_bar_width = self._canvas_width
self._top_bar_height = int(self._canvas_height * 0.1)
if (self._top_bar_height < Canvas.TOP_INFO_BAR_HEIGHT_MIN):
self._top_bar_height = Canvas.TOP_INFO_BAR_HEIGHT_MIN
self._top_bar_width = canvas_width
# top info bar
self._top_bar_left = 0
self._top_bar_right = self._top_bar_left + self._top_bar_width
self._top_bar_top = 0
self._top_bar_bottom = self._top_bar_top + self._top_bar_height
# bottom info bar
self._bottom_bar_left = 0
self._bottom_bar_right = self._bottom_bar_left + self._bottom_bar_width
self._bottom_bar_top = self._canvas_height - self._bottom_bar_height
self._bottom_bar_bottom = self._bottom_bar_top + self._bottom_bar_height
#grid dimensions
self._grid_top = 0 + self._top_bar_height
max_grid_height = self._canvas_height - self._bottom_bar_height - self._top_bar_height
max_grid_width = self._canvas_width
self._grid_line_thickness = 1
#clear whole canvas to start
self._canvas_image = numpy.zeros((self._canvas_height, self._canvas_width, 3), numpy.uint8)
self._image_width = int((max_grid_width-1)/self._images_across)
self._image_height = int((max_grid_height-1)/self._images_down)
self._grid_width = self._images_across * self._image_width
self._grid_left = int((self._canvas_width - self._grid_width)/2)
self._grid_right = self._grid_left + self._grid_width
self._grid_height = self._images_down * self._image_height
self._grid_bottom = self._grid_top + self._grid_height
self._large_image_width = 112
self._large_image_height = 112
self._large_image_left = int(canvas_width/2) - int(self._large_image_width/2)
self._large_image_right = self._large_image_left + self._large_image_width
self._large_image_top = 8
self._large_image_bottom = self._large_image_top + self._large_image_height
# add some padding for the text that goes on top bar so not right against the edge of window
self._top_bar_text_left = self._top_bar_left + 10
self._top_bar_text_top = self._top_bar_top + 10
self._top_bar_text_left_width = (self._large_image_left - 10) - self._top_bar_text_left
self._grid_red = 128
self._grid_green = 128
self._grid_blue = 128
self._draw_grid_lines()
self._done_red = 255
self._done_green = 255
self._done_blue = 255
self._image_done_rect_thickness = 2
self._grid_image_list = list()
self._large_image_list = list()
self._draw_lines_large_to_grid = False
self._gird_undone_image_transparency = 0.6
self._num_bar_top_text_rows = 3
self._top_bar_text_row_tops = [None] * self._num_bar_top_text_rows
self._top_bar_text_row_tops[0] = 12
self._top_bar_text_row_tops[1] = self._top_bar_text_row_tops[0] + self._text_bg_height + 10
self._top_bar_text_row_tops[2] = self._top_bar_text_row_tops[1] + self._text_bg_height + 10
self._done_count = 0
def load_images(self, image_list:list):
self._grid_image_list.clear()
self._large_image_list.clear()
transparency = self._gird_undone_image_transparency
for image_index in range(0, len(image_list)):
if (image_index >= self._grid_max_images):
break
temp_large_image = cv2.resize(image_list[image_index], (self._large_image_width, self._large_image_height))
self._large_image_list.append(temp_large_image)
temp_image = cv2.resize(image_list[image_index], (self._image_width, self._image_height))
self._grid_image_list.append(temp_image)
self._draw_grid_image(image_index, transparency)
return
def reset_canvas(self):
#clear whole canvas to start
self._canvas_image = numpy.zeros((self._canvas_height, self._canvas_width, 3), numpy.uint8)
self._draw_grid_lines()
self._draw_undone_images()
self._done_count = 0
def _draw_undone_images(self):
for image_index in range(0, len(self._grid_image_list)):
if (image_index >= self._grid_max_images):
break
self._draw_grid_image(image_index, self._gird_undone_image_transparency)
def _draw_grid_image(self, image_index:int, transparency:float):
if (image_index >= self._grid_max_images):
return
image_left, image_top, image_right, image_bottom = self._get_image_square(image_index)
self._canvas_image[image_top:image_bottom, image_left:image_right] = \
cv2.addWeighted(self._canvas_image[image_top:image_bottom, image_left:image_right], transparency,
self._grid_image_list[image_index], 1.0 - transparency, 0.0)
def _draw_large_image(self, image_index:int, transparency:float):
if (image_index >= self._grid_max_images):
return
#image_left, image_top, image_right, image_bottom = self._get_image_square(image_index)
self._canvas_image[self._large_image_top:self._large_image_bottom, self._large_image_left:self._large_image_right] = \
cv2.addWeighted(self._canvas_image[self._large_image_top:self._large_image_bottom, self._large_image_left:self._large_image_right], transparency,
self._large_image_list[image_index], 1.0 - transparency, 0.0)
def set_draw_lines(self, val:bool):
self._draw_lines_large_to_grid = val
def _draw_grid_lines(self):
blue = self._grid_blue
green = self._grid_green
red = self._grid_red
# lines going across
for line_index in range(0, self._images_down+1):
line_y = self._grid_top + (line_index * self._image_height)
cv2.line(self._canvas_image, (self._grid_left, line_y), (self._grid_right, line_y), (blue, green, red),
self._grid_line_thickness)
#lines going down
for line_index in range(0, self._images_across+1):
line_x = self._grid_left + (line_index * self._image_width)
cv2.line(self._canvas_image, (line_x, self._grid_top), (line_x, self._grid_top + ((self._images_down) * self._image_height)), (blue, green, red),
self._grid_line_thickness)
def mark_image_done(self, image_index:int, label_text:str=None):
self._done_count += 1
if (image_index >= self._grid_max_images):
return
self._draw_grid_image(image_index, 0.0)
self._draw_large_image(image_index, 0.0)
image_left, image_top, image_right, image_bottom = self._get_image_square(image_index)
cv2.rectangle(self._canvas_image, (image_left, image_top), (image_right, image_bottom),
(self._done_blue, self._done_green, self._done_red), self._image_done_rect_thickness)
if (label_text != None):
self.draw_inference_label(label_text)
self.draw_done_count()
if (self._draw_lines_large_to_grid) :
cv2.line(self._canvas_image,
(image_left+int(self._image_width/2), image_top + int(self._image_height/2)),
(self._large_image_left + int(self._large_image_width/2), self._large_image_bottom), (255, 0, 0), 1)
def _get_image_square(self, image_index:int):
row = int(image_index / self._images_across)
col = image_index - (row * self._images_across)
image_left = self._grid_left + (self._image_width * col)
image_top = self._grid_top + (self._image_height * row)
image_right = image_left + self._image_width
image_bottom = image_top + self._image_height
return image_left, image_top, image_right, image_bottom
def get_canvas_image(self):
return self._canvas_image
def show_loading(self):
self._put_text_top_bar_left("Loading Images...", Canvas.LOADING_TEXT_ROW)
def clear_loading(self):
left, top, right, bottom = self._get_top_bar_left_text_bg_rect(Canvas.LOADING_TEXT_ROW)
cv2.rectangle(self._canvas_image, (left, top), (right, bottom),
self._text_background_color, -1)
def pause_start(self):
self._put_text_top_bar_left("Paused...", Canvas.PAUSE_TEXT_ROW)
def press_any_key(self):
self._put_text_top_bar_left("Press any key to continue...", Canvas.PRESS_ANY_KEY_ROW)
def press_quit_key(self):
self._put_text_top_bar_left("Press q to quit.", Canvas.PRESS_ANY_KEY_ROW)
def show_device(self, device:str):
self._put_text_top_bar_right("Device: "+ device, Canvas.PRESS_ANY_KEY_ROW)
def clear_press_any_key(self):
left, top, right, bottom = self._get_top_bar_left_text_bg_rect(Canvas.PRESS_ANY_KEY_ROW)
cv2.rectangle(self._canvas_image, (left, top), (right, bottom),
self._text_background_color, -1)
def pause_stop(self):
left, top, right, bottom = self._get_top_bar_left_text_bg_rect(Canvas.PAUSE_TEXT_ROW)
cv2.rectangle(self._canvas_image, (left, top), (right, bottom),
self._text_background_color, -1)
def draw_inference_label(self, label_text:str):
self._put_text_top_bar_left(label_text, Canvas.INFERENCE_LABEL_TEXT_ROW)
def draw_done_count(self):
draw_str = "Images: " + str(self._done_count) +"/" + self._grid_max_images_str
self._put_text_top_bar_left(draw_str, Canvas.DONE_COUNT_TEXT_ROW)
def hide_done_count(self):
left, top, right, bottom = self._get_top_bar_left_text_bg_rect(Canvas.DONE_COUNT_TEXT_ROW)
cv2.rectangle(self._canvas_image, (left, top), (right, bottom),
self._text_background_color, -1)
def clear_top_bar(self):
clear_image = numpy.full((self._top_bar_height, self._top_bar_width, 3),
(0, 0, 0), numpy.uint8)
self._canvas_image[self._top_bar_top:self._top_bar_bottom, self._top_bar_left: self._top_bar_right] = clear_image
def show_fps(self, fps:float):
fps_str = "FPS: %2.1f" % fps
self._put_text_top_bar_right(fps_str, Canvas.FPS_TEXT_ROW)
def hide_fps(self):
left, top, right, bottom = self._get_top_bar_right_text_bg_rect(Canvas.FPS_TEXT_ROW)
cv2.rectangle(self._canvas_image, (left, top), (right, bottom),
self._text_background_color, -1)
def show_timer(self, time:float):
time_str = "Elapsed: %3.1f" % time
self._put_text_top_bar_right(time_str, Canvas.TIMER_TEXT_ROW)
def hide_timer(self):
left, top, right, bottom = self._get_top_bar_right_text_bg_rect(Canvas.TIMER_TEXT_ROW)
cv2.rectangle(self._canvas_image, (left, top), (right, bottom),
self._text_background_color, -1)
def _put_text_top_bar_right(self, text:str, text_row:int=1):
left, top, right, bottom = self._get_top_bar_right_text_bg_rect(text_row)
cv2.rectangle(self._canvas_image, (left, top), (right, bottom),
self._text_background_color, -1)
text_top_index = text_row -1
self._put_text_on_canvas(text, -1, self._top_bar_text_row_tops[text_top_index], 0)
def _put_text_top_bar_left(self, text:str, text_row:int=1):
left, top, right, bottom = self._get_top_bar_left_text_bg_rect(text_row)
cv2.rectangle(self._canvas_image, (left, top), (right, bottom),
self._text_background_color, -1)
text_top_index = text_row -1
self._put_text_on_canvas(text, self._top_bar_text_left, self._top_bar_text_row_tops[text_top_index], 0)
def _get_top_bar_right_text_leftmost(self):
return self._large_image_right + 10
def _get_top_bar_left_text_leftmost(self):
return self._top_bar_text_left
def _get_top_bar_right_text_bg_rect(self, text_row:int):
left = self._get_top_bar_right_text_leftmost()
text_top_index = text_row - 1
top = self._top_bar_text_row_tops[text_top_index] - 4
right = self._canvas_width - 10
bottom = top + self._text_bg_height
return (left, top, right, bottom)
def _get_top_bar_left_text_bg_rect(self, text_row:int):
left = self._get_top_bar_left_text_leftmost()
text_top_index = text_row - 1
top = self._top_bar_text_row_tops[text_top_index] - 4
right = self._top_bar_text_left + self._top_bar_text_left_width
bottom = top + self._text_bg_height
return (left, top, right, bottom)
def _put_text_on_canvas(self, text:str, text_left:int, text_top: int, text_min_width:int):
text_size = cv2.getTextSize(text, Canvas.TEXT_FONT, self._text_scale, 1)[0]
text_width = text_size[0]
text_height = text_size[1]
if (text_left == -1):
display_image_width = self._canvas_image.shape[1]
text_left = display_image_width - text_width - 10
text_bottom = text_top + text_height
cv2.putText(self._canvas_image, text, (text_left, text_bottom), cv2.FONT_HERSHEY_SIMPLEX, self._text_scale,
self._text_color, 1)
|
en
| 0.707245
|
#IMAGES_ACROSS = 32 #IMAGES_DOWN = 12 # white text #total canvas dimensions # for now no use for bottom bar # top info bar # bottom info bar #grid dimensions #clear whole canvas to start # add some padding for the text that goes on top bar so not right against the edge of window #clear whole canvas to start #image_left, image_top, image_right, image_bottom = self._get_image_square(image_index) # lines going across #lines going down
| 2.948647
| 3
|
tests/models/test_simple.py
|
project-lolquiz/the-backend
| 0
|
6627699
|
<reponame>project-lolquiz/the-backend<filename>tests/models/test_simple.py
from unittest import mock
from models.simple import Simple
@mock.patch('models.simple.db.session')
@mock.patch('models.simple.Simple')
def test_add_new(mock_db, mock_connection):
new_simple = Simple('a simple object')
from_db = new_simple
from_db.id = 1
mock_db.add_new.return_value = from_db
mock_connection.commit.return_value = None
mock_connection.flush.return_value = None
response = new_simple.add_new()
assert response
assert response.id == from_db.id
assert response.name == new_simple.name
|
from unittest import mock
from models.simple import Simple
@mock.patch('models.simple.db.session')
@mock.patch('models.simple.Simple')
def test_add_new(mock_db, mock_connection):
new_simple = Simple('a simple object')
from_db = new_simple
from_db.id = 1
mock_db.add_new.return_value = from_db
mock_connection.commit.return_value = None
mock_connection.flush.return_value = None
response = new_simple.add_new()
assert response
assert response.id == from_db.id
assert response.name == new_simple.name
|
none
| 1
| 2.908357
| 3
|
|
EMDetector/test/data.py
|
jabae/detectEM
| 1
|
6627700
|
from __future__ import print_function
import imp
import numpy as np
import math
import torch
import torch.utils.data
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.nn as nn
from time import time
downsample = nn.AvgPool2d(kernel_size=(2,2), stride=(2,2), padding=0)
def worker_init_fn(worker_id):
# Each worker already has its own random state (Torch).
seed = torch.IntTensor(1).random_()[0]
# print("worker ID = {}, seed = {}".format(worker_id, seed))
np.random.seed(seed)
class Dataset(torch.utils.data.Dataset):
def __init__(self, multidataset, mip):
super(Dataset, self).__init__()
self.image = multidataset.image
self.mip = mip
self.size = self.image.shape[3]
def __len__(self):
return self.size
def __getitem__(self, idx):
image = self.image[:,:,:,idx]
sample = {"image": image}
sample["image"] = torch.from_numpy(sample["image"].copy())
for i in range(self.mip):
sample["image"] = downsample(sample["image"])
return sample
class Data(object):
def __init__(self, data, opt, is_train=True):
self.build(data, opt, is_train)
def __call__(self):
sample = next(self.dataiter)
for k in sample:
is_input = k in self.inputs
sample[k].requires_grad_(is_input)
sample[k] = sample[k].cuda(non_blocking=(not is_input))
return sample
def requires_grad(self, key):
return self.is_train and (key in self.inputs)
def build(self, data, opt, is_train):
dataset = Dataset(data, opt.mip)
dataloader = DataLoader(dataset,
batch_size=1,
shuffle=False,
num_workers=1,
pin_memory=True,
worker_init_fn=worker_init_fn)
# Attributes
self.dataiter = iter(dataloader)
self.inputs = ['image']
self.is_train = is_train
|
from __future__ import print_function
import imp
import numpy as np
import math
import torch
import torch.utils.data
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.nn as nn
from time import time
downsample = nn.AvgPool2d(kernel_size=(2,2), stride=(2,2), padding=0)
def worker_init_fn(worker_id):
# Each worker already has its own random state (Torch).
seed = torch.IntTensor(1).random_()[0]
# print("worker ID = {}, seed = {}".format(worker_id, seed))
np.random.seed(seed)
class Dataset(torch.utils.data.Dataset):
def __init__(self, multidataset, mip):
super(Dataset, self).__init__()
self.image = multidataset.image
self.mip = mip
self.size = self.image.shape[3]
def __len__(self):
return self.size
def __getitem__(self, idx):
image = self.image[:,:,:,idx]
sample = {"image": image}
sample["image"] = torch.from_numpy(sample["image"].copy())
for i in range(self.mip):
sample["image"] = downsample(sample["image"])
return sample
class Data(object):
def __init__(self, data, opt, is_train=True):
self.build(data, opt, is_train)
def __call__(self):
sample = next(self.dataiter)
for k in sample:
is_input = k in self.inputs
sample[k].requires_grad_(is_input)
sample[k] = sample[k].cuda(non_blocking=(not is_input))
return sample
def requires_grad(self, key):
return self.is_train and (key in self.inputs)
def build(self, data, opt, is_train):
dataset = Dataset(data, opt.mip)
dataloader = DataLoader(dataset,
batch_size=1,
shuffle=False,
num_workers=1,
pin_memory=True,
worker_init_fn=worker_init_fn)
# Attributes
self.dataiter = iter(dataloader)
self.inputs = ['image']
self.is_train = is_train
|
en
| 0.928176
|
# Each worker already has its own random state (Torch). # print("worker ID = {}, seed = {}".format(worker_id, seed)) # Attributes
| 2.588078
| 3
|
appdaemon/plugins/mqtt/mqttplugin.py
|
Rootie/appdaemon
| 443
|
6627701
|
<filename>appdaemon/plugins/mqtt/mqttplugin.py<gh_stars>100-1000
import copy
import paho.mqtt.client as mqtt
import asyncio
import traceback
import ssl
import appdaemon.utils as utils
from appdaemon.appdaemon import AppDaemon
from appdaemon.plugin_management import PluginBase
class MqttPlugin(PluginBase):
def __init__(self, ad: AppDaemon, name, args):
super().__init__(ad, name, args)
"""Initialize MQTT Plugin."""
self.AD = ad
self.stopping = False
self.config = args
self.name = name
self.initialized = False
self.mqtt_connected = False
self.state = {}
self.logger.info("MQTT Plugin Initializing")
self.name = name
if "namespace" in self.config:
self.namespace = self.config["namespace"]
else:
self.namespace = "default"
self.mqtt_client_host = self.config.get("client_host", "127.0.0.1")
self.mqtt_client_port = self.config.get("client_port", 1883)
self.mqtt_qos = self.config.get("client_qos", 0)
mqtt_client_id = self.config.get("client_id", None)
mqtt_transport = self.config.get("client_transport", "tcp")
mqtt_session = self.config.get("client_clean_session", True)
self.mqtt_client_topics = self.config.get("client_topics", ["#"])
self.mqtt_client_user = self.config.get("client_user", None)
self.mqtt_client_password = self.config.get("client_password", None)
self.mqtt_event_name = self.config.get("event_name", "MQTT_MESSAGE")
self.mqtt_client_force_start = self.config.get("force_start", False)
status_topic = "{}/status".format(self.config.get("client_id", self.name + "-client").lower())
self.mqtt_will_topic = self.config.get("will_topic", None)
self.mqtt_on_connect_topic = self.config.get("birth_topic", None)
self.mqtt_will_retain = self.config.get("will_retain", True)
self.mqtt_on_connect_retain = self.config.get("birth_retain", True)
if self.mqtt_client_topics == "NONE":
self.mqtt_client_topics = []
if self.mqtt_will_topic is None:
self.mqtt_will_topic = status_topic
self.logger.info("Using %r as Will Topic", status_topic)
if self.mqtt_on_connect_topic is None:
self.mqtt_on_connect_topic = status_topic
self.logger.info("Using %r as Birth Topic", status_topic)
self.mqtt_will_payload = self.config.get("will_payload", "offline")
self.mqtt_on_connect_payload = self.config.get("birth_payload", "online")
self.mqtt_shutdown_payload = self.config.get("shutdown_payload", self.mqtt_will_payload)
self.mqtt_client_tls_ca_cert = self.config.get("ca_cert", None)
self.mqtt_client_tls_client_cert = self.config.get("client_cert", None)
self.mqtt_client_tls_client_key = self.config.get("client_key", None)
self.mqtt_verify_cert = self.config.get("verify_cert", True)
self.mqtt_tls_version = self.config.get("tls_version", "auto")
if self.mqtt_tls_version == "1.2":
self.mqtt_tls_version = ssl.PROTOCOL_TLSv1_2
elif self.mqtt_tls_version == "1.1":
self.mqtt_tls_version = ssl.PROTOCOL_TLSv1_1
elif self.mqtt_tls_version == "1.0":
self.mqtt_tls_version = ssl.PROTOCOL_TLSv1
else:
import sys
if sys.hexversion >= 0x03060000:
self.mqtt_tls_version = ssl.PROTOCOL_TLS
else:
self.mqtt_tls_version = ssl.PROTOCOL_TLSv1
self.mqtt_client_timeout = self.config.get("client_timeout", 60)
if mqtt_client_id is None:
mqtt_client_id = "appdaemon_{}_client".format(self.name.lower())
self.logger.info("Using %s as Client ID", mqtt_client_id)
self.mqtt_client = mqtt.Client(client_id=mqtt_client_id, clean_session=mqtt_session, transport=mqtt_transport,)
self.mqtt_client.on_connect = self.mqtt_on_connect
self.mqtt_client.on_disconnect = self.mqtt_on_disconnect
self.mqtt_client.on_message = self.mqtt_on_message
self.loop = self.AD.loop # get AD loop
self.mqtt_wildcards = list()
self.mqtt_binary_topics = list()
self.mqtt_metadata = {
"version": "1.0",
"host": self.mqtt_client_host,
"port": self.mqtt_client_port,
"client_id": mqtt_client_id,
"transport": mqtt_transport,
"clean_session": mqtt_session,
"qos": self.mqtt_qos,
"topics": self.mqtt_client_topics,
"username": self.mqtt_client_user,
"password": <PASSWORD>,
"event_name": self.mqtt_event_name,
"status_topic": status_topic,
"will_topic": self.mqtt_will_topic,
"will_payload": self.mqtt_will_payload,
"will_retain": self.mqtt_will_retain,
"birth_topic": self.mqtt_on_connect_topic,
"birth_payload": self.mqtt_on_connect_payload,
"birth_retain": self.mqtt_on_connect_retain,
"shutdown_payload": self.mqtt_shutdown_payload,
"ca_cert": self.mqtt_client_tls_ca_cert,
"client_cert": self.mqtt_client_tls_client_cert,
"client_key": self.mqtt_client_tls_client_key,
"verify_cert": self.mqtt_verify_cert,
"tls_version": self.mqtt_tls_version,
"timeout": self.mqtt_client_timeout,
"force_state": self.mqtt_client_force_start,
}
self.mqtt_connect_event = None
def stop(self):
self.logger.debug("stop() called for %s", self.name)
self.stopping = True
if self.mqtt_connected:
self.logger.info(
"Stopping MQTT Plugin and Unsubscribing from URL %s:%s", self.mqtt_client_host, self.mqtt_client_port,
)
for topic in self.mqtt_client_topics:
self.mqtt_unsubscribe(topic)
self.mqtt_client.publish(
self.mqtt_will_topic, self.mqtt_shutdown_payload, self.mqtt_qos, retain=self.mqtt_will_retain,
)
self.mqtt_client.disconnect() # disconnect cleanly
self.mqtt_client.loop_stop()
#
# Placeholder for constraints
#
def list_constraints(self):
return []
def mqtt_on_connect(self, client, userdata, flags, rc):
try:
err_msg = ""
# means connection was successful
if rc == 0:
self.mqtt_client.publish(
self.mqtt_on_connect_topic,
self.mqtt_on_connect_payload,
self.mqtt_qos,
retain=self.mqtt_on_connect_retain,
)
self.logger.info(
"Connected to Broker at URL %s:%s", self.mqtt_client_host, self.mqtt_client_port,
)
#
# Register MQTT Services
#
self.AD.services.register_service(self.namespace, "mqtt", "subscribe", self.call_plugin_service)
self.AD.services.register_service(self.namespace, "mqtt", "unsubscribe", self.call_plugin_service)
self.AD.services.register_service(self.namespace, "mqtt", "publish", self.call_plugin_service)
topics = copy.deepcopy(self.mqtt_client_topics)
for topic in topics:
self.mqtt_subscribe(topic, self.mqtt_qos)
self.mqtt_connected = True
data = {
"event_type": self.mqtt_event_name,
"data": {"state": "Connected", "topic": None, "wildcard": None},
}
self.loop.create_task(self.send_ad_event(data))
elif rc == 1:
err_msg = "Connection was refused due to Incorrect Protocol Version"
elif rc == 2:
err_msg = "Connection was refused due to Invalid Client Identifier"
elif rc == 3:
err_msg = "Connection was refused due to Server Unavailable"
elif rc == 4:
err_msg = "Connection was refused due to Bad Username or Password"
elif rc == 5:
err_msg = "Connection was refused due to Not Authorised"
else:
err_msg = "Connection was refused. Please check configuration settings"
# means there was an error
if err_msg != "":
self.logger.critical("Could not complete MQTT Plugin initialization, for %s", err_msg)
# continue processing
self.mqtt_connect_event.set()
except Exception:
self.logger.critical("There was an error while trying to setup the Mqtt Service")
self.logger.debug(
"There was an error while trying to setup the MQTT Service, with Traceback: %s", traceback.format_exc(),
)
def mqtt_on_disconnect(self, client, userdata, rc):
try:
# unexpected disconnection
if rc != 0 and not self.stopping:
self.initialized = False
self.mqtt_connected = False
self.logger.critical("MQTT Client Disconnected Abruptly. Will attempt reconnection")
self.logger.debug("Return code: %s", rc)
self.logger.debug("userdata: %s", userdata)
data = {
"event_type": self.mqtt_event_name,
"data": {"state": "Disconnected", "topic": None, "wildcard": None},
}
self.loop.create_task(self.send_ad_event(data))
return
except Exception:
self.logger.critical("There was an error while disconnecting from the Mqtt Service")
self.logger.debug(
"There was an error while disconnecting from the MQTT Service, with Traceback: %s",
traceback.format_exc(),
)
def mqtt_on_message(self, client, userdata, msg):
try:
self.logger.debug("Message Received: Topic = %s, Payload = %s", msg.topic, msg.payload)
topic = msg.topic
payload = msg.payload
wildcard = None
data = {"topic": topic}
if self.mqtt_wildcards != []:
# now check if the topic belongs to any of the wildcards
for sub in self.mqtt_wildcards:
if mqtt.topic_matches_sub(sub, topic):
wildcard = sub
break
if topic not in self.mqtt_binary_topics and wildcard not in self.mqtt_binary_topics:
# the binary data is not required
payload = payload.decode()
data.update({"wildcard": wildcard, "payload": payload})
event_data = {
"event_type": self.mqtt_event_name,
"data": data,
}
self.loop.create_task(self.send_ad_event(event_data))
except UnicodeDecodeError:
self.logger.info("Unable to decode MQTT message")
self.logger.debug(
"Unable to decode MQTT message, with Traceback: %s", traceback.format_exc(),
)
except Exception as e:
self.logger.critical("There was an error while processing an MQTT message: {} {}".format(type(e), e))
self.logger.debug(
"There was an error while processing an MQTT message, with Traceback: %s", traceback.format_exc(),
)
def mqtt_subscribe(self, topic, qos):
self.logger.debug("Subscribing to Topic: %s, with Qos %s", topic, qos)
result = None
try:
result = self.mqtt_client.subscribe(topic, qos)
if result[0] == 0:
self.logger.debug("Subscription to Topic %s Successful", topic)
if topic not in self.mqtt_client_topics:
self.mqtt_client_topics.append(topic)
if "#" in topic or "+" in topic:
# its a wildcard
self.add_mqtt_wildcard(topic)
else:
if topic in self.mqtt_client_topics:
self.mqtt_client_topics.remove(topic)
self.logger.debug(
"Subscription to Topic %s Unsuccessful, as Client possibly not currently connected", topic,
)
except Exception as e:
self.logger.warning("There was an error while subscribing to topic %s, %s", topic, e)
self.logger.debug(traceback.format_exc())
return result
def mqtt_unsubscribe(self, topic):
self.logger.debug("Unsubscribing from Topic: %s", topic)
result = None
try:
result = self.mqtt_client.unsubscribe(topic)
if result[0] == 0:
self.logger.debug("Unsubscription from Topic %s Successful", topic)
if topic in self.mqtt_client_topics:
self.mqtt_client_topics.remove(topic)
self.remove_mqtt_binary(topic)
self.remove_mqtt_wildcard(topic)
else:
self.logger.warning("Unsubscription from Topic %s was not Successful", topic)
except Exception as e:
self.logger.warning("There was an error while unsubscribing from topic %s, %s", topic, e)
self.logger.debug(traceback.format_exc())
return result
async def call_plugin_service(self, namespace, domain, service, kwargs):
result = None
if "topic" in kwargs:
if not self.mqtt_connected: # ensure mqtt plugin is connected
self.logger.warning("Attempt to call Mqtt Service while disconnected: %s", service)
return None
try:
topic = kwargs["topic"]
payload = kwargs.get("payload", None)
retain = kwargs.get("retain", False)
qos = int(kwargs.get("qos", self.mqtt_qos))
if service == "publish":
self.logger.debug("Publish Payload: %s to Topic: %s", payload, topic)
result = await utils.run_in_executor(self, self.mqtt_client.publish, topic, payload, qos, retain)
if result[0] == 0:
self.logger.debug(
"Publishing Payload %s to Topic %s Successful", payload, topic,
)
else:
self.logger.warning(
"Publishing Payload %s to Topic %s was not Successful", payload, topic,
)
elif service == "subscribe":
if topic not in self.mqtt_client_topics:
result = await utils.run_in_executor(self, self.mqtt_subscribe, topic, qos)
else:
self.logger.info("Topic %s already subscribed to", topic)
elif service == "unsubscribe":
if topic in self.mqtt_client_topics:
result = await utils.run_in_executor(self, self.mqtt_unsubscribe, topic)
else:
self.logger.info("Topic %s already unsubscribed from", topic)
else:
self.logger.warning("Wrong Service Call %s for MQTT", service)
result = "ERR"
except Exception as e:
config = self.config
if config["type"] == "mqtt":
self.logger.debug(
"Got the following Error %s, when trying to retrieve Mqtt Plugin", e,
)
return str(e)
else:
self.logger.critical(
"Wrong Namespace %s selected for MQTT Service. Please use proper namespace before trying again",
namespace,
)
return "ERR"
else:
self.logger.warning("Topic not provided for Service Call {!r}.".format(service))
raise ValueError("Topic not provided, please provide Topic for Service Call")
return result
def add_mqtt_wildcard(self, wildcard):
"""Used to add to the plugin wildcard"""
if wildcard not in self.mqtt_wildcards:
self.mqtt_wildcards.append(wildcard)
return True
return False
def remove_mqtt_wildcard(self, wildcard):
"""Used to remove remove from the plugin wildcard"""
if wildcard in self.mqtt_wildcards:
self.mqtt_wildcards.remove(wildcard)
return True
return False
def add_mqtt_binary(self, topic):
"""Used to add to the plugin binary topic"""
if topic not in self.mqtt_binary_topics:
self.mqtt_binary_topics.append(topic)
return True
return False
def remove_mqtt_binary(self, topic):
"""Used to remove from the plugin binary topic"""
if topic in self.mqtt_binary_topics:
self.mqtt_binary_topics.remove(topic)
return True
return False
async def mqtt_client_state(self):
return self.mqtt_connected
async def send_ad_event(self, data):
await self.AD.events.process_event(self.namespace, data)
#
# Get initial state
#
async def get_complete_state(self):
self.logger.debug("*** Sending Complete State: %s ***", self.state)
return copy.deepcopy(self.state)
async def get_metadata(self):
return self.mqtt_metadata
#
# Utility gets called every second (or longer if configured
# Allows plugin to do any housekeeping required
#
def utility(self):
# self.logger.info("utility".format(self.state)
return
#
# Handle state updates
#
async def get_updates(self):
already_initialized = False
already_notified = False
first_time = True
first_time_service = True
self.mqtt_connect_event = asyncio.Event()
while not self.stopping:
while (
not self.initialized or not already_initialized
) and not self.stopping: # continue until initialization is successful
if (
not already_initialized and not already_notified
): # if it had connected before, it need not run this. Run if just trying for the first time
try:
await asyncio.wait_for(
utils.run_in_executor(self, self.start_mqtt_service, first_time_service), 5.0
)
await asyncio.wait_for(
self.mqtt_connect_event.wait(), 5.0
) # wait for it to return true for 5 seconds in case still processing connect
except asyncio.TimeoutError:
self.logger.critical(
"Could not Complete Connection to Broker, please Ensure Broker at URL %s:%s is correct and broker is not down and restart Appdaemon",
self.mqtt_client_host,
self.mqtt_client_port,
)
# meaning it should start anyway even if broker is down
if self.mqtt_client_force_start:
self.mqtt_connected = True
else:
self.mqtt_client.loop_stop()
# disconnect so it won't attempt reconnection if the broker was to come up
self.mqtt_client.disconnect()
first_time_service = False
state = await self.get_complete_state()
meta = await self.get_metadata()
# meaning the client has connected to the broker
if self.mqtt_connected:
await self.AD.plugins.notify_plugin_started(self.name, self.namespace, meta, state, first_time)
already_notified = False
already_initialized = True
self.logger.info("MQTT Plugin initialization complete")
self.initialized = True
else:
if not already_notified and already_initialized:
await self.AD.plugins.notify_plugin_stopped(self.name, self.namespace)
self.logger.critical("MQTT Plugin Stopped Unexpectedly")
already_notified = True
already_initialized = False
first_time = False
if not already_initialized and not already_notified:
self.logger.critical("Could not complete MQTT Plugin initialization, trying again in 5 seconds")
if self.stopping:
break
else:
self.logger.critical(
"Unable to reinitialize MQTT Plugin, will keep trying again until complete"
)
await asyncio.sleep(5)
await asyncio.sleep(5)
def get_namespace(self):
return self.namespace
def start_mqtt_service(self, first_time):
try:
# used to wait for connection
self.mqtt_connect_event.clear()
if first_time:
if self.mqtt_client_user is not None:
self.mqtt_client.username_pw_set(self.mqtt_client_user, password=self.mqtt_client_password)
set_tls = False
auth = {"tls_version": self.mqtt_tls_version}
if self.mqtt_client_tls_ca_cert is not None:
auth.update({"ca_certs": self.mqtt_client_tls_ca_cert})
set_tls = True
if self.mqtt_client_tls_client_cert is not None:
auth.update({"certfile": self.mqtt_client_tls_client_cert})
set_tls = True
if self.mqtt_client_tls_client_key is not None:
auth.update({"keyfile": self.mqtt_client_tls_client_key})
set_tls = True
if set_tls is True:
self.mqtt_client.tls_set(**auth)
if not self.mqtt_verify_cert:
self.mqtt_client.tls_insecure_set(not self.mqtt_verify_cert)
self.mqtt_client.will_set(
self.mqtt_will_topic, self.mqtt_will_payload, self.mqtt_qos, retain=self.mqtt_will_retain,
)
self.mqtt_client.connect_async(self.mqtt_client_host, self.mqtt_client_port, self.mqtt_client_timeout)
self.mqtt_client.loop_start()
except Exception as e:
self.logger.critical(
"There was an error while trying to setup the Mqtt Service. Error was: %s", e,
)
self.logger.debug(
"There was an error while trying to setup the MQTT Service. Error: %s, with Traceback: %s",
e,
traceback.format_exc(),
)
self.logger.debug(
"There was an error while trying to setup the MQTT Service, with Traceback: %s", traceback.format_exc(),
)
return
|
<filename>appdaemon/plugins/mqtt/mqttplugin.py<gh_stars>100-1000
import copy
import paho.mqtt.client as mqtt
import asyncio
import traceback
import ssl
import appdaemon.utils as utils
from appdaemon.appdaemon import AppDaemon
from appdaemon.plugin_management import PluginBase
class MqttPlugin(PluginBase):
def __init__(self, ad: AppDaemon, name, args):
super().__init__(ad, name, args)
"""Initialize MQTT Plugin."""
self.AD = ad
self.stopping = False
self.config = args
self.name = name
self.initialized = False
self.mqtt_connected = False
self.state = {}
self.logger.info("MQTT Plugin Initializing")
self.name = name
if "namespace" in self.config:
self.namespace = self.config["namespace"]
else:
self.namespace = "default"
self.mqtt_client_host = self.config.get("client_host", "127.0.0.1")
self.mqtt_client_port = self.config.get("client_port", 1883)
self.mqtt_qos = self.config.get("client_qos", 0)
mqtt_client_id = self.config.get("client_id", None)
mqtt_transport = self.config.get("client_transport", "tcp")
mqtt_session = self.config.get("client_clean_session", True)
self.mqtt_client_topics = self.config.get("client_topics", ["#"])
self.mqtt_client_user = self.config.get("client_user", None)
self.mqtt_client_password = self.config.get("client_password", None)
self.mqtt_event_name = self.config.get("event_name", "MQTT_MESSAGE")
self.mqtt_client_force_start = self.config.get("force_start", False)
status_topic = "{}/status".format(self.config.get("client_id", self.name + "-client").lower())
self.mqtt_will_topic = self.config.get("will_topic", None)
self.mqtt_on_connect_topic = self.config.get("birth_topic", None)
self.mqtt_will_retain = self.config.get("will_retain", True)
self.mqtt_on_connect_retain = self.config.get("birth_retain", True)
if self.mqtt_client_topics == "NONE":
self.mqtt_client_topics = []
if self.mqtt_will_topic is None:
self.mqtt_will_topic = status_topic
self.logger.info("Using %r as Will Topic", status_topic)
if self.mqtt_on_connect_topic is None:
self.mqtt_on_connect_topic = status_topic
self.logger.info("Using %r as Birth Topic", status_topic)
self.mqtt_will_payload = self.config.get("will_payload", "offline")
self.mqtt_on_connect_payload = self.config.get("birth_payload", "online")
self.mqtt_shutdown_payload = self.config.get("shutdown_payload", self.mqtt_will_payload)
self.mqtt_client_tls_ca_cert = self.config.get("ca_cert", None)
self.mqtt_client_tls_client_cert = self.config.get("client_cert", None)
self.mqtt_client_tls_client_key = self.config.get("client_key", None)
self.mqtt_verify_cert = self.config.get("verify_cert", True)
self.mqtt_tls_version = self.config.get("tls_version", "auto")
if self.mqtt_tls_version == "1.2":
self.mqtt_tls_version = ssl.PROTOCOL_TLSv1_2
elif self.mqtt_tls_version == "1.1":
self.mqtt_tls_version = ssl.PROTOCOL_TLSv1_1
elif self.mqtt_tls_version == "1.0":
self.mqtt_tls_version = ssl.PROTOCOL_TLSv1
else:
import sys
if sys.hexversion >= 0x03060000:
self.mqtt_tls_version = ssl.PROTOCOL_TLS
else:
self.mqtt_tls_version = ssl.PROTOCOL_TLSv1
self.mqtt_client_timeout = self.config.get("client_timeout", 60)
if mqtt_client_id is None:
mqtt_client_id = "appdaemon_{}_client".format(self.name.lower())
self.logger.info("Using %s as Client ID", mqtt_client_id)
self.mqtt_client = mqtt.Client(client_id=mqtt_client_id, clean_session=mqtt_session, transport=mqtt_transport,)
self.mqtt_client.on_connect = self.mqtt_on_connect
self.mqtt_client.on_disconnect = self.mqtt_on_disconnect
self.mqtt_client.on_message = self.mqtt_on_message
self.loop = self.AD.loop # get AD loop
self.mqtt_wildcards = list()
self.mqtt_binary_topics = list()
self.mqtt_metadata = {
"version": "1.0",
"host": self.mqtt_client_host,
"port": self.mqtt_client_port,
"client_id": mqtt_client_id,
"transport": mqtt_transport,
"clean_session": mqtt_session,
"qos": self.mqtt_qos,
"topics": self.mqtt_client_topics,
"username": self.mqtt_client_user,
"password": <PASSWORD>,
"event_name": self.mqtt_event_name,
"status_topic": status_topic,
"will_topic": self.mqtt_will_topic,
"will_payload": self.mqtt_will_payload,
"will_retain": self.mqtt_will_retain,
"birth_topic": self.mqtt_on_connect_topic,
"birth_payload": self.mqtt_on_connect_payload,
"birth_retain": self.mqtt_on_connect_retain,
"shutdown_payload": self.mqtt_shutdown_payload,
"ca_cert": self.mqtt_client_tls_ca_cert,
"client_cert": self.mqtt_client_tls_client_cert,
"client_key": self.mqtt_client_tls_client_key,
"verify_cert": self.mqtt_verify_cert,
"tls_version": self.mqtt_tls_version,
"timeout": self.mqtt_client_timeout,
"force_state": self.mqtt_client_force_start,
}
self.mqtt_connect_event = None
def stop(self):
self.logger.debug("stop() called for %s", self.name)
self.stopping = True
if self.mqtt_connected:
self.logger.info(
"Stopping MQTT Plugin and Unsubscribing from URL %s:%s", self.mqtt_client_host, self.mqtt_client_port,
)
for topic in self.mqtt_client_topics:
self.mqtt_unsubscribe(topic)
self.mqtt_client.publish(
self.mqtt_will_topic, self.mqtt_shutdown_payload, self.mqtt_qos, retain=self.mqtt_will_retain,
)
self.mqtt_client.disconnect() # disconnect cleanly
self.mqtt_client.loop_stop()
#
# Placeholder for constraints
#
def list_constraints(self):
return []
def mqtt_on_connect(self, client, userdata, flags, rc):
try:
err_msg = ""
# means connection was successful
if rc == 0:
self.mqtt_client.publish(
self.mqtt_on_connect_topic,
self.mqtt_on_connect_payload,
self.mqtt_qos,
retain=self.mqtt_on_connect_retain,
)
self.logger.info(
"Connected to Broker at URL %s:%s", self.mqtt_client_host, self.mqtt_client_port,
)
#
# Register MQTT Services
#
self.AD.services.register_service(self.namespace, "mqtt", "subscribe", self.call_plugin_service)
self.AD.services.register_service(self.namespace, "mqtt", "unsubscribe", self.call_plugin_service)
self.AD.services.register_service(self.namespace, "mqtt", "publish", self.call_plugin_service)
topics = copy.deepcopy(self.mqtt_client_topics)
for topic in topics:
self.mqtt_subscribe(topic, self.mqtt_qos)
self.mqtt_connected = True
data = {
"event_type": self.mqtt_event_name,
"data": {"state": "Connected", "topic": None, "wildcard": None},
}
self.loop.create_task(self.send_ad_event(data))
elif rc == 1:
err_msg = "Connection was refused due to Incorrect Protocol Version"
elif rc == 2:
err_msg = "Connection was refused due to Invalid Client Identifier"
elif rc == 3:
err_msg = "Connection was refused due to Server Unavailable"
elif rc == 4:
err_msg = "Connection was refused due to Bad Username or Password"
elif rc == 5:
err_msg = "Connection was refused due to Not Authorised"
else:
err_msg = "Connection was refused. Please check configuration settings"
# means there was an error
if err_msg != "":
self.logger.critical("Could not complete MQTT Plugin initialization, for %s", err_msg)
# continue processing
self.mqtt_connect_event.set()
except Exception:
self.logger.critical("There was an error while trying to setup the Mqtt Service")
self.logger.debug(
"There was an error while trying to setup the MQTT Service, with Traceback: %s", traceback.format_exc(),
)
def mqtt_on_disconnect(self, client, userdata, rc):
try:
# unexpected disconnection
if rc != 0 and not self.stopping:
self.initialized = False
self.mqtt_connected = False
self.logger.critical("MQTT Client Disconnected Abruptly. Will attempt reconnection")
self.logger.debug("Return code: %s", rc)
self.logger.debug("userdata: %s", userdata)
data = {
"event_type": self.mqtt_event_name,
"data": {"state": "Disconnected", "topic": None, "wildcard": None},
}
self.loop.create_task(self.send_ad_event(data))
return
except Exception:
self.logger.critical("There was an error while disconnecting from the Mqtt Service")
self.logger.debug(
"There was an error while disconnecting from the MQTT Service, with Traceback: %s",
traceback.format_exc(),
)
def mqtt_on_message(self, client, userdata, msg):
try:
self.logger.debug("Message Received: Topic = %s, Payload = %s", msg.topic, msg.payload)
topic = msg.topic
payload = msg.payload
wildcard = None
data = {"topic": topic}
if self.mqtt_wildcards != []:
# now check if the topic belongs to any of the wildcards
for sub in self.mqtt_wildcards:
if mqtt.topic_matches_sub(sub, topic):
wildcard = sub
break
if topic not in self.mqtt_binary_topics and wildcard not in self.mqtt_binary_topics:
# the binary data is not required
payload = payload.decode()
data.update({"wildcard": wildcard, "payload": payload})
event_data = {
"event_type": self.mqtt_event_name,
"data": data,
}
self.loop.create_task(self.send_ad_event(event_data))
except UnicodeDecodeError:
self.logger.info("Unable to decode MQTT message")
self.logger.debug(
"Unable to decode MQTT message, with Traceback: %s", traceback.format_exc(),
)
except Exception as e:
self.logger.critical("There was an error while processing an MQTT message: {} {}".format(type(e), e))
self.logger.debug(
"There was an error while processing an MQTT message, with Traceback: %s", traceback.format_exc(),
)
def mqtt_subscribe(self, topic, qos):
self.logger.debug("Subscribing to Topic: %s, with Qos %s", topic, qos)
result = None
try:
result = self.mqtt_client.subscribe(topic, qos)
if result[0] == 0:
self.logger.debug("Subscription to Topic %s Successful", topic)
if topic not in self.mqtt_client_topics:
self.mqtt_client_topics.append(topic)
if "#" in topic or "+" in topic:
# its a wildcard
self.add_mqtt_wildcard(topic)
else:
if topic in self.mqtt_client_topics:
self.mqtt_client_topics.remove(topic)
self.logger.debug(
"Subscription to Topic %s Unsuccessful, as Client possibly not currently connected", topic,
)
except Exception as e:
self.logger.warning("There was an error while subscribing to topic %s, %s", topic, e)
self.logger.debug(traceback.format_exc())
return result
def mqtt_unsubscribe(self, topic):
self.logger.debug("Unsubscribing from Topic: %s", topic)
result = None
try:
result = self.mqtt_client.unsubscribe(topic)
if result[0] == 0:
self.logger.debug("Unsubscription from Topic %s Successful", topic)
if topic in self.mqtt_client_topics:
self.mqtt_client_topics.remove(topic)
self.remove_mqtt_binary(topic)
self.remove_mqtt_wildcard(topic)
else:
self.logger.warning("Unsubscription from Topic %s was not Successful", topic)
except Exception as e:
self.logger.warning("There was an error while unsubscribing from topic %s, %s", topic, e)
self.logger.debug(traceback.format_exc())
return result
    async def call_plugin_service(self, namespace, domain, service, kwargs):
        """Dispatch an AppDaemon service call to the MQTT client.

        Args:
            namespace: namespace the service was called in.
            domain: service domain (expected "mqtt"; not validated here).
            service: one of "publish", "subscribe" or "unsubscribe".
            kwargs: must contain "topic"; may contain "payload", "retain", "qos".

        Returns:
            The paho result tuple on success, "ERR"/an error string on
            failure, or None when disconnected or nothing was done.

        Raises:
            ValueError: if "topic" is missing from kwargs.
        """
        result = None
        if "topic" in kwargs:
            if not self.mqtt_connected:  # ensure mqtt plugin is connected
                self.logger.warning("Attempt to call Mqtt Service while disconnected: %s", service)
                return None
            try:
                topic = kwargs["topic"]
                payload = kwargs.get("payload", None)
                retain = kwargs.get("retain", False)
                qos = int(kwargs.get("qos", self.mqtt_qos))
                if service == "publish":
                    self.logger.debug("Publish Payload: %s to Topic: %s", payload, topic)
                    # run the blocking paho call off the event loop
                    result = await utils.run_in_executor(self, self.mqtt_client.publish, topic, payload, qos, retain)
                    if result[0] == 0:
                        self.logger.debug(
                            "Publishing Payload %s to Topic %s Successful", payload, topic,
                        )
                    else:
                        self.logger.warning(
                            "Publishing Payload %s to Topic %s was not Successful", payload, topic,
                        )
                elif service == "subscribe":
                    if topic not in self.mqtt_client_topics:
                        result = await utils.run_in_executor(self, self.mqtt_subscribe, topic, qos)
                    else:
                        self.logger.info("Topic %s already subscribed to", topic)
                elif service == "unsubscribe":
                    if topic in self.mqtt_client_topics:
                        result = await utils.run_in_executor(self, self.mqtt_unsubscribe, topic)
                    else:
                        self.logger.info("Topic %s already unsubscribed from", topic)
                else:
                    self.logger.warning("Wrong Service Call %s for MQTT", service)
                    result = "ERR"
            except Exception as e:
                # NOTE(review): failures are classified by config["type"] —
                # presumably "mqtt" means the error happened in our own
                # namespace while anything else is a mis-addressed call;
                # confirm this intent before changing the branch.
                config = self.config
                if config["type"] == "mqtt":
                    self.logger.debug(
                        "Got the following Error %s, when trying to retrieve Mqtt Plugin", e,
                    )
                    return str(e)
                else:
                    self.logger.critical(
                        "Wrong Namespace %s selected for MQTT Service. Please use proper namespace before trying again",
                        namespace,
                    )
                    return "ERR"
        else:
            self.logger.warning("Topic not provided for Service Call {!r}.".format(service))
            raise ValueError("Topic not provided, please provide Topic for Service Call")
        return result
def add_mqtt_wildcard(self, wildcard):
"""Used to add to the plugin wildcard"""
if wildcard not in self.mqtt_wildcards:
self.mqtt_wildcards.append(wildcard)
return True
return False
def remove_mqtt_wildcard(self, wildcard):
"""Used to remove remove from the plugin wildcard"""
if wildcard in self.mqtt_wildcards:
self.mqtt_wildcards.remove(wildcard)
return True
return False
def add_mqtt_binary(self, topic):
"""Used to add to the plugin binary topic"""
if topic not in self.mqtt_binary_topics:
self.mqtt_binary_topics.append(topic)
return True
return False
def remove_mqtt_binary(self, topic):
"""Used to remove from the plugin binary topic"""
if topic in self.mqtt_binary_topics:
self.mqtt_binary_topics.remove(topic)
return True
return False
    async def mqtt_client_state(self):
        """Return True while the client is connected to the broker."""
        return self.mqtt_connected
    async def send_ad_event(self, data):
        """Forward *data* into AppDaemon's event stream under this plugin's namespace."""
        await self.AD.events.process_event(self.namespace, data)
#
# Get initial state
#
    async def get_complete_state(self):
        """Return a deep copy of the plugin state so callers may mutate it freely."""
        self.logger.debug("*** Sending Complete State: %s ***", self.state)
        return copy.deepcopy(self.state)
    async def get_metadata(self):
        """Return the static connection metadata assembled at plugin init."""
        return self.mqtt_metadata
#
# Utility gets called every second (or longer if configured
# Allows plugin to do any housekeeping required
#
    def utility(self):
        """Periodic housekeeping hook called by AppDaemon; MQTT has nothing to do."""
        # self.logger.info("utility".format(self.state)
        return
#
# Handle state updates
#
    async def get_updates(self):
        """Main plugin loop: (re)connect to the broker and keep the plugin alive.

        Runs until stop() sets self.stopping. On every (re)connection it
        notifies AppDaemon that the plugin started; on unexpected loss it
        notifies that the plugin stopped, then retries every 5 seconds.
        """
        already_initialized = False
        already_notified = False
        first_time = True
        first_time_service = True
        # set by mqtt_on_connect() once the broker answered
        self.mqtt_connect_event = asyncio.Event()
        while not self.stopping:
            while (
                not self.initialized or not already_initialized
            ) and not self.stopping:  # continue until initialization is successful
                if (
                    not already_initialized and not already_notified
                ):  # if it had connected before, it need not run this. Run if just trying for the first time
                    try:
                        await asyncio.wait_for(
                            utils.run_in_executor(self, self.start_mqtt_service, first_time_service), 5.0
                        )
                        await asyncio.wait_for(
                            self.mqtt_connect_event.wait(), 5.0
                        )  # wait for it to return true for 5 seconds in case still processing connect
                    except asyncio.TimeoutError:
                        self.logger.critical(
                            "Could not Complete Connection to Broker, please Ensure Broker at URL %s:%s is correct and broker is not down and restart Appdaemon",
                            self.mqtt_client_host,
                            self.mqtt_client_port,
                        )
                        # meaning it should start anyway even if broker is down
                        if self.mqtt_client_force_start:
                            self.mqtt_connected = True
                        else:
                            self.mqtt_client.loop_stop()
                            # disconnect so it won't attempt reconnection if the broker was to come up
                            self.mqtt_client.disconnect()
                    first_time_service = False
                state = await self.get_complete_state()
                meta = await self.get_metadata()
                # meaning the client has connected to the broker
                if self.mqtt_connected:
                    await self.AD.plugins.notify_plugin_started(self.name, self.namespace, meta, state, first_time)
                    already_notified = False
                    already_initialized = True
                    self.logger.info("MQTT Plugin initialization complete")
                    self.initialized = True
                else:
                    if not already_notified and already_initialized:
                        await self.AD.plugins.notify_plugin_stopped(self.name, self.namespace)
                        self.logger.critical("MQTT Plugin Stopped Unexpectedly")
                        already_notified = True
                        already_initialized = False
                        first_time = False
                    if not already_initialized and not already_notified:
                        self.logger.critical("Could not complete MQTT Plugin initialization, trying again in 5 seconds")
                        if self.stopping:
                            break
                    else:
                        self.logger.critical(
                            "Unable to reinitialize MQTT Plugin, will keep trying again until complete"
                        )
                    await asyncio.sleep(5)
            await asyncio.sleep(5)
    def get_namespace(self):
        """Return the namespace this plugin's events and services live in."""
        return self.namespace
def start_mqtt_service(self, first_time):
try:
# used to wait for connection
self.mqtt_connect_event.clear()
if first_time:
if self.mqtt_client_user is not None:
self.mqtt_client.username_pw_set(self.mqtt_client_user, password=self.mqtt_client_password)
set_tls = False
auth = {"tls_version": self.mqtt_tls_version}
if self.mqtt_client_tls_ca_cert is not None:
auth.update({"ca_certs": self.mqtt_client_tls_ca_cert})
set_tls = True
if self.mqtt_client_tls_client_cert is not None:
auth.update({"certfile": self.mqtt_client_tls_client_cert})
set_tls = True
if self.mqtt_client_tls_client_key is not None:
auth.update({"keyfile": self.mqtt_client_tls_client_key})
set_tls = True
if set_tls is True:
self.mqtt_client.tls_set(**auth)
if not self.mqtt_verify_cert:
self.mqtt_client.tls_insecure_set(not self.mqtt_verify_cert)
self.mqtt_client.will_set(
self.mqtt_will_topic, self.mqtt_will_payload, self.mqtt_qos, retain=self.mqtt_will_retain,
)
self.mqtt_client.connect_async(self.mqtt_client_host, self.mqtt_client_port, self.mqtt_client_timeout)
self.mqtt_client.loop_start()
except Exception as e:
self.logger.critical(
"There was an error while trying to setup the Mqtt Service. Error was: %s", e,
)
self.logger.debug(
"There was an error while trying to setup the MQTT Service. Error: %s, with Traceback: %s",
e,
traceback.format_exc(),
)
self.logger.debug(
"There was an error while trying to setup the MQTT Service, with Traceback: %s", traceback.format_exc(),
)
return
|
en
| 0.921219
|
Initialize MQTT Plugin. # get AD loop # disconnect cleanly # # Placeholder for constraints # # means connection was successful # # Register MQTT Services # # means there was an error # continue processing # unexpected disconnection # now check if the topic belongs to any of the wildcards # the binary data is not required # its a wildcard # ensure mqtt plugin is connected Used to add to the plugin wildcard Used to remove remove from the plugin wildcard Used to add to the plugin binary topic Used to remove from the plugin binary topic # # Get initial state # # # Utility gets called every second (or longer if configured # Allows plugin to do any housekeeping required # # self.logger.info("utility".format(self.state) # # Handle state updates # # continue until initialization is successful # if it had connected before, it need not run this. Run if just trying for the first time # wait for it to return true for 5 seconds in case still processing connect # meaning it should start anyway even if broker is down # disconnect so it won't attempt reconnection if the broker was to come up # meaning the client has connected to the broker # used to wait for connection
| 2.067963
| 2
|
rest-server.py
|
betacode-projects/my-escpos-webapi
| 1
|
6627702
|
import sys, os
import traceback
import datetime
import base64
import io
from PIL import Image
from flask import Flask, make_response, request
from flask_cors import CORS, cross_origin
import json
import escpos_ex
api = Flask(__name__)
CORS(api)
cmd_list = ['print']  # supported commands (read nowhere else in this file)
# NOTE(review): shared response template; the send_req_* helpers mutate this
# module-global per request, which is not safe under concurrent requests —
# confirm before relying on it.
res_json = {
    'status' : 'success',
    'msg-jp' : ''
}
@api.route('/print', methods=["GET", "POST", "OPTIONS"])
@cross_origin()
def get_user():
    """Handle /print: validate the request JSON and print a receipt.

    Expected JSON body: {"user": str, "state": str, "text": str,
    "img": base64-encoded image (optional)}. Returns a JSON envelope via
    send_req_success()/send_req_error().
    """
    escpos_ex.set_patlite_progress(1)
    if request.method == "GET":
        return send_req_error('許可されていないメソッドです', 405)
    req_json = request.json
    # required-field check ("is None", not "== None")
    if req_json is None or 'user' not in req_json or 'text' not in req_json or 'state' not in req_json:
        return send_req_error('必須項目user, text, stateがありません', 406)
    text = req_json['text'].strip()
    user = req_json['user'].strip().replace('\n', '')
    state = req_json['state'].strip().replace('\n', '')
    if len(user) < 1 or len(state) < 1:
        return send_req_error('ステート又はユーザー名が空欄です', 406)
    # build the receipt header
    dt_now = datetime.datetime.now()
    headers = 'DATE: '+ dt_now.strftime('%Y/%m/%d %H:%M:%S') +'\n'
    headers += 'IP  : '+ request.remote_addr +'\n'
    headers += 'STAT: '+ state +'\n'
    headers += 'USER: '+ user +'\n'
    headers += '-'*27
    # decode the optional base64-encoded image
    pil_obj = None
    if 'img' in req_json:
        image_dec = None
        try:
            image_dec = base64.b64decode(req_json['img'])
            pil_obj = Image.open(io.BytesIO(image_dec))
        except Exception:  # was a bare except: — never swallow SystemExit/KeyboardInterrupt
            print(traceback.format_exc())
            type_, value_, traceback_ = sys.exc_info()
            return send_req_error('画像データをデコードできませんでした - ' + str(value_), 406)
    escpos_ex.set_patlite_progress(2)
    try:
        escpos_ex.print_text(text, headers, pil_obj)
    except Exception as e:
        print(traceback.format_exc())
        type_, value_, traceback_ = sys.exc_info()
        return send_req_error('印刷エラー - '+ str(value_), 506)
    escpos_ex.set_patlite_progress(4)
    return send_req_success('印刷完了しました')
@api.errorhandler(404)
def not_found(error):
    """404 handler: respond with the standard JSON error envelope."""
    return send_req_error('無効なページです', 404)
def send_req_error(msg, code):
    """Build a JSON error response with HTTP status *code* and flash the patlite red.

    Args:
        msg: Japanese error message placed in "msg-jp".
        code: HTTP status code for the response.
    """
    # Build the payload locally instead of mutating the module-global
    # res_json: that global is shared across concurrent requests, so a
    # second request could overwrite the message before it is serialized.
    body = {
        'status': 'error',
        'msg-jp': msg
    }
    escpos_ex.set_patlite_progress(0)
    escpos_ex.set_patlite('200000', '5')
    return make_response(json.dumps(body, ensure_ascii=False), code)
def send_req_success(msg):
    """Build a JSON success response (HTTP 201) carrying *msg* in "msg-jp"."""
    # Local payload instead of mutating the shared module-global res_json
    # (not safe across concurrent requests).
    body = {
        'status': 'success',
        'msg-jp': msg
    }
    return make_response(json.dumps(body, ensure_ascii=False), 201)
if __name__ == '__main__':
    # Listen on all interfaces; Flask's built-in server serves this LAN tool.
    api.run(host='0.0.0.0', port=8880)
|
import sys, os
import traceback
import datetime
import base64
import io
from PIL import Image
from flask import Flask, make_response, request
from flask_cors import CORS, cross_origin
import json
import escpos_ex
api = Flask(__name__)
CORS(api)
cmd_list = ['print']
res_json = {
'status' : 'success',
'msg-jp' : ''
}
@api.route('/print', methods=["GET", "POST", "OPTIONS"])
@cross_origin()
def get_user():
global cmd_list
escpos_ex.set_patlite_progress(1)
if request.method == "GET":
return send_req_error('許可されていないメソッドです', 405)
req_json = request.json
#print(req_json)
# 必須情報確認
if req_json == None or 'user' not in req_json or 'text' not in req_json or 'state' not in req_json:
return send_req_error('必須項目user, text, stateがありません', 406)
text = req_json['text'].strip()
user = req_json['user'].strip().replace('\n', '')
state = req_json['state'].strip().replace('\n', '')
if len(user) < 1 or len(state) < 1:
return send_req_error('ステート又はユーザー名が空欄です', 406)
# ヘッダ生成
dt_now = datetime.datetime.now()
headers = 'DATE: '+ dt_now.strftime('%Y/%m/%d %H:%M:%S') +'\n'
headers += 'IP : '+ request.remote_addr +'\n'
headers += 'STAT: '+ state +'\n'
headers += 'USER: '+ user +'\n'
headers += '-'*27
# 画像ある場合
pil_obj = None
if 'img' in req_json:
image_dec = None
try:
image_dec = base64.b64decode(req_json['img'])
pil_obj = Image.open(io.BytesIO(image_dec))
except:
print(traceback.format_exc())
type_, value_, traceback_ = sys.exc_info()
return send_req_error('画像データをデコードできませんでした - ' + str(value_), 406)
escpos_ex.set_patlite_progress(2)
try:
escpos_ex.print_text(text, headers, pil_obj)
except Exception as e:
print(traceback.format_exc())
type_, value_, traceback_ = sys.exc_info()
return send_req_error('印刷エラー - '+ str(value_), 506)
escpos_ex.set_patlite_progress(4)
return send_req_success('印刷完了しました')
@api.errorhandler(404)
def not_found(error):
return send_req_error('無効なページです', 404)
def send_req_error(msg, code):
global res_json
res_json['status'] = 'error'
res_json['msg-jp'] = msg
escpos_ex.set_patlite_progress(0)
escpos_ex.set_patlite('200000', '5')
return make_response(json.dumps(res_json, ensure_ascii=False), code)
def send_req_success(msg):
global res_json
res_json['status'] = 'success'
res_json['msg-jp'] = msg
return make_response(json.dumps(res_json, ensure_ascii=False), 201)
if __name__ == '__main__':
api.run(host='0.0.0.0', port=8880)
|
ja
| 0.99617
|
#print(req_json) # 必須情報確認 # ヘッダ生成 # 画像ある場合
| 2.300101
| 2
|
server.py
|
z-------------/newsstand
| 1
|
6627703
|
<gh_stars>1-10
import sys
from http.server import CGIHTTPRequestHandler, HTTPServer

# Serve CGI scripts from the /py directory; the listen port defaults to
# 8000 and may be overridden by the first command-line argument.
port = int(sys.argv[1]) if len(sys.argv) > 1 else 8000

handler = CGIHTTPRequestHandler
handler.cgi_directories = ["/py"]

server = HTTPServer(("", port), handler)
print("Server running on port " + str(port))
server.serve_forever()
|
import sys
from http.server import CGIHTTPRequestHandler, HTTPServer
port = 8000
if len(sys.argv) > 1:
port = int(sys.argv[1])
handler = CGIHTTPRequestHandler
handler.cgi_directories = ["/py"]
server = HTTPServer(("", port), handler)
print("Server running on port " + str(port))
server.serve_forever()
|
none
| 1
| 2.985031
| 3
|
|
datasets_tools/trans_mapillary_vistas_to_cityscapes.py
|
vghost2008/wml
| 6
|
6627704
|
import sys
from iotoolkit.mapillary_vistas_toolkit import *
from multiprocess import Pool
import img_utils as wmli
import object_detection_tools.visualization as odv
import matplotlib.pyplot as plt
import numpy as np
import object_detection2.mask as odm
import wml_utils as wmlu
import copy
import json
import cv2
lid = 0  # NOTE(review): appears unused in this file — confirm before removing
# Mapping from Mapillary Vistas label names to target train ids.
# Keys ending in "--" act as prefix wildcards and are expanded against the
# dataset's config_v2.0.json by update_name_to_id(); 255 marks ignored labels.
name_to_id_dict = {
    "construction--flat--bike-lane":0,
    "construction--flat--driveway":0,
    "construction--flat--road":0,
    "construction--flat--road-shoulder":0,
    "construction--flat--rail-track":0,
    "construction--flat--sidewalk":1,
    "object--street-light":2,
    "construction--structure--bridge":3,
    "construction--structure--building":4,
    "human--":5,
    "object--support--pole":6,
    "marking--continuous--dashed":7,
    "marking--continuous--solid":8,
    "marking--discrete--crosswalk-zebra":9,
    "nature--sand":10,
    "nature--sky":11,
    "nature--snow":12,
    "nature--terrain":13,
    "nature--vegetation":14,
    "nature--water":15,
    "object--vehicle--bicycle":16,
    "object--vehicle--boat":17,
    "object--vehicle--bus":18,
    "object--vehicle--car":19,
    "object--vehicle--vehicle-group":19,
    "object--vehicle--caravan":20,
    "object--vehicle--motorcycle":21,
    "object--vehicle--on-rails":22,
    "object--vehicle--truck":23,
    "construction--flat--pedestrian-area":24,
    "construction--structure--tunnel":25,
    "void--ground":26,
    "nature--":255,
    "construction--":255,
    "object--bench":255,
    "void--":255,
}
def update_name_to_id(dict_data, dir):
    """Expand prefix wildcards in *dict_data* using the dataset label config.

    Keys ending in "--" are treated as prefixes: every label name from
    <dir>/config_v2.0.json that starts with such a prefix (and is not
    explicitly listed in *dict_data*) is mapped to the prefix's value.
    Returns the fully expanded mapping.
    """
    with open(os.path.join(dir, "config_v2.0.json")) as fp:
        labels = json.load(fp)["labels"]
    names = [entry["name"] for entry in labels]
    expanded = {}
    for key, value in dict_data.items():
        if not key.endswith("--"):
            expanded[key] = value
            continue
        # prefix wildcard: map every matching config label not listed elsewhere
        for name in names:
            if name.startswith(key) and name not in dict_data and name not in expanded:
                expanded[name] = value
    return expanded
def trans_data(data_dir,save_dir,beg,end):
    """Convert Mapillary Vistas annotations with index in [beg, end) into
    single-channel label PNGs under *save_dir*, skipping already-converted
    images so interrupted runs can resume.
    """
    global name_to_id_dict
    wmlu.show_dict(name_to_id_dict)
    wmlu.create_empty_dir(save_dir,remove_if_exists=False)
    def name_to_id(x):
        # map a label name to its target train id
        return name_to_id_dict[x]
    ignored_labels = ["construction--barrier--ambiguous","construction--barrier--separator"]
    data = MapillaryVistasData(label_text2id=name_to_id, shuffle=False,
                               ignored_labels=ignored_labels,
                               label_map=None,
                               sub_dir_name="validation",
                               #sub_dir_name="training",
                               allowed_labels_fn=list(name_to_id_dict.keys()))
    data.read_data(data_dir)
    def filter(full_path,_):
        # skip images whose output PNG already exists (resume support);
        # NOTE: shadows the builtin `filter` — kept for interface stability
        base_name = wmlu.base_name(full_path)+".png"
        save_path = os.path.join(save_dir,base_name)
        if os.path.exists(save_path):
            print(f"File {save_path} exists.")
            return False
        print(f"File {save_path} not exists.")
        return True
    for i,x in enumerate(data.get_items(beg,end,filter=filter)):
        full_path, img_info, category_ids, category_names, boxes, binary_mask, area, is_crowd, num_annotations_skipped = x
        if len(category_ids) == 0:
            print(f"Skip {full_path}")
            continue
        # collapse per-instance binary masks into one sparse label image;
        # 255 is the ignore label
        new_mask = odm.dense_mask_to_sparse_mask(binary_mask,category_ids,default_label=255)
        base_name = wmlu.base_name(full_path)+".png"
        save_path = os.path.join(save_dir,base_name)
        new_mask = new_mask.astype(np.uint8)
        if os.path.exists(save_path):
            print(f"WARNING: File {save_path} exists.")
        cv2.imwrite(save_path,new_mask)
        sys.stdout.write(f"\r{i}")
if __name__ == "__main__":
    data_dir ="/home/wj/ai/mldata/mapillary_vistas/"
    save_dir = os.path.join(data_dir,'boe_labels_validation')
    # expand the "--" prefix wildcards against the dataset's label config
    name_to_id_dict = update_name_to_id(name_to_id_dict,data_dir)
    # split the index range [0, 18049) into 50-image work chunks
    idxs = list(range(0,18049,50))
    r_idxs = []
    for i in range(len(idxs)-1):
        r_idxs.append([idxs[i],idxs[i+1]])
    wmlu.show_list(r_idxs)
    # 10 worker processes; `multiprocess` (dill-based) can pickle the local fun
    pool = Pool(10)
    def fun(d):
        # worker: convert one [beg, end) chunk
        trans_data(data_dir,save_dir,d[0],d[1])
    res = list(pool.map(fun,r_idxs))
    pool.close()
    pool.join()
    print(res)
    #list(map(fun,r_idxs))
|
import sys
from iotoolkit.mapillary_vistas_toolkit import *
from multiprocess import Pool
import img_utils as wmli
import object_detection_tools.visualization as odv
import matplotlib.pyplot as plt
import numpy as np
import object_detection2.mask as odm
import wml_utils as wmlu
import copy
import json
import cv2
lid = 0
name_to_id_dict = {
"construction--flat--bike-lane":0,
"construction--flat--driveway":0,
"construction--flat--road":0,
"construction--flat--road-shoulder":0,
"construction--flat--rail-track":0,
"construction--flat--sidewalk":1,
"object--street-light":2,
"construction--structure--bridge":3,
"construction--structure--building":4,
"human--":5,
"object--support--pole":6,
"marking--continuous--dashed":7,
"marking--continuous--solid":8,
"marking--discrete--crosswalk-zebra":9,
"nature--sand":10,
"nature--sky":11,
"nature--snow":12,
"nature--terrain":13,
"nature--vegetation":14,
"nature--water":15,
"object--vehicle--bicycle":16,
"object--vehicle--boat":17,
"object--vehicle--bus":18,
"object--vehicle--car":19,
"object--vehicle--vehicle-group":19,
"object--vehicle--caravan":20,
"object--vehicle--motorcycle":21,
"object--vehicle--on-rails":22,
"object--vehicle--truck":23,
"construction--flat--pedestrian-area":24,
"construction--structure--tunnel":25,
"void--ground":26,
"nature--":255,
"construction--":255,
"object--bench":255,
"void--":255,
}
def update_name_to_id(dict_data,dir):
names = []
with open(os.path.join(dir,"config_v2.0.json")) as fp:
data = json.load(fp)
data = data['labels']
for x in data:
names.append(x['name'])
new_dict_data = {}
for k,v in dict_data.items():
if k.endswith("--"):
for name in names:
if name.startswith(k) and name not in dict_data and name not in new_dict_data:
new_dict_data[name] = v
else:
new_dict_data[k] = v
return new_dict_data
def trans_data(data_dir,save_dir,beg,end):
global name_to_id_dict
wmlu.show_dict(name_to_id_dict)
wmlu.create_empty_dir(save_dir,remove_if_exists=False)
def name_to_id(x):
return name_to_id_dict[x]
ignored_labels = ["construction--barrier--ambiguous","construction--barrier--separator"]
data = MapillaryVistasData(label_text2id=name_to_id, shuffle=False,
ignored_labels=ignored_labels,
label_map=None,
sub_dir_name="validation",
#sub_dir_name="training",
allowed_labels_fn=list(name_to_id_dict.keys()))
data.read_data(data_dir)
def filter(full_path,_):
base_name = wmlu.base_name(full_path)+".png"
save_path = os.path.join(save_dir,base_name)
if os.path.exists(save_path):
print(f"File {save_path} exists.")
return False
print(f"File {save_path} not exists.")
return True
for i,x in enumerate(data.get_items(beg,end,filter=filter)):
full_path, img_info, category_ids, category_names, boxes, binary_mask, area, is_crowd, num_annotations_skipped = x
if len(category_ids) == 0:
print(f"Skip {full_path}")
continue
new_mask = odm.dense_mask_to_sparse_mask(binary_mask,category_ids,default_label=255)
base_name = wmlu.base_name(full_path)+".png"
save_path = os.path.join(save_dir,base_name)
new_mask = new_mask.astype(np.uint8)
if os.path.exists(save_path):
print(f"WARNING: File {save_path} exists.")
cv2.imwrite(save_path,new_mask)
sys.stdout.write(f"\r{i}")
if __name__ == "__main__":
data_dir ="/home/wj/ai/mldata/mapillary_vistas/"
save_dir = os.path.join(data_dir,'boe_labels_validation')
name_to_id_dict = update_name_to_id(name_to_id_dict,data_dir)
idxs = list(range(0,18049,50))
r_idxs = []
for i in range(len(idxs)-1):
r_idxs.append([idxs[i],idxs[i+1]])
wmlu.show_list(r_idxs)
pool = Pool(10)
def fun(d):
trans_data(data_dir,save_dir,d[0],d[1])
res = list(pool.map(fun,r_idxs))
pool.close()
pool.join()
print(res)
#list(map(fun,r_idxs))
|
en
| 0.121462
|
#sub_dir_name="training", #list(map(fun,r_idxs))
| 1.848155
| 2
|
merendeira/supplies/admin.py
|
diogobaeder/merendeira
| 0
|
6627705
|
from django.contrib import admin
from merendeira.supplies.models import Category, Product
# Expose the supplies models in the Django admin with default ModelAdmin options.
admin.site.register(Category)
admin.site.register(Product)
|
from django.contrib import admin
from merendeira.supplies.models import Category, Product
admin.site.register(Category)
admin.site.register(Product)
|
none
| 1
| 1.271919
| 1
|
|
linkedlist/Reference_code/q12.py
|
pengfei-chen/algorithm_qa
| 79
|
6627706
|
<gh_stars>10-100
"""
问题描述:给定一个单链表的头结点head,实现一个调整单链表的函数,使得每K个节点之间逆序,
如果最后不够k个节点一组,则不调整最后几个节点。
例如:k = 3时
链表:1->2->3->4->5->6->7->8->None
调整后:3->2->1->6->5->4->7->8->None,7、8不调整,因为不够一组
思路:
1)使用辅助栈或者队列来做n*k个节点的倒置
2)直接使用有限(四个)变量来解决该问题,left表示每k个节点的前一个,start表示每k个节点
的第一个,end表示每k个节点的最后一个,right表示每k个节点的最后一个的下一个。在翻转之前,
有关系:left.next = start end.next = right。可利用该关系解决。注意边界条件
"""
from linkedlist.toolcls import Node, PrintMixin
class ReversePartList(PrintMixin):
    """Reverse a singly linked list in groups of k nodes.

    Each complete group of k nodes is reversed; a trailing group with
    fewer than k nodes is left in its original order.
    """

    @classmethod
    def reverse_part(cls, head, k):
        """Return the new head after reversing every complete k-node group.

        Args:
            head: first Node of the list (or None).
            k: group size; values < 1 leave the list unchanged.
        """
        if head is None or k < 1:
            return head
        cur = head
        count = 0
        # FIFO queue holding the nodes of the group currently being collected.
        # (Removed a dead loop that counted the list length into an unused
        # variable.)
        temp_stack = list()
        # head of the rebuilt list (last node of the first reversed group)
        new_head = None
        # number of complete k-groups processed so far
        circle_count = 0
        # last node of the previous reversed group; its `next` must point at
        # the current group once that group is reversed
        new_pre_node = None
        while cur is not None:
            count += 1
            temp_stack.append(cur)
            nxt = cur.next  # save before links are rewired (avoid shadowing builtin `next`)
            if count == k:
                pre_node = None
                first_node = None
                circle_count += 1
                # drain the queue, pointing each node at its predecessor
                while count > 0:
                    temp_node = temp_stack.pop(0)
                    if count == k:
                        first_node = temp_node
                    if circle_count == 1 and count == 1:
                        # last node of the first group becomes the new head
                        new_head = temp_node
                    temp_node.next = pre_node
                    if new_pre_node is not None and count == 1:
                        new_pre_node.next = temp_node
                    pre_node = temp_node
                    count -= 1
                new_pre_node = first_node
            cur = nxt
        if len(temp_stack) > 0:
            # incomplete trailing group: reattach it unchanged
            temp_node = temp_stack.pop(0)
            if new_pre_node is not None:
                new_pre_node.next = temp_node
            else:
                return temp_node
        return new_head
if __name__ == '__main__':
    # Build 1->2->3->4->5->6->7->8 and reverse it in groups of 3.
    # Expected output: 3 2 1 6 5 4 7 8 (trailing 7, 8 form an incomplete group).
    head = Node(1)
    head.next = Node(2)
    head.next.next = Node(3)
    head.next.next.next = Node(4)
    head.next.next.next.next = Node(5)
    head.next.next.next.next.next = Node(6)
    head.next.next.next.next.next.next = Node(7)
    head.next.next.next.next.next.next.next = Node(8)
    k = 3
    ReversePartList.print_list(ReversePartList.reverse_part(head, k))
|
"""
问题描述:给定一个单链表的头结点head,实现一个调整单链表的函数,使得每K个节点之间逆序,
如果最后不够k个节点一组,则不调整最后几个节点。
例如:k = 3时
链表:1->2->3->4->5->6->7->8->None
调整后:3->2->1->6->5->4->7->8->None,7、8不调整,因为不够一组
思路:
1)使用辅助栈或者队列来做n*k个节点的倒置
2)直接使用有限(四个)变量来解决该问题,left表示每k个节点的前一个,start表示每k个节点
的第一个,end表示每k个节点的最后一个,right表示每k个节点的最后一个的下一个。在翻转之前,
有关系:left.next = start end.next = right。可利用该关系解决。注意边界条件
"""
from linkedlist.toolcls import Node, PrintMixin
class ReversePartList(PrintMixin):
@classmethod
def reverse_part(cls, head, k):
if head is None or k < 1:
return head
cur = head
count = 0
# 辅助队列
temp_stack = list()
length = 0
while cur is not None:
length += 1
cur = cur.next
cur = head
# 倒置后的头结点
new_head = None
# 当前出现了几个 "k节点" 了
circle_count = 0
# 每k个节点的尾节点倒置后的前一个节点
new_pre_node = None
while cur is not None:
count += 1
temp_stack.append(cur)
next = cur.next
if count == k:
pre_node = None
first_node = None
circle_count += 1
while count > 0:
temp_node = temp_stack.pop(0)
if count == k:
first_node = temp_node
if circle_count == 1 and count == 1:
new_head = temp_node
temp_node.next = pre_node
if new_pre_node is not None and count == 1:
new_pre_node.next = temp_node
pre_node = temp_node
count -= 1
new_pre_node = first_node
cur = next
if len(temp_stack) > 0:
temp_node = temp_stack.pop(0)
if new_pre_node is not None:
new_pre_node.next = temp_node
else:
return temp_node
return new_head
if __name__ == '__main__':
head = Node(1)
head.next = Node(2)
head.next.next = Node(3)
head.next.next.next = Node(4)
head.next.next.next.next = Node(5)
head.next.next.next.next.next = Node(6)
head.next.next.next.next.next.next = Node(7)
head.next.next.next.next.next.next.next = Node(8)
k = 3
ReversePartList.print_list(ReversePartList.reverse_part(head, k))
|
zh
| 0.971854
|
问题描述:给定一个单链表的头结点head,实现一个调整单链表的函数,使得每K个节点之间逆序, 如果最后不够k个节点一组,则不调整最后几个节点。 例如:k = 3时 链表:1->2->3->4->5->6->7->8->None 调整后:3->2->1->6->5->4->7->8->None,7、8不调整,因为不够一组 思路: 1)使用辅助栈或者队列来做n*k个节点的倒置 2)直接使用有限(四个)变量来解决该问题,left表示每k个节点的前一个,start表示每k个节点 的第一个,end表示每k个节点的最后一个,right表示每k个节点的最后一个的下一个。在翻转之前, 有关系:left.next = start end.next = right。可利用该关系解决。注意边界条件 # 辅助队列 # 倒置后的头结点 # 当前出现了几个 "k节点" 了 # 每k个节点的尾节点倒置后的前一个节点
| 3.945144
| 4
|
main.py
|
zetof/boing
| 0
|
6627707
|
from interface.lpd8 import LPD8
from interface.theater import Theater
from interface.stage import Stage
from helpers.instrument import Instrument
from helpers.scale import Scale
THEATER_WIDTH = 400  # Width of main window in pixels
THEATER_HEIGHT = 400  # Height of main window in pixels
STAGE_SIZE = 8  # Number of cells in a border of a square stage
TEMPO = 40  # Tempo in BPM
OSC_URL = '127.0.0.1'  # IP address where to send OSC messages
OSC_PORT = 57120  # Port where to send OSC messages
# Starts a LPD8 as MIDI device
lpd8 = LPD8()
# Prepare the main window, also called the theater.
# NOTE(review): tick rate is TEMPO * (STAGE_SIZE - 1) — presumably one tick
# per cell traversal; confirm against Theater's expectations.
theater = Theater(THEATER_WIDTH, THEATER_HEIGHT, TEMPO * (STAGE_SIZE - 1), midi=lpd8)
# Prepare the stage (fixed seed 3578 makes the generated pattern reproducible)
stage = Stage(2, 2, THEATER_HEIGHT - 4, STAGE_SIZE, OSC_URL, OSC_PORT, seed=3578)
# Build two instruments
xylophone = Instrument('xylophone', 6, 0, 6, motion='RIGHT')
bass = Instrument('bass', 2, 0, 20)
# Prepare two scales
major_penta_x = Scale('MAJOR PENTATONIC',60, STAGE_SIZE)
major_penta_b = Scale('IONIAN',43, STAGE_SIZE)
# Add scales to instruments
xylophone.set_scale(major_penta_x)
bass.set_scale(major_penta_b)
# Add instruments to stage
stage.add_instrument(xylophone)
stage.add_instrument(bass)
# Add stage to theater
theater.add_stage(stage)
# Start playing !!! (blocks until the performance ends)
theater.start_performance()
|
from interface.lpd8 import LPD8
from interface.theater import Theater
from interface.stage import Stage
from helpers.instrument import Instrument
from helpers.scale import Scale
THEATER_WIDTH = 400 # Width of main window in pixels
THEATER_HEIGHT = 400 # Height of main window in pixels
STAGE_SIZE = 8 # Number of cells in a border of a square stage
TEMPO = 40 # Tempo in BPM
OSC_URL = '127.0.0.1' # IP address where to send OSC messages
OSC_PORT = 57120 # Port where to send OSC messages
# Starts a LPD8 as MIDI device
lpd8 = LPD8()
# Prepare the main window, also called the theater
theater = Theater(THEATER_WIDTH, THEATER_HEIGHT, TEMPO * (STAGE_SIZE - 1), midi=lpd8)
# Prepare the stage
stage = Stage(2, 2, THEATER_HEIGHT - 4, STAGE_SIZE, OSC_URL, OSC_PORT, seed=3578)
# Build two instruments
xylophone = Instrument('xylophone', 6, 0, 6, motion='RIGHT')
bass = Instrument('bass', 2, 0, 20)
# Prepare two scales
major_penta_x = Scale('MAJOR PENTATONIC',60, STAGE_SIZE)
major_penta_b = Scale('IONIAN',43, STAGE_SIZE)
# Add scales to instruments
xylophone.set_scale(major_penta_x)
bass.set_scale(major_penta_b)
# Add instruments to stage
stage.add_instrument(xylophone)
stage.add_instrument(bass)
# Add stage to theater
theater.add_stage(stage)
# Start playing !!!
theater.start_performance()
|
en
| 0.887202
|
# Width of main window in pixels # Height of main window in pixels # Number of cells in a border of a square stage # Tempo in BPM # IP address where to send OSC messages # Port where to send OSC messages # Starts a LPD8 as MIDI device # Prepare the main window, also called the theater # Prepare the stage # Build two instruments # Prepare two scales # Add scales to instruments # Add instruments to stage # Add stage to theater # Start playing !!!
| 2.685276
| 3
|
model/attention.py
|
thepowerfuldeez/VAENAR-TTS
| 0
|
6627708
|
import torch
import torch.nn as nn
from torch.nn import functional as F
import math
from .utils import LinearNorm, FFN
from utils.tools import get_mask_from_lengths
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class BaseAttention(nn.Module):
    """Abstract base class for attention modules.

    Subclasses attend from a query sequence (`inputs`) over a memory
    sequence and must implement `forward`.
    """

    def __init__(self, attention_dim):
        super(BaseAttention, self).__init__()
        # Dimensionality of the attention context vectors.
        self.attention_dim = attention_dim

    def forward(self, inputs, memory, memory_lengths, query_lengths):
        """
        :param inputs: query, [batch, q_time, q_dim]
        :param memory: [batch, m_time, m_dim]
        :param memory_lengths: [batch,]
        :param query_lengths: [batch,]
        :return: (tensor1, tensor2)
            tensor1: contexts, [batch, q_time, attention_dim]
            tensor2: alignments, probabilities, [batch, q_time, m_time]
        """
        raise NotImplementedError

    @staticmethod
    def _get_key_mask(batch_size, memory_max_time, query_max_time, memory_lengths, query_lengths, device):
        """Combined padding mask, [batch, q_max_time, m_max_time].

        True where both the memory and the query position lie within their
        valid lengths; a length of None means "no padding" on that side.
        """
        memory_lengths = (memory_lengths if memory_lengths is not None
                          else torch.ones(batch_size, dtype=torch.int32, device=device) * memory_max_time)
        # FIX: local renamed from the misspelled 'memeory_mask' for
        # consistency with the subclass override (behavior unchanged).
        memory_mask = get_mask_from_lengths(memory_lengths, memory_max_time)
        memory_mask = torch.tile(memory_mask.unsqueeze(1),  # [batch, 1, m_max_time]
                                 [1, query_max_time, 1])  # [batch, q_max_time, m_max_time]
        query_lengths = (query_lengths if query_lengths is not None
                         else torch.ones(batch_size, dtype=torch.int32, device=device) * query_max_time)
        query_mask = get_mask_from_lengths(query_lengths, query_max_time)  # [batch, q_max_time]
        query_mask = torch.tile(query_mask.unsqueeze(2),  # [batch, q_max_time, 1]
                                [1, 1, memory_max_time])  # [batch, q_max_time, m_max_time]
        length_mask = torch.logical_and(memory_mask, query_mask)
        return length_mask
class MultiHeadScaledProductAttention(BaseAttention):
    """Multi-head scaled dot-product attention.

    Projects queries from `inputs` and keys/values from `memory`, splits
    the projections into `num_head` heads, masks padded (and optionally
    future) positions, and returns per-head softmax alignments plus the
    merged context vectors.
    """

    def __init__(self, attention_dim, input_dim, memory_dim, num_head, temperature=1.0):
        # attention_dim must divide evenly across the heads.
        assert attention_dim % num_head == 0
        super(MultiHeadScaledProductAttention, self).__init__(
            attention_dim=attention_dim)
        # Bias-free linear projections for queries, keys and values.
        self.query_layer = LinearNorm(
            input_dim, attention_dim, use_bias=False)
        self.key_layer = LinearNorm(
            memory_dim, attention_dim, use_bias=False)
        self.value_layer = LinearNorm(
            memory_dim, attention_dim, use_bias=False)
        self.num_head = num_head
        # Softmax temperature: logits are divided by this value.
        self.temperature = temperature

    def _split_head(self, inputs):
        """
        :param inputs: [batch, time, dim]
        :return: [batch, num_head, time, dim // head]
        """
        batch, max_time, dim = inputs.shape
        reshaped = inputs.reshape(batch, max_time, self.num_head,
                                  dim // self.num_head)
        # [batch, time, num_head, dim // head]
        transposed = reshaped.permute(0, 2, 1, 3)
        # [batch, num_head, time, dim // head]
        return transposed

    def _merge_head(self, inputs):
        """
        :param inputs: [batch, num_head, time, dim]
        :return: [batch, time, attention_dim]
        """
        batch, _, time, head_dim = inputs.shape
        transposed = inputs.permute(0, 2, 1, 3)
        # [batch, time, num_head, dim]
        reshaped = transposed.reshape(batch, time, self.num_head * head_dim)
        return reshaped

    def _get_key_mask(self, batch_size, memory_max_time, query_max_time,
                      memory_lengths, query_lengths, device):
        """Padding mask tiled over heads, [batch, num_head, q_max_time, m_max_time].

        True where both the query and memory positions are within their
        valid lengths; a None length means no padding on that side.
        """
        memory_lengths = (memory_lengths if memory_lengths is not None
                          else torch.ones(batch_size, dtype=torch.int32, device=device) * memory_max_time)
        memory_mask = get_mask_from_lengths(memory_lengths, memory_max_time)  # [batch, m_max_time]
        memory_mask = torch.tile(memory_mask.unsqueeze(1),  # [batch, 1, m_max_time]
                                 [1, query_max_time, 1])  # [batch, q_max_time, m_max_time]
        query_lengths = (query_lengths if query_lengths is not None
                         else torch.ones(batch_size, dtype=torch.int32, device=device) * query_max_time)
        query_mask = get_mask_from_lengths(query_lengths, query_max_time)  # [batch, q_max_time]
        query_mask = torch.tile(query_mask.unsqueeze(2),  # [batch, q_max_time, 1]
                                [1, 1, memory_max_time])  # [batch, q_max_time, m_max_time]
        length_mask = torch.logical_and(memory_mask, query_mask)
        # Broadcast the same mask to every attention head.
        length_mask = torch.tile(length_mask.unsqueeze(1),
                                 [1, self.num_head, 1, 1])
        # [batch, num_head, q_max_time, m_max_time]
        return length_mask

    @staticmethod
    def _get_causal_mask(logits):
        # Lower-triangular mask: query position t may only attend to
        # memory positions <= t.
        causal_mask = torch.tril(torch.ones(logits.shape, dtype=torch.bool, device=logits.device))
        return causal_mask

    def forward(self, inputs, memory, memory_lengths=None, query_lengths=None, causality=None):
        """Attend from `inputs` over `memory`.

        :param inputs: queries, [batch, Tq, input_dim]
        :param memory: key/value source, [batch, Tk, memory_dim]
        :param memory_lengths: valid memory lengths, [batch,] or None
        :param query_lengths: valid query lengths, [batch,] or None
        :param causality: if truthy, additionally mask future positions
        :return: (contexts [batch, Tq, attention_dim],
                  alignments [batch, num_head, Tq, Tk])
        """
        queries = self.query_layer(inputs)  # [batch, Tq, D]
        keys = self.key_layer(memory)  # [batch, Tk, D]
        values = self.value_layer(memory)  # [batch, Tk, Dv]
        headed_queries = self._split_head(queries)  # [batch, num_head, Tq, head_dim]
        headed_keys = self._split_head(keys)  # [batch, num_head, Tk, head_dim]
        headed_values = self._split_head(values)  # [batch, num_head, Tk, head_dim]
        logits = torch.matmul(headed_queries,
                              headed_keys.transpose(-2, -1))  # [batch, num_head, Tq, Tk]
        logits = logits / math.sqrt(
            float(self.attention_dim // self.num_head))  # scale
        logits = logits / self.temperature  # temperature
        # apply mask
        batch_size = memory.shape[0]
        memory_max_time = memory.shape[1]
        query_max_time = inputs.shape[1]
        length_mask = self._get_key_mask(
            batch_size, memory_max_time, query_max_time, memory_lengths, query_lengths, inputs.device)
        if causality:
            causal_mask = self._get_causal_mask(logits)
            length_mask = torch.logical_and(length_mask, causal_mask)
        # [batch, num_head, q_max_time, m_max_time]
        # Masked positions get a huge negative value so softmax ~= 0 there.
        paddings = torch.ones_like(logits, dtype=torch.float32) * (-2. ** 32 + 1)
        logits = torch.where(length_mask, logits, paddings)
        alignments = torch.softmax(logits, dim=3)  # [batch, num_head, Tq, Tk]
        contexts = torch.matmul(alignments, headed_values)
        # [batch, num_head, Tq, head_dim]
        contexts = self._merge_head(contexts)  # [batch, Tq, attention_dim]
        return contexts, alignments
class SelfAttentionBlock(nn.Module):
    """Transformer-style self-attention block.

    Multi-head attention over `memory`, a residual projection with
    LayerNorm, then a position-wise feed-forward network.
    """

    def __init__(self, input_dim, attention_dim, attention_heads, attention_temperature,
                 ffn_hidden):
        super(SelfAttentionBlock, self).__init__()
        self.input_dim = input_dim
        self.attention_dim = attention_dim
        self.attention = MultiHeadScaledProductAttention(attention_dim=attention_dim,
                                                         input_dim=input_dim,
                                                         memory_dim=input_dim,
                                                         num_head=attention_heads,
                                                         temperature=attention_temperature)
        self.att_proj = LinearNorm(attention_dim + input_dim, input_dim)
        self.layer_norm = nn.LayerNorm(input_dim)
        self.ffn = FFN(in_features=input_dim, hidden1=ffn_hidden, hidden2=input_dim)

    def forward(self, inputs, memory, query_lengths, memory_lengths, causality=None):
        """Return (block outputs, attention alignments)."""
        attended, align = self.attention(inputs=inputs, memory=memory,
                                         query_lengths=query_lengths,
                                         memory_lengths=memory_lengths,
                                         causality=causality)
        # Concatenate input and attention context, project back to input_dim,
        # then apply the residual connection and layer normalization.
        projected = self.att_proj(torch.cat([inputs, attended], dim=-1))
        normed = self.layer_norm(inputs + projected)
        return self.ffn(normed), align
class CrossAttentionBlock(nn.Module):
    """Decoder-style block: causal self-attention over `inputs`, then
    cross-attention over `memory`, each followed by a residual projection
    plus LayerNorm, and finally a feed-forward network.
    """

    def __init__(self, input_dim, memory_dim, attention_dim, attention_heads, attention_temperature,
                 ffn_hidden, name=None):
        super(CrossAttentionBlock, self).__init__()
        self.name = name
        self.input_dim = input_dim
        self.attention_dim = attention_dim
        # Causal self-attention over the query sequence.
        self.self_attention = MultiHeadScaledProductAttention(
            attention_dim=attention_dim, input_dim=input_dim, memory_dim=input_dim, num_head=attention_heads,
            temperature=attention_temperature)
        self.att_proj1 = LinearNorm(attention_dim + input_dim, input_dim)
        self.layer_norm1 = nn.LayerNorm(input_dim)
        # Cross-attention from queries over the (encoder) memory.
        self.cross_attention = MultiHeadScaledProductAttention(
            attention_dim=attention_dim, input_dim=input_dim, memory_dim=memory_dim, num_head=attention_heads,
            temperature=attention_temperature)
        self.att_proj2 = LinearNorm(attention_dim * 2, attention_dim)
        self.layer_norm2 = nn.LayerNorm(attention_dim)
        self.ffn = FFN(in_features=attention_dim, hidden1=ffn_hidden, hidden2=attention_dim)

    def forward(self, inputs, memory, query_lengths, memory_lengths):
        """Return (block outputs, cross-attention alignments).

        Self-attention alignments are computed but not returned.
        """
        # 1) Causal self-attention with residual + LayerNorm.
        self_att_outs, self_ali = self.self_attention(
            inputs=inputs, memory=inputs, query_lengths=query_lengths,
            memory_lengths=query_lengths, causality=True)
        contexts = torch.cat([inputs, self_att_outs], dim=-1)
        self_att_outs = self.att_proj1(contexts)
        self_att_outs = self.layer_norm1(self_att_outs + inputs)
        # 2) Non-causal cross-attention over memory with residual + LayerNorm.
        att_outs, cross_ali = self.cross_attention(
            inputs=self_att_outs, memory=memory, query_lengths=query_lengths,
            memory_lengths=memory_lengths, causality=False)
        contexts = torch.cat([self_att_outs, att_outs], dim=-1)
        att_outs = self.att_proj2(contexts)
        att_outs = self.layer_norm2(att_outs + self_att_outs)
        # 3) Position-wise feed-forward network.
        ffn_outs = self.ffn(att_outs)
        return ffn_outs, cross_ali
|
import torch
import torch.nn as nn
from torch.nn import functional as F
import math
from .utils import LinearNorm, FFN
from utils.tools import get_mask_from_lengths
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class BaseAttention(nn.Module):
def __init__(self, attention_dim):
super(BaseAttention, self).__init__()
self.attention_dim = attention_dim
def forward(self, inputs, memory, memory_lengths, query_lengths):
"""
:param inputs: query, [batch, q_time, q_dim]
:param memory: [batch, m_time, m_dim]
:param memory_lengths: [batch,]
:param query_lengths: [batch,]
:return: (tensor1, tensor2)
tensor1: contexts, [batch, q_time, attention_dim]
tensor2: alignments, probabilities, [batch, q_time, m_time]
"""
raise NotImplementedError
@staticmethod
def _get_key_mask(batch_size, memory_max_time, query_max_time, memory_lengths, query_lengths, device):
memory_lengths = (memory_lengths if memory_lengths is not None
else torch.ones(batch_size, dtype=torch.int32, device=device) * memory_max_time)
memeory_mask = get_mask_from_lengths(memory_lengths, memory_max_time)
memeory_mask = torch.tile(memeory_mask.unsqueeze(1), # [batch, 1, m_max_time]
[1, query_max_time, 1]) # [batch, q_max_time, m_max_time]
query_lengths = (query_lengths if query_lengths is not None
else torch.ones(batch_size, dtype=torch.int32, device=device) * query_max_time)
query_mask = get_mask_from_lengths(query_lengths, query_max_time) # [batch, q_max_time]
query_mask = torch.tile(query_mask.unsqueeze(2), # [batch, q_max_time, 1]
[1, 1, memory_max_time]) # [batch, q_max_time, m_max_time]
length_mask = torch.logical_and(memeory_mask, query_mask)
return length_mask
class MultiHeadScaledProductAttention(BaseAttention):
def __init__(self, attention_dim, input_dim, memory_dim, num_head, temperature=1.0):
assert attention_dim % num_head == 0
super(MultiHeadScaledProductAttention, self).__init__(
attention_dim=attention_dim)
self.query_layer = LinearNorm(
input_dim, attention_dim, use_bias=False)
self.key_layer = LinearNorm(
memory_dim, attention_dim, use_bias=False)
self.value_layer = LinearNorm(
memory_dim, attention_dim, use_bias=False)
self.num_head = num_head
self.temperature = temperature
def _split_head(self, inputs):
"""
:param inputs: [batch, time, dim]
:return: [batch, num_head, time, dim // head]
"""
batch, max_time, dim = inputs.shape
reshaped = inputs.reshape(batch, max_time, self.num_head,
dim // self.num_head)
# [batch, time, num_head, dim // head]
transposed = reshaped.permute(0, 2, 1, 3)
# [batch, num_head, time, dim // head]
return transposed
def _merge_head(self, inputs):
"""
:param inputs: [batch, num_head, time, dim]
:return: [batch, time, attention_dim]
"""
batch, _, time, head_dim = inputs.shape
transposed = inputs.permute(0, 2, 1, 3)
# [batch, time, num_head, dim]
reshaped = transposed.reshape(batch, time, self.num_head * head_dim)
return reshaped
def _get_key_mask(self, batch_size, memory_max_time, query_max_time,
memory_lengths, query_lengths, device):
memory_lengths = (memory_lengths if memory_lengths is not None
else torch.ones(batch_size, dtype=torch.int32, device=device) * memory_max_time)
memory_mask = get_mask_from_lengths(memory_lengths, memory_max_time) # [batch, m_max_time]
memory_mask = torch.tile(memory_mask.unsqueeze(1), # [batch, 1, m_max_time]
[1, query_max_time, 1]) # [batch, q_max_time, m_max_time]
query_lengths = (query_lengths if query_lengths is not None
else torch.ones(batch_size, dtype=torch.int32, device=device) * query_max_time)
query_mask = get_mask_from_lengths(query_lengths, query_max_time) # [batch, q_max_time]
query_mask = torch.tile(query_mask.unsqueeze(2), # [batch, q_max_time, 1]
[1, 1, memory_max_time]) # [batch, q_max_time, m_max_time]
length_mask = torch.logical_and(memory_mask, query_mask)
length_mask = torch.tile(length_mask.unsqueeze(1),
[1, self.num_head, 1, 1])
# [batch, num_head, q_max_time, m_max_time]
return length_mask
@staticmethod
def _get_causal_mask(logits):
causal_mask = torch.tril(torch.ones(logits.shape, dtype=torch.bool, device=logits.device))
return causal_mask
def forward(self, inputs, memory, memory_lengths=None, query_lengths=None, causality=None):
queries = self.query_layer(inputs) # [batch, Tq, D]
keys = self.key_layer(memory) # [batch, Tk, D]
values = self.value_layer(memory) # [batch, Tk, Dv]
headed_queries = self._split_head(queries) # [batch, num_head, Tq, head_dim]
headed_keys = self._split_head(keys) # [batch, num_head, Tk, head_dim]
headed_values = self._split_head(values) # [batch, num_head, Tk, head_dim]
logits = torch.matmul(headed_queries,
headed_keys.transpose(-2, -1)) # [batch, num_head, Tq, Tk]
logits = logits / math.sqrt(
float(self.attention_dim // self.num_head)) # scale
logits = logits / self.temperature # temperature
# apply mask
batch_size = memory.shape[0]
memory_max_time = memory.shape[1]
query_max_time = inputs.shape[1]
length_mask = self._get_key_mask(
batch_size, memory_max_time, query_max_time, memory_lengths, query_lengths, inputs.device)
if causality:
causal_mask = self._get_causal_mask(logits)
length_mask = torch.logical_and(length_mask, causal_mask)
# [batch, num_head, q_max_time, m_max_time]
paddings = torch.ones_like(logits, dtype=torch.float32) * (-2. ** 32 + 1)
logits = torch.where(length_mask, logits, paddings)
alignments = torch.softmax(logits, dim=3) # [batch, num_head, Tq, Tk]
contexts = torch.matmul(alignments, headed_values)
# [batch, num_head, Tq, head_dim]
contexts = self._merge_head(contexts) # [batch, Tq, attention_dim]
return contexts, alignments
class SelfAttentionBlock(nn.Module):
def __init__(self, input_dim, attention_dim, attention_heads, attention_temperature,
ffn_hidden):
super(SelfAttentionBlock, self).__init__()
self.input_dim = input_dim
self.attention_dim = attention_dim
self.attention = MultiHeadScaledProductAttention(attention_dim=attention_dim,
input_dim=input_dim,
memory_dim=input_dim,
num_head=attention_heads,
temperature=attention_temperature)
self.att_proj = LinearNorm(attention_dim + input_dim, input_dim)
self.layer_norm = nn.LayerNorm(input_dim)
self.ffn = FFN(in_features=input_dim, hidden1=ffn_hidden, hidden2=input_dim)
def forward(self, inputs, memory, query_lengths, memory_lengths, causality=None):
att_outs, alignments = self.attention(inputs=inputs, memory=memory,
query_lengths=query_lengths,
memory_lengths=memory_lengths,
causality=causality)
contexts = torch.cat([inputs, att_outs], dim=-1)
att_outs = self.att_proj(contexts)
att_outs = self.layer_norm(inputs + att_outs)
ffn_outs = self.ffn(att_outs)
return ffn_outs, alignments
class CrossAttentionBlock(nn.Module):
def __init__(self, input_dim, memory_dim, attention_dim, attention_heads, attention_temperature,
ffn_hidden, name=None):
super(CrossAttentionBlock, self).__init__()
self.name = name
self.input_dim = input_dim
self.attention_dim = attention_dim
self.self_attention = MultiHeadScaledProductAttention(
attention_dim=attention_dim, input_dim=input_dim, memory_dim=input_dim, num_head=attention_heads,
temperature=attention_temperature)
self.att_proj1 = LinearNorm(attention_dim + input_dim, input_dim)
self.layer_norm1 = nn.LayerNorm(input_dim)
self.cross_attention = MultiHeadScaledProductAttention(
attention_dim=attention_dim, input_dim=input_dim, memory_dim=memory_dim, num_head=attention_heads,
temperature=attention_temperature)
self.att_proj2 = LinearNorm(attention_dim * 2, attention_dim)
self.layer_norm2 = nn.LayerNorm(attention_dim)
self.ffn = FFN(in_features=attention_dim, hidden1=ffn_hidden, hidden2=attention_dim)
def forward(self, inputs, memory, query_lengths, memory_lengths):
self_att_outs, self_ali = self.self_attention(
inputs=inputs, memory=inputs, query_lengths=query_lengths,
memory_lengths=query_lengths, causality=True)
contexts = torch.cat([inputs, self_att_outs], dim=-1)
self_att_outs = self.att_proj1(contexts)
self_att_outs = self.layer_norm1(self_att_outs + inputs)
att_outs, cross_ali = self.cross_attention(
inputs=self_att_outs, memory=memory, query_lengths=query_lengths,
memory_lengths=memory_lengths, causality=False)
contexts = torch.cat([self_att_outs, att_outs], dim=-1)
att_outs = self.att_proj2(contexts)
att_outs = self.layer_norm2(att_outs + self_att_outs)
ffn_outs = self.ffn(att_outs)
return ffn_outs, cross_ali
|
en
| 0.525237
|
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu") :param inputs: query, [batch, q_time, q_dim] :param memory: [batch, m_time, m_dim] :param memory_lengths: [batch,] :param query_lengths: [batch,] :return: (tensor1, tensor2) tensor1: contexts, [batch, q_time, attention_dim] tensor2: alignments, probabilities, [batch, q_time, m_time] # [batch, 1, m_max_time] # [batch, q_max_time, m_max_time] # [batch, q_max_time] # [batch, q_max_time, 1] # [batch, q_max_time, m_max_time] :param inputs: [batch, time, dim] :return: [batch, num_head, time, dim // head] # [batch, time, num_head, dim // head] # [batch, num_head, time, dim // head] :param inputs: [batch, num_head, time, dim] :return: [batch, time, attention_dim] # [batch, time, num_head, dim] # [batch, m_max_time] # [batch, 1, m_max_time] # [batch, q_max_time, m_max_time] # [batch, q_max_time] # [batch, q_max_time, 1] # [batch, q_max_time, m_max_time] # [batch, num_head, q_max_time, m_max_time] # [batch, Tq, D] # [batch, Tk, D] # [batch, Tk, Dv] # [batch, num_head, Tq, head_dim] # [batch, num_head, Tk, head_dim] # [batch, num_head, Tk, head_dim] # [batch, num_head, Tq, Tk] # scale # temperature # apply mask # [batch, num_head, q_max_time, m_max_time] # [batch, num_head, Tq, Tk] # [batch, num_head, Tq, head_dim] # [batch, Tq, attention_dim]
| 2.097205
| 2
|
isofit/geometry.py
|
cfranken/isofit
| 0
|
6627709
|
#! /usr/bin/env python3
#
# Copyright 2018 California Institute of Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ISOFIT: Imaging Spectrometer Optimal FITting
# Author: <NAME>, <EMAIL>
#
import math
from datetime import datetime

import scipy as s

from sunposition import sunpos
class Geometry:
    """The geometry of the observation: everything needed to locate the
    sensor, the surface and the sun.

    Args:
        obs: observation record [path_length, to-sensor azimuth (deg, 0-360
            clockwise from N), to-sensor zenith (deg, 0-90), to-sun azimuth,
            to-sun zenith].
        glt: unused here; kept for interface compatibility.
        loc: location record [longitude, latitude, elevation] — elevation
            presumably in meters (divided by 1000 for km); confirm upstream.
        ds: acquisition timestamp string in '%Y%m%dt%H%M%S' format.
        esd: per-day earth-sun distance table, indexed [day_of_year-1, 1].
        pushbroom_column: cross-track column index, if applicable.
    """

    def __init__(self, obs=None, glt=None, loc=None, ds=None,
                 esd=None, pushbroom_column=None):
        self.earth_sun_file = None
        self.observer_zenith = None
        self.observer_azimuth = None
        self.observer_altitude_km = None
        self.surface_elevation_km = None
        self.datetime = None
        self.day_of_year = None
        self.latitude = None
        self.longitude = None
        self.longitudeE = None
        self.gmtime = None
        self.earth_sun_distance = None
        self.pushbroom_column = pushbroom_column
        if obs is not None:
            self.path_length = obs[0]
            self.observer_azimuth = obs[1]  # 0 to 360 clockwise from N
            self.observer_zenith = obs[2]  # 0 to 90 from zenith
            self.solar_azimuth = obs[3]  # 0 to 360 clockwise from N
            self.solar_zenith = obs[4]  # 0 to 90 from zenith
            self.OBSZEN = 180.0 - abs(obs[2])  # MODTRAN convention?
            self.RELAZ = obs[1] - obs[3] + 180.0
            self.PARM1 = self.RELAZ  # MODTRAN convention
            # Cosine of observer zenith (Libradtran 'umu'). math.cos/radians
            # replace the scipy aliases (s.cos, s.pi) removed in SciPy 1.3.
            self.umu = math.cos(math.radians(obs[2]))
        else:
            self.observer_azimuth = 0
            self.observer_zenith = 0
            self.OBSZEN = 180.0
            self.RELAZ = 0.0
            self.PARM1 = self.RELAZ
            self.TRUEAZ = 0.0
            self.umu = 1.0
        if loc is not None:
            self.GNDALT = loc[2]
            self.altitude = loc[2]
            self.surface_elevation_km = loc[2] / 1000.0
            self.latitude = loc[1]
            self.longitude = loc[0]
            self.longitudeE = -loc[0]
            if self.longitude < 0:
                # BUG FIX: wrap a negative longitude into [0, 360). The
                # original '360.0 - self.longitude' added the magnitude
                # instead, yielding values > 360 for any negative input.
                self.longitude = 360.0 + self.longitude
            print('Geometry lat: %f, lon: %f' %
                  (self.latitude, self.longitude))
        print('observer OBSZEN: %f, RELAZ: %f' % (self.OBSZEN, self.RELAZ))
        if ds is not None:
            self.datetime = datetime.strptime(ds, '%Y%m%dt%H%M%S')
            self.day_of_year = self.datetime.timetuple().tm_yday
        if esd is not None:
            self.earth_sun_distance = esd.copy()

    def coszen(self):
        """Cosine of the solar zenith angle at the acquisition time/place."""
        self.dt = self.datetime
        az, zen, ra, dec, h = sunpos(self.datetime, self.latitude,
                                     self.longitudeE, self.surface_elevation_km * 1000.0,
                                     radians=True)
        return math.cos(zen)

    def sundist(self):
        """Earth-sun distance for the acquisition day. Uses zero-indexed table."""
        return float(self.earth_sun_distance[self.day_of_year - 1, 1])
|
#! /usr/bin/env python3
#
# Copyright 2018 California Institute of Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ISOFIT: Imaging Spectrometer Optimal FITting
# Author: <NAME>, <EMAIL>
#
import scipy as s
from sunposition import sunpos
from datetime import datetime
class Geometry:
"""The geometry of the observation, all we need to calculate sensor,
surface and solar positions"""
def __init__(self, obs=None, glt=None, loc=None, ds=None,
esd=None, pushbroom_column=None):
self.earth_sun_file = None
self.observer_zenith = None
self.observer_azimuth = None
self.observer_altitude_km = None
self.surface_elevation_km = None
self.datetime = None
self.day_of_year = None
self.latitude = None
self.longitude = None
self.longitudeE = None
self.gmtime = None
self.earth_sun_distance = None
self.pushbroom_column = pushbroom_column
if obs is not None:
self.path_length = obs[0]
self.observer_azimuth = obs[1] # 0 to 360 clockwise from N
self.observer_zenith = obs[2] # 0 to 90 from zenith
self.solar_azimuth = obs[3] # 0 to 360 clockwise from N
self.solar_zenith = obs[4] # 0 to 90 from zenith
self.OBSZEN = 180.0 - abs(obs[2]) # MODTRAN convention?
self.RELAZ = obs[1] - obs[3] + 180.0
self.PARM1 = self.RELAZ # MODTRAN convention
self.umu = s.cos(obs[2]/360.0*2.0*s.pi) # Libradtran
else:
self.observer_azimuth = 0
self.observer_zenith = 0
self.OBSZEN = 180.0
self.RELAZ = 0.0
self.PARM1 = self.RELAZ
self.TRUEAZ = 0.0
self.umu = 1.0
if loc is not None:
self.GNDALT = loc[2]
self.altitude = loc[2]
self.surface_elevation_km = loc[2] / 1000.0
self.latitude = loc[1]
self.longitude = loc[0]
self.longitudeE = -loc[0]
if self.longitude < 0:
self.longitude = 360.0 - self.longitude
print('Geometry lat: %f, lon: %f' %
(self.latitude, self.longitude))
print('observer OBSZEN: %f, RELAZ: %f' % (self.OBSZEN, self.RELAZ))
if ds is not None:
self.datetime = datetime.strptime(ds, '%Y%m%dt%H%M%S')
self.day_of_year = self.datetime.timetuple().tm_yday
if esd is not None:
self.earth_sun_distance = esd.copy()
def coszen(self):
self.dt = self.datetime
az, zen, ra, dec, h = sunpos(self.datetime, self.latitude,
self.longitudeE, self.surface_elevation_km * 1000.0,
radians=True)
return s.cos(zen)
def sundist(self):
'''Use zero-indexed table'''
return float(self.earth_sun_distance[self.day_of_year-1, 1])
|
en
| 0.808797
|
#! /usr/bin/env python3 # # Copyright 2018 California Institute of Technology # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ISOFIT: Imaging Spectrometer Optimal FITting # Author: <NAME>, <EMAIL> # The geometry of the observation, all we need to calculate sensor, surface and solar positions # 0 to 360 clockwise from N # 0 to 90 from zenith # 0 to 360 clockwise from N # 0 to 90 from zenith # MODTRAN convention? # MODTRAN convention # Libradtran Use zero-indexed table
| 2.274918
| 2
|
python/wheedle/errors.py
|
kpvdr/actions-artifact-poller
| 1
|
6627710
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Error classes used by the poller.
"""
class PollerError(RuntimeError):
    """ Parent class for all poller errors; catch this to handle any poller failure """
class ConfigFileError(PollerError):
    """ Configuration file error(s) """
    def __init__(self, config_file_name, config_section, error_msg):
        # config_section is None for file-wide errors; otherwise the message
        # names the offending [section].
        if config_section is None:
            super().__init__('ConfigFileError:\n Config file: {}\n Error: {}'. \
                format(config_file_name, error_msg))
        else:
            super().__init__('ConfigFileError:\n Config file: {}\n Section: [{}]\n'
                ' Error: {}'.format(config_file_name, config_section, error_msg))
class ContentTypeError(PollerError):
    """ Error when the returned information is not of type application/json """
    def __init__(self, response):
        # BUG FIX: str.find returns -1 when the URL has no query string,
        # which used to drop the URL's last character; split is safe both ways.
        url = response.url.split('?')[0]
        super().__init__('ContentTypeError: GET {} returned unexpected content-type {}'.format(
            url, response.headers['content-type']))
        # Keep the full response so callers can inspect headers/body.
        self.response = response
class DisabledRepoError(PollerError):
    """ Error when the GH project is disabled """
    def __init__(self, repo_full_name):
        # repo_full_name: 'owner/repo' string as reported by the GitHub API.
        super().__init__('DisabledRepoError: Repository {} is disabled'.format(repo_full_name))
class EmptyCommitListError(PollerError):
    """ Error when no commits are returned from source repository """
    def __init__(self, repo):
        # repo: repository object exposing full_name().
        super().__init__('EmptyCommitListError: No commits were found in repository {}'.format( \
            repo.full_name()))
class ErrorList(PollerError):
    """ Allows multiple exception objects to be raised together """
    def __init__(self, error_list):
        # Keep the originals so callers can iterate or query them later.
        self._error_list = error_list
        if error_list:
            # One sub-error message per line.
            message = '\n'.join(str(err) for err in error_list)
        else:
            message = '[]'
        super().__init__(message)
    def contains_class(self, clazz):
        """ Return True if list contains class clazz """
        return any(isinstance(err, clazz) for err in self._error_list)
    def __iter__(self):
        return iter(self._error_list)
class GhConnectionRefusedError(PollerError):
    """ Connection refused to a given URL """
    def __init__(self, url):
        # url: the endpoint that refused the connection.
        super().__init__('GhConnectionRefusedError: Connection refused to URL "{}"'.format(url))
class HttpError(PollerError):
    """ Error when a HTTP request returns an unexpected (non-success) status """
    def __init__(self, method, response, msg=None):
        # BUG FIX 1: the condition was inverted ('' if msg is not None ...),
        # which discarded a caller-supplied message and appended '\n None'
        # when no message was given.
        msg_suffix = '' if msg is None else '\n {}'.format(msg)
        # BUG FIX 2: str.find returns -1 when the URL has no query string,
        # which used to drop the URL's last character; split is safe both ways.
        url = response.url.split('?')[0]
        super().__init__('HttpError: {} to "{}" returned status {} ({}){}'.format(
            method, url, response.status_code, response.reason, msg_suffix))
        # Keep the full response so callers can inspect headers/body.
        self.response = response
class JsonDecodeError(PollerError):
    """ Error reading a JSON data file """
    def __init__(self, file_name, json_err):
        super().__init__('JsonDecodeError: File "{}": {}'.format(file_name, json_err))
        # Keep the underlying decoder error for callers to inspect.
        self.json_err = json_err
class ServiceConnectionError(PollerError):
    """ Error when the connection to a service fails """
    def __init__(self, service_name, service_url):
        # service_name appears twice in the message via the {0} placeholder.
        super().__init__('ServiceConnectionError: {0} not running or invalid {0} URL {1}'.format( \
            service_name, service_url))
class TokenNotFoundError(PollerError):
    """ Error if GitHub token not found """
    def __init__(self, token_file_name):
        # token_file_name: path of the missing token file.
        super().__init__('TokenNotFoundError: GitHub token "{}" file not found'.format( \
            token_file_name))
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Error classes used by the poller.
"""
class PollerError(RuntimeError):
    """ Parent class for all poller errors; catch this to handle any poller failure """

class ConfigFileError(PollerError):
    """ Configuration file error(s).

    config_section may be None when the error is not tied to a single section.
    """
    def __init__(self, config_file_name, config_section, error_msg):
        if config_section is None:
            super().__init__('ConfigFileError:\n Config file: {}\n Error: {}'. \
                format(config_file_name, error_msg))
        else:
            super().__init__('ConfigFileError:\n Config file: {}\n Section: [{}]\n'
                ' Error: {}'.format(config_file_name, config_section, error_msg))

class ContentTypeError(PollerError):
    """ Error when the returned information is not of type application/json """
    def __init__(self, response):
        # The query string is stripped from the URL for a readable message.
        # NOTE(review): if the URL has no '?', find() returns -1 and the slice
        # drops the final character -- confirm request URLs always carry a query.
        super().__init__('ContentTypeError: GET {} returned unexpected content-type {}'.format( \
            response.url[0: response.url.find('?')],
            response.headers['content-type']))
        # Keep the raw response for callers that need more detail
        self.response = response

class DisabledRepoError(PollerError):
    """ Error when the GH project is disabled """
    def __init__(self, repo_full_name):
        super().__init__('DisabledRepoError: Repository {} is disabled'.format(repo_full_name))

class EmptyCommitListError(PollerError):
    """ Error when no commits are returned from source repository """
    def __init__(self, repo):
        super().__init__('EmptyCommitListError: No commits were found in repository {}'.format( \
            repo.full_name()))

class ErrorList(PollerError):
    """ Allows multiple exception objects to be raised together.

    The combined message contains one line per contained error; an empty
    list renders as '[]'.
    """
    def __init__(self, error_list):
        self._error_list = error_list
        super().__init__('\n'.join(str(error) for error in error_list) if error_list else '[]')

    def contains_class(self, clazz):
        """ Return True if the list contains an instance of class clazz """
        return any(isinstance(err, clazz) for err in self._error_list)

    def __iter__(self):
        """ Iterate over the contained error objects """
        return iter(self._error_list)

class GhConnectionRefusedError(PollerError):
    """ Connection refused to a given URL """
    def __init__(self, url):
        super().__init__('GhConnectionRefusedError: Connection refused to URL "{}"'.format(url))

class HttpError(PollerError):
    """ Error when a HTTP GET request returns anything other than 200 (ok) """
    def __init__(self, method, response, msg=None):
        # BUG FIX: the original condition was inverted
        # ('' if msg is not None else ...), which dropped the caller-supplied
        # message and appended '\n None' when no message was given.
        msg_suffix = '' if msg is None else '\n {}'.format(msg)
        super().__init__('HttpError: {} to "{}" returned status {} ({}){}'.format( \
            method, response.url[0: response.url.find('?')], response.status_code, response.reason,
            msg_suffix))
        # Keep the raw response for callers that need more detail
        self.response = response

class JsonDecodeError(PollerError):
    """ Error reading a JSON data file """
    def __init__(self, file_name, json_err):
        super().__init__('JsonDecodeError: File "{}": {}'.format(file_name, json_err))
        # Original decode error, kept for programmatic inspection
        self.json_err = json_err

class ServiceConnectionError(PollerError):
    """ Error when the connection to a service fails """
    def __init__(self, service_name, service_url):
        super().__init__('ServiceConnectionError: {0} not running or invalid {0} URL {1}'.format( \
            service_name, service_url))

class TokenNotFoundError(PollerError):
    """ Error if GitHub token not found """
    def __init__(self, token_file_name):
        super().__init__('TokenNotFoundError: GitHub token "{}" file not found'.format( \
            token_file_name))
|
en
| 0.823258
|
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # Error classes used by the poller. Parent class for all poller errors Configuration file error(s) Error when the returned information is not of type application/json Error when the GH project is disabled Error when no commits are returned from source repository Allows multiple exception objects to be raised together Return True if list contains class clazz Connection refused to a given URL Error when a HTTP GET request returns anything other than 200 (ok) Error reading a JSON data file Error when the connection to a service fails Error if GitHub token not found
| 1.845249
| 2
|
aiida_abacus/data/__init__.py
|
materials-science/aiida-abacus
| 4
|
6627711
|
# -*- coding: utf-8 -*-
"""
Data types provided by plugin
Register data types via the "aiida.data" entry point in setup.json.
"""
# You can directly use or subclass aiida.orm.data.Data
# or any other data type listed under 'verdi data'
from voluptuous import Schema, Optional
from aiida.orm import Dict
# A subset of diff's command line options.
# Each key is an optional boolean flag; the voluptuous Schema built from this
# dict rejects any other key or a non-bool value.
cmdline_options = {
    Optional('ignore-case'): bool,
    Optional('ignore-file-name-case'): bool,
    Optional('ignore-tab-expansion'): bool,
    Optional('ignore-space-change'): bool,
    Optional('ignore-all-space'): bool,
}
class DiffParameters(Dict):  # pylint: disable=too-many-ancestors
    """
    Command line options for diff.

    A dictionary node carrying the command line options that are passed to
    the diff executable; the contents are validated on construction.
    """

    # "voluptuous" schema used for automatic validation of the dictionary
    schema = Schema(cmdline_options)

    # pylint: disable=redefined-builtin
    def __init__(self, dict=None, **kwargs):
        """
        Construct a validated parameters node.

        Usage: ``DiffParameters(dict{'ignore-case': True})``

        :param dict: dictionary with commandline parameters
        :param type dict: dict
        """
        super().__init__(dict=self.validate(dict), **kwargs)

    def validate(self, parameters_dict):  # pylint: disable=no-self-use
        """Validate command line options against the class schema.

        Find out about allowed keys using::

            print(DiffParameters).schema.schema

        :param parameters_dict: dictionary with commandline parameters
        :param type parameters_dict: dict
        :returns: validated dictionary
        """
        return DiffParameters.schema(parameters_dict)

    def cmdline_params(self, file1_name, file2_name):
        """Synthesize command line parameters.

        e.g. [ '--ignore-case', 'filename1', 'filename2']

        :param file1_name: Name of first file
        :param type file1_name: str
        :param file2_name: Name of second file
        :param type file2_name: str
        """
        # Only flags whose value is truthy are emitted, followed by the files.
        flags = ['--' + key for key, enabled in self.get_dict().items() if enabled]
        return [str(param) for param in flags + [file1_name, file2_name]]

    def __str__(self):
        """String representation: the usual node repr followed by the dict contents."""
        return '{}\n{}'.format(super().__str__(), str(self.get_dict()))
|
# -*- coding: utf-8 -*-
"""
Data types provided by plugin
Register data types via the "aiida.data" entry point in setup.json.
"""
# You can directly use or subclass aiida.orm.data.Data
# or any other data type listed under 'verdi data'
from voluptuous import Schema, Optional
from aiida.orm import Dict
# A subset of diff's command line options
cmdline_options = {
Optional('ignore-case'): bool,
Optional('ignore-file-name-case'): bool,
Optional('ignore-tab-expansion'): bool,
Optional('ignore-space-change'): bool,
Optional('ignore-all-space'): bool,
}
class DiffParameters(Dict): # pylint: disable=too-many-ancestors
"""
Command line options for diff.
This class represents a python dictionary used to
pass command line options to the executable.
"""
# "voluptuous" schema to add automatic validation
schema = Schema(cmdline_options)
# pylint: disable=redefined-builtin
def __init__(self, dict=None, **kwargs):
"""
Constructor for the data class
Usage: ``DiffParameters(dict{'ignore-case': True})``
:param parameters_dict: dictionary with commandline parameters
:param type parameters_dict: dict
"""
dict = self.validate(dict)
super().__init__(dict=dict, **kwargs)
def validate(self, parameters_dict): # pylint: disable=no-self-use
"""Validate command line options.
Uses the voluptuous package for validation. Find out about allowed keys using::
print(DiffParameters).schema.schema
:param parameters_dict: dictionary with commandline parameters
:param type parameters_dict: dict
:returns: validated dictionary
"""
return DiffParameters.schema(parameters_dict)
def cmdline_params(self, file1_name, file2_name):
"""Synthesize command line parameters.
e.g. [ '--ignore-case', 'filename1', 'filename2']
:param file_name1: Name of first file
:param type file_name1: str
:param file_name2: Name of second file
:param type file_name2: str
"""
parameters = []
pm_dict = self.get_dict()
for k in pm_dict.keys():
if pm_dict[k]:
parameters += ['--' + k]
parameters += [file1_name, file2_name]
return [str(p) for p in parameters]
def __str__(self):
"""String representation of node.
Append values of dictionary to usual representation. E.g.::
uuid: b416cbee-24e8-47a8-8c11-6d668770158b (pk: 590)
{'ignore-case': True}
"""
string = super().__str__()
string += '\n' + str(self.get_dict())
return string
|
en
| 0.504847
|
# -*- coding: utf-8 -*- Data types provided by plugin Register data types via the "aiida.data" entry point in setup.json. # You can directly use or subclass aiida.orm.data.Data # or any other data type listed under 'verdi data' # A subset of diff's command line options # pylint: disable=too-many-ancestors Command line options for diff. This class represents a python dictionary used to pass command line options to the executable. # "voluptuous" schema to add automatic validation # pylint: disable=redefined-builtin Constructor for the data class Usage: ``DiffParameters(dict{'ignore-case': True})`` :param parameters_dict: dictionary with commandline parameters :param type parameters_dict: dict # pylint: disable=no-self-use Validate command line options. Uses the voluptuous package for validation. Find out about allowed keys using:: print(DiffParameters).schema.schema :param parameters_dict: dictionary with commandline parameters :param type parameters_dict: dict :returns: validated dictionary Synthesize command line parameters. e.g. [ '--ignore-case', 'filename1', 'filename2'] :param file_name1: Name of first file :param type file_name1: str :param file_name2: Name of second file :param type file_name2: str String representation of node. Append values of dictionary to usual representation. E.g.:: uuid: b416cbee-24e8-47a8-8c11-6d668770158b (pk: 590) {'ignore-case': True}
| 2.235081
| 2
|
src/look-around/eval.py
|
srama2512/visual-exploration
| 12
|
6627712
|
"""
Script to evaluate look-around policies
"""
import os
import sys
import pdb
import json
import torch
import random
import argparse
import torchvision
import tensorboardX
import torch.optim as optim
import torchvision.utils as vutils
from envs import *
from utils import *
from agent import *
from base.common import *
from tensorboardX import SummaryWriter
if __name__ == '__main__':
    # ---- Command line options -------------------------------------------
    parser = argparse.ArgumentParser()
    # Optimization options
    parser.add_argument('--h5_path_unseen', type=str, default='')
    parser.add_argument('--seed', type=int, default=123)
    parser.add_argument('--batch_size', type=int, default=32)
    # Agent options
    parser.add_argument('--T', type=int, default=-1)
    parser.add_argument('--iscuda', type=str2bool, default=True)
    parser.add_argument('--model_path', type=str, default='')
    parser.add_argument('--greedy', type=str2bool, default=True)
    parser.add_argument('--memorize_views', type=str2bool, default=True)
    parser.add_argument('--actorType', type=str, default='unset', help='[ actor | random | saved_trajectories | const_action | peek_saliency ]')
    parser.add_argument('--const_act', type=int, default=-1, help='constant action to execute under const_action')
    # Environment options
    parser.add_argument('--start_view', type=int, default=0, help='[0 - random, 1 - center, 2 - alternate positions, 3 - adversarial]')
    parser.add_argument('--save_path', type=str, default='', help='Path to directory to save some sample results')
    parser.add_argument('--utility_h5_path', type=str, default='', help='Stored utility maps from one-view expert to obtain expert trajectories')
    parser.add_argument('--trajectories_type', type=str, default='utility_maps', help='[utility_maps | expert_trajectories | saliency_scores]')
    parser.add_argument('--eval_val', type=str2bool, default=False, help='Evaluate on validation set?')
    opts = parser.parse_args()
    # Fixed training-time settings that are irrelevant during evaluation
    opts.mask_path = ''
    opts.shuffle = False
    opts.init = 'xavier'
    opts.reward_scale = 1
    opts.start_views_json = ''
    opts.expert_rewards = False
    opts.supervised_scale = 1e-2
    opts.reward_scale_expert = 1e-4
    opts.expert_trajectories = False
    # ---- Restore architecture/environment options from the checkpoint ----
    # so that evaluation matches the training configuration
    loaded_state = torch.load(opts.model_path)
    if opts.T == -1:
        opts.T = loaded_state['opts'].T
    opts.M = loaded_state['opts'].M
    opts.N = loaded_state['opts'].N
    opts.delta_M = loaded_state['opts'].delta_M
    opts.delta_N = loaded_state['opts'].delta_N
    opts.h5_path = loaded_state['opts'].h5_path
    opts.dataset = loaded_state['opts'].dataset
    opts.actOnElev = loaded_state['opts'].actOnElev
    opts.actOnAzim = loaded_state['opts'].actOnAzim
    opts.actOnTime = loaded_state['opts'].actOnTime
    opts.knownElev = loaded_state['opts'].knownElev
    opts.knownAzim = loaded_state['opts'].knownAzim
    opts.wrap_azimuth = loaded_state['opts'].wrap_azimuth
    opts.wrap_elevation = loaded_state['opts'].wrap_elevation
    opts.act_to_delta = loaded_state['opts'].act_to_delta
    opts.delta_to_act = loaded_state['opts'].delta_to_act
    opts.mean_subtract = loaded_state['opts'].mean_subtract
    # Command line overrides take precedence over checkpointed values
    if opts.actorType == 'unset':
        opts.actorType = loaded_state['opts'].actorType
    if opts.const_act == -1:
        # Older checkpoints may predate the const_act option
        if hasattr(loaded_state['opts'], 'const_act'):
            opts.const_act = loaded_state['opts'].const_act
    opts.baselineType = loaded_state['opts'].baselineType
    opts.act_full_obs = loaded_state['opts'].act_full_obs
    opts.critic_full_obs = loaded_state['opts'].critic_full_obs
    # A: number of actions, P: proprioception input size
    opts.A = opts.delta_M * opts.delta_N
    opts.P = opts.delta_M * opts.N
    # Seed every RNG for reproducible evaluation
    random.seed(opts.seed)
    np.random.seed(opts.seed)
    torch.manual_seed(opts.seed)
    torch.cuda.manual_seed_all(opts.seed)
    # Expert-driven actors need the data loader that also serves trajectories
    if opts.actorType == 'saved_trajectories' or opts.actorType == 'demo_sidekick' or opts.actorType == 'peek_saliency':
        from data_loader import DataLoaderExpertPolicy as DataLoader
    else:
        from data_loader import DataLoaderSimple as DataLoader
    # Per-dataset normalization statistics (identity when mean_subtract is off)
    if opts.dataset == 0:
        if opts.mean_subtract:
            opts.mean = [119.16, 107.68, 95.12]
            opts.std = [61.88, 61.72, 67.24]
        else:
            opts.mean = [0, 0, 0]
            opts.std = [1, 1, 1]
        opts.num_channels = 3
    elif opts.dataset == 1:
        if opts.mean_subtract:
            opts.mean = [193.0162338615919]
            opts.std = [37.716024486312811]
        else:
            opts.mean = [0]
            # BUG FIX: std was [0], which makes the (x - mean) / std
            # normalization degenerate; the RGB branch above uses 1 for the
            # no-normalization case, so use 1 here as well.
            opts.std = [1]
        opts.num_channels = 1
    else:
        raise ValueError('Dataset %d does not exist!'%(opts.dataset))
    # ---- Build loader + agent and run the evaluation ----------------------
    loader = DataLoader(opts)
    agent = Agent(opts, mode='eval')
    agent.policy.load_state_dict(loaded_state['state_dict'], strict=False)
    if opts.start_view != 3:
        if opts.eval_val:
            val_err, val_std, val_std_err, _ = evaluate(loader, agent, 'val', opts)
        else:
            test_err, test_std, test_std_err, decoded_images = evaluate(loader, agent, 'test', opts)
            if opts.dataset == 1:
                if opts.h5_path_unseen != '':
                    test_unseen_err, test_unseen_std, test_unseen_std_err, decoded_images_unseen = evaluate(loader, agent, 'test_unseen', opts)
    else:
        # start_view == 3: adversarial start positions
        if opts.eval_val:
            val_err, val_std, val_std_err, _ = evaluate(loader, agent, 'val', opts)
        else:
            test_err, test_std, test_std_err, decoded_images = evaluate_adversarial(loader, agent, 'test', opts)
            if opts.dataset == 1 and opts.h5_path_unseen != '':
                test_unseen_err, test_unseen_std, test_unseen_std_err, decoded_images_unseen = evaluate_adversarial(loader, agent, 'test_unseen', opts)
    # ---- Log sample reconstructions to tensorboard (test modes only) ------
    if not opts.eval_val:
        writer = SummaryWriter(log_dir=opts.save_path)
        # Pick up to 10 random test batches to visualize
        count_choice = min(loader.counts['test'] // opts.batch_size, 10)
        rng_choices = random.sample(range(loader.counts['test']//opts.batch_size), count_choice)
        for choice in rng_choices:
            for pano_count in range(decoded_images[choice].size(0)):
                x = vutils.make_grid(decoded_images[choice][pano_count], padding=5, normalize=True, scale_each=True, nrow=opts.T+1, pad_value=1.0)
                writer.add_image('Test batch #%d, image #%d'%(choice, pano_count), x, 0)
        if opts.dataset == 1:
            if opts.h5_path_unseen != '':
                count_choice = min(loader.counts['test_unseen'] // opts.batch_size, 10)
                rng_choices = random.sample(range(loader.counts['test_unseen']//opts.batch_size), count_choice)
                for choice in rng_choices:
                    for pano_count in range(decoded_images_unseen[choice].size(0)):
                        x = vutils.make_grid(decoded_images_unseen[choice][pano_count], padding=5, normalize=True, scale_each=True, nrow=opts.T+1, pad_value=1.0)
                        writer.add_image('Test unseen batch #%d, image #%d'%(choice, pano_count), x, 0)
    # ---- Report results ----------------------------------------------------
    if opts.eval_val:
        print('Val mean(x1000): %6.3f | std(x1000): %6.3f | std err(x1000): %6.3f'%(val_err*1000, val_std*1000, val_std_err*1000))
    else:
        print('===== Test error =====')
        print('%6.3f'%(test_err * 1000))
        if opts.dataset == 1:
            if opts.h5_path_unseen != '':
                print('===== Test unseen error =====')
                print('%6.3f'%(test_unseen_err * 1000))
    # BUG FIX: writer only exists in the test branches; closing it
    # unconditionally raised NameError under --eval_val.
    if not opts.eval_val:
        writer.close()
|
"""
Script to evaluate look-around policies
"""
import os
import sys
import pdb
import json
import torch
import random
import argparse
import torchvision
import tensorboardX
import torch.optim as optim
import torchvision.utils as vutils
from envs import *
from utils import *
from agent import *
from base.common import *
from tensorboardX import SummaryWriter
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Optimization options
parser.add_argument('--h5_path_unseen', type=str, default='')
parser.add_argument('--seed', type=int, default=123)
parser.add_argument('--batch_size', type=int, default=32)
# Agent options
parser.add_argument('--T', type=int, default=-1)
parser.add_argument('--iscuda', type=str2bool, default=True)
parser.add_argument('--model_path', type=str, default='')
parser.add_argument('--greedy', type=str2bool, default=True)
parser.add_argument('--memorize_views', type=str2bool, default=True)
parser.add_argument('--actorType', type=str, default='unset', help='[ actor | random | saved_trajectories | const_action | peek_saliency ]')
parser.add_argument('--const_act', type=int, default=-1, help='constant action to execute under const_action')
# Environment options
parser.add_argument('--start_view', type=int, default=0, help='[0 - random, 1 - center, 2 - alternate positions, 3 - adversarial]')
parser.add_argument('--save_path', type=str, default='', help='Path to directory to save some sample results')
parser.add_argument('--utility_h5_path', type=str, default='', help='Stored utility maps from one-view expert to obtain expert trajectories')
parser.add_argument('--trajectories_type', type=str, default='utility_maps', help='[utility_maps | expert_trajectories | saliency_scores]')
parser.add_argument('--eval_val', type=str2bool, default=False, help='Evaluate on validation set?')
opts = parser.parse_args()
opts.mask_path = ''
opts.shuffle = False
opts.init = 'xavier'
opts.reward_scale = 1
opts.start_views_json = ''
opts.expert_rewards = False
opts.supervised_scale = 1e-2
opts.reward_scale_expert = 1e-4
opts.expert_trajectories = False
loaded_state = torch.load(opts.model_path)
if opts.T == -1:
opts.T = loaded_state['opts'].T
opts.M = loaded_state['opts'].M
opts.N = loaded_state['opts'].N
opts.delta_M = loaded_state['opts'].delta_M
opts.delta_N = loaded_state['opts'].delta_N
opts.h5_path = loaded_state['opts'].h5_path
opts.dataset = loaded_state['opts'].dataset
opts.actOnElev = loaded_state['opts'].actOnElev
opts.actOnAzim = loaded_state['opts'].actOnAzim
opts.actOnTime = loaded_state['opts'].actOnTime
opts.knownElev = loaded_state['opts'].knownElev
opts.knownAzim = loaded_state['opts'].knownAzim
opts.wrap_azimuth = loaded_state['opts'].wrap_azimuth
opts.wrap_elevation = loaded_state['opts'].wrap_elevation
opts.act_to_delta = loaded_state['opts'].act_to_delta
opts.delta_to_act = loaded_state['opts'].delta_to_act
opts.mean_subtract = loaded_state['opts'].mean_subtract
if opts.actorType == 'unset':
opts.actorType = loaded_state['opts'].actorType
if opts.const_act == -1:
if hasattr(loaded_state['opts'], 'const_act'):
opts.const_act = loaded_state['opts'].const_act
opts.baselineType = loaded_state['opts'].baselineType
opts.act_full_obs = loaded_state['opts'].act_full_obs
opts.critic_full_obs = loaded_state['opts'].critic_full_obs
opts.A = opts.delta_M * opts.delta_N
opts.P = opts.delta_M * opts.N
random.seed(opts.seed)
np.random.seed(opts.seed)
torch.manual_seed(opts.seed)
torch.cuda.manual_seed_all(opts.seed)
if opts.actorType == 'saved_trajectories' or opts.actorType == 'demo_sidekick' or opts.actorType == 'peek_saliency':
from data_loader import DataLoaderExpertPolicy as DataLoader
else:
from data_loader import DataLoaderSimple as DataLoader
if opts.dataset == 0:
if opts.mean_subtract:
opts.mean = [119.16, 107.68, 95.12]
opts.std = [61.88, 61.72, 67.24]
else:
opts.mean = [0, 0, 0]
opts.std = [1, 1, 1]
opts.num_channels = 3
elif opts.dataset == 1:
if opts.mean_subtract:
opts.mean = [193.0162338615919]
opts.std = [37.716024486312811]
else:
opts.mean = [0]
opts.std = [0]
opts.num_channels = 1
else:
raise ValueError('Dataset %d does not exist!'%(opts.dataset))
loader = DataLoader(opts)
agent = Agent(opts, mode='eval')
agent.policy.load_state_dict(loaded_state['state_dict'], strict=False)
if opts.start_view != 3:
if opts.eval_val:
val_err, val_std, val_std_err, _ = evaluate(loader, agent, 'val', opts)
else:
test_err, test_std, test_std_err, decoded_images = evaluate(loader, agent, 'test', opts)
if opts.dataset == 1:
if opts.h5_path_unseen != '':
test_unseen_err, test_unseen_std, test_unseen_std_err, decoded_images_unseen = evaluate(loader, agent, 'test_unseen', opts)
else:
if opts.eval_val:
val_err, val_std, val_std_err, _ = evaluate(loader, agent, 'val', opts)
else:
test_err, test_std, test_std_err, decoded_images = evaluate_adversarial(loader, agent, 'test', opts)
if opts.dataset == 1 and opts.h5_path_unseen != '':
test_unseen_err, test_unseen_std, test_unseen_std_err, decoded_images_unseen = evaluate_adversarial(loader, agent, 'test_unseen', opts)
if not opts.eval_val:
writer = SummaryWriter(log_dir=opts.save_path)
count_choice = min(loader.counts['test'] // opts.batch_size, 10)
rng_choices = random.sample(range(loader.counts['test']//opts.batch_size), count_choice)
for choice in rng_choices:
for pano_count in range(decoded_images[choice].size(0)):
x = vutils.make_grid(decoded_images[choice][pano_count], padding=5, normalize=True, scale_each=True, nrow=opts.T+1, pad_value=1.0)
writer.add_image('Test batch #%d, image #%d'%(choice, pano_count), x, 0)
if opts.dataset == 1:
if opts.h5_path_unseen != '':
count_choice = min(loader.counts['test_unseen'] // opts.batch_size, 10)
rng_choices = random.sample(range(loader.counts['test_unseen']//opts.batch_size), count_choice)
for choice in rng_choices:
for pano_count in range(decoded_images_unseen[choice].size(0)):
x = vutils.make_grid(decoded_images_unseen[choice][pano_count], padding=5, normalize=True, scale_each=True, nrow=opts.T+1, pad_value=1.0)
writer.add_image('Test unseen batch #%d, image #%d'%(choice, pano_count), x, 0)
if opts.eval_val:
print('Val mean(x1000): %6.3f | std(x1000): %6.3f | std err(x1000): %6.3f'%(val_err*1000, val_std*1000, val_std_err*1000))
else:
print('===== Test error =====')
print('%6.3f'%(test_err * 1000))
if opts.dataset == 1:
if opts.h5_path_unseen != '':
print('===== Test unseen error =====')
print('%6.3f'%(test_unseen_err * 1000))
writer.close()
|
en
| 0.362077
|
Script to evaluate look-around policies # Optimization options # Agent options # Environment options #%d, image #%d'%(choice, pano_count), x, 0) #%d, image #%d'%(choice, pano_count), x, 0)
| 2.029099
| 2
|
scripts/training/reflection/training/dataset_processing/multi_objective/multi_split.py
|
DanJSG/reflectiment
| 0
|
6627713
|
<filename>scripts/training/reflection/training/dataset_processing/multi_objective/multi_split.py
from math import floor
from random import shuffle
from nltk import word_tokenize
import string

# Parallel line-aligned corpora: sentence i in sentences.txt is scored by
# line i of multi_scores.txt.  Context managers ensure every handle is
# closed and flushed (the originals were never closed).
with open("./dataset_processing/bawe/sentences.txt", "r") as fin:
    bawe_sentences = fin.readlines()
with open("./dataset_processing/bawe/multi_scores.txt", "r") as fin:
    bawe_scores = fin.readlines()
with open("./dataset_processing/york/sentences.txt", "r") as fin:
    york_sentences = fin.readlines()
with open("./dataset_processing/york/multi_scores.txt", "r") as fin:
    york_scores = fin.readlines()

# 70/30 train/test split, sized on the COMBINED corpus.
# NOTE(review): only York samples are ever placed in the test set below, so if
# n_test_samples exceeds len(york_sentences) the test set ends up smaller than
# 30% -- confirm this is intended.
total_samples = len(bawe_sentences) + len(york_sentences)
n_test_samples = floor(total_samples * 0.3)
n_train_samples = total_samples - n_test_samples
print(total_samples)
print(n_test_samples)
print(n_train_samples)

# Shuffle York sentence/score pairs together so the split is random while the
# sentence-score alignment is preserved.
sent_scores = list(zip(york_sentences, york_scores))
shuffle(sent_scores)
york_sentences, york_scores = zip(*sent_scores)

with open("./dataset_processing/combined/multi_objective/sentences.test.txt", "w+") as test_sentences_file, \
     open("./dataset_processing/combined/multi_objective/scores.test.txt", "w+") as test_scores_file, \
     open("./dataset_processing/combined/multi_objective/sentences.train.txt", "w+") as train_sentences_file, \
     open("./dataset_processing/combined/multi_objective/scores.train.txt", "w+") as train_scores_file:
    # First n_test_samples shuffled York pairs go to test, the remainder to train.
    for count, (sentence, score) in enumerate(zip(york_sentences, york_scores)):
        sentence = sentence.strip("\n")
        score = score.strip("\n")
        if count < n_test_samples:
            test_sentences_file.write(f"{sentence}\n")
            test_scores_file.write(f"{score}\n")
        else:
            train_sentences_file.write(f"{sentence}\n")
            train_scores_file.write(f"{score}\n")
    # All BAWE pairs go to train: lower-cased, punctuation stripped,
    # and re-tokenised so their format matches the York data.
    for sentence, score in zip(bawe_sentences, bawe_scores):
        sentence = sentence.translate(str.maketrans('', '', string.punctuation)).lower().strip("\n").strip()
        sentence = " ".join(word_tokenize(sentence))
        score = score.strip("\n")
        train_sentences_file.write(f"{sentence}\n")
        train_scores_file.write(f"{score}\n")
|
<filename>scripts/training/reflection/training/dataset_processing/multi_objective/multi_split.py
from math import floor
from random import shuffle
from nltk import word_tokenize
import string
bawe_sentences = open("./dataset_processing/bawe/sentences.txt", "r").readlines()
bawe_scores = open("./dataset_processing/bawe/multi_scores.txt", "r").readlines()
york_sentences = open("./dataset_processing/york/sentences.txt", "r").readlines()
york_scores = open("./dataset_processing/york/multi_scores.txt", "r").readlines()
total_samples = len(bawe_sentences) + len(york_sentences)
n_test_samples = floor(total_samples * 0.3)
n_train_samples = total_samples - n_test_samples
print(total_samples)
print(n_test_samples)
print(n_train_samples)
test_sentences_file = open("./dataset_processing/combined/multi_objective/sentences.test.txt", "w+")
test_scores_file = open("./dataset_processing/combined/multi_objective/scores.test.txt", "w+")
train_sentences_file = open("./dataset_processing/combined/multi_objective/sentences.train.txt", "w+")
train_scores_file = open("./dataset_processing/combined/multi_objective/scores.train.txt", "w+")
sent_scores = list(zip(york_sentences, york_scores))
shuffle(sent_scores)
york_sentences, york_scores = zip(*sent_scores)
count = 0
for sentence, score in zip(york_sentences, york_scores):
sentence = sentence.strip("\n")
score = score.strip("\n")
if count < n_test_samples:
test_sentences_file.write(f"{sentence}\n")
test_scores_file.write(f"{score}\n")
else:
train_sentences_file.write(f"{sentence}\n")
train_scores_file.write(f"{score}\n")
count += 1
for sentence, score in zip(bawe_sentences, bawe_scores):
sentence = sentence.translate(str.maketrans('','', string.punctuation)).lower().strip("\n").strip()
sentence = " ".join(word_tokenize(sentence))
score = score.strip("\n")
train_sentences_file.write(f"{sentence}\n")
train_scores_file.write(f"{score}\n")
|
none
| 1
| 2.864765
| 3
|
|
A2C/simplePG.py
|
CommanderCero/RL_Algorithms
| 1
|
6627714
|
import numpy as np
import torch
import torch.nn as nn
import gym
import models
import utils
def train(policy: models.Policy, env, train_steps = 1000, reward_decay=0.99, learning_rate=0.001):
    """Train *policy* with vanilla REINFORCE: one full episode per gradient update.

    :param policy: network exposing ``get_actions`` / ``get_log_probs`` and callable
        to produce an action distribution over states
    :param env: gym-style environment with ``reset()`` / ``step(action)``
    :param train_steps: number of policy-gradient updates (one trajectory each)
    :param reward_decay: discount factor used for the rewards-to-go
    :param learning_rate: Adam learning rate
    """
    action_cache = []
    state_cache = []
    reward_cache = []
    optimizer = torch.optim.Adam(policy.parameters(), lr=learning_rate)
    for step in range(train_steps):
        # Clear cache (the per-episode buffers are reused across updates)
        action_cache.clear()
        state_cache.clear()
        reward_cache.clear()
        # Collect data from one trajectory
        state = env.reset()
        done = False
        while not done:
            inp = torch.FloatTensor([state])
            action = policy.get_actions(inp)[0]
            new_state, reward, done, _ = env.step(action)
            # Collect data
            action_cache.append(action)
            state_cache.append(state)
            reward_cache.append(reward)
            state = new_state
        # Compute "Loss"-function for computing the policy gradient:
        # loss = -mean( log pi(a_t|s_t) * G_t ) with G_t the discounted rewards-to-go
        rewards_to_go = np.array(utils.discounted_cumsum(reward_cache, reward_decay))
        dist = policy(torch.Tensor(state_cache))
        log_probs = policy.get_log_probs(dist, torch.Tensor(action_cache))
        loss = torch.mean(-log_probs * torch.Tensor(rewards_to_go.reshape(log_probs.shape)))
        # Gradient descent (Technically ascend since we took the negative of the policy gradient)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if step % 50 == 0:
            # NOTE(review): this prints the sum of rewards-to-go, not the raw
            # episode return -- the "Reward Sum" label slightly overstates it.
            print(f"Loss={loss}\tReward Sum={np.sum(rewards_to_go)}")
if __name__ == "__main__":
    # CartPole-v0: 4-dim observation space, 2 discrete actions -- the MLP
    # below maps 4 -> 32 -> 64 -> 2 logits for the softmax policy.
    env = gym.make("CartPole-v0")
    policy = models.SoftmaxPolicy(nn.Sequential(
        nn.Linear(4, 32),
        nn.ReLU(),
        nn.Linear(32, 64),
        nn.ReLU(),
        nn.Linear(64, 2)
    ))
    train(policy, env)
    # Fresh environment instance for demonstrating the trained policy
    env = gym.make("CartPole-v0")
    utils.play(policy, env)
|
import numpy as np
import torch
import torch.nn as nn
import gym
import models
import utils
def train(policy: models.Policy, env, train_steps = 1000, reward_decay=0.99, learning_rate=0.001):
action_cache = []
state_cache = []
reward_cache = []
optimizer = torch.optim.Adam(policy.parameters(), lr=learning_rate)
for step in range(train_steps):
# Clear cache
action_cache.clear()
state_cache.clear()
reward_cache.clear()
# Collect data from one trajectory
state = env.reset()
done = False
while not done:
inp = torch.FloatTensor([state])
action = policy.get_actions(inp)[0]
new_state, reward, done, _ = env.step(action)
# Collect data
action_cache.append(action)
state_cache.append(state)
reward_cache.append(reward)
state = new_state
# Compute "Loss"-function for computing the policy gradient
rewards_to_go = np.array(utils.discounted_cumsum(reward_cache, reward_decay))
dist = policy(torch.Tensor(state_cache))
log_probs = policy.get_log_probs(dist, torch.Tensor(action_cache))
loss = torch.mean(-log_probs * torch.Tensor(rewards_to_go.reshape(log_probs.shape)))
# Gradient descent (Technically ascend since we took the negative of the policy gradient)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if step % 50 == 0:
print(f"Loss={loss}\tReward Sum={np.sum(rewards_to_go)}")
if __name__ == "__main__":
env = gym.make("CartPole-v0")
policy = models.SoftmaxPolicy(nn.Sequential(
nn.Linear(4, 32),
nn.ReLU(),
nn.Linear(32, 64),
nn.ReLU(),
nn.Linear(64, 2)
))
train(policy, env)
env = gym.make("CartPole-v0")
utils.play(policy, env)
|
en
| 0.843759
|
# Clear cache # Collect data from one trajectory # Collect data # Compute "Loss"-function for computing the policy gradient # Gradient descent (Technically ascend since we took the negative of the policy gradient)
| 2.989241
| 3
|
chiebukuro2.py
|
l-plantarum/chiebukuro
| 0
|
6627715
|
<filename>chiebukuro2.py
#!/usr/bin/python3
# coding=utf-8
import time
import urllib.request
from bs4 import BeautifulSoup
import sys
from pymongo import MongoClient
import re
import datetime
import json
import syslog
# Open the given article URL and return the HTTP response.
# Returns None when the article cannot be fetched: a withdrawn question
# answers with HTTP 404, and any other HTTP error also yields no usable body.
def urlopen(url):
    try:
        return urllib.request.urlopen(url)
    except urllib.error.HTTPError:
        # 404 (withdrawn) and every other HTTP error alike map to None
        return None
# Fetch the question page at the given URL and add its data to MongoDB.
# Returns True when the DB contents were changed,
# False when the DB was left untouched (duplicate, withdrawn, or unreachable).
# main / mainlink are the listing-page title and link stored alongside the body.
def insertQuestion(url, main, mainlink):
    # Skip URLs that are already registered
    # NOTE(review): this client is never closed on the early-return paths, and
    # a second client is opened further below -- consider reusing/closing it.
    client = MongoClient('mongodb://localhost:27017')
    db = client.local
    res = db.qa.find({"url":url})
    if res.count() != 0:
        return False
    # Do nothing if the question was already marked cancelled
    # NOTE(review): this is a chained comparison, i.e.
    # ("cancel" not in res) and (res == True); a pymongo Cursor never equals
    # True, so the guard can never fire -- confirm the intended check.
    if "cancel" not in res == True:
        return False
    # Issue the page request
    resp = urlopen(url)
    # Do nothing if the question was withdrawn from the start (HTTP 404)
    if resp == None:
        return False
    src = resp.read()
    soup = BeautifulSoup(src, 'lxml')
    # Question container
    usrQ = soup.find("div", class_="usrQstn")
    # Poster information (old page-layout selectors kept for reference)
    # usrInfo = usrQ.find("div", class_="usrInfo")
    # author = usrInfo.find("p", class_="usrNm")
    author = soup.find("div", class_="ClapLv1UserInfo_Chie-UserInfo__UserName__1bJYU")
    # posttime = usrInfo.find("p", class_="upDt")
    posttime = soup.find("p", class_="ClapLv1UserInfo_Chie-UserInfo__Date__2F1LF")
    # The element used to be <p>date<span>time</span></p>; now the text is
    # split on a space into date and time parts.
    # time = posttime.span.string
    daytime = posttime.text.split(' ')
    time = daytime[1]
    day = daytime[0]
    # Zero-pad month and day so the stored dates have a consistent format
    arr = day.split('/')
    if (len(arr[1]) == 1):
        arr[1] = '0' + arr[1]
    if (len(arr[2]) == 1):
        arr[2] = '0' + arr[2]
    day = '/'.join(arr)
    # Question body
    # question = usrQ.find("div", class_="ptsQes")
    question = soup.find("p", class_="yjDirectSLinkTarget ClapLv1TextBlock_Chie-TextBlock__Text__1jsQC ClapLv1TextBlock_Chie-TextBlock__Text--medium__3nLu4 ClapLv1TextBlock_Chie-TextBlock__Text--SpaceOut__3kF8R ClapLv1TextBlock_Chie-TextBlock__Text--preLine__2SRma")
    # The question text is padded with lots of whitespace; strip it.
    # (A one-line question yields one part, a multi-line question two.)
    #car = re.sub('^s+$', '', re.sub(r'^\s+', '', qbody[0].text))
    # NOTE(review): the first pattern '^s+$' is missing a backslash -- it
    # matches lines of literal 's' characters, probably meant r'^\s+$'.
    car = re.sub('^s+$', '', re.sub(r'^\s+', '', question.text))
    #if (len(qbody) == 2):
    #    cdr = re.sub(r'\s+$', '', re.sub(r'^\s+', '', qbody[1].text))
    # Supplementary note appended to the question (optional)
    qsup = question.find("p", class_="queTxtsup")
    if (qsup != None):
        sup = qsup.text
    else:
        sup = ''
    # Thank-you points offered for answers (optional; defaults to 0)
    thxpt = question.find("cc", class_="ClapLv2QuestionItem_Chie-QuestionItem__SubChieCoin__2akxj")
    if (thxpt != None):
        point = thxpt.text
    else:
        point = 0
    # Crawl timestamp
    now = datetime.datetime.now()
    # Connect to MongoDB
    client = MongoClient('mongodb://localhost:27017')
    db = client.local
    # Write the record to mongo
    data = {
        # 'author': author.text,
        'url' : url,
        'getdate': now.strftime("%Y/%m/%d %H:%M:%S"),
        'postdate': day + ' ' + time,
        'main': main,
        'mainlink': mainlink,
        'body': car,
        'point': point,
        'sup': sup
    }
    db.qa.insert_one(data)
    client.close()
    return True
# Write the stored-question count to syslog.
def outputCount(msg):
    """Log the current number of documents in `qa` to syslog, tagged *msg*."""
    mongo = MongoClient('mongodb://localhost:27017')
    total = mongo.local.qa.count()
    syslog.openlog("chiebukuro")
    syslog.syslog(msg + ":" + str(total))
    mongo.close()
# Top page of the Chiebukuro "university entrance exam" category.
url = 'https://chiebukuro.yahoo.co.jp/dir/list.php?did=2079405665&flg=3&type=list&sort=2'
# Fetch the top page.
resp = urllib.request.urlopen(url)
# Command-line options.
batchMode = False  # --batch: report via syslog instead of stdout
allMode = False    # --all: crawl everything, do not stop at known questions
for i in range(1, len(sys.argv)):
    # Batch mode
    if sys.argv[i] == "--batch":
        batchMode = True
    if sys.argv[i] == "--all":
        allMode = True
if batchMode == True:
    outputCount("begin")
breakFlag = False
series = 0  # consecutive questions already present in the DB
# Break out once the last page has been crawled.
while True:
    src = resp.read()
    soup = BeautifulSoup(src, 'lxml')
    qalst = soup.find("div", id="qa_lst")
    qas = qalst.find_all("div", class_="ClapLv3List_Chie-List__ListItem__y_P8W")
    # One screenful of questions.
    for qa in qas:
        ithref = qa.find("a")
        ittext = qa.find("h2")
        # Show the title and URL of the article being crawled.
        if batchMode == False:
            print('text:'+ittext.text)
            print('href:'+ithref.get('href'))
        mainq = qa.find("div", class_="ClapLv1TextBlock_Chie-TextBlock__3X4V5")
        if mainq.text == "大学受験":
            # Posted only to this category.
            dbFlag = insertQuestion(ithref.get('href'), '', '')
        else:
            # Cross-posted to multiple categories.
            dbFlag = insertQuestion(ithref.get('href'), mainq.text, mainq.get('href'))
        if dbFlag == False:
            series = series + 1
        else:
            series = 0
        time.sleep(1)
        # Without --all, stop after ten already-known questions in a row.
        if allMode == False and series == 10:
            breakFlag = True
            break
    if breakFlag == True:
        break
    # Find the "next page" link.
    anchor = soup.find("a", class_="ClapLv1Pagination_Chie-Pagination__Anchor--Next__3keHe")
    # No link means the last page was reached.
    if anchor == None:
        break
    url = anchor.get("href")
    time.sleep(10)
    # Next question list to crawl.
    if batchMode == False:
        print('anchor:'+url)
    else:
        syslog.syslog(url)
    resp = urllib.request.urlopen('https://chiebukuro.yahoo.co.jp' + url)
# NOTE(review): the tag is "begin" again here — probably meant "end"; confirm.
if batchMode == True:
    outputCount("begin")
|
<filename>chiebukuro2.py
#!/usr/bin/python3
# coding=utf-8
import time
import urllib.request
from bs4 import BeautifulSoup
import sys
from pymongo import MongoClient
import re
import datetime
import json
import syslog
# 指定した記事を開き,取り消し済みならNoneを返す
def urlopen(url):
    """Open the question page at *url* and return the HTTP response.

    Returns None when the request fails with an HTTPError: a 404 means the
    question was withdrawn, and no other HTTP error yields a usable body
    either. Non-HTTP failures (e.g. network errors) propagate.
    """
    try:
        return urllib.request.urlopen(url)
    except urllib.error.HTTPError:
        # 404 => withdrawn question; every other HTTP error also has no
        # readable body, so all of them collapse to None.
        return None
# Fetch the page at the given URL and add its data to MongoDB.
# True: the DB content was changed
# False: the DB content is unchanged
def insertQuestion(url, main, mainlink):
    """Crawl one question page and insert it into the local `qa` collection.

    *main*/*mainlink* carry the extra category name and link when the
    question was cross-posted to several categories (empty strings when it
    belongs only to the crawled category). Returns True only when a new
    document was inserted.
    """
    # Skip URLs that are already registered.
    client = MongoClient('mongodb://localhost:27017')
    db = client.local
    res = db.qa.find({"url":url})
    if res.count() != 0:
        return False
    # Skip withdrawn questions.
    # NOTE(review): this is a chained comparison, equivalent to
    # ("cancel" not in res) and (res == True); a cursor never equals True,
    # so this branch can never fire — confirm the intended check.
    if "cancel" not in res == True:
        return False
    # Issue the page request.
    resp = urlopen(url)
    # If it was withdrawn before we ever fetched it, do nothing.
    if resp == None:
        return False
    src = resp.read()
    soup = BeautifulSoup(src, 'lxml')
    # Question block.
    usrQ = soup.find("div", class_="usrQstn")
    # Poster information (older markup kept for reference).
    # usrInfo = usrQ.find("div", class_="usrInfo")
    # author = usrInfo.find("p", class_="usrNm")
    author = soup.find("div", class_="ClapLv1UserInfo_Chie-UserInfo__UserName__1bJYU")
    # posttime = usrInfo.find("p", class_="upDt")
    posttime = soup.find("p", class_="ClapLv1UserInfo_Chie-UserInfo__Date__2F1LF")
    # The element is "<p>date<span>time</span></p>"; split out date and time.
    # time = posttime.span.string
    daytime = posttime.text.split(' ')
    time = daytime[1]  # NOTE: shadows the imported `time` module inside this function
    day = daytime[0]
    arr = day.split('/')
    # Zero-pad single-digit month/day so dates sort lexicographically.
    if (len(arr[1]) == 1):
        arr[1] = '0' + arr[1]
    if (len(arr[2]) == 1):
        arr[2] = '0' + arr[2]
    day = '/'.join(arr)
    # Question body.
    # question = usrQ.find("div", class_="ptsQes")
    question = soup.find("p", class_="yjDirectSLinkTarget ClapLv1TextBlock_Chie-TextBlock__Text__1jsQC ClapLv1TextBlock_Chie-TextBlock__Text--medium__3nLu4 ClapLv1TextBlock_Chie-TextBlock__Text--SpaceOut__3kF8R ClapLv1TextBlock_Chie-TextBlock__Text--preLine__2SRma")
    # Strip the pile of tabs surrounding the question text.
    # (A one-line question yields one element, a multi-line one yields two.)
    #car = re.sub('^s+$', '', re.sub(r'^\s+', '', qbody[0].text))
    car = re.sub('^s+$', '', re.sub(r'^\s+', '', question.text))
    #if (len(qbody) == 2):
    #    cdr = re.sub(r'\s+$', '', re.sub(r'^\s+', '', qbody[1].text))
    # Supplement ("hosoku") text, if any.
    qsup = question.find("p", class_="queTxtsup")
    if (qsup != None):
        sup = qsup.text
    else:
        sup = ''
    # Thank-you coins offered for an answer.
    thxpt = question.find("cc", class_="ClapLv2QuestionItem_Chie-QuestionItem__SubChieCoin__2akxj")
    if (thxpt != None):
        point = thxpt.text
    else:
        point = 0
    # Crawl timestamp.
    now = datetime.datetime.now()
    # Connect to MongoDB.
    client = MongoClient('mongodb://localhost:27017')
    db = client.local
    # Document to insert.
    data = {
        # 'author': author.text,
        'url' : url,
        'getdate': now.strftime("%Y/%m/%d %H:%M:%S"),
        'postdate': day + ' ' + time,
        'main': main,
        'mainlink': mainlink,
        'body': car,
        'point': point,
        'sup': sup
    }
    db.qa.insert_one(data)
    client.close()
    return True
# Write the stored-question count to syslog.
def outputCount(msg):
    """Log the current number of documents in `qa` to syslog, tagged *msg*."""
    mongo = MongoClient('mongodb://localhost:27017')
    total = mongo.local.qa.count()
    syslog.openlog("chiebukuro")
    syslog.syslog(msg + ":" + str(total))
    mongo.close()
# Top page of the Chiebukuro "university entrance exam" category.
url = 'https://chiebukuro.yahoo.co.jp/dir/list.php?did=2079405665&flg=3&type=list&sort=2'
# Fetch the top page.
resp = urllib.request.urlopen(url)
# Command-line options.
batchMode = False  # --batch: report via syslog instead of stdout
allMode = False    # --all: crawl everything, do not stop at known questions
for i in range(1, len(sys.argv)):
    # Batch mode
    if sys.argv[i] == "--batch":
        batchMode = True
    if sys.argv[i] == "--all":
        allMode = True
if batchMode == True:
    outputCount("begin")
breakFlag = False
series = 0  # consecutive questions already present in the DB
# Break out once the last page has been crawled.
while True:
    src = resp.read()
    soup = BeautifulSoup(src, 'lxml')
    qalst = soup.find("div", id="qa_lst")
    qas = qalst.find_all("div", class_="ClapLv3List_Chie-List__ListItem__y_P8W")
    # One screenful of questions.
    for qa in qas:
        ithref = qa.find("a")
        ittext = qa.find("h2")
        # Show the title and URL of the article being crawled.
        if batchMode == False:
            print('text:'+ittext.text)
            print('href:'+ithref.get('href'))
        mainq = qa.find("div", class_="ClapLv1TextBlock_Chie-TextBlock__3X4V5")
        if mainq.text == "大学受験":
            # Posted only to this category.
            dbFlag = insertQuestion(ithref.get('href'), '', '')
        else:
            # Cross-posted to multiple categories.
            dbFlag = insertQuestion(ithref.get('href'), mainq.text, mainq.get('href'))
        if dbFlag == False:
            series = series + 1
        else:
            series = 0
        time.sleep(1)
        # Without --all, stop after ten already-known questions in a row.
        if allMode == False and series == 10:
            breakFlag = True
            break
    if breakFlag == True:
        break
    # Find the "next page" link.
    anchor = soup.find("a", class_="ClapLv1Pagination_Chie-Pagination__Anchor--Next__3keHe")
    # No link means the last page was reached.
    if anchor == None:
        break
    url = anchor.get("href")
    time.sleep(10)
    # Next question list to crawl.
    if batchMode == False:
        print('anchor:'+url)
    else:
        syslog.syslog(url)
    resp = urllib.request.urlopen('https://chiebukuro.yahoo.co.jp' + url)
# NOTE(review): the tag is "begin" again here — probably meant "end"; confirm.
if batchMode == True:
    outputCount("begin")
|
ja
| 0.977081
|
#!/usr/bin/python3 # coding=utf-8 # 指定した記事を開き,取り消し済みならNoneを返す # これは取り消し済 # どの道中身は返せない # 指定したURLのページを情報を取得しMongoDBに追加する # True: DBの内容を変更した # False: DBの内容はそのまま # 登録済URLなら以下の処理は実施しない # 取り消し済みなら何もしない # 検索クエリを発行 # 最初から取り消ししてあれば何もしない # 質問 # 質問者情報 # usrInfo = usrQ.find("div", class_="usrInfo") # author = usrInfo.find("p", class_="usrNm") # posttime = usrInfo.find("p", class_="upDt") # <p>日付<span>時間</span></p>から時間を取得後,時間を除去 # time = posttime.span.string # 投稿 # question = usrQ.find("div", class_="ptsQes") # 質問の前後にタブがいっぱい入るので除去 # 一行しかない質問なら配列の長さは1 # 複数行あれば配列の長さは2 #car = re.sub('^s+$', '', re.sub(r'^\s+', '', qbody[0].text)) #if (len(qbody) == 2): # cdr = re.sub(r'\s+$', '', re.sub(r'^\s+', '', qbody[1].text)) # 補足 # お礼 # 現在時刻 # MongoDBに接続 # mongoへの書き込み # 'author': author.text, # syslogに質問件数を書き込む # 知恵袋・大学入試カテゴリのトップページ # トップページの情報を取得 # オプション # バッチモード # 最後までクロールしたらbreakする # 一画面分 # クロール中の記事のタイトルとURLの表示 # 当該カテゴリのみへの投稿 # 複数カテゴリへの投稿 # 次へのリンクを探す # なければ終了(最後までクロールした) # 次のクロールする質問リスト
| 2.706044
| 3
|
python/blocked_sampling/dataset/helper_compute_area/SRLutils.py
|
goldleaf3i/generativeCMLgraphs
| 0
|
6627716
|
<reponame>goldleaf3i/generativeCMLgraphs
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
import matplotlib.colors as pltcol
import matplotlib.cbook as cbook
import numpy as np
import math
import cmath
import glob
from myDictionaries import *
from xml.dom.minidom import parse
import numpy.random as rnd
from matplotlib.patches import Ellipse
import sys
sys.path.insert(0,'../..')
from utils import *
# Silence noisy third-party (scikit) warnings globally.
import warnings
warnings.filterwarnings("ignore")
def print_matrix(M) :
    """Print matrix M row by row, entries comma-separated with 3 decimals.

    Expects a numpy-like 2-D array (uses M.shape and row iteration).
    Python 2 only (print statement, xrange).
    """
    [r,c] = M.shape
    for i in xrange(r) :
        line = str()
        for j in M[i]:
            line += "%.3f"%j + ', '
            #line+= str("{0:2f}".format(M[i,j]))+' ,'
        # Drop the trailing ", " before printing.
        line = line[:-2]
        print line
def label_to_number(label):
    """Encode a space letter as a class number.

    'R' (room) and 'E' map to 0, 'C' (corridor) maps to 1, anything else
    to -1.
    """
    if label in ('R', 'E'):
        return 0
    if label == 'C':
        return 1
    return -1
def number_to_label(label):
    """Decode a class number: 0 -> 'R', 1 -> 'C'; any other value -> -1."""
    if label == 0:
        return 'R'
    return 'C' if label == 1 else -1
def building_to_number(building):
    """Encode a building type: school -> 0, office -> 1, fake -> 2, else -1."""
    for number, name in enumerate(('school', 'office', 'fake')):
        if building == name:
            return number
    return -1
def number_to_building(building):
    """Decode a building-type number back to its name.

    0 -> 'school', 1 -> 'office', 2 -> 'fake'; any other value -> -1.

    Bug fix: the original compared the undefined name ``label`` instead of
    the ``building`` parameter, so every call raised NameError.
    """
    if building == 0:
        return 'school'
    elif building == 1:
        return 'office'
    elif building == 2:
        return 'fake'
    else:
        return -1
def get_label_dict(buildingtype = 'school'):
    """Parse <buildingtype>.xml and return a legend keyed by label name.

    Each value bundles the label letter, its plot colour, the Matlab-side
    numeric id, a running counter and the derived R/C, function and R/C/O
    classifications.
    """
    doc = parse(buildingtype + '.xml')
    letters = {}
    for idx, node in enumerate(doc.getElementsByTagName("label"), start=1):
        def _text(tag):
            # First text child of the first <tag> element under this label.
            return node.getElementsByTagName(tag)[0].childNodes[0].nodeValue
        name = _text("name")
        letter = _text("letter")
        function = _text("function")
        RC = _text("type")
        letters[name] = {
            'letter' : letter,
            'color': Java2012_colorDict[letter],
            'number' : labels_java2012toMatlab_Dict[letter],
            'counter' : idx,
            'RC' : RC if RC != u'E' else u'R',
            'function' : function if function != u'F' else u'R',
            'RCO' : function if function == u'F' or function == u'C' else 'O',
            'namekey' : name
        }
    return letters
def get_features(dataset_name = 'school') :
    """Parse every floor XML under ClassifierInputs/XMLs/<dataset_name>/ and
    return {floor_id: {space_id: feature_dict}} (Python 2 module).

    Each space dict carries its label, geometric features (area, perimeter,
    a/p, adcs, ff, circularity, ...), the building type, the ids of the
    connected spaces and, finally, the labels of its neighbours ('neigh').
    """
    counter = 0
    space_labels = {}
    labels = []
    portal_tuple = []
    buildings_dict = dict()
    for xml_file in glob.glob('ClassifierInputs/XMLs/'+dataset_name+'/*.xml'):
        # NOTE(review): this guard prints for every file after the first and
        # looks like a leftover debug switch (see the commented #break).
        if counter != 0 :
            print "Start parsing files."
            #break
        else :
            counter +=1
        print "#"*50
        print xml_file
        xml_name = xml_file[6:]
        print xml_name
        tree = ET.parse(xml_file)
        root = tree.getroot()
        # assuming the root element is always <building>
        floor_id = root.attrib.get('id')
        # buildings_dict[floor_id] = []
        floor = root.find('floor')
        spaces = floor.find('spaces')
        pixels = int(root.find('scale').find('represented_distance').find('value').text)
        portals = root.find('portal')
        labels = list(set(labels))
        rooms = dict()
        for space in spaces.iter('space'):
            space_labels[space.get('id')] = space.find('labels').find('label').text
            # buildings_dict['floor_id'].append(space.get('id'))
            space_dict = dict()
            # space_dict['floor'] = floor_id
            space_dict['label'] = space.find('labels').find('label').text
            space_dict['connections'] = []
            labels.append(space.find('labels').find('label').text)
            portals = space.find('portals')
            # append the geometric features
            features_xml = space.find('features')
            area = features_xml.find('area').get('value')
            space_dict['area'] = area
            perimeter = features_xml.find('perimeter').get('value')
            space_dict['perimeter'] = perimeter
            aoverp = features_xml.find('aoverp').get('value')
            space_dict['aoverp'] = aoverp
            adcs = features_xml.find('adcs').get('value')
            space_dict['adcs'] = adcs
            ff = features_xml.find('ff').get('value')
            space_dict['ff'] = ff
            circularity = features_xml.find('circularity').get('value')
            space_dict['circularity'] = circularity
            normalcirc = features_xml.find('normalcirc').get('value')
            space_dict['normalcirc'] = normalcirc
            andcs = features_xml.find('andcs').get('value')
            space_dict['andcs'] = andcs
            # Building type
            space_dict['building'] = dataset_name
            # Record connections; each portal's <target> lists two space ids.
            for portal in portals.iter('portal') :
                tmp = tuple([i.text for i in portal.find('target').findall('id')])
                if tmp[1] != space.get('id') :
                    space_dict['connections'].append(tmp[1])
                elif tmp[0] != space.get('id') :
                    space_dict['connections'].append(tmp[0])
                else :
                    print 'error!'
                    exit()
                if not ((tmp[0],tmp[1]) in portal_tuple or (tmp[1],tmp[0]) in portal_tuple) :
                    portal_tuple.append(tmp)
            rooms[space.get('id')] = space_dict
        # Second pass: resolve connection ids to neighbour labels.
        for i in rooms.keys() :
            neigh_labels = []
            for j in rooms[i]['connections'] :
                neigh_labels.append(rooms[j]['label'])
            rooms[i]['neigh'] = neigh_labels
        buildings_dict[floor_id] = rooms
    return buildings_dict
def get_labels_reverse_dict(legend, field, value):
    """Given the full legend, return the slice of keys whose entry matches.

    A key is kept when legend[key][field] == value.
    """
    return [key for key, entry in legend.items() if entry[field] == value]
|
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
import matplotlib.colors as pltcol
import matplotlib.cbook as cbook
import numpy as np
import math
import cmath
import glob
from myDictionaries import *
from xml.dom.minidom import parse
import numpy.random as rnd
from matplotlib.patches import Ellipse
import sys
sys.path.insert(0,'../..')
from utils import *
# Silence noisy third-party (scikit) warnings globally.
import warnings
warnings.filterwarnings("ignore")
def print_matrix(M) :
    """Print matrix M row by row, entries comma-separated with 3 decimals.

    Expects a numpy-like 2-D array (uses M.shape and row iteration).
    Python 2 only (print statement, xrange).
    """
    [r,c] = M.shape
    for i in xrange(r) :
        line = str()
        for j in M[i]:
            line += "%.3f"%j + ', '
            #line+= str("{0:2f}".format(M[i,j]))+' ,'
        # Drop the trailing ", " before printing.
        line = line[:-2]
        print line
def label_to_number(label):
    """Encode a space letter as a class number.

    'R' (room) and 'E' map to 0, 'C' (corridor) maps to 1, anything else
    to -1.
    """
    if label in ('R', 'E'):
        return 0
    if label == 'C':
        return 1
    return -1
def number_to_label(label):
    """Decode a class number: 0 -> 'R', 1 -> 'C'; any other value -> -1."""
    if label == 0:
        return 'R'
    return 'C' if label == 1 else -1
def building_to_number(building):
    """Encode a building type: school -> 0, office -> 1, fake -> 2, else -1."""
    for number, name in enumerate(('school', 'office', 'fake')):
        if building == name:
            return number
    return -1
def number_to_building(building):
    """Decode a building-type number back to its name.

    0 -> 'school', 1 -> 'office', 2 -> 'fake'; any other value -> -1.

    Bug fix: the original compared the undefined name ``label`` instead of
    the ``building`` parameter, so every call raised NameError.
    """
    if building == 0:
        return 'school'
    elif building == 1:
        return 'office'
    elif building == 2:
        return 'fake'
    else:
        return -1
def get_label_dict(buildingtype = 'school'):
    """Parse <buildingtype>.xml and return a legend keyed by label name.

    Each value bundles the label letter, its plot colour, the Matlab-side
    numeric id, a running counter and the derived R/C, function and R/C/O
    classifications.
    """
    doc = parse(buildingtype + '.xml')
    letters = {}
    for idx, node in enumerate(doc.getElementsByTagName("label"), start=1):
        def _text(tag):
            # First text child of the first <tag> element under this label.
            return node.getElementsByTagName(tag)[0].childNodes[0].nodeValue
        name = _text("name")
        letter = _text("letter")
        function = _text("function")
        RC = _text("type")
        letters[name] = {
            'letter' : letter,
            'color': Java2012_colorDict[letter],
            'number' : labels_java2012toMatlab_Dict[letter],
            'counter' : idx,
            'RC' : RC if RC != u'E' else u'R',
            'function' : function if function != u'F' else u'R',
            'RCO' : function if function == u'F' or function == u'C' else 'O',
            'namekey' : name
        }
    return letters
def get_features(dataset_name = 'school') :
    """Parse every floor XML under ClassifierInputs/XMLs/<dataset_name>/ and
    return {floor_id: {space_id: feature_dict}} (Python 2 module).

    Each space dict carries its label, geometric features (area, perimeter,
    a/p, adcs, ff, circularity, ...), the building type, the ids of the
    connected spaces and, finally, the labels of its neighbours ('neigh').
    """
    counter = 0
    space_labels = {}
    labels = []
    portal_tuple = []
    buildings_dict = dict()
    for xml_file in glob.glob('ClassifierInputs/XMLs/'+dataset_name+'/*.xml'):
        # NOTE(review): this guard prints for every file after the first and
        # looks like a leftover debug switch (see the commented #break).
        if counter != 0 :
            print "Start parsing files."
            #break
        else :
            counter +=1
        print "#"*50
        print xml_file
        xml_name = xml_file[6:]
        print xml_name
        tree = ET.parse(xml_file)
        root = tree.getroot()
        # assuming the root element is always <building>
        floor_id = root.attrib.get('id')
        # buildings_dict[floor_id] = []
        floor = root.find('floor')
        spaces = floor.find('spaces')
        pixels = int(root.find('scale').find('represented_distance').find('value').text)
        portals = root.find('portal')
        labels = list(set(labels))
        rooms = dict()
        for space in spaces.iter('space'):
            space_labels[space.get('id')] = space.find('labels').find('label').text
            # buildings_dict['floor_id'].append(space.get('id'))
            space_dict = dict()
            # space_dict['floor'] = floor_id
            space_dict['label'] = space.find('labels').find('label').text
            space_dict['connections'] = []
            labels.append(space.find('labels').find('label').text)
            portals = space.find('portals')
            # append the geometric features
            features_xml = space.find('features')
            area = features_xml.find('area').get('value')
            space_dict['area'] = area
            perimeter = features_xml.find('perimeter').get('value')
            space_dict['perimeter'] = perimeter
            aoverp = features_xml.find('aoverp').get('value')
            space_dict['aoverp'] = aoverp
            adcs = features_xml.find('adcs').get('value')
            space_dict['adcs'] = adcs
            ff = features_xml.find('ff').get('value')
            space_dict['ff'] = ff
            circularity = features_xml.find('circularity').get('value')
            space_dict['circularity'] = circularity
            normalcirc = features_xml.find('normalcirc').get('value')
            space_dict['normalcirc'] = normalcirc
            andcs = features_xml.find('andcs').get('value')
            space_dict['andcs'] = andcs
            # Building type
            space_dict['building'] = dataset_name
            # Record connections; each portal's <target> lists two space ids.
            for portal in portals.iter('portal') :
                tmp = tuple([i.text for i in portal.find('target').findall('id')])
                if tmp[1] != space.get('id') :
                    space_dict['connections'].append(tmp[1])
                elif tmp[0] != space.get('id') :
                    space_dict['connections'].append(tmp[0])
                else :
                    print 'error!'
                    exit()
                if not ((tmp[0],tmp[1]) in portal_tuple or (tmp[1],tmp[0]) in portal_tuple) :
                    portal_tuple.append(tmp)
            rooms[space.get('id')] = space_dict
        # Second pass: resolve connection ids to neighbour labels.
        for i in rooms.keys() :
            neigh_labels = []
            for j in rooms[i]['connections'] :
                neigh_labels.append(rooms[j]['label'])
            rooms[i]['neigh'] = neigh_labels
        buildings_dict[floor_id] = rooms
    return buildings_dict
def get_labels_reverse_dict(legend, field, value):
    """Given the full legend, return the slice of keys whose entry matches.

    A key is kept when legend[key][field] == value.
    """
    return [key for key, entry in legend.items() if entry[field] == value]
|
it
| 0.295103
|
# STUPIDO SCIKIT #line+= str("{0:2f}".format(M[i,j]))+' ,' #'RC' : labels_RC_java2012[letter], #'ellipse' : Ellipse(xy=[0.7,counter*0.7], width=0.6, height=0.6,angle=0), #break # assumendo che la root sia sempre <building> # buildings_dict[floor_id] = [] # buildings_dict['floor_id'].append(space.get('id')) # space_dict['floor'] = floor_id # append features # Bulding type # mando la legenda di tutti e restituisco solo una slice con chiave
| 2.861623
| 3
|
groups/arm/ggd/arm.py
|
awslabs/aws-greengrass-mini-fulfillment
| 25
|
6627717
|
#!/usr/bin/env python
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License is
# located at
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Greengrass Arm device
This Greengrass device controls the mini-fulfillment center 3D printed robotic
arm. It accomplishes this using two threads: one thread to control and report
upon the arm's movement through `stages` and a separate thread to read and
report upon each of the arm's servo telemetry.
The control stages that the arm device will execute in order, are:
* `home` - the arm is in or has returned to the ready position
* `find` - the arm is actively using the end-effector camera to find objects of
the correct size
* `pick` - the arm has found an object and will attempt to pick-up that object
* `sort` - the arm has grabbed an object and will place that object at the sort
position
To act in a coordinated fashion with the other Group's in the
miniature fulfillment center, this device also subscribes to device shadow in
the Master Greengrass Group. The commands that are understood from the master
shadow are:
* `run` - the arm will start executing the stages in order
* `stop` - the arm will cease operation and go to the stop position
This device expects to be launched form a command line. To learn more about that
command line type: `python arm.py --help`
"""
import os
import json
import time
import requests
import logging
import argparse
import datetime
import threading
import collections
from cachetools import TTLCache
from requests import ConnectionError
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient, AWSIoTMQTTShadowClient
from AWSIoTPythonSDK.core.greengrass.discovery.providers import \
DiscoveryInfoProvider
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient, DROP_OLDEST
import utils
from . import arm_servo_ids
from gg_group_setup import GroupConfigFile
from stages import ArmStages, NO_BOX_FOUND
from servo.servode import Servo, ServoProtocol, ServoGroup
# Directory containing this module.
dir_path = os.path.dirname(os.path.realpath(__file__))
# Module logger: "timestamp|name|LEVEL: message" to stderr, INFO level.
log = logging.getLogger('arm')
handler = logging.StreamHandler()
formatter = logging.Formatter(
    '%(asctime)s|%(name)-8s|%(levelname)s: %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
log.setLevel(logging.INFO)
# Master-shadow commands this device understands.
commands = ['run', 'stop']
# Global run flag polled by both worker threads' run() loops.
should_loop = True
# This device's thing name; overwritten by initialize() from the config file.
ggd_name = 'Empty'
# Shared event: set on 'run', cleared on 'stop' (see ArmControlThread).
cmd_event = threading.Event()
cmd_event.clear()
# Per-servo telemetry caches (2 min TTL). emergency_stop_arm() reads their
# 'present_position' entries; the code that populates them is not visible in
# this excerpt — presumably telemetry handling. TODO confirm.
base_servo_cache = TTLCache(maxsize=32, ttl=120)
femur01_servo_cache = TTLCache(maxsize=32, ttl=120)
femur02_servo_cache = TTLCache(maxsize=32, ttl=120)
tibia_servo_cache = TTLCache(maxsize=32, ttl=120)
eff_servo_cache = TTLCache(maxsize=32, ttl=120)
def shadow_mgr(payload, status, token):
    """Generic shadow callback used by initialize(): log payload and token."""
    normalized = json.dumps(json.loads(payload), sort_keys=True)
    log.info("[shadow_mgr] shadow payload:{0} token:{1}".format(
        normalized, token))
def initialize(device_name, config_file, root_ca, certificate, private_key,
               group_ca_path):
    """Discover both Greengrass Groups this device belongs to and connect.

    Side effect: sets the module-level ``ggd_name`` from the config file.

    :returns: tuple (local_mqttc, remote_mqttc, master_shadow) — a connected
        MQTT client for local telemetry, the MQTT connection behind the
        Master shadow client, and the Master shadow handler for commands.
    :raises EnvironmentError: when discovery or either connection fails.
    """
    global ggd_name
    cfg = GroupConfigFile(config_file)
    local = dict()
    remote = dict()
    # determine heartbeat device's thing name and endpoint for MQTT clients
    ggd_name = cfg['devices'][device_name]['thing_name']
    iot_endpoint = cfg['misc']['iot_endpoint']
    # Discover Greengrass Core
    dip = DiscoveryInfoProvider()
    dip.configureEndpoint(iot_endpoint)
    dip.configureCredentials(
        caPath=root_ca, certPath=certificate, keyPath=private_key
    )
    dip.configureTimeout(10)  # 10 sec
    log.info("Discovery using CA: {0} certificate: {1} prv_key: {2}".format(
        root_ca, certificate, private_key
    ))
    # Now discover the groups in which this device is a member.
    # The arm should only be in two groups. The local and master groups.
    discovered, discovery_info = utils.ggc_discovery(
        ggd_name, dip, retry_count=10, max_groups=2
    )
    # Each group returned has a groupId which can compare to the configured
    # groupId in the config file. If the IDs match, the 'local' Group has been
    # found and therefore local core.
    # If the groupId's do not match, the 'remote' or 'master' group has been
    # found.
    group_list = discovery_info.getAllGroups()
    for g in group_list:
        logging.info("[initialize] group_id:{0}".format(g.groupId))
        if g.groupId == cfg['group']['id']:
            local_cores = g.coreConnectivityInfoList
            local['core'] = local_cores[0]  # just grab first core as local
            local['ca'] = g.caList
        else:
            remote_cores = g.coreConnectivityInfoList
            remote['core'] = remote_cores[0]  # just grab first core as remote
            remote['ca'] = g.caList
    if len(local) > 1 and len(remote) > 1:
        logging.info("[initialize] local_core:{0} remote_core:{1}".format(
            local, remote
        ))
    else:
        raise EnvironmentError("Couldn't find the arm's Cores.")
    # just save one of the group's CAs to use as a CA file later
    local_core_ca_file = utils.save_group_ca(
        local['ca'][0], group_ca_path, local['core'].groupId
    )
    remote_core_ca_file = utils.save_group_ca(
        remote['ca'][0], group_ca_path, remote['core'].groupId
    )
    # Greengrass Cores discovered, now connect to Cores from this Device
    # get a client to send telemetry
    local_mqttc = AWSIoTMQTTClient(ggd_name)
    log.info("[initialize] local gca_file:{0} cert:{1}".format(
        local_core_ca_file, certificate))
    local_mqttc.configureCredentials(
        local_core_ca_file, private_key, certificate
    )
    local_mqttc.configureOfflinePublishQueueing(10, DROP_OLDEST)
    if not utils.mqtt_connect(mqtt_client=local_mqttc, core_info=local['core']):
        raise EnvironmentError("Connection to GG Core MQTT failed.")
    # get a shadow client to receive commands
    master_shadow_client = AWSIoTMQTTShadowClient(ggd_name)
    # NOTE(review): the line below logs the *local* CA file while labelling
    # it "remote" — the credentials configured next use remote_core_ca_file.
    log.info("[initialize] remote ca_file:{0} cert:{1}".format(
        local_core_ca_file, certificate))
    remote_mqttc = master_shadow_client.getMQTTConnection()
    remote_mqttc.configureCredentials(
        remote_core_ca_file, private_key, certificate
    )
    if not utils.mqtt_connect(mqtt_client=master_shadow_client,
                              core_info=remote['core']):
        raise EnvironmentError("Connection to Master Shadow failed.")
    # create and register the shadow handler on delta topics for commands
    # with a persistent connection to the Master shadow
    master_shadow = master_shadow_client.createShadowHandlerWithName(
        cfg['misc']['master_shadow_name'], True)
    log.info("[initialize] created handler for shadow name: {0}".format(
        cfg['misc']['master_shadow_name']
    ))
    token = master_shadow.shadowGet(shadow_mgr, 5)
    log.info("[initialize] shadowGet() tk:{0}".format(token))
    return local_mqttc, remote_mqttc, master_shadow
def _stage_message(stage, text='', stage_result=None):
    """Serialize a stage begin/end notification to JSON.

    :param stage: stage name ('home', 'find', 'pick' or 'sort')
    :param text: free-form qualifier, typically 'begin' or 'end'
    :param stage_result: the value the stage returned, if any
    """
    message = {
        "stage": stage,
        "addl_text": text,
        "stage_result": stage_result,
        "ts": datetime.datetime.now().isoformat(),
        "ggd_id": ggd_name
    }
    return json.dumps(message)
def _arm_message(servo_group):
    """Build the telemetry payload for every servo in *servo_group*.

    Samples a fixed set of registers per servo and wraps the readings,
    each timestamped, in the versioned message the telemetry thread
    publishes.
    """
    sampled_registers = (
        "present_speed", "present_position", "present_load",
        "goal_position", "moving", "present_temperature", "torque_limit",
    )
    readings = []
    for key in servo_group:
        log.debug("[_arm_message] servo:{0}".format(key))
        servo = servo_group[key]
        entry = {
            "sensor_id": "arm_servo_id_{0:02d}".format(servo.servo_id),
            "ts": datetime.datetime.now().isoformat(),
        }
        for register in sampled_registers:
            entry[register] = servo[register]
        readings.append(entry)
    return {
        "version": "2017-06-08",
        "data": readings,
        "ggad_id": ggd_name
    }
class ArmControlThread(threading.Thread):
"""
Thread that controls interaction with the Servos through assorted stages.
"""
# TODO move control into Lambda pending being able to access serial port
def __init__(self, servo_group, event, stage_topic, mqtt_client,
master_shadow, args=(), kwargs={}):
super(ArmControlThread, self).__init__(
name="arm_control_thread", args=args, kwargs=kwargs
)
self.sg = servo_group
log.debug("[act.__init__] servo_group:{0}".format(self.sg))
self.cmd_event = event
self.active_state = 'initialized'
self.last_state = 'initialized'
self.control_stages = collections.OrderedDict()
self.control_stages['home'] = self.home
self.control_stages['find'] = self.find
self.control_stages['pick'] = self.pick
self.control_stages['sort'] = self.sort
self.stage_topic = stage_topic
self.mqtt_client = mqtt_client
self.master_shadow = master_shadow
self.found_box = None
self.master_shadow.shadowRegisterDeltaCallback(self.shadow_mgr)
log.debug("[arm.__init__] shadowRegisterDeltaCallback()")
def _activate_command(self, cmd):
"""Use the shared `threading.Event` instance to signal a mini
fulfillment shadow command to the running Control thread.
"""
self.last_state = self.active_state
self.active_state = cmd
log.info("[arm._activate_command] last_state='{0}' state='{1}'".format(
self.last_state, cmd))
if self.active_state == 'run':
log.info("[arm._activate_command] START RUN")
self.cmd_event.set()
elif self.active_state == 'stop':
log.info("[arm._activate_command] STOP")
self.cmd_event.clear()
return
def shadow_mgr(self, payload, status, token):
"""
Process mini fulfillment shadow commands from the Master shadow.
:param payload: the shadow payload to process
:param status: the accepted, rejected, or delta status of the invocation
:param token: the token used for tracing this shadow request
:return:
"""
log.debug("[arm.shadow_mgr] shadow payload:{0}".format(
json.dumps(json.loads(payload), sort_keys=True)))
if payload == "REQUEST TIME OUT":
log.error(
"[arm.shadow_mgr] shadow 'REQUEST TIME OUT' tk:{0}".format(
token))
return
shady_vals = json.loads(payload)
if 'sort_arm_cmd' in shady_vals['state']:
cmd = shady_vals['state']['sort_arm_cmd']
if cmd in commands:
self._activate_command(cmd)
# acknowledge the desired state is now reported
self.master_shadow.shadowUpdate(json.dumps({
"state": {
"reported": {
"sort_arm_cmd": cmd}
}
}), self.shadow_mgr, 5)
else:
log.warning(
"[arm.shadow_mgr] unknown command:{0}".format(cmd))
def home(self):
log.debug("[act.home] [begin]")
arm = ArmStages(self.sg)
self.mqtt_client.publish(
self.stage_topic, _stage_message("home", "begin"), 0
)
stage_result = arm.stage_home()
self.mqtt_client.publish(
self.stage_topic, _stage_message("home", "end", stage_result), 0
)
log.debug("[act.home] [end]")
return stage_result
def find(self):
log.debug("[act.find] [begin]")
arm = ArmStages(self.sg)
loop = True
self.found_box = NO_BOX_FOUND
stage_result = NO_BOX_FOUND
self.mqtt_client.publish(
self.stage_topic, _stage_message("find", "begin"), 0
)
while self.cmd_event.is_set() and loop is True:
stage_result = arm.stage_find()
if stage_result['x'] and stage_result['y']: # X & Y start as none
log.info("[act.find] found box:{0}".format(stage_result))
self.found_box = stage_result
log.info("[act.find] self.found_box:{0}".format(
self.found_box))
loop = False
else:
log.info("[act.find] self.found_box:{0}".format(
self.found_box
))
log.info("[act.find] no box:{0}".format(stage_result))
time.sleep(1)
# TODO get image upload working with discovery based interaction
# # upload the image file just before stage complete
# if 'filename' in stage_result:
# filename = stage_result['filename']
#
# url = 'http://' + ggd_config.master_core_ip + ":"
# url = url + str(ggd_config.master_core_port) + "/upload"
# files = {'file': open(filename, 'rb')}
# try:
# log.info('[act.find] POST to URL:{0} file:{1}'.format(
# url, filename))
# r = requests.post(url, files=files)
# log.info("[act.find] POST image file response:{0}".format(
# r.status_code))
# except ConnectionError as ce:
# log.error("[act.find] Upload Image connection error:{0}".format(
# ce
# ))
self.mqtt_client.publish(
self.stage_topic, _stage_message("find", "end", stage_result), 0
)
log.info("[act.find] outside self.found_box:{0}".format(self.found_box))
log.debug("[act.find] [end]")
return stage_result
def pick(self):
log.debug("[act.pick] [begin]")
arm = ArmStages(self.sg)
self.mqtt_client.publish(
self.stage_topic, _stage_message("pick", "begin"), 0
)
pick_box = self.found_box
self.found_box = NO_BOX_FOUND
log.info("[act.pick] pick_box:{0}".format(pick_box))
log.info("[act.pick] self.found_box:{0}".format(self.found_box))
stage_result = arm.stage_pick(previous_results=pick_box,
cartesian=False)
self.mqtt_client.publish(
self.stage_topic, _stage_message("pick", "end", stage_result), 0
)
log.debug("[act.pick] [end]")
return stage_result
def sort(self):
log.debug("[act.sort] [begin]")
arm = ArmStages(self.sg)
self.mqtt_client.publish(
self.stage_topic, _stage_message("sort", "begin"), 0
)
stage_result = arm.stage_sort()
self.mqtt_client.publish(
self.stage_topic, _stage_message("sort", "end", stage_result), 0
)
log.debug("[act.sort] [end]")
return stage_result
def emergency_stop_arm(self):
if self.active_state == 'stopped' or \
self.active_state == 'initialized':
return
if 'present_position' in base_servo_cache:
stop_positions = [
base_servo_cache['present_position'],
femur01_servo_cache['present_position'],
femur02_servo_cache['present_position'],
tibia_servo_cache['present_position'],
eff_servo_cache['present_position']
]
log.info("[emergency_stop_arm] stop_positions:{0}".format(
stop_positions))
self.sg.write_values(
register='goal_position', values=stop_positions)
self.active_state = 'stopped'
log.info("[emergency_stop_arm] active_state:{0}".format(
self.active_state))
else:
log.error("[emergency_stop_arm] no 'present_position' cache value")
    def stop_arm(self):
        """Run the servo stop stage unless already stopped or initialized."""
        # NOTE(review): ArmStages is constructed before the early-return
        # guard below; construction looks side-effect free but confirm
        # against stages.ArmStages before reordering.
        arm = ArmStages(self.sg)
        if self.active_state == 'stopped' or \
                self.active_state == 'initialized':
            return
        arm.stage_stop()
        self.active_state = 'stopped'
        log.info("[stop_arm] active_state:{0}".format(
            self.active_state))
    def run(self):
        """Main control loop: execute each stage in order while running.

        A stage only executes when ``cmd_event`` is set (shadow 'run'
        command); otherwise the arm is stopped via ``stop_arm``. Loops
        until the module-level ``should_loop`` flag goes False.
        """
        while should_loop:
            for stage in self.control_stages:
                if self.cmd_event.is_set():
                    stage_result = self.control_stages[stage]()
                    log.info("[run] stage:'{0}' res:'{1}'".format(
                        stage, stage_result))
                else:
                    # Here is where the Arm will be stopped
                    self.stop_arm()
            # 1/3rd of a second while iterating on control behavior
            time.sleep(0.3)
class ArmTelemetryThread(threading.Thread):
    """
    The thread that sets up telemetry interaction with the Servos.

    Every ``frequency`` seconds it samples the ServoGroup via
    ``_arm_message`` and publishes the JSON payload to ``telemetry_topic``
    until the module-level ``should_loop`` flag goes False.
    """
    def __init__(self, servo_group, frequency, telemetry_topic,
                 mqtt_client, args=(), kwargs=None):
        # `kwargs` previously defaulted to a shared mutable `{}`; use None
        # and substitute a fresh dict so instances cannot share state.
        super(ArmTelemetryThread, self).__init__(
            name="arm_telemetry_thread", args=args,
            kwargs=kwargs if kwargs is not None else {}
        )
        self.sg = servo_group  # ServoGroup sampled each period
        self.frequency = frequency  # seconds between samples
        self.telemetry_topic = telemetry_topic
        self.mqtt_client = mqtt_client
        log.info("[att.__init__] frequency:{0}".format(
            self.frequency))

    def run(self):
        """Publish one telemetry message per sample period until shutdown."""
        while should_loop:
            msg = _arm_message(self.sg)
            self.mqtt_client.publish(self.telemetry_topic, json.dumps(msg), 0)
            time.sleep(self.frequency)  # sample rate
if __name__ == "__main__":
    # CLI: positional credential/config arguments plus optional
    # topic/frequency/debug tuning.
    parser = argparse.ArgumentParser(
        description='Arm control and telemetry',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('device_name',
                        help="The arm's GGD device_name stored in config_file.")
    parser.add_argument('config_file',
                        help="The config file.")
    parser.add_argument('root_ca',
                        help="Root CA File Path of Server Certificate.")
    parser.add_argument('certificate',
                        help="File Path of GGD Certificate.")
    parser.add_argument('private_key',
                        help="File Path of GGD Private Key.")
    parser.add_argument('group_ca_path',
                        help="The directory where the discovered Group CA will "
                             "be saved.")
    parser.add_argument('--stage_topic', default='/arm/stages',
                        help="Topic used to communicate arm stage messages.")
    parser.add_argument('--telemetry_topic', default='/arm/telemetry',
                        help="Topic used to communicate arm telemetry.")
    parser.add_argument('--frequency', default=1.0,
                        dest='frequency', type=float,
                        help="Modify the default telemetry sample frequency.")
    parser.add_argument('--debug', default=False, action='store_true',
                        help="Activate debug output.")
    pa = parser.parse_args()
    if pa.debug:
        log.setLevel(logging.DEBUG)
        logging.getLogger('servode').setLevel(logging.DEBUG)
    # Discover the local and master Greengrass cores and build the MQTT and
    # shadow clients used by the two worker threads.
    local_mqtt, remote_mqtt, m_shadow = initialize(
        pa.device_name, pa.config_file, pa.root_ca, pa.certificate,
        pa.private_key, pa.group_ca_path
    )
    with ServoProtocol() as sp:
        # Ping every servo first so a missing servo fails fast here rather
        # than mid-run.
        for servo_id in arm_servo_ids:
            sp.ping(servo=servo_id)
        sg = ServoGroup()
        sg['base'] = Servo(sp, arm_servo_ids[0], base_servo_cache)
        sg['femur01'] = Servo(sp, arm_servo_ids[1], femur01_servo_cache)
        sg['femur02'] = Servo(sp, arm_servo_ids[2], femur02_servo_cache)
        sg['tibia'] = Servo(sp, arm_servo_ids[3], tibia_servo_cache)
        sg['effector'] = Servo(sp, arm_servo_ids[4], eff_servo_cache)
        # Use same ServoGroup with one read cache because only the telemetry
        # thread reads
        amt = ArmTelemetryThread(
            sg, frequency=pa.frequency, telemetry_topic=pa.telemetry_topic,
            mqtt_client=local_mqtt
        )
        act = ArmControlThread(
            sg, cmd_event, stage_topic=pa.stage_topic,
            mqtt_client=remote_mqtt, master_shadow=m_shadow
        )
        amt.start()
        act.start()
        try:
            # NOTE(review): `start` is never read afterwards — candidate
            # for removal.
            start = datetime.datetime.now()
            while should_loop:
                time.sleep(0.1)
        except KeyboardInterrupt:
            print("[__main__] KeyboardInterrupt ... setting should_loop=False")
            should_loop = False
        amt.join()
        act.join()
        local_mqtt.disconnect()
        remote_mqtt.disconnect()
        time.sleep(2)
|
#!/usr/bin/env python
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License is
# located at
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Greengrass Arm device
This Greengrass device controls the mini-fulfillment center 3D printed robotic
arm. It accomplishes this using two threads: one thread to control and report
upon the arm's movement through `stages` and a separate thread to read and
report upon each of the arm's servo telemetry.
The control stages that the arm device will execute in order, are:
* `home` - the arm is in or has returned to the ready position
* `find` - the arm is actively using the end-effector camera to find objects of
the correct size
* `pick` - the arm has found an object and will attempt to pick-up that object
* `sort` - the arm has grabbed an object and will place that object at the sort
position
To act in a coordinated fashion with the other Group's in the
miniature fulfillment center, this device also subscribes to device shadow in
the Master Greengrass Group. The commands that are understood from the master
shadow are:
* `run` - the arm will start executing the stages in order
* `stop` - the arm will cease operation and go to the stop position
This device expects to be launched from a command line. To learn more about that
command line type: `python arm.py --help`
"""
import os
import json
import time
import requests
import logging
import argparse
import datetime
import threading
import collections
from cachetools import TTLCache
from requests import ConnectionError
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient, AWSIoTMQTTShadowClient
from AWSIoTPythonSDK.core.greengrass.discovery.providers import \
DiscoveryInfoProvider
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient, DROP_OLDEST
import utils
from . import arm_servo_ids
from gg_group_setup import GroupConfigFile
from stages import ArmStages, NO_BOX_FOUND
from servo.servode import Servo, ServoProtocol, ServoGroup
dir_path = os.path.dirname(os.path.realpath(__file__))
# Module-level logger for the arm device; handler/formatter configured once
# at import time.
log = logging.getLogger('arm')
handler = logging.StreamHandler()
formatter = logging.Formatter(
    '%(asctime)s|%(name)-8s|%(levelname)s: %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
log.setLevel(logging.INFO)
# Shadow commands this device understands (see module docstring).
commands = ['run', 'stop']
# Global run flag for both worker threads; flipped False on KeyboardInterrupt.
should_loop = True
# Device thing name placeholder; replaced with the real name in initialize().
ggd_name = 'Empty'
# Event used to signal 'run' (set) / 'stop' (clear) to the control thread.
cmd_event = threading.Event()
cmd_event.clear()
# Per-servo TTL read caches handed to the Servo instances in __main__;
# entries expire after 120 seconds.
base_servo_cache = TTLCache(maxsize=32, ttl=120)
femur01_servo_cache = TTLCache(maxsize=32, ttl=120)
femur02_servo_cache = TTLCache(maxsize=32, ttl=120)
tibia_servo_cache = TTLCache(maxsize=32, ttl=120)
eff_servo_cache = TTLCache(maxsize=32, ttl=120)
def shadow_mgr(payload, status, token):
    """Default shadow callback: log the normalized payload and its token."""
    normalized = json.dumps(json.loads(payload), sort_keys=True)
    log.info("[shadow_mgr] shadow payload:{0} token:{1}".format(
        normalized, token))
def initialize(device_name, config_file, root_ca, certificate, private_key,
               group_ca_path):
    """Discover the local and master Greengrass cores and connect to them.

    Performs Greengrass discovery with the device credentials, identifies
    the 'local' group (matching the configured group id) and the 'remote'
    (master) group, saves their group CAs, and returns connected clients.

    :param device_name: the arm's GGD device name in the config file
    :param config_file: path to the group config file
    :param root_ca: path to the server root CA
    :param certificate: path to the GGD certificate
    :param private_key: path to the GGD private key
    :param group_ca_path: directory in which to save discovered group CAs
    :return: tuple (local MQTT client, remote MQTT client, master shadow)
    :raises EnvironmentError: if discovery or either connection fails
    """
    global ggd_name
    cfg = GroupConfigFile(config_file)
    local = dict()
    remote = dict()
    # determine heartbeat device's thing name and endpoint for MQTT clients
    ggd_name = cfg['devices'][device_name]['thing_name']
    iot_endpoint = cfg['misc']['iot_endpoint']
    # Discover Greengrass Core
    dip = DiscoveryInfoProvider()
    dip.configureEndpoint(iot_endpoint)
    dip.configureCredentials(
        caPath=root_ca, certPath=certificate, keyPath=private_key
    )
    dip.configureTimeout(10)  # 10 sec
    log.info("Discovery using CA: {0} certificate: {1} prv_key: {2}".format(
        root_ca, certificate, private_key
    ))
    # Now discover the groups in which this device is a member.
    # The arm should only be in two groups. The local and master groups.
    discovered, discovery_info = utils.ggc_discovery(
        ggd_name, dip, retry_count=10, max_groups=2
    )
    # Each group returned has a groupId which can compare to the configured
    # groupId in the config file. If the IDs match, the 'local' Group has been
    # found and therefore local core.
    # If the groupId's do not match, the 'remote' or 'master' group has been
    # found.
    group_list = discovery_info.getAllGroups()
    for g in group_list:
        # Fixed: use the module logger 'arm' rather than the root logger.
        log.info("[initialize] group_id:{0}".format(g.groupId))
        if g.groupId == cfg['group']['id']:
            local_cores = g.coreConnectivityInfoList
            local['core'] = local_cores[0]  # just grab first core as local
            local['ca'] = g.caList
        else:
            remote_cores = g.coreConnectivityInfoList
            remote['core'] = remote_cores[0]  # just grab first core as remote
            remote['ca'] = g.caList
    if len(local) > 1 and len(remote) > 1:
        # Fixed: use the module logger 'arm' rather than the root logger.
        log.info("[initialize] local_core:{0} remote_core:{1}".format(
            local, remote
        ))
    else:
        raise EnvironmentError("Couldn't find the arm's Cores.")
    # just save one of the group's CAs to use as a CA file later
    local_core_ca_file = utils.save_group_ca(
        local['ca'][0], group_ca_path, local['core'].groupId
    )
    remote_core_ca_file = utils.save_group_ca(
        remote['ca'][0], group_ca_path, remote['core'].groupId
    )
    # Greengrass Cores discovered, now connect to Cores from this Device
    # get a client to send telemetry
    local_mqttc = AWSIoTMQTTClient(ggd_name)
    log.info("[initialize] local gca_file:{0} cert:{1}".format(
        local_core_ca_file, certificate))
    local_mqttc.configureCredentials(
        local_core_ca_file, private_key, certificate
    )
    local_mqttc.configureOfflinePublishQueueing(10, DROP_OLDEST)
    if not utils.mqtt_connect(mqtt_client=local_mqttc, core_info=local['core']):
        raise EnvironmentError("Connection to GG Core MQTT failed.")
    # get a shadow client to receive commands
    master_shadow_client = AWSIoTMQTTShadowClient(ggd_name)
    # Fixed: log the remote CA file actually used for the remote connection
    # (previously logged local_core_ca_file by mistake).
    log.info("[initialize] remote ca_file:{0} cert:{1}".format(
        remote_core_ca_file, certificate))
    remote_mqttc = master_shadow_client.getMQTTConnection()
    remote_mqttc.configureCredentials(
        remote_core_ca_file, private_key, certificate
    )
    if not utils.mqtt_connect(mqtt_client=master_shadow_client,
                              core_info=remote['core']):
        raise EnvironmentError("Connection to Master Shadow failed.")
    # create and register the shadow handler on delta topics for commands
    # with a persistent connection to the Master shadow
    master_shadow = master_shadow_client.createShadowHandlerWithName(
        cfg['misc']['master_shadow_name'], True)
    log.info("[initialize] created handler for shadow name: {0}".format(
        cfg['misc']['master_shadow_name']
    ))
    token = master_shadow.shadowGet(shadow_mgr, 5)
    log.info("[initialize] shadowGet() tk:{0}".format(token))
    return local_mqttc, remote_mqttc, master_shadow
def _stage_message(stage, text='', stage_result=None):
    """Serialize a stage progress message as a JSON string.

    Includes the stage name, optional free text, the stage result (if any),
    an ISO timestamp, and this device's ggd_name.
    """
    payload = {
        "stage": stage,
        "addl_text": text,
        "stage_result": stage_result,
        "ts": datetime.datetime.now().isoformat(),
        "ggd_id": ggd_name
    }
    return json.dumps(payload)
def _arm_message(servo_group):
    """Build the telemetry message dict covering every servo in the group."""
    data = []
    for servo in servo_group:
        log.debug("[_arm_message] servo:{0}".format(servo))
        s = servo_group[servo]
        data.append({
            "sensor_id": "arm_servo_id_{0:02d}".format(s.servo_id),
            "ts": datetime.datetime.now().isoformat(),
            "present_speed": s['present_speed'],
            "present_position": s['present_position'],
            "present_load": s['present_load'],
            "goal_position": s['goal_position'],
            "moving": s['moving'],
            "present_temperature": s['present_temperature'],
            "torque_limit": s['torque_limit']
        })
    # NOTE(review): "ggad_id" (sic) is kept as-is — downstream consumers
    # may rely on this exact key.
    return {
        "version": "2017-06-08",
        "data": data,
        "ggad_id": ggd_name
    }
class ArmControlThread(threading.Thread):
    """
    Thread that controls interaction with the Servos through assorted stages.

    Cycles through the ordered stages 'home' -> 'find' -> 'pick' -> 'sort'
    while the shared command event is set. 'run'/'stop' commands arrive as
    delta callbacks from the Master group's shadow.
    """
    # TODO move control into Lambda pending being able to access serial port
    def __init__(self, servo_group, event, stage_topic, mqtt_client,
                 master_shadow, args=(), kwargs=None):
        # `kwargs` previously defaulted to a shared mutable `{}`; use None
        # and substitute a fresh dict so instances cannot share state.
        super(ArmControlThread, self).__init__(
            name="arm_control_thread", args=args,
            kwargs=kwargs if kwargs is not None else {}
        )
        self.sg = servo_group
        log.debug("[act.__init__] servo_group:{0}".format(self.sg))
        self.cmd_event = event
        self.active_state = 'initialized'
        self.last_state = 'initialized'
        # Ordered stages executed by run(); insertion order is the
        # execution order.
        self.control_stages = collections.OrderedDict()
        self.control_stages['home'] = self.home
        self.control_stages['find'] = self.find
        self.control_stages['pick'] = self.pick
        self.control_stages['sort'] = self.sort
        self.stage_topic = stage_topic
        self.mqtt_client = mqtt_client
        self.master_shadow = master_shadow
        self.found_box = None
        self.master_shadow.shadowRegisterDeltaCallback(self.shadow_mgr)
        log.debug("[arm.__init__] shadowRegisterDeltaCallback()")

    def _activate_command(self, cmd):
        """Use the shared `threading.Event` instance to signal a mini
        fulfillment shadow command to the running Control thread.
        """
        self.last_state = self.active_state
        self.active_state = cmd
        log.info("[arm._activate_command] last_state='{0}' state='{1}'".format(
            self.last_state, cmd))
        if self.active_state == 'run':
            log.info("[arm._activate_command] START RUN")
            self.cmd_event.set()
        elif self.active_state == 'stop':
            log.info("[arm._activate_command] STOP")
            self.cmd_event.clear()
        return

    def shadow_mgr(self, payload, status, token):
        """
        Process mini fulfillment shadow commands from the Master shadow.
        :param payload: the shadow payload to process
        :param status: the accepted, rejected, or delta status of the invocation
        :param token: the token used for tracing this shadow request
        :return:
        """
        log.debug("[arm.shadow_mgr] shadow payload:{0}".format(
            json.dumps(json.loads(payload), sort_keys=True)))
        if payload == "REQUEST TIME OUT":
            log.error(
                "[arm.shadow_mgr] shadow 'REQUEST TIME OUT' tk:{0}".format(
                    token))
            return
        shady_vals = json.loads(payload)
        if 'sort_arm_cmd' in shady_vals['state']:
            cmd = shady_vals['state']['sort_arm_cmd']
            if cmd in commands:
                self._activate_command(cmd)
                # acknowledge the desired state is now reported
                self.master_shadow.shadowUpdate(json.dumps({
                    "state": {
                        "reported": {
                            "sort_arm_cmd": cmd}
                    }
                }), self.shadow_mgr, 5)
            else:
                log.warning(
                    "[arm.shadow_mgr] unknown command:{0}".format(cmd))

    def home(self):
        """Execute the 'home' stage, publishing begin/end stage messages."""
        log.debug("[act.home] [begin]")
        arm = ArmStages(self.sg)
        self.mqtt_client.publish(
            self.stage_topic, _stage_message("home", "begin"), 0
        )
        stage_result = arm.stage_home()
        self.mqtt_client.publish(
            self.stage_topic, _stage_message("home", "end", stage_result), 0
        )
        log.debug("[act.home] [end]")
        return stage_result

    def find(self):
        """Search for a box until one is found or the run command clears.

        Records the found box in ``self.found_box`` for the subsequent
        'pick' stage and returns the last stage result.
        """
        log.debug("[act.find] [begin]")
        arm = ArmStages(self.sg)
        loop = True
        self.found_box = NO_BOX_FOUND
        stage_result = NO_BOX_FOUND
        self.mqtt_client.publish(
            self.stage_topic, _stage_message("find", "begin"), 0
        )
        while self.cmd_event.is_set() and loop is True:
            stage_result = arm.stage_find()
            # X & Y start as None; compare explicitly against None so that
            # 0 — a valid coordinate — is not mistaken for "no box found"
            # (the previous truthiness test dropped boxes at x==0 or y==0).
            if stage_result['x'] is not None and \
                    stage_result['y'] is not None:
                log.info("[act.find] found box:{0}".format(stage_result))
                self.found_box = stage_result
                log.info("[act.find] self.found_box:{0}".format(
                    self.found_box))
                loop = False
            else:
                log.info("[act.find] self.found_box:{0}".format(
                    self.found_box
                ))
                log.info("[act.find] no box:{0}".format(stage_result))
                time.sleep(1)
        # TODO get image upload working with discovery based interaction
        # # upload the image file just before stage complete
        # if 'filename' in stage_result:
        #     filename = stage_result['filename']
        #
        #     url = 'http://' + ggd_config.master_core_ip + ":"
        #     url = url + str(ggd_config.master_core_port) + "/upload"
        #     files = {'file': open(filename, 'rb')}
        #     try:
        #         log.info('[act.find] POST to URL:{0} file:{1}'.format(
        #             url, filename))
        #         r = requests.post(url, files=files)
        #         log.info("[act.find] POST image file response:{0}".format(
        #             r.status_code))
        #     except ConnectionError as ce:
        #         log.error("[act.find] Upload Image connection error:{0}".format(
        #             ce
        #         ))
        self.mqtt_client.publish(
            self.stage_topic, _stage_message("find", "end", stage_result), 0
        )
        log.info("[act.find] outside self.found_box:{0}".format(self.found_box))
        log.debug("[act.find] [end]")
        return stage_result

    def pick(self):
        """Execute the 'pick' stage using the box found by find().

        Consumes ``self.found_box`` (resetting it to NO_BOX_FOUND so a
        stale box is never picked twice).
        """
        log.debug("[act.pick] [begin]")
        arm = ArmStages(self.sg)
        self.mqtt_client.publish(
            self.stage_topic, _stage_message("pick", "begin"), 0
        )
        pick_box = self.found_box
        self.found_box = NO_BOX_FOUND
        log.info("[act.pick] pick_box:{0}".format(pick_box))
        log.info("[act.pick] self.found_box:{0}".format(self.found_box))
        stage_result = arm.stage_pick(previous_results=pick_box,
                                      cartesian=False)
        self.mqtt_client.publish(
            self.stage_topic, _stage_message("pick", "end", stage_result), 0
        )
        log.debug("[act.pick] [end]")
        return stage_result

    def sort(self):
        """Execute the 'sort' stage, publishing begin/end stage messages."""
        log.debug("[act.sort] [begin]")
        arm = ArmStages(self.sg)
        self.mqtt_client.publish(
            self.stage_topic, _stage_message("sort", "begin"), 0
        )
        stage_result = arm.stage_sort()
        self.mqtt_client.publish(
            self.stage_topic, _stage_message("sort", "end", stage_result), 0
        )
        log.debug("[act.sort] [end]")
        return stage_result

    def emergency_stop_arm(self):
        """Freeze the arm in place using the last cached servo positions.

        Writes each servo's last known ``present_position`` back as its
        ``goal_position``. If any cached value has expired the stop cannot
        be performed and an error is logged instead.
        """
        if self.active_state == 'stopped' or \
                self.active_state == 'initialized':
            return
        try:
            # Read all five caches inside the try: each is an independent
            # TTLCache, so any entry (not just base) may have expired —
            # checking only base_servo_cache would still allow a KeyError
            # from the other four.
            stop_positions = [
                base_servo_cache['present_position'],
                femur01_servo_cache['present_position'],
                femur02_servo_cache['present_position'],
                tibia_servo_cache['present_position'],
                eff_servo_cache['present_position']
            ]
        except KeyError:
            log.error("[emergency_stop_arm] no 'present_position' cache value")
            return
        log.info("[emergency_stop_arm] stop_positions:{0}".format(
            stop_positions))
        self.sg.write_values(
            register='goal_position', values=stop_positions)
        self.active_state = 'stopped'
        log.info("[emergency_stop_arm] active_state:{0}".format(
            self.active_state))

    def stop_arm(self):
        """Run the servo stop stage unless already stopped or initialized."""
        arm = ArmStages(self.sg)
        if self.active_state == 'stopped' or \
                self.active_state == 'initialized':
            return
        arm.stage_stop()
        self.active_state = 'stopped'
        log.info("[stop_arm] active_state:{0}".format(
            self.active_state))

    def run(self):
        """Main control loop: execute each stage in order while running."""
        while should_loop:
            for stage in self.control_stages:
                if self.cmd_event.is_set():
                    stage_result = self.control_stages[stage]()
                    log.info("[run] stage:'{0}' res:'{1}'".format(
                        stage, stage_result))
                else:
                    # Here is where the Arm will be stopped
                    self.stop_arm()
            # 1/3rd of a second while iterating on control behavior
            time.sleep(0.3)
class ArmTelemetryThread(threading.Thread):
    """
    The thread that sets up telemetry interaction with the Servos.

    Every ``frequency`` seconds it samples the ServoGroup via
    ``_arm_message`` and publishes the JSON payload to ``telemetry_topic``
    until the module-level ``should_loop`` flag goes False.
    """
    def __init__(self, servo_group, frequency, telemetry_topic,
                 mqtt_client, args=(), kwargs=None):
        # `kwargs` previously defaulted to a shared mutable `{}`; use None
        # and substitute a fresh dict so instances cannot share state.
        super(ArmTelemetryThread, self).__init__(
            name="arm_telemetry_thread", args=args,
            kwargs=kwargs if kwargs is not None else {}
        )
        self.sg = servo_group  # ServoGroup sampled each period
        self.frequency = frequency  # seconds between samples
        self.telemetry_topic = telemetry_topic
        self.mqtt_client = mqtt_client
        log.info("[att.__init__] frequency:{0}".format(
            self.frequency))

    def run(self):
        """Publish one telemetry message per sample period until shutdown."""
        while should_loop:
            msg = _arm_message(self.sg)
            self.mqtt_client.publish(self.telemetry_topic, json.dumps(msg), 0)
            time.sleep(self.frequency)  # sample rate
if __name__ == "__main__":
    # CLI: positional credential/config arguments plus optional
    # topic/frequency/debug tuning.
    parser = argparse.ArgumentParser(
        description='Arm control and telemetry',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('device_name',
                        help="The arm's GGD device_name stored in config_file.")
    parser.add_argument('config_file',
                        help="The config file.")
    parser.add_argument('root_ca',
                        help="Root CA File Path of Server Certificate.")
    parser.add_argument('certificate',
                        help="File Path of GGD Certificate.")
    parser.add_argument('private_key',
                        help="File Path of GGD Private Key.")
    parser.add_argument('group_ca_path',
                        help="The directory where the discovered Group CA will "
                             "be saved.")
    parser.add_argument('--stage_topic', default='/arm/stages',
                        help="Topic used to communicate arm stage messages.")
    parser.add_argument('--telemetry_topic', default='/arm/telemetry',
                        help="Topic used to communicate arm telemetry.")
    parser.add_argument('--frequency', default=1.0,
                        dest='frequency', type=float,
                        help="Modify the default telemetry sample frequency.")
    parser.add_argument('--debug', default=False, action='store_true',
                        help="Activate debug output.")
    pa = parser.parse_args()
    if pa.debug:
        log.setLevel(logging.DEBUG)
        logging.getLogger('servode').setLevel(logging.DEBUG)
    # Discover the local and master Greengrass cores and build the MQTT and
    # shadow clients used by the two worker threads.
    local_mqtt, remote_mqtt, m_shadow = initialize(
        pa.device_name, pa.config_file, pa.root_ca, pa.certificate,
        pa.private_key, pa.group_ca_path
    )
    with ServoProtocol() as sp:
        # Ping every servo first so a missing servo fails fast here rather
        # than mid-run.
        for servo_id in arm_servo_ids:
            sp.ping(servo=servo_id)
        sg = ServoGroup()
        sg['base'] = Servo(sp, arm_servo_ids[0], base_servo_cache)
        sg['femur01'] = Servo(sp, arm_servo_ids[1], femur01_servo_cache)
        sg['femur02'] = Servo(sp, arm_servo_ids[2], femur02_servo_cache)
        sg['tibia'] = Servo(sp, arm_servo_ids[3], tibia_servo_cache)
        sg['effector'] = Servo(sp, arm_servo_ids[4], eff_servo_cache)
        # Use same ServoGroup with one read cache because only the telemetry
        # thread reads
        amt = ArmTelemetryThread(
            sg, frequency=pa.frequency, telemetry_topic=pa.telemetry_topic,
            mqtt_client=local_mqtt
        )
        act = ArmControlThread(
            sg, cmd_event, stage_topic=pa.stage_topic,
            mqtt_client=remote_mqtt, master_shadow=m_shadow
        )
        amt.start()
        act.start()
        try:
            # NOTE(review): `start` is never read afterwards — candidate
            # for removal.
            start = datetime.datetime.now()
            while should_loop:
                time.sleep(0.1)
        except KeyboardInterrupt:
            print("[__main__] KeyboardInterrupt ... setting should_loop=False")
            should_loop = False
        amt.join()
        act.join()
        local_mqtt.disconnect()
        remote_mqtt.disconnect()
        time.sleep(2)
|
en
| 0.863877
|
#!/usr/bin/env python # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not # use this file except in compliance with the License. A copy of the License is # located at # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. Greengrass Arm device This Greengrass device controls the mini-fulfillment center 3D printed robotic arm. It accomplishes this using two threads: one thread to control and report upon the arm's movement through `stages` and a separate thread to read and report upon each of the arm's servo telemetry. The control stages that the arm device will execute in order, are: * `home` - the arm is in or has returned to the ready position * `find` - the arm is actively using the end-effector camera to find objects of the correct size * `pick` - the arm has found an object and will attempt to pick-up that object * `sort` - the arm has grabbed an object and will place that object at the sort position To act in a coordinated fashion with the other Group's in the miniature fulfillment center, this device also subscribes to device shadow in the Master Greengrass Group. The commands that are understood from the master shadow are: * `run` - the arm will start executing the stages in order * `stop` - the arm will cease operation and go to the stop position This device expects to be launched form a command line. To learn more about that command line type: `python arm.py --help` # determine heartbeat device's thing name and endpoint for MQTT clients # Discover Greengrass Core # 10 sec # Now discover the groups in which this device is a member. # The arm should only be in two groups. The local and master groups. 
# Each group returned has a groupId which can compare to the configured # groupId in the config file. If the IDs match, the 'local' Group has been # found and therefore local core. # If the groupId's do not match, the 'remote' or 'master' group has been # found. # just grab first core as local # just grab first core as remote # just save one of the group's CAs to use as a CA file later # Greengrass Cores discovered, now connect to Cores from this Device # get a client to send telemetry # get a shadow client to receive commands # create and register the shadow handler on delta topics for commands # with a persistent connection to the Master shadow Thread that controls interaction with the Servos through assorted stages. # TODO move control into Lambda pending being able to access serial port Use the shared `threading.Event` instance to signal a mini fulfillment shadow command to the running Control thread. Process mini fulfillment shadow commands from the Master shadow. :param payload: the shadow payload to process :param status: the accepted, rejected, or delta status of the invocation :param token: the token used for tracing this shadow request :return: # acknowledge the desired state is now reported # X & Y start as none # TODO get image upload working with discovery based interaction # # upload the image file just before stage complete # if 'filename' in stage_result: # filename = stage_result['filename'] # # url = 'http://' + ggd_config.master_core_ip + ":" # url = url + str(ggd_config.master_core_port) + "/upload" # files = {'file': open(filename, 'rb')} # try: # log.info('[act.find] POST to URL:{0} file:{1}'.format( # url, filename)) # r = requests.post(url, files=files) # log.info("[act.find] POST image file response:{0}".format( # r.status_code)) # except ConnectionError as ce: # log.error("[act.find] Upload Image connection error:{0}".format( # ce # )) # Here is where the Arm will be stopped # 1/3rd of a second while iterating on control behavior The 
thread that sets up telemetry interaction with the Servos. # sample rate # Use same ServoGroup with one read cache because only the telemetry # thread reads
| 2.163499
| 2
|
grr/server/grr_response_server/aff4_objects/stats_store.py
|
billstackpole/grr
| 1
|
6627718
|
#!/usr/bin/env python
"""Storage implementation for gathered statistics.
Statistics collected by StatsCollector (see lib/stats.py) is stored in AFF4
space. Statistics data for different parts of the system is separated by
process ids. For example, for the frontend, process id may be "frontend",
for worker - "worker", etc.
On the AFF4 statistics data is stored under aff4:/stats_store.
aff4:/stats_store itself is a URN of a StatsStore object that can be used
for querying stored data and saving new stats.
For every process id, aff4:/stats_store/<process id> object of type
StatsStoreProcessData is created. This object stores metadata of all
the metrics in the METRICS_METADATA field. All the collected statistics
data are written as aff4:stats_store/<metric name> attributes to the
aff4:/stats_store/<process id> row. This way we can easily and efficiently
query statistics data for a given set of metrics for a given process id
for a given time range.
Metrics metadata are stored separately from the values themselves for
efficiency reasons. Metadata objects are created when metrics are registered.
They carry extensive information about the metrics, like metric name and
docstring, metric type, etc. This information does not change (unless
GRR's source code changes) and so it doesn't make sense to duplicate it
every time we write a new set of statistics data to the datastore. Therefore
metadata for all the metrics is stored in
StatsStoreProcessData.METRICS_METADATA. Metrics' values themselves are
stored as datastore row attributes.
Statistics is written to the data store by StatsStoreWorker. It periodically
fetches values for all the metrics and writes them to corresponding
object on AFF4.
"""
from __future__ import division
import logging
import re
import threading
import time
from future.utils import iteritems
from future.utils import itervalues
from grr_response_core import config
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import registry
from grr_response_core.lib import stats
from grr_response_server import access_control
from grr_response_server import aff4
from grr_response_server import data_store
from grr_response_server import stats_values
from grr_response_server import timeseries
class StatsStoreProcessData(aff4.AFF4Object):
  """Stores stats data for a particular process."""

  class SchemaCls(aff4.AFF4Object.SchemaCls):
    """Schema for StatsStoreProcessData."""

    # Metadata for all metrics of this process. Unversioned: only the
    # latest metadata blob is kept, and writing it does not create a new
    # object version.
    METRICS_METADATA = aff4.Attribute(
        "aff4:stats_store_process_data/metrics_metadata",
        stats_values.StatsStoreMetricsMetadata,
        creates_new_object_version=False,
        versioned=False)

  def WriteMetadataDescriptors(self, metrics_metadata, timestamp=None):
    """Persist metric metadata, but only when it differs from what is stored.

    Args:
      metrics_metadata: dict of metric name -> metadata, as produced by
        stats.STATS.GetAllMetricsMetadata().
      timestamp: optional age to stamp the attribute with.
    """
    current_metadata = self.Get(
        self.Schema.METRICS_METADATA,
        default=stats_values.StatsStoreMetricsMetadata())
    if current_metadata.AsDict() != metrics_metadata:
      # Only rewrite (and flush) when the metadata actually changed.
      store_metadata = stats_values.StatsStoreMetricsMetadata(
          metrics=list(itervalues(metrics_metadata)))
      self.AddAttribute(
          self.Schema.METRICS_METADATA, store_metadata, age=timestamp)
      self.Flush()

  def WriteStats(self, timestamp=None):
    """Writes current values of all registered metrics for this process."""
    metrics_metadata = stats.STATS.GetAllMetricsMetadata()
    self.WriteMetadataDescriptors(metrics_metadata, timestamp=timestamp)
    with data_store.DB.GetMutationPool() as mutation_pool:
      mutation_pool.StatsWriteMetrics(
          self.urn, metrics_metadata, timestamp=timestamp)

  def DeleteStats(self, timestamp=data_store.DataStore.ALL_TIMESTAMPS):
    """Deletes all stats in the given time range."""
    with data_store.DB.GetMutationPool() as mutation_pool:
      mutation_pool.StatsDeleteStatsInRange(self.urn, timestamp)
class StatsStore(aff4.AFF4Volume):
  """Implementation of the long-term storage of collected stats data.

  This class allows to write current stats data to the data store, read
  and delete them. StatsStore uses data_store to store the data.
  All historical stats data are stored in a single data store subject per
  process. By process we mean, for example: "admin UI", "worker #1",
  "worker #3", etc. Stats data are stored as subject's attributes.
  """

  # Root URN under which one subject per process id is created.
  DATA_STORE_ROOT = rdfvalue.RDFURN("aff4:/stats_store")

  ALL_TIMESTAMPS = data_store.DataStore.ALL_TIMESTAMPS
  NEWEST_TIMESTAMP = data_store.DataStore.NEWEST_TIMESTAMP

  def Initialize(self):
    # Default the URN to the well-known stats store root when not given.
    super(StatsStore, self).Initialize()
    if self.urn is None:
      self.urn = self.DATA_STORE_ROOT

  def WriteStats(self, process_id=None, timestamp=None):
    """Writes current stats values to the data store with a given timestamp."""
    if not process_id:
      raise ValueError("process_id can't be None")
    process_data = aff4.FACTORY.Create(
        self.urn.Add(process_id),
        StatsStoreProcessData,
        mode="rw",
        token=self.token)
    process_data.WriteStats(timestamp=timestamp)

  def ListUsedProcessIds(self):
    """List process ids that were used when saving data to stats store."""
    return [urn.Basename() for urn in self.ListChildren()]

  def ReadMetadata(self, process_id=None):
    """Reads metadata of stored values for the given process."""
    if not process_id:
      raise ValueError("process_id can't be None")
    results = self.MultiReadMetadata(process_ids=[process_id])
    try:
      return results[process_id]
    except KeyError:
      return {}

  def MultiReadMetadata(self, process_ids=None):
    """Reads metadata of stored values for multiple given processes."""
    if not process_ids:
      process_ids = self.ListUsedProcessIds()
    subjects = [
        self.DATA_STORE_ROOT.Add(process_id) for process_id in process_ids
    ]
    subjects_data = aff4.FACTORY.MultiOpen(
        subjects, mode="r", token=self.token, aff4_type=StatsStoreProcessData)
    results = {}
    for subject_data in subjects_data:
      results[subject_data.urn.Basename()] = subject_data.Get(
          subject_data.Schema.METRICS_METADATA)
    # Processes with no stored metadata still get an (empty) entry so the
    # result has one key per requested process id.
    for process_id in process_ids:
      results.setdefault(process_id, stats_values.StatsStoreMetricsMetadata())
    return results

  def ReadStats(self,
                process_id=None,
                metric_name=None,
                timestamp=ALL_TIMESTAMPS,
                limit=10000):
    """Reads stats values from the data store for the current process."""
    if not process_id:
      raise ValueError("process_id can't be None")
    results = self.MultiReadStats(
        process_ids=[process_id],
        metric_name=metric_name,
        timestamp=timestamp,
        limit=limit)
    try:
      return results[process_id]
    except KeyError:
      return {}

  def MultiReadStats(self,
                     process_ids=None,
                     metric_name=None,
                     timestamp=ALL_TIMESTAMPS,
                     limit=10000):
    """Reads historical data for multiple process ids at once."""
    if not process_ids:
      process_ids = self.ListUsedProcessIds()
    multi_metadata = self.MultiReadMetadata(process_ids=process_ids)
    subjects = [
        self.DATA_STORE_ROOT.Add(process_id) for process_id in process_ids
    ]
    return data_store.DB.StatsReadDataForProcesses(
        subjects, metric_name, multi_metadata, timestamp=timestamp, limit=limit)

  def DeleteStats(self, process_id=None, timestamp=ALL_TIMESTAMPS):
    """Deletes all stats in the given time range."""
    if not process_id:
      raise ValueError("process_id can't be None")
    process_data = aff4.FACTORY.Create(
        self.urn.Add(process_id),
        StatsStoreProcessData,
        mode="w",
        token=self.token)
    process_data.DeleteStats(timestamp=timestamp)
class StatsStoreDataQuery(object):
  """Query class used to query results from StatsStore.ReadStats/MultiReadStats.

  NOTE: this class is mutable. Although it's designed with call-chaining in
  mind, you have to create a new query object for every new query.

  I.e. - this *will not* work:
    query = stats_store.StatsStoreDataQuery(stats_data)
    counter1 = query.In("pid1").In("counter").SeriesCount()
    counter2 = query.In("pidw").In("counter").SeriesCount()

  But this *will* work:
    query = stats_store.StatsStoreDataQuery(stats_data)
    counter1 = query.In("pid1").In("counter").SeriesCount()
    query = stats_store.StatsStoreDataQuery(stats_data)
    counter2 = query.In("pidw").In("counter").SeriesCount()
  """

  VALUE_QUERY = "value"
  DISTRIBUTION_SUM_QUERY = "distribution_sum"
  DISTRIBUTION_COUNT_QUERY = "distribution_count"

  def __init__(self, stats_data):
    super(StatsStoreDataQuery, self).__init__()
    self.current_dicts = [stats_data]
    self.time_series = None
    self.path = []
    self.query_type = None
    self.aggregate_via = None
    self.sample_interval = None

  def _TimeSeriesFromData(self, data, attr=None):
    """Builds a time series from (value, timestamp) pairs.

    Args:
      data: Iterable of (value, timestamp) pairs.
      attr: If set, getattr(value, attr) is appended instead of the raw
        value (used for distribution "sum"/"count" sub-values).

    Returns:
      timeseries.Timeseries built from the data.

    Raises:
      ValueError: If attr is given but missing on a value, or if a raw
        value looks like a distribution while attr is not given.
    """
    series = timeseries.Timeseries()

    for value, timestamp in data:
      if attr:
        try:
          series.Append(getattr(value, attr), timestamp)
        except AttributeError:
          raise ValueError(
              "Can't find attribute %s in value %s." % (attr, value))
      else:
        if hasattr(value, "sum") or hasattr(value, "count"):
          raise ValueError(
              "Can't treat complex type as simple value: %s" % value)
        series.Append(value, timestamp)

    return series

  @property
  def ts(self):
    """Return single timeseries.Timeseries built by this query."""
    if self.time_series is None:
      raise RuntimeError("Time series weren't built yet.")
    if not self.time_series:
      return timeseries.Timeseries()
    return self.time_series[0]

  def In(self, regex):
    """Narrow query's scope to keys fully matching the given regex."""
    self.path.append(regex)

    new_current_dicts = []
    for current_dict in self.current_dicts:
      for key, value in iteritems(current_dict):
        m = re.match(regex, key)
        # re.match only anchors at the start; require a full-key match.
        if m and m.string == m.group(0):
          new_current_dicts.append(value)

    self.current_dicts = new_current_dicts
    return self

  def _GetNestedValues(self, dicts):
    """Get all values nested in the given dictionaries.

    Args:
      dicts: List of dictionaries to go through.

    Returns:
      ([nested values], status) where status is True if nested values are
      dictionaries and False otherwise.

    Raises:
      RuntimeError: if some nested values are dictionaries and some are not.
    """
    new_dicts = []
    for current_dict in dicts:
      for _, value in iteritems(current_dict):
        new_dicts.append(value)

    # Check for "items" rather than the Python 2-only "iteritems" so that
    # nested dicts are still recognized when running under Python 3.
    sub_dicts = [x for x in new_dicts if hasattr(x, "items")]
    if not sub_dicts:
      return (new_dicts, False)
    elif len(sub_dicts) == len(new_dicts):
      return (new_dicts, True)
    else:
      raise RuntimeError("Inconsistent values hierarchy.")

  def InAll(self):
    """Use all metrics in the current scope."""
    self.path.append(":all")

    # Descend until the current level is no longer made of dicts.
    while True:
      self.current_dicts, status = self._GetNestedValues(self.current_dicts)
      if not status:
        break

    return self

  def MakeIncreasing(self):
    """Fixes the time series so that it does not decrement."""
    if self.time_series is None:
      raise RuntimeError("MakeIncreasing must be called after Take*().")

    for time_serie in self.time_series:
      time_serie.MakeIncreasing()
    return self

  def Normalize(self, period, start_time, stop_time, **kwargs):
    """Resample the query with given sampling interval."""
    if self.time_series is None:
      raise RuntimeError("Normalize must be called after Take*().")

    self.sample_interval = period
    self.start_time = start_time
    self.stop_time = stop_time

    for time_serie in self.time_series:
      time_serie.Normalize(period, start_time, stop_time, **kwargs)
    return self

  def InTimeRange(self, range_start, range_end):
    """Only use data points within the given time range."""
    if self.time_series is None:
      raise RuntimeError("InTimeRange must be called after Take*().")
    if range_start is None:
      raise ValueError("range_start can't be None")
    if range_end is None:
      raise ValueError("range_end can't be None")

    for time_serie in self.time_series:
      time_serie.FilterRange(start_time=range_start, stop_time=range_end)
    return self

  def TakeValue(self):
    """Assume metrics in this query are plain values."""
    self.query_type = self.VALUE_QUERY

    self.time_series = []
    for current_dict in self.current_dicts:
      self.time_series.append(self._TimeSeriesFromData(current_dict))

    return self

  def TakeDistributionSum(self):
    """Assume metrics in this query are distributions. Use their sums."""
    self.query_type = self.DISTRIBUTION_SUM_QUERY

    self.time_series = []
    for current_dict in self.current_dicts:
      self.time_series.append(self._TimeSeriesFromData(current_dict, "sum"))

    return self

  def TakeDistributionCount(self):
    """Assume metrics in this query are distributions. Use their counts."""
    self.query_type = self.DISTRIBUTION_COUNT_QUERY

    self.time_series = []
    for current_dict in self.current_dicts:
      self.time_series.append(self._TimeSeriesFromData(current_dict, "count"))

    return self

  def AggregateViaSum(self):
    """Aggregate multiple time series into one by summing them."""
    if self.time_series is None:
      raise RuntimeError("AggregateViaSum must be called after Take*().")
    if self.sample_interval is None:
      # Normalize() is what sets sample_interval; the old message referred
      # to a nonexistent Resample() method.
      raise RuntimeError("Normalize() must be called prior to "
                         "AggregateViaSum().")

    if not self.time_series:
      return self

    if len(self.time_series) == 1:
      return self

    current_serie = self.time_series[0]
    for serie in self.time_series[1:]:
      current_serie.Add(serie)

    self.time_series = [current_serie]
    return self

  def AggregateViaMean(self):
    """Aggregate multiple time series into one by calculating mean value."""
    num_time_series = len(self.time_series)
    self.AggregateViaSum()
    self.ts.Rescale(1.0 / num_time_series)
    return self

  def SeriesCount(self):
    """Return number of time series the query was narrowed to."""
    if not self.time_series:
      if not self.current_dicts:
        return 0
      else:
        return len(self.current_dicts)
    else:
      return len(self.time_series)

  def Rate(self):
    """Apply rate function to all time series in this query."""
    if self.time_series is None:
      raise RuntimeError("Rate must be called after Take*().")
    if self.sample_interval is None:
      raise RuntimeError("Normalize() must be called prior to Rate().")

    for time_serie in self.time_series:
      time_serie.ToDeltas()
      time_serie.Rescale(1.0 / self.sample_interval.seconds)
    return self

  def Scale(self, multiplier):
    """Scale value in all time series in this query."""
    if self.time_series is None:
      raise RuntimeError("Scale must be called after Take*().")

    for time_serie in self.time_series:
      time_serie.Rescale(multiplier)
    return self

  def Mean(self):
    """Calculate mean value of a single time serie in this query."""
    if self.time_series is None:
      raise RuntimeError("Mean must be called after Take*().")
    if not self.time_series:
      return 0
    if len(self.time_series) != 1:
      raise RuntimeError("Can only return mean for a single time serie.")

    return self.time_series[0].Mean()
# Global StatsStore object
STATS_STORE = None


class StatsStoreWorker(object):
  """StatsStoreWorker periodically dumps stats data into the stats store."""

  def __init__(self,
               stats_store,
               process_id,
               thread_name="grr_stats_saver",
               sleep=None):
    super(StatsStoreWorker, self).__init__()

    self.stats_store = stats_store
    self.process_id = process_id
    self.thread_name = thread_name
    # NOTE: a falsy sleep (0 or None) falls back to the configured interval.
    self.sleep = sleep or config.CONFIG["StatsStore.write_interval"]

  def _RunLoop(self):
    """Write-then-prune loop; runs forever on the worker thread."""
    while True:
      logging.debug("Writing stats to stats store.")

      try:
        self.stats_store.WriteStats(process_id=self.process_id)
      except Exception as e:  # pylint: disable=broad-except
        logging.exception("StatsStore exception caught during WriteStats(): %s",
                          e)

      # Fixed a stray implicit concatenation with an empty string literal.
      logging.debug("Removing old stats from stats store.")
      # Maximum time we keep stats store data is three days.
      stats_store_ttl = 60 * 60 * 24 * 3
      try:
        now = rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch()
        self.stats_store.DeleteStats(
            process_id=self.process_id,
            timestamp=(0, now - stats_store_ttl * 1000000))
      except Exception as e:  # pylint: disable=broad-except
        logging.exception(
            "StatsStore exception caught during DeleteStats(): %s", e)

      time.sleep(self.sleep)

  def Run(self):
    """Runs the worker loop on the current thread (blocks forever)."""
    self.RunAsync().join()

  def RunAsync(self):
    """Starts the worker loop on a daemon thread and returns the thread."""
    self.running_thread = threading.Thread(
        name=self.thread_name, target=self._RunLoop)
    self.running_thread.daemon = True
    self.running_thread.start()
    return self.running_thread
class StatsStoreInit(registry.InitHook):
  """Hook that inits global STATS_STORE object and stats store worker."""
  pre = [aff4.AFF4InitHook]

  def RunOnce(self):
    """Initializes StatsStore and StatsStoreWorker."""
    global STATS_STORE

    # SetUID is required to create and write to aff4:/stats_store.
    token = access_control.ACLToken(username="GRRStatsStore").SetUID()

    STATS_STORE = aff4.FACTORY.Create(None, StatsStore, mode="w", token=token)
    try:
      STATS_STORE.Flush()
    except access_control.UnauthorizedAccess:
      logging.info("Not writing aff4:/stats_store due to lack of permissions.")

    # A StatsStoreWorker is only needed when StatsStore.process_id is set
    # in the config.
    stats_process_id = config.CONFIG["StatsStore.process_id"]
    if stats_process_id:
      StatsStoreWorker(STATS_STORE, stats_process_id).RunAsync()
|
#!/usr/bin/env python
"""Storage implementation for gathered statistics.
Statistics collected by StatsCollector (see lib/stats.py) is stored in AFF4
space. Statistics data for different parts of the system is separated by
process ids. For example, for the frontend, process id may be "frontend",
for worker - "worker", etc.
On the AFF4 statistics data is stored under aff4:/stats_store.
aff4:/stats_store itself is a URN of a StatsStore object that can be used
for querying stored data and saving new stats.
For every process id, aff4:/stats_store/<process id> object of type
StatsStoreProcessData is created. This object stores metadata of all
the metrics in the METRICS_METADATA field. All the collected statistics
data are written as aff4:stats_store/<metric name> attributes to the
aff4:/stats_store/<process id> row. This way we can easily and efficiently
query statistics data for a given set of metrics for a given process id
for a given time range.
Metrics metadata are stored separately from the values themselves for
efficiency reasons. Metadata objects are created when metrics are registered.
They carry extensive information about the metrics, like metric name and
docstring, metric type, etc. This information does not change (unless changes
GRR's source code changes) and so it doesn't make sense to duplicate it
every time we write a new set of statistics data to the datastore. Therefore
metadata for all the metrics is stored in
StatsStoreProcessData.METRICS_METADATA. Metrics' values themselves are
stored as datastore row attributes.
Statistics is written to the data store by StatsStoreWorker. It periodically
fetches values for all the metrics and writes them to corresponding
object on AFF4.
"""
from __future__ import division
import logging
import re
import threading
import time
from future.utils import iteritems
from future.utils import itervalues
from grr_response_core import config
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import registry
from grr_response_core.lib import stats
from grr_response_server import access_control
from grr_response_server import aff4
from grr_response_server import data_store
from grr_response_server import stats_values
from grr_response_server import timeseries
class StatsStoreProcessData(aff4.AFF4Object):
  """Stores stats data for a particular process."""

  class SchemaCls(aff4.AFF4Object.SchemaCls):
    """Schema for StatsStoreProcessData."""

    METRICS_METADATA = aff4.Attribute(
        "aff4:stats_store_process_data/metrics_metadata",
        stats_values.StatsStoreMetricsMetadata,
        creates_new_object_version=False,
        versioned=False)

  def WriteMetadataDescriptors(self, metrics_metadata, timestamp=None):
    """Persists metrics metadata, but only when it changed."""
    stored = self.Get(
        self.Schema.METRICS_METADATA,
        default=stats_values.StatsStoreMetricsMetadata())

    if stored.AsDict() != metrics_metadata:
      updated = stats_values.StatsStoreMetricsMetadata(
          metrics=list(itervalues(metrics_metadata)))
      self.AddAttribute(self.Schema.METRICS_METADATA, updated, age=timestamp)
      self.Flush()

  def WriteStats(self, timestamp=None):
    """Writes current values of all registered metrics."""
    metrics_metadata = stats.STATS.GetAllMetricsMetadata()
    self.WriteMetadataDescriptors(metrics_metadata, timestamp=timestamp)
    with data_store.DB.GetMutationPool() as pool:
      pool.StatsWriteMetrics(self.urn, metrics_metadata, timestamp=timestamp)

  def DeleteStats(self, timestamp=data_store.DataStore.ALL_TIMESTAMPS):
    """Deletes all stats in the given time range."""
    with data_store.DB.GetMutationPool() as pool:
      pool.StatsDeleteStatsInRange(self.urn, timestamp)
class StatsStore(aff4.AFF4Volume):
  """Implementation of the long-term storage of collected stats data.

  This class allows to write current stats data to the data store, read
  and delete them. StatsStore uses data_store to store the data.
  All historical stats data are stored in a single data store subject per
  process. By process we mean, for example: "admin UI", "worker #1",
  "worker #3", etc. Stats data are stored as subject's attributes.
  """

  DATA_STORE_ROOT = rdfvalue.RDFURN("aff4:/stats_store")

  ALL_TIMESTAMPS = data_store.DataStore.ALL_TIMESTAMPS
  NEWEST_TIMESTAMP = data_store.DataStore.NEWEST_TIMESTAMP

  def Initialize(self):
    super(StatsStore, self).Initialize()
    # The store always lives under the fixed aff4:/stats_store root.
    if self.urn is None:
      self.urn = self.DATA_STORE_ROOT

  def WriteStats(self, process_id=None, timestamp=None):
    """Writes current stats values to the data store with a given timestamp."""
    if not process_id:
      raise ValueError("process_id can't be None")

    writer = aff4.FACTORY.Create(
        self.urn.Add(process_id),
        StatsStoreProcessData,
        mode="rw",
        token=self.token)
    writer.WriteStats(timestamp=timestamp)

  def ListUsedProcessIds(self):
    """List process ids that were used when saving data to stats store."""
    return [urn.Basename() for urn in self.ListChildren()]

  def ReadMetadata(self, process_id=None):
    """Reads metadata of stored values for the given process."""
    if not process_id:
      raise ValueError("process_id can't be None")

    per_process = self.MultiReadMetadata(process_ids=[process_id])
    return per_process.get(process_id, {})

  def MultiReadMetadata(self, process_ids=None):
    """Reads metadata of stored values for multiple given processes."""
    if not process_ids:
      process_ids = self.ListUsedProcessIds()

    urns = [self.DATA_STORE_ROOT.Add(pid) for pid in process_ids]
    opened = aff4.FACTORY.MultiOpen(
        urns, mode="r", token=self.token, aff4_type=StatsStoreProcessData)

    metadata_by_pid = {}
    for process_data in opened:
      metadata_by_pid[process_data.urn.Basename()] = process_data.Get(
          process_data.Schema.METRICS_METADATA)

    # Requested ids with no stored data still get an empty metadata object.
    for pid in process_ids:
      metadata_by_pid.setdefault(pid, stats_values.StatsStoreMetricsMetadata())

    return metadata_by_pid

  def ReadStats(self,
                process_id=None,
                metric_name=None,
                timestamp=ALL_TIMESTAMPS,
                limit=10000):
    """Reads stats values from the data store for the current process."""
    if not process_id:
      raise ValueError("process_id can't be None")

    per_process = self.MultiReadStats(
        process_ids=[process_id],
        metric_name=metric_name,
        timestamp=timestamp,
        limit=limit)
    return per_process.get(process_id, {})

  def MultiReadStats(self,
                     process_ids=None,
                     metric_name=None,
                     timestamp=ALL_TIMESTAMPS,
                     limit=10000):
    """Reads historical data for multiple process ids at once."""
    if not process_ids:
      process_ids = self.ListUsedProcessIds()

    metadata_by_pid = self.MultiReadMetadata(process_ids=process_ids)
    urns = [self.DATA_STORE_ROOT.Add(pid) for pid in process_ids]
    return data_store.DB.StatsReadDataForProcesses(
        urns, metric_name, metadata_by_pid, timestamp=timestamp, limit=limit)

  def DeleteStats(self, process_id=None, timestamp=ALL_TIMESTAMPS):
    """Deletes all stats in the given time range."""
    if not process_id:
      raise ValueError("process_id can't be None")

    writer = aff4.FACTORY.Create(
        self.urn.Add(process_id),
        StatsStoreProcessData,
        mode="w",
        token=self.token)
    writer.DeleteStats(timestamp=timestamp)
class StatsStoreDataQuery(object):
  """Query class used to query results from StatsStore.ReadStats/MultiReadStats.

  NOTE: this class is mutable. Although it's designed with call-chaining in
  mind, you have to create a new query object for every new query.

  I.e. - this *will not* work:
    query = stats_store.StatsStoreDataQuery(stats_data)
    counter1 = query.In("pid1").In("counter").SeriesCount()
    counter2 = query.In("pidw").In("counter").SeriesCount()

  But this *will* work:
    query = stats_store.StatsStoreDataQuery(stats_data)
    counter1 = query.In("pid1").In("counter").SeriesCount()
    query = stats_store.StatsStoreDataQuery(stats_data)
    counter2 = query.In("pidw").In("counter").SeriesCount()
  """

  VALUE_QUERY = "value"
  DISTRIBUTION_SUM_QUERY = "distribution_sum"
  DISTRIBUTION_COUNT_QUERY = "distribution_count"

  def __init__(self, stats_data):
    super(StatsStoreDataQuery, self).__init__()
    self.current_dicts = [stats_data]
    self.time_series = None
    self.path = []
    self.query_type = None
    self.aggregate_via = None
    self.sample_interval = None

  def _TimeSeriesFromData(self, data, attr=None):
    """Builds a time series from (value, timestamp) pairs.

    Args:
      data: Iterable of (value, timestamp) pairs.
      attr: If set, getattr(value, attr) is appended instead of the raw
        value (used for distribution "sum"/"count" sub-values).

    Returns:
      timeseries.Timeseries built from the data.

    Raises:
      ValueError: If attr is given but missing on a value, or if a raw
        value looks like a distribution while attr is not given.
    """
    series = timeseries.Timeseries()

    for value, timestamp in data:
      if attr:
        try:
          series.Append(getattr(value, attr), timestamp)
        except AttributeError:
          raise ValueError(
              "Can't find attribute %s in value %s." % (attr, value))
      else:
        if hasattr(value, "sum") or hasattr(value, "count"):
          raise ValueError(
              "Can't treat complex type as simple value: %s" % value)
        series.Append(value, timestamp)

    return series

  @property
  def ts(self):
    """Return single timeseries.Timeseries built by this query."""
    if self.time_series is None:
      raise RuntimeError("Time series weren't built yet.")
    if not self.time_series:
      return timeseries.Timeseries()
    return self.time_series[0]

  def In(self, regex):
    """Narrow query's scope to keys fully matching the given regex."""
    self.path.append(regex)

    new_current_dicts = []
    for current_dict in self.current_dicts:
      for key, value in iteritems(current_dict):
        m = re.match(regex, key)
        # re.match only anchors at the start; require a full-key match.
        if m and m.string == m.group(0):
          new_current_dicts.append(value)

    self.current_dicts = new_current_dicts
    return self

  def _GetNestedValues(self, dicts):
    """Get all values nested in the given dictionaries.

    Args:
      dicts: List of dictionaries to go through.

    Returns:
      ([nested values], status) where status is True if nested values are
      dictionaries and False otherwise.

    Raises:
      RuntimeError: if some nested values are dictionaries and some are not.
    """
    new_dicts = []
    for current_dict in dicts:
      for _, value in iteritems(current_dict):
        new_dicts.append(value)

    # Check for "items" rather than the Python 2-only "iteritems" so that
    # nested dicts are still recognized when running under Python 3.
    sub_dicts = [x for x in new_dicts if hasattr(x, "items")]
    if not sub_dicts:
      return (new_dicts, False)
    elif len(sub_dicts) == len(new_dicts):
      return (new_dicts, True)
    else:
      raise RuntimeError("Inconsistent values hierarchy.")

  def InAll(self):
    """Use all metrics in the current scope."""
    self.path.append(":all")

    # Descend until the current level is no longer made of dicts.
    while True:
      self.current_dicts, status = self._GetNestedValues(self.current_dicts)
      if not status:
        break

    return self

  def MakeIncreasing(self):
    """Fixes the time series so that it does not decrement."""
    if self.time_series is None:
      raise RuntimeError("MakeIncreasing must be called after Take*().")

    for time_serie in self.time_series:
      time_serie.MakeIncreasing()
    return self

  def Normalize(self, period, start_time, stop_time, **kwargs):
    """Resample the query with given sampling interval."""
    if self.time_series is None:
      raise RuntimeError("Normalize must be called after Take*().")

    self.sample_interval = period
    self.start_time = start_time
    self.stop_time = stop_time

    for time_serie in self.time_series:
      time_serie.Normalize(period, start_time, stop_time, **kwargs)
    return self

  def InTimeRange(self, range_start, range_end):
    """Only use data points within the given time range."""
    if self.time_series is None:
      raise RuntimeError("InTimeRange must be called after Take*().")
    if range_start is None:
      raise ValueError("range_start can't be None")
    if range_end is None:
      raise ValueError("range_end can't be None")

    for time_serie in self.time_series:
      time_serie.FilterRange(start_time=range_start, stop_time=range_end)
    return self

  def TakeValue(self):
    """Assume metrics in this query are plain values."""
    self.query_type = self.VALUE_QUERY

    self.time_series = []
    for current_dict in self.current_dicts:
      self.time_series.append(self._TimeSeriesFromData(current_dict))

    return self

  def TakeDistributionSum(self):
    """Assume metrics in this query are distributions. Use their sums."""
    self.query_type = self.DISTRIBUTION_SUM_QUERY

    self.time_series = []
    for current_dict in self.current_dicts:
      self.time_series.append(self._TimeSeriesFromData(current_dict, "sum"))

    return self

  def TakeDistributionCount(self):
    """Assume metrics in this query are distributions. Use their counts."""
    self.query_type = self.DISTRIBUTION_COUNT_QUERY

    self.time_series = []
    for current_dict in self.current_dicts:
      self.time_series.append(self._TimeSeriesFromData(current_dict, "count"))

    return self

  def AggregateViaSum(self):
    """Aggregate multiple time series into one by summing them."""
    if self.time_series is None:
      raise RuntimeError("AggregateViaSum must be called after Take*().")
    if self.sample_interval is None:
      # Normalize() is what sets sample_interval; the old message referred
      # to a nonexistent Resample() method.
      raise RuntimeError("Normalize() must be called prior to "
                         "AggregateViaSum().")

    if not self.time_series:
      return self

    if len(self.time_series) == 1:
      return self

    current_serie = self.time_series[0]
    for serie in self.time_series[1:]:
      current_serie.Add(serie)

    self.time_series = [current_serie]
    return self

  def AggregateViaMean(self):
    """Aggregate multiple time series into one by calculating mean value."""
    num_time_series = len(self.time_series)
    self.AggregateViaSum()
    self.ts.Rescale(1.0 / num_time_series)
    return self

  def SeriesCount(self):
    """Return number of time series the query was narrowed to."""
    if not self.time_series:
      if not self.current_dicts:
        return 0
      else:
        return len(self.current_dicts)
    else:
      return len(self.time_series)

  def Rate(self):
    """Apply rate function to all time series in this query."""
    if self.time_series is None:
      raise RuntimeError("Rate must be called after Take*().")
    if self.sample_interval is None:
      raise RuntimeError("Normalize() must be called prior to Rate().")

    for time_serie in self.time_series:
      time_serie.ToDeltas()
      time_serie.Rescale(1.0 / self.sample_interval.seconds)
    return self

  def Scale(self, multiplier):
    """Scale value in all time series in this query."""
    if self.time_series is None:
      raise RuntimeError("Scale must be called after Take*().")

    for time_serie in self.time_series:
      time_serie.Rescale(multiplier)
    return self

  def Mean(self):
    """Calculate mean value of a single time serie in this query."""
    if self.time_series is None:
      raise RuntimeError("Mean must be called after Take*().")
    if not self.time_series:
      return 0
    if len(self.time_series) != 1:
      raise RuntimeError("Can only return mean for a single time serie.")

    return self.time_series[0].Mean()
# Global StatsStore object
STATS_STORE = None


class StatsStoreWorker(object):
  """StatsStoreWorker periodically dumps stats data into the stats store."""

  def __init__(self,
               stats_store,
               process_id,
               thread_name="grr_stats_saver",
               sleep=None):
    super(StatsStoreWorker, self).__init__()

    self.stats_store = stats_store
    self.process_id = process_id
    self.thread_name = thread_name
    # NOTE: a falsy sleep (0 or None) falls back to the configured interval.
    self.sleep = sleep or config.CONFIG["StatsStore.write_interval"]

  def _RunLoop(self):
    """Write-then-prune loop; runs forever on the worker thread."""
    while True:
      logging.debug("Writing stats to stats store.")

      try:
        self.stats_store.WriteStats(process_id=self.process_id)
      except Exception as e:  # pylint: disable=broad-except
        logging.exception("StatsStore exception caught during WriteStats(): %s",
                          e)

      # Fixed a stray implicit concatenation with an empty string literal.
      logging.debug("Removing old stats from stats store.")
      # Maximum time we keep stats store data is three days.
      stats_store_ttl = 60 * 60 * 24 * 3
      try:
        now = rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch()
        self.stats_store.DeleteStats(
            process_id=self.process_id,
            timestamp=(0, now - stats_store_ttl * 1000000))
      except Exception as e:  # pylint: disable=broad-except
        logging.exception(
            "StatsStore exception caught during DeleteStats(): %s", e)

      time.sleep(self.sleep)

  def Run(self):
    """Runs the worker loop on the current thread (blocks forever)."""
    self.RunAsync().join()

  def RunAsync(self):
    """Starts the worker loop on a daemon thread and returns the thread."""
    self.running_thread = threading.Thread(
        name=self.thread_name, target=self._RunLoop)
    self.running_thread.daemon = True
    self.running_thread.start()
    return self.running_thread
class StatsStoreInit(registry.InitHook):
  """Hook that inits global STATS_STORE object and stats store worker."""
  pre = [aff4.AFF4InitHook]

  def RunOnce(self):
    """Initializes StatsStore and StatsStoreWorker."""
    global STATS_STORE

    # SetUID is required to create and write to aff4:/stats_store.
    token = access_control.ACLToken(username="GRRStatsStore").SetUID()

    STATS_STORE = aff4.FACTORY.Create(None, StatsStore, mode="w", token=token)
    try:
      STATS_STORE.Flush()
    except access_control.UnauthorizedAccess:
      logging.info("Not writing aff4:/stats_store due to lack of permissions.")

    # A StatsStoreWorker is only needed when StatsStore.process_id is set
    # in the config.
    stats_process_id = config.CONFIG["StatsStore.process_id"]
    if stats_process_id:
      StatsStoreWorker(STATS_STORE, stats_process_id).RunAsync()
|
en
| 0.812512
|
#!/usr/bin/env python Storage implementation for gathered statistics. Statistics collected by StatsCollector (see lib/stats.py) is stored in AFF4 space. Statistics data for different parts of the system is separated by process ids. For example, for the frontend, process id may be "frontend", for worker - "worker", etc. On the AFF4 statistics data is stored under aff4:/stats_store. aff4:/stats_store itself is a URN of a StatsStore object that can be used for querying stored data and saving new stats. For every process id, aff4:/stats_store/<process id> object of type StatsStoreProcessData is created. This object stores metadata of all the metrics in the METRICS_METADATA field. All the collected statistics data are written as aff4:stats_store/<metric name> attributes to the aff4:/stats_store/<process id> row. This way we can easily and efficiently query statistics data for a given set of metrics for a given process id for a given time range. Metrics metadata are stored separately from the values themselves for efficiency reasons. Metadata objects are created when metrics are registered. They carry extensive information about the metrics, like metric name and docstring, metric type, etc. This information does not change (unless changes GRR's source code changes) and so it doesn't make sense to duplicate it every time we write a new set of statistics data to the datastore. Therefore metadata for all the metrics is stored in StatsStoreProcessData.METRICS_METADATA. Metrics' values themselves are stored as datastore row attributes. Statistics is written to the data store by StatsStoreWorker. It periodically fetches values for all the metrics and writes them to corresponding object on AFF4. Stores stats data for a particular process. Schema for StatsStoreProcessData. Deletes all stats in the given time range. Implementation of the long-term storage of collected stats data. This class allows to write current stats data to the data store, read and delete them. 
StatsStore uses data_store to store the data. All historical stats data are stored in a single data store subject per process. By process we mean, for example: "admin UI", "worker #1", "worker #3", etc. Stats data are stored as subject's attributes. Writes current stats values to the data store with a given timestamp. List process ids that were used when saving data to stats store. Reads metadata of stored values for the given process. Reads metadata of stored values for multiple given processes. Reads stats values from the data store for the current process. Reads historical data for multiple process ids at once. Deletes all stats in the given time range. Query class used to results from StatsStore.ReadStats/MultiReadStats. NOTE: this class is mutable. Although it's designed with call-chaining in mind, you have to create new query object for every new query. I.e. - this *will not* work: query = stats_store.StatsStoreDataQuery(stats_data) counter1 = query.In("pid1").In("counter").SeriesCount() counter2 = query.In("pidw").In("counter").SeriesCount() But this *will* work: query = stats_store.StatsStoreDataQuery(stats_data) counter1 = query.In("pid1").In("counter").SeriesCount() query = stats_store.StatsStoreDataQuery(stats_data) counter2 = query.In("pidw").In("counter").SeriesCount() Build time series from StatsStore data. Return single timeseries.Timeseries built by this query. Narrow query's scope. Get all values nested in the given dictionaries. Args: dicts: List of dictionaries to go through. Returns: ([nested values], status) where status is True if nested values are dictionaries and False otherwise. Raises: RuntimeError: if some nested values are dictionaries and some are not. Use all metrics in the current scope. Fixes the time series so that it does not decrement. Resample the query with given sampling interval. Only use data points withing given time range. Assume metrics in this query are plain values. Assume metrics in this query are distributions. 
Use their sums. Assume metrics in this query are distributions. Use their counts. Aggregate multiple time series into one by summing them. Aggregate multiple time series into one by calculating mean value. Return number of time series the query was narrowed to. Apply rate function to all time series in this query. Scale value in all time series in this query. Calculate mean value of a single time serie in this query. # Global StatsStore object StatsStoreWorker periodically dumps stats data into the stats store. # pylint: disable=broad-except # Maximum time we keep stats store data is three days. # pylint: disable=broad-except Hook that inits global STATS_STORE object and stats store worker. Initializes StatsStore and StatsStoreWorker. # SetUID is required to create and write to aff4:/stats_store # We don't need StatsStoreWorker if there's no StatsStore.process_id in # the config.
| 2.172297
| 2
|
niapy/algorithms/other/hc.py
|
altaregos/NiaPy
| 202
|
6627719
|
<reponame>altaregos/NiaPy<gh_stars>100-1000
# encoding=utf8
import logging
from niapy.algorithms.algorithm import Algorithm
logging.basicConfig()
logger = logging.getLogger('niapy.algorithms.other')
logger.setLevel('INFO')
__all__ = ['HillClimbAlgorithm']
def neighborhood(x, delta, task, rng):
    r"""Sample a random Gaussian neighbour of a point.

    Args:
        x (numpy.ndarray): Point.
        delta (float): Standard deviation.
        task (Task): Optimization task.
        rng (numpy.random.Generator): Random generator.

    Returns:
        Tuple[numpy.ndarray, float]:
            1. New solution.
            2. New solutions function/fitness value.

    """
    candidate = task.repair(x + rng.normal(0, delta, task.dimension), rng)
    return candidate, task.eval(candidate)
class HillClimbAlgorithm(Algorithm):
    r"""Implementation of iterative hill climbing algorithm.

    Algorithm:
        Hill Climbing Algorithm

    Date:
        2018

    Authors:
        <NAME>

    License:
        MIT

    Reference URL:

    Reference paper:

    See Also:
        * :class:`niapy.algorithms.Algorithm`

    Attributes:
        delta (float): Change for searching in neighborhood.
        neighborhood_function (Callable[numpy.ndarray, float, Task], Tuple[numpy.ndarray, float]]): Function for getting neighbours.

    """

    Name = ['HillClimbAlgorithm', 'HC']

    @staticmethod
    def info():
        r"""Get basic information about the algorithm.

        Returns:
            str: Basic information.

        See Also:
            :func:`niapy.algorithms.algorithm.Algorithm.info`

        """
        return r"""TODO"""

    def __init__(self, delta=0.5, neighborhood_function=neighborhood, *args, **kwargs):
        """Initialize HillClimbAlgorithm.

        Args:
            delta (Optional[float]): Change for searching in neighborhood.
            neighborhood_function (Optional[Callable[numpy.ndarray, float, Task], Tuple[numpy.ndarray, float]]]): Function for getting neighbours.

        """
        # Hill climbing maintains a single solution; any caller-supplied
        # population_size is intentionally discarded.
        kwargs.pop('population_size', None)
        super().__init__(1, *args, **kwargs)
        self.delta = delta
        self.neighborhood_function = neighborhood_function

    def set_parameters(self, delta=0.5, neighborhood_function=neighborhood, **kwargs):
        r"""Set the algorithm parameters/arguments.

        Args:
            delta (Optional[float]): Change for searching in neighborhood.
            neighborhood_function (Optional[Callable[numpy.ndarray, float, Task], Tuple[numpy.ndarray, float]]]): Function for getting neighbours.

        """
        kwargs.pop('population_size', None)
        super().set_parameters(population_size=1, **kwargs)
        self.delta = delta
        self.neighborhood_function = neighborhood_function

    def get_parameters(self):
        """Get parameters of the algorithm.

        Returns:
            Dict[str, Any]: Algorithm parameters.

        """
        d = Algorithm.get_parameters(self)
        d.update({
            'delta': self.delta,
            'neighborhood_function': self.neighborhood_function
        })
        return d

    def init_population(self, task):
        r"""Initialize stating point.

        Args:
            task (Task): Optimization task.

        Returns:
            Tuple[numpy.ndarray, float, Dict[str, Any]]:
                1. New individual.
                2. New individual function/fitness value.
                3. Additional arguments.

        """
        x = task.lower + self.random(task.dimension) * task.range
        return x, task.eval(x), {}

    def run_iteration(self, task, x, fx, best_x, best_fitness, **params):
        r"""Core function of HillClimbAlgorithm algorithm.

        Args:
            task (Task): Optimization task.
            x (numpy.ndarray): Current solution.
            fx (float): Current solutions fitness/function value.
            best_x (numpy.ndarray): Global best solution.
            best_fitness (float): Global best solutions function/fitness value.
            **params (Dict[str, Any]): Additional arguments.

        Returns:
            Tuple[numpy.ndarray, float, numpy.ndarray, float, Dict[str, Any]]:
                1. New solution.
                2. New solutions function/fitness value.
                3. Global best solution.
                4. Global best solutions function/fitness value.
                5. Additional arguments.

        """
        local_optimum = False
        xn = task.lower + task.range * self.random(task.dimension)
        xn_f = task.eval(xn)
        while not local_optimum:
            # NOTE(review): neighbours are sampled around the incoming x,
            # not the current candidate xn — confirm this is intended.
            yn, yn_f = self.neighborhood_function(x, self.delta, task, rng=self.rng)
            if yn_f < xn_f:
                xn, xn_f = yn, yn_f
            else:
                local_optimum = True
            # Bug fix: the original `lo = True or task.stopping_condition()`
            # short-circuited, so the stopping condition was never consulted
            # and this inner search could run past the evaluation budget.
            if task.stopping_condition():
                local_optimum = True
        best_x, best_fitness = self.get_best(xn, xn_f, best_x, best_fitness)
        return xn, xn_f, best_x, best_fitness, {}
|
# encoding=utf8
import logging
from niapy.algorithms.algorithm import Algorithm
logging.basicConfig()
logger = logging.getLogger('niapy.algorithms.other')
logger.setLevel('INFO')
__all__ = ['HillClimbAlgorithm']
def neighborhood(x, delta, task, rng):
    r"""Sample and evaluate one random neighbour of a point.

    A Gaussian step with standard deviation ``delta`` is added to every
    component of ``x``; the candidate is then repaired into the search
    space and evaluated on the task.

    Args:
        x (numpy.ndarray): Point to perturb.
        delta (float): Standard deviation of the Gaussian step.
        task (Task): Optimization task.
        rng (numpy.random.Generator): Random generator.

    Returns:
        Tuple[numpy.ndarray, float]:
            1. New solution.
            2. New solution's function/fitness value.

    """
    candidate = task.repair(x + rng.normal(0, delta, task.dimension), rng)
    return candidate, task.eval(candidate)
class HillClimbAlgorithm(Algorithm):
    r"""Implementation of iterative hill climbing algorithm.

    Each iteration restarts from a uniformly random point and then greedily
    accepts improving neighbours until the first non-improving one.

    Algorithm:
        Hill Climbing Algorithm

    Date:
        2018

    Authors:
        <NAME>

    License:
        MIT

    Reference URL:

    Reference paper:

    See Also:
        * :class:`niapy.algorithms.Algorithm`

    Attributes:
        delta (float): Change for searching in neighborhood.
        neighborhood_function (Callable[numpy.ndarray, float, Task], Tuple[numpy.ndarray, float]]): Function for getting neighbours.

    """

    Name = ['HillClimbAlgorithm', 'HC']

    @staticmethod
    def info():
        r"""Get basic information about the algorithm.

        Returns:
            str: Basic information.

        See Also:
            :func:`niapy.algorithms.algorithm.Algorithm.info`

        """
        return r"""TODO"""

    def __init__(self, delta=0.5, neighborhood_function=neighborhood, *args, **kwargs):
        """Initialize HillClimbAlgorithm.

        Args:
            * delta (Optional[float]): Change for searching in neighborhood.
            * neighborhood_function (Optional[Callable[numpy.ndarray, float, Task], Tuple[numpy.ndarray, float]]]): Function for getting neighbours.

        """
        # Hill climbing is a single-solution method: discard any caller-supplied
        # population size and force it to 1.
        kwargs.pop('population_size', None)
        super().__init__(1, *args, **kwargs)
        self.delta = delta
        self.neighborhood_function = neighborhood_function

    def set_parameters(self, delta=0.5, neighborhood_function=neighborhood, **kwargs):
        r"""Set the algorithm parameters/arguments.

        Args:
            * delta (Optional[float]): Change for searching in neighborhood.
            * neighborhood_function (Optional[Callable[numpy.ndarray, float, Task], Tuple[numpy.ndarray, float]]]): Function for getting neighbours.

        """
        # Keep the population fixed at one solution, mirroring __init__.
        kwargs.pop('population_size', None)
        super().set_parameters(population_size=1, **kwargs)
        self.delta = delta
        self.neighborhood_function = neighborhood_function

    def get_parameters(self):
        r"""Get the algorithm parameters.

        Returns:
            Dict[str, Any]: Base-class parameters plus ``delta`` and
            ``neighborhood_function``.

        """
        d = Algorithm.get_parameters(self)
        d.update({
            'delta': self.delta,
            'neighborhood_function': self.neighborhood_function
        })
        return d

    def init_population(self, task):
        r"""Initialize stating point.

        Args:
            task (Task): Optimization task.

        Returns:
            Tuple[numpy.ndarray, float, Dict[str, Any]]:
                1. New individual.
                2. New individual function/fitness value.
                3. Additional arguments.

        """
        # Uniform random point inside the task bounds.
        x = task.lower + self.random(task.dimension) * task.range
        return x, task.eval(x), {}

    def run_iteration(self, task, x, fx, best_x, best_fitness, **params):
        r"""Core function of HillClimbAlgorithm algorithm.

        Args:
            task (Task): Optimization task.
            x (numpy.ndarray): Current solution.
            fx (float): Current solutions fitness/function value.
            best_x (numpy.ndarray): Global best solution.
            best_fitness (float): Global best solutions function/fitness value.
            **params (Dict[str, Any]): Additional arguments.

        Returns:
            Tuple[numpy.ndarray, float, numpy.ndarray, float, Dict[str, Any]]:
                1. New solution.
                2. New solutions function/fitness value.
                3. Global best solution.
                4. Global best solutions function/fitness value.
                5. Additional arguments.

        """
        # Restart: fresh uniformly random point each iteration.
        lo, xn = False, task.lower + task.range * self.random(task.dimension)
        xn_f = task.eval(xn)
        while not lo:
            # NOTE(review): neighbours are sampled around the *incoming*
            # solution ``x``, not around the restart point ``xn`` — confirm
            # this is intended.
            yn, yn_f = self.neighborhood_function(x, self.delta, task, rng=self.rng)
            if yn_f < xn_f:
                xn, xn_f = yn, yn_f
            else:
                # ``True or ...`` short-circuits, so stopping_condition() is
                # never actually evaluated here; the climb always ends at the
                # first non-improving neighbour.
                lo = True or task.stopping_condition()
        best_x, best_fitness = self.get_best(xn, xn_f, best_x, best_fitness)
        return xn, xn_f, best_x, best_fitness, {}
|
en
| 0.566326
|
# encoding=utf8 Get neighbours of point. Args: x (numpy.ndarray): Point. delta (float): Standard deviation. task (Task): Optimization task. rng (numpy.random.Generator): Random generator. Returns: Tuple[numpy.ndarray, float]: 1. New solution. 2. New solutions function/fitness value. Implementation of iterative hill climbing algorithm. Algorithm: Hill Climbing Algorithm Date: 2018 Authors: <NAME> License: MIT Reference URL: Reference paper: See Also: * :class:`niapy.algorithms.Algorithm` Attributes: delta (float): Change for searching in neighborhood. neighborhood_function (Callable[numpy.ndarray, float, Task], Tuple[numpy.ndarray, float]]): Function for getting neighbours. Get basic information about the algorithm. Returns: str: Basic information. See Also: :func:`niapy.algorithms.algorithm.Algorithm.info` TODO Initialize HillClimbAlgorithm. Args: * delta (Optional[float]): Change for searching in neighborhood. * neighborhood_function (Optional[Callable[numpy.ndarray, float, Task], Tuple[numpy.ndarray, float]]]): Function for getting neighbours. Set the algorithm parameters/arguments. Args: * delta (Optional[float]): Change for searching in neighborhood. * neighborhood_function (Optional[Callable[numpy.ndarray, float, Task], Tuple[numpy.ndarray, float]]]): Function for getting neighbours. Initialize stating point. Args: task (Task): Optimization task. Returns: Tuple[numpy.ndarray, float, Dict[str, Any]]: 1. New individual. 2. New individual function/fitness value. 3. Additional arguments. Core function of HillClimbAlgorithm algorithm. Args: task (Task): Optimization task. x (numpy.ndarray): Current solution. fx (float): Current solutions fitness/function value. best_x (numpy.ndarray): Global best solution. best_fitness (float): Global best solutions function/fitness value. **params (Dict[str, Any]): Additional arguments. Returns: Tuple[numpy.ndarray, float, numpy.ndarray, float, Dict[str, Any]]: 1. New solution. 2. New solutions function/fitness value. 3. 
Additional arguments.
| 2.750111
| 3
|
tests/test_ecwid_endpoint_customers.py
|
DanPalmz/pyecwid
| 3
|
6627720
|
import json
import os
#from pprint import pprint
from pyecwid import Ecwid, EcwidMock
import pytest
import pytest_dependency
import pytest_dotenv
import time
API_TOKEN = os.getenv("API_TOKEN")
API_STORE = os.getenv("API_STORE")
SLEEP_TIME = 5
@pytest.fixture
def dummy_customer():
    # Sample customer payload used by these live-API tests.
    with open('./tests/samplejson/customer.json') as json_file:
        return json.load(json_file)


@pytest.fixture
def dummy_customer_id(dummy_customer, live_ecwid):
    # Look up the dummy customer in the live store by e-mail.
    # Implicitly returns None when no matching customer exists.
    dummy_customer_email = dummy_customer['email']
    customer_results = live_ecwid.customers.get_by_email(dummy_customer_email)
    if len(customer_results) > 0:
        return customer_results[0]['id']


@pytest.fixture
def live_ecwid():
    # Ecwid client bound to the credentials taken from the environment.
    return Ecwid(API_TOKEN, API_STORE)


@pytest.mark.dependency()
def test_customers_retrieves_customers(live_ecwid):
    # Smoke test: the store must report at least one customer.
    result = live_ecwid.customers.get()
    assert len(result) >= 1


@pytest.mark.dependency(depends=["test_customers_retrieves_customers"])
def test_product_remove_dummy_data_if_necessary(live_ecwid, dummy_customer, dummy_customer_id):
    # Remove a dummy customer left over from a previous run, if present,
    # so the add test below starts from a clean state.
    if dummy_customer_id:
        result = live_ecwid.customers.delete(dummy_customer_id)
        assert result == 1, "1 item removed"
    else:
        pass


@pytest.mark.dependency(depends=["test_product_remove_dummy_data_if_necessary"])
def test_customers_add_dummy_customer(live_ecwid, dummy_customer):
    result = live_ecwid.customers.add(dummy_customer)
    # Sleep: Server wasn't updating quick enough for tests following this one..
    time.sleep(SLEEP_TIME)
    print(result)
    assert isinstance(result, int)
|
import json
import os
#from pprint import pprint
from pyecwid import Ecwid, EcwidMock
import pytest
import pytest_dependency
import pytest_dotenv
import time
API_TOKEN = os.getenv("API_TOKEN")
API_STORE = os.getenv("API_STORE")
SLEEP_TIME = 5
@pytest.fixture
def dummy_customer():
    # Sample customer payload used by these live-API tests.
    with open('./tests/samplejson/customer.json') as json_file:
        return json.load(json_file)


@pytest.fixture
def dummy_customer_id(dummy_customer, live_ecwid):
    # Look up the dummy customer in the live store by e-mail.
    # Implicitly returns None when no matching customer exists.
    dummy_customer_email = dummy_customer['email']
    customer_results = live_ecwid.customers.get_by_email(dummy_customer_email)
    if len(customer_results) > 0:
        return customer_results[0]['id']


@pytest.fixture
def live_ecwid():
    # Ecwid client bound to the credentials taken from the environment.
    return Ecwid(API_TOKEN, API_STORE)


@pytest.mark.dependency()
def test_customers_retrieves_customers(live_ecwid):
    # Smoke test: the store must report at least one customer.
    result = live_ecwid.customers.get()
    assert len(result) >= 1


@pytest.mark.dependency(depends=["test_customers_retrieves_customers"])
def test_product_remove_dummy_data_if_necessary(live_ecwid, dummy_customer, dummy_customer_id):
    # Remove a dummy customer left over from a previous run, if present,
    # so the add test below starts from a clean state.
    if dummy_customer_id:
        result = live_ecwid.customers.delete(dummy_customer_id)
        assert result == 1, "1 item removed"
    else:
        pass


@pytest.mark.dependency(depends=["test_product_remove_dummy_data_if_necessary"])
def test_customers_add_dummy_customer(live_ecwid, dummy_customer):
    result = live_ecwid.customers.add(dummy_customer)
    # Sleep: Server wasn't updating quick enough for tests following this one..
    time.sleep(SLEEP_TIME)
    print(result)
    assert isinstance(result, int)
|
en
| 0.858009
|
#from pprint import pprint # Sleep: Server wasn't updating quick enough for tests following this one..
| 2.055967
| 2
|
src/zope/i18n/gettextmessagecatalog.py
|
Shoobx/zope.i18n
| 0
|
6627721
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""A simple implementation of a Message Catalog.
"""
import sys
from gettext import GNUTranslations
from zope.i18n.interfaces import IGlobalMessageCatalog
from zope.interface import implementer
PY2 = sys.version_info[0] == 2
class _KeyErrorRaisingFallback(object):
    # Fallback catalog installed behind the real GNUTranslations catalog:
    # every lookup raises KeyError, so a missing message id surfaces as an
    # exception instead of being echoed back by gettext's default fallback.

    def ugettext(self, message):
        """Always raise ``KeyError`` for *message* (nothing is translated)."""
        raise KeyError(message)

    # Python 3 name for the same operation.
    gettext = ugettext
@implementer(IGlobalMessageCatalog)
class GettextMessageCatalog(object):
    """A message catalog based on GNU gettext and Python's gettext module."""

    def __init__(self, language, domain, path_to_file):
        """Initialize the message catalog.

        :param language: Language code this catalog provides (e.g. ``'de'``).
        :param domain: Translation domain the catalog belongs to.
        :param path_to_file: Path to the compiled ``.mo`` file to load.
        """
        self.language = language
        self.domain = domain
        self._path_to_file = path_to_file
        self.reload()
        # Make unknown message ids raise KeyError instead of being echoed
        # back, so queryMessage() can detect missing translations.
        self._catalog.add_fallback(_KeyErrorRaisingFallback())
        if PY2:
            self._gettext = self._catalog.ugettext
        else:
            self._gettext = self._catalog.gettext

    def reload(self):
        'See IMessageCatalog'
        # Use a context manager so the file handle is always closed, even if
        # GNUTranslations raises while parsing the .mo file (the original
        # try/finally is equivalent but more verbose).
        with open(self._path_to_file, 'rb') as fp:
            self._catalog = GNUTranslations(fp)

    def getMessage(self, id):
        'See IMessageCatalog'
        # Raises KeyError (via the fallback) when the id is not in the catalog.
        return self._gettext(id)

    def queryMessage(self, id, default=None):
        'See IMessageCatalog'
        try:
            return self._gettext(id)
        except KeyError:
            return default

    def getIdentifier(self):
        'See IMessageCatalog'
        return self._path_to_file
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""A simple implementation of a Message Catalog.
"""
import sys
from gettext import GNUTranslations
from zope.i18n.interfaces import IGlobalMessageCatalog
from zope.interface import implementer
PY2 = sys.version_info[0] == 2
class _KeyErrorRaisingFallback(object):
    # Fallback catalog installed behind the real GNUTranslations catalog:
    # every lookup raises KeyError, so a missing message id surfaces as an
    # exception instead of being echoed back by gettext's default fallback.

    def ugettext(self, message):
        """Always raise ``KeyError`` for *message* (nothing is translated)."""
        raise KeyError(message)

    # Python 3 name for the same operation.
    gettext = ugettext
@implementer(IGlobalMessageCatalog)
class GettextMessageCatalog(object):
    """A message catalog based on GNU gettext and Python's gettext module."""

    def __init__(self, language, domain, path_to_file):
        """Initialize the message catalog.

        :param language: Language code this catalog provides (e.g. ``'de'``).
        :param domain: Translation domain the catalog belongs to.
        :param path_to_file: Path to the compiled ``.mo`` file to load.
        """
        self.language = language
        self.domain = domain
        self._path_to_file = path_to_file
        self.reload()
        # Make unknown message ids raise KeyError instead of being echoed
        # back, so queryMessage() can detect missing translations.
        self._catalog.add_fallback(_KeyErrorRaisingFallback())
        if PY2:
            self._gettext = self._catalog.ugettext
        else:
            self._gettext = self._catalog.gettext

    def reload(self):
        'See IMessageCatalog'
        # Use a context manager so the file handle is always closed, even if
        # GNUTranslations raises while parsing the .mo file (the original
        # try/finally is equivalent but more verbose).
        with open(self._path_to_file, 'rb') as fp:
            self._catalog = GNUTranslations(fp)

    def getMessage(self, id):
        'See IMessageCatalog'
        # Raises KeyError (via the fallback) when the id is not in the catalog.
        return self._gettext(id)

    def queryMessage(self, id, default=None):
        'See IMessageCatalog'
        try:
            return self._gettext(id)
        except KeyError:
            return default

    def getIdentifier(self):
        'See IMessageCatalog'
        return self._path_to_file
|
en
| 0.422573
|
############################################################################## # # Copyright (c) 2001, 2002 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## A simple implementation of a Message Catalog. A message catalog based on GNU gettext and Python's gettext module. Initialize the message catalog
| 2.187099
| 2
|
snafu/benchmarks/systemd_analyze/systemd_analyze.py
|
dagrayvid/benchmark-wrapper
| 14
|
6627722
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""sample_benchmark hosts and export results."""
import datetime
import json
import logging
import os
import re
import subprocess
from typing import List
import distro
from snafu.benchmarks import Benchmark, BenchmarkResult
logger = logging.getLogger("snafu")
class systemd_analyze(Benchmark):  # pylint: disable=invalid-name
    """Wrapper for the systemd-analyze Test benchmark.

    cd /<working dir>/snafu
    ./run_snafu.py --tool systemd_analyze --create-archive
    """

    tool_name = "systemd_analyze"
    # NOTE(review): tc_values/td_values are *class* attributes mutated via
    # insert() in instance methods, so values accumulate across repeated
    # setup()/collect() calls and across instances — confirm one-shot use
    # per process is intended.
    tc_values: List[str] = []
    td_values: List[str] = []
    # Test configuration lists
    tc_list = [
        "kversion",
        "cpumodel",
        "numcores",
        "maxMHz",
        "systemtgt",
    ]  # pylint: disable=attribute-defined-outside-init
    # NOTE(review): re-binds the annotated tc_values defined above.
    tc_values = []  # pylint: disable=attribute-defined-outside-init
    # Test data lists
    td_list = [
        "firmware",
        "loader",
        "kernel",
        "initrd",
        "userspace",
    ]  # pylint: disable=attribute-defined-outside-init
    td_values = []  # pylint: disable=attribute-defined-outside-init
    # Current date timestamp (captured once at class-definition time)
    curtime = datetime.datetime.now().strftime(
        "%Y-%m-%dT%H:%M:%S.%fZ"
    )  # pylint: disable=attribute-defined-outside-init
    short_curtime = datetime.datetime.now().strftime(
        "%Y-%m-%d"
    )  # pylint: disable=attribute-defined-outside-init

    def setup(self):  # pylint: disable=too-many-branches
        """
        No arguments at this time.
        args = (
            ConfigArgument(
                "--samples",
                help="Number of samples to perform.",
                dest="samples",
                env_var="SAMPLES",
                default=1,
                type=int,
            )
        )
        """
        self.config.parse_args()
        """Setup the systemd-analyze Test Benchmark."""  # pylint: disable=pointless-string-statement
        # Get test_config values
        #
        # kernel version
        # kversion_out = platform.release()
        kversion_out = subprocess.run(["uname", "-r"], stdout=subprocess.PIPE, check=False)
        kversion_out = kversion_out.stdout.decode("utf-8")
        self.tc_values.insert(0, kversion_out.strip())
        # cpu test config values
        cpuinfo_out = subprocess.run(["lscpu"], stdout=subprocess.PIPE, check=False)
        cpuinfo_out = cpuinfo_out.stdout.decode("utf-8")
        # cpu model
        # NOTE(review): each re.search below scans the *whole* lscpu output,
        # so the per-line loop only controls whether (and how many times) the
        # same first match is inserted.
        for line in cpuinfo_out.split("\n"):
            if "Model name:" in line:
                model = re.search("Model name.*:(.*)", cpuinfo_out).group(1)
                # Check for value
                if not model:
                    self.tc_values.insert(1, "NULL")
                else:
                    self.tc_values.insert(1, model.lstrip())
        # number of cores
        for line in cpuinfo_out.split("\n"):
            if "CPU(s):" in line:
                numcores = re.search(r"CPU\(s\):(.*)", cpuinfo_out).group(1)
                # Check for value
                if not numcores:
                    self.tc_values.insert(2, "NULL")
                else:
                    self.tc_values.insert(2, numcores.strip())
        # CPU max MHz
        for line in cpuinfo_out.split("\n"):
            if "CPU max MHz:" in line:
                maxmhz = re.search("CPU max MHz:(.*)", cpuinfo_out).group(1)
                # Check for value
                if not maxmhz:
                    self.tc_values.insert(3, "NULL")
                else:
                    self.tc_values.insert(3, maxmhz.strip())
        # systemctl target
        sysctl_out = subprocess.run(["systemctl", "get-default"], stdout=subprocess.PIPE, check=False)
        sysctl_out = sysctl_out.stdout.decode("utf-8")
        # Check for value
        if not sysctl_out:
            self.tc_values.insert(4, "NULL")
        else:
            self.tc_values.insert(4, sysctl_out.strip())
        # Assemble the benchmark config dict keyed by tc_list names.
        self.sa_config = {}  # pylint: disable=attribute-defined-outside-init
        self.sa_config["test_config"] = {}
        for index in range(len(self.tc_list)):  # pylint: disable=consider-using-enumerate
            self.sa_config["test_config"][self.tc_list[index]] = self.tc_values[index]
        self.sa_config["test_config"]["distro"] = distro.info(True)
        distro_name = distro.name(pretty=True)
        distro_name = distro_name.replace(" ", "_")
        self.sa_config["test_config"]["distro"]["name"] = distro_name
        if "clustername" in os.environ:
            clustername = os.environ["clustername"]
            self.sa_config["test_config"]["platform"] = clustername + "_" + distro_name + "_" + self.short_curtime
        return True

    def collect(self):
        """Run the systemd_analyze Test Benchmark and collect results.

        Yields one "summary" result with the boot-phase timings, then one
        "blame" result per systemd unit.
        """
        ##########################
        # Exec systemd-analyze cmd
        sysd_out = subprocess.run(["systemd-analyze", "time"], stdout=subprocess.PIPE, check=False)
        sysd_out = sysd_out.stdout.decode("utf-8")
        # Parse cmd output and populate json dict
        for output_str in self.td_list:
            index = self.td_list.index(output_str)
            result = re.findall(r"(\d+\.\d+)s\s\(" + output_str + r"\)", sysd_out)
            if not result:
                self.td_values.insert(index, "")
            else:
                logger.debug("%s", result[0])
                self.td_values.insert(index, float(result[0]))
        ####################################
        # define json struct for data points
        data_point = {"date": self.curtime, "test_data": {}}
        for index in range(len(self.td_list)):  # pylint: disable=consider-using-enumerate
            data_point["test_data"][self.td_list[index]] = self.td_values[index]
        result: BenchmarkResult = self.create_new_result(
            data=data_point,
            config=self.sa_config,
            tag="summary",
        )
        logger.debug(json.dumps(result.to_jsonable(), indent=4))
        yield result
        blame_list = self.get_sa_blame()
        for blame_data_point in blame_list:
            result: BenchmarkResult = self.create_new_result(
                data=blame_data_point,
                config=self.sa_config,
                tag="blame",
            )
            logger.debug(json.dumps(result.to_jsonable(), indent=4))
            yield result

    # blame_cmd = "systemd-analyze blame"
    # critical-chain_cmd + "systemd-analyze critical-chain"
    def get_sa_blame(self):  # pylint: disable=missing-function-docstring
        # Parse `systemd-analyze blame` output into per-unit data points
        # of the form {"date": ..., "test_data": {"name", "start_time"}}.
        blame_list = []
        # Exec systemd-analyze cmd
        sysd_out = subprocess.run(["systemd-analyze", "blame"], stdout=subprocess.PIPE, check=False)
        sysd_out = sysd_out.stdout.decode("utf-8")
        # Parse cmd output and populate json dict
        for line in sysd_out.split("\n"):
            words = re.split(r"\s", line)
            service = words[-1]
            minutes = re.search(r"(\d+)min", line)
            seconds = re.search(r"(\d+\.\d+)s", line)
            millisec = re.search(r"(\d+)ms", line)
            if minutes and seconds:
                min_var = minutes[0].strip("min")
                sec = seconds[0].strip("s")
                etime = str((int(min_var) * 60) + float(sec))
            elif seconds and not minutes:
                etime = seconds[0].strip("s")
            elif millisec:
                ms_var = millisec[0].strip("ms")
                etime = str((int(ms_var) / 1000) % 60)
            # NOTE(review): if no time pattern matches, ``etime`` keeps its
            # value from the previous line (or is unbound on the very first
            # such line) — confirm blame lines always carry a time.
            if service and etime:
                data_point = {"date": self.curtime, "test_data": {}}
                # print(f'{service}: {etime}') # DEBUG
                data_point["test_data"]["name"] = service
                data_point["test_data"]["start_time"] = float(etime)
                blame_list.append(data_point)
        return blame_list

    def cleanup(self):
        """Cleanup the systemd-analyze Test Benchmark."""
        return True
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""sample_benchmark hosts and export results."""
import datetime
import json
import logging
import os
import re
import subprocess
from typing import List
import distro
from snafu.benchmarks import Benchmark, BenchmarkResult
logger = logging.getLogger("snafu")
class systemd_analyze(Benchmark):  # pylint: disable=invalid-name
    """Wrapper for the systemd-analyze Test benchmark.

    cd /<working dir>/snafu
    ./run_snafu.py --tool systemd_analyze --create-archive
    """

    tool_name = "systemd_analyze"
    # NOTE(review): tc_values/td_values are *class* attributes mutated via
    # insert() in instance methods, so values accumulate across repeated
    # setup()/collect() calls and across instances — confirm one-shot use
    # per process is intended.
    tc_values: List[str] = []
    td_values: List[str] = []
    # Test configuration lists
    tc_list = [
        "kversion",
        "cpumodel",
        "numcores",
        "maxMHz",
        "systemtgt",
    ]  # pylint: disable=attribute-defined-outside-init
    # NOTE(review): re-binds the annotated tc_values defined above.
    tc_values = []  # pylint: disable=attribute-defined-outside-init
    # Test data lists
    td_list = [
        "firmware",
        "loader",
        "kernel",
        "initrd",
        "userspace",
    ]  # pylint: disable=attribute-defined-outside-init
    td_values = []  # pylint: disable=attribute-defined-outside-init
    # Current date timestamp (captured once at class-definition time)
    curtime = datetime.datetime.now().strftime(
        "%Y-%m-%dT%H:%M:%S.%fZ"
    )  # pylint: disable=attribute-defined-outside-init
    short_curtime = datetime.datetime.now().strftime(
        "%Y-%m-%d"
    )  # pylint: disable=attribute-defined-outside-init

    def setup(self):  # pylint: disable=too-many-branches
        """
        No arguments at this time.
        args = (
            ConfigArgument(
                "--samples",
                help="Number of samples to perform.",
                dest="samples",
                env_var="SAMPLES",
                default=1,
                type=int,
            )
        )
        """
        self.config.parse_args()
        """Setup the systemd-analyze Test Benchmark."""  # pylint: disable=pointless-string-statement
        # Get test_config values
        #
        # kernel version
        # kversion_out = platform.release()
        kversion_out = subprocess.run(["uname", "-r"], stdout=subprocess.PIPE, check=False)
        kversion_out = kversion_out.stdout.decode("utf-8")
        self.tc_values.insert(0, kversion_out.strip())
        # cpu test config values
        cpuinfo_out = subprocess.run(["lscpu"], stdout=subprocess.PIPE, check=False)
        cpuinfo_out = cpuinfo_out.stdout.decode("utf-8")
        # cpu model
        # NOTE(review): each re.search below scans the *whole* lscpu output,
        # so the per-line loop only controls whether (and how many times) the
        # same first match is inserted.
        for line in cpuinfo_out.split("\n"):
            if "Model name:" in line:
                model = re.search("Model name.*:(.*)", cpuinfo_out).group(1)
                # Check for value
                if not model:
                    self.tc_values.insert(1, "NULL")
                else:
                    self.tc_values.insert(1, model.lstrip())
        # number of cores
        for line in cpuinfo_out.split("\n"):
            if "CPU(s):" in line:
                numcores = re.search(r"CPU\(s\):(.*)", cpuinfo_out).group(1)
                # Check for value
                if not numcores:
                    self.tc_values.insert(2, "NULL")
                else:
                    self.tc_values.insert(2, numcores.strip())
        # CPU max MHz
        for line in cpuinfo_out.split("\n"):
            if "CPU max MHz:" in line:
                maxmhz = re.search("CPU max MHz:(.*)", cpuinfo_out).group(1)
                # Check for value
                if not maxmhz:
                    self.tc_values.insert(3, "NULL")
                else:
                    self.tc_values.insert(3, maxmhz.strip())
        # systemctl target
        sysctl_out = subprocess.run(["systemctl", "get-default"], stdout=subprocess.PIPE, check=False)
        sysctl_out = sysctl_out.stdout.decode("utf-8")
        # Check for value
        if not sysctl_out:
            self.tc_values.insert(4, "NULL")
        else:
            self.tc_values.insert(4, sysctl_out.strip())
        # Assemble the benchmark config dict keyed by tc_list names.
        self.sa_config = {}  # pylint: disable=attribute-defined-outside-init
        self.sa_config["test_config"] = {}
        for index in range(len(self.tc_list)):  # pylint: disable=consider-using-enumerate
            self.sa_config["test_config"][self.tc_list[index]] = self.tc_values[index]
        self.sa_config["test_config"]["distro"] = distro.info(True)
        distro_name = distro.name(pretty=True)
        distro_name = distro_name.replace(" ", "_")
        self.sa_config["test_config"]["distro"]["name"] = distro_name
        if "clustername" in os.environ:
            clustername = os.environ["clustername"]
            self.sa_config["test_config"]["platform"] = clustername + "_" + distro_name + "_" + self.short_curtime
        return True

    def collect(self):
        """Run the systemd_analyze Test Benchmark and collect results.

        Yields one "summary" result with the boot-phase timings, then one
        "blame" result per systemd unit.
        """
        ##########################
        # Exec systemd-analyze cmd
        sysd_out = subprocess.run(["systemd-analyze", "time"], stdout=subprocess.PIPE, check=False)
        sysd_out = sysd_out.stdout.decode("utf-8")
        # Parse cmd output and populate json dict
        for output_str in self.td_list:
            index = self.td_list.index(output_str)
            result = re.findall(r"(\d+\.\d+)s\s\(" + output_str + r"\)", sysd_out)
            if not result:
                self.td_values.insert(index, "")
            else:
                logger.debug("%s", result[0])
                self.td_values.insert(index, float(result[0]))
        ####################################
        # define json struct for data points
        data_point = {"date": self.curtime, "test_data": {}}
        for index in range(len(self.td_list)):  # pylint: disable=consider-using-enumerate
            data_point["test_data"][self.td_list[index]] = self.td_values[index]
        result: BenchmarkResult = self.create_new_result(
            data=data_point,
            config=self.sa_config,
            tag="summary",
        )
        logger.debug(json.dumps(result.to_jsonable(), indent=4))
        yield result
        blame_list = self.get_sa_blame()
        for blame_data_point in blame_list:
            result: BenchmarkResult = self.create_new_result(
                data=blame_data_point,
                config=self.sa_config,
                tag="blame",
            )
            logger.debug(json.dumps(result.to_jsonable(), indent=4))
            yield result

    # blame_cmd = "systemd-analyze blame"
    # critical-chain_cmd + "systemd-analyze critical-chain"
    def get_sa_blame(self):  # pylint: disable=missing-function-docstring
        # Parse `systemd-analyze blame` output into per-unit data points
        # of the form {"date": ..., "test_data": {"name", "start_time"}}.
        blame_list = []
        # Exec systemd-analyze cmd
        sysd_out = subprocess.run(["systemd-analyze", "blame"], stdout=subprocess.PIPE, check=False)
        sysd_out = sysd_out.stdout.decode("utf-8")
        # Parse cmd output and populate json dict
        for line in sysd_out.split("\n"):
            words = re.split(r"\s", line)
            service = words[-1]
            minutes = re.search(r"(\d+)min", line)
            seconds = re.search(r"(\d+\.\d+)s", line)
            millisec = re.search(r"(\d+)ms", line)
            if minutes and seconds:
                min_var = minutes[0].strip("min")
                sec = seconds[0].strip("s")
                etime = str((int(min_var) * 60) + float(sec))
            elif seconds and not minutes:
                etime = seconds[0].strip("s")
            elif millisec:
                ms_var = millisec[0].strip("ms")
                etime = str((int(ms_var) / 1000) % 60)
            # NOTE(review): if no time pattern matches, ``etime`` keeps its
            # value from the previous line (or is unbound on the very first
            # such line) — confirm blame lines always carry a time.
            if service and etime:
                data_point = {"date": self.curtime, "test_data": {}}
                # print(f'{service}: {etime}') # DEBUG
                data_point["test_data"]["name"] = service
                data_point["test_data"]["start_time"] = float(etime)
                blame_list.append(data_point)
        return blame_list

    def cleanup(self):
        """Cleanup the systemd-analyze Test Benchmark."""
        return True
|
en
| 0.472204
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*- sample_benchmark hosts and export results. # pylint: disable=invalid-name Wrapper for the systemd-analyze Test benchmark. cd /<working dir>/snafu ./run_snafu.py --tool systemd_analyze --create-archive # Test configuration lists # pylint: disable=attribute-defined-outside-init # pylint: disable=attribute-defined-outside-init # Test data lists # pylint: disable=attribute-defined-outside-init # pylint: disable=attribute-defined-outside-init # Current date timestamp # pylint: disable=attribute-defined-outside-init # pylint: disable=attribute-defined-outside-init # pylint: disable=too-many-branches No arguments at this time. args = ( ConfigArgument( "--samples", help="Number of samples to perform.", dest="samples", env_var="SAMPLES", default=1, type=int, ) ) Setup the systemd-analyze Test Benchmark. # pylint: disable=pointless-string-statement # Get test_config values # # kernel version # kversion_out = platform.release() # cpu test config values # cpu model # Check for value # number of cores # Check for value # CPU max MHz # Check for value # systemctl target # Check for value # pylint: disable=attribute-defined-outside-init # pylint: disable=consider-using-enumerate Run the systemd_analyze Test Benchmark and collect results. ########################## # Exec systemd-analyze cmd # Parse cmd output and populate json dict #################################### # define json struct for data points # pylint: disable=consider-using-enumerate # blame_cmd = "systemd-analyze blame" # critical-chain_cmd + "systemd-analyze critical-chain" # pylint: disable=missing-function-docstring # Exec systemd-analyze cmd # Parse cmd output and populate json dict # print(f'{service}: {etime}') # DEBUG Cleanup the systemd-analyze Test Benchmark.
| 2.11775
| 2
|
src/python_quickstart_client.py
|
BlackCowThrower/batch-python-quickstart
| 0
|
6627723
|
from __future__ import print_function
import datetime
import io
import os
import sys
import time
import config
# Py2/Py3 compatibility shim: on Python 2, rebind input() to raw_input()
# so it returns the typed string without eval'ing it (Python 3 semantics).
try:
    input = raw_input
except NameError:
    # Python 3: raw_input does not exist; builtin input() already behaves
    # as desired.
    pass
import azure.batch.batch_auth as batch_auth
import azure.batch.batch_service_client as batch
import azure.batch.models as batchmodels
import azure.storage.blob as azureblob
# Allow running the script from the repo root or from src/ so that
# config.py can be imported either way.
sys.path.append('.')
sys.path.append('..')
# Update the Batch and Storage account credential strings in config.py with values
# unique to your accounts. These are used when constructing connection strings
# for the Batch and Storage client objects.
def query_yes_no(question, default="yes"):
    """
    Prompts the user for yes/no input, displaying the specified question text.

    :param str question: The text of the prompt for input.
    :param str default: The default if the user hits <ENTER>. Acceptable values
        are 'yes', 'no', and None (None means there is no default and the
        prompt repeats until an explicit answer is given).
    :raises ValueError: If *default* is not 'yes', 'no', or None.
    :rtype: str
    :return: 'yes' or 'no'
    """
    valid = {'y': 'yes', 'n': 'no'}
    if default is None:
        prompt = ' [y/n] '
    elif default == 'yes':
        prompt = ' [Y/n] '
    elif default == 'no':
        prompt = ' [y/N] '
    else:
        raise ValueError("Invalid default answer: '{}'".format(default))

    # Keep asking until an unambiguous answer is given (or fall back to the
    # default on an empty response).  ``while True`` is the idiomatic form of
    # the original ``while 1``.
    while True:
        choice = input(question + prompt).lower()
        if default and not choice:
            return default
        try:
            # Only the first character matters: 'y', 'yes', 'yep' all match.
            return valid[choice[0]]
        except (KeyError, IndexError):
            print("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
def print_batch_exception(batch_exception):
    """
    Prints the contents of the specified Batch exception.

    Shows the top-level error message (when present) followed by any
    key/value detail pairs attached to the error.

    :param batch_exception: The Batch service exception to report.
    """
    separator = '-------------------------------------------'
    print(separator)
    print('Exception encountered:')
    error = batch_exception.error
    if error and error.message and error.message.value:
        print(error.message.value)
        if error.values:
            print()
            for mesg in error.values:
                print('{}:\t{}'.format(mesg.key, mesg.value))
    print(separator)
def upload_file_to_container(block_blob_client, container_name, file_path):
    """
    Upload a local file to an Azure Blob storage container.

    :param block_blob_client: A blob service client.
    :type block_blob_client: `azure.storage.blob.BlockBlobService`
    :param str container_name: The name of the Azure Blob storage container.
    :param str file_path: The local path to the file.
    :rtype: `azure.batch.models.ResourceFile`
    :return: A ResourceFile initialized with a SAS URL appropriate for Batch
        tasks.
    """
    blob_name = os.path.basename(file_path)
    print('Uploading file {} to container [{}]...'.format(
        file_path, container_name))
    block_blob_client.create_blob_from_path(
        container_name, blob_name, file_path)
    # Grant read access for one day so Batch tasks can download the blob.
    expiry = datetime.datetime.utcnow() + datetime.timedelta(days=1)
    sas_token = block_blob_client.generate_blob_shared_access_signature(
        container_name,
        blob_name,
        permission=azureblob.BlobPermissions.READ,
        expiry=expiry)
    sas_url = block_blob_client.make_blob_url(
        container_name, blob_name, sas_token=sas_token)
    return batchmodels.ResourceFile(http_url=sas_url, file_path=blob_name)
def get_container_sas_token(block_blob_client,
                            container_name, blob_permissions):
    """
    Obtain a shared access signature granting the specified permissions to
    the container.

    :param block_blob_client: A blob service client.
    :type block_blob_client: `azure.storage.blob.BlockBlobService`
    :param str container_name: The name of the Azure Blob storage container.
    :param BlobPermissions blob_permissions: Permissions to grant.
    :rtype: str
    :return: A SAS token granting the specified permissions to the container.
    """
    # No start time is given, so the signature becomes valid immediately and
    # expires two hours from now.
    expiry = datetime.datetime.utcnow() + datetime.timedelta(hours=2)
    return block_blob_client.generate_container_shared_access_signature(
        container_name,
        permission=blob_permissions,
        expiry=expiry)
def create_pool(batch_service_client, pool_id):
    """
    Create a pool of Ubuntu 18.04 compute nodes with the configured size.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str pool_id: An ID for the new pool.
    """
    print('Creating pool [{}]...'.format(pool_id))
    # The pool is built from an Azure VM Marketplace image. For details on
    # Linux pools see:
    # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
    image = batchmodels.ImageReference(
        publisher="Canonical",
        offer="UbuntuServer",
        sku="18.04-LTS",
        version="latest")
    vm_config = batchmodels.VirtualMachineConfiguration(
        image_reference=image,
        node_agent_sku_id="batch.node.ubuntu 18.04")
    new_pool = batch.models.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=vm_config,
        vm_size=config._POOL_VM_SIZE,
        target_dedicated_nodes=config._POOL_NODE_COUNT)
    batch_service_client.pool.add(new_pool)
def create_job(batch_service_client, job_id, pool_id):
    """
    Create a job with the specified ID, bound to the specified pool.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str job_id: The ID for the job.
    :param str pool_id: The ID for the pool.
    """
    print('Creating job [{}]...'.format(job_id))
    pool_info = batch.models.PoolInformation(pool_id=pool_id)
    batch_service_client.job.add(
        batch.models.JobAddParameter(id=job_id, pool_info=pool_info))
def add_tasks(batch_service_client, job_id, input_files, task_id, command):
    """
    Add a single task running *command* to the specified job.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str job_id: The ID of the job to which to add the task.
    :param list input_files: Resource files made available to the task.
    :param str task_id: Identifier for the new task.
    :param str command: Command line the task executes on the node.
    """
    print('Adding task to job [{}]...'.format(job_id))
    # Run the task elevated (admin, task scope) so setup scripts can install
    # packages on the node.
    identity = batchmodels.UserIdentity(
        auto_user=batchmodels.AutoUserSpecification(
            elevation_level=batchmodels.ElevationLevel.admin,
            scope=batchmodels.AutoUserScope.task))
    task = batch.models.TaskAddParameter(
        id=task_id,
        command_line=command,
        resource_files=input_files,
        user_identity=identity)
    batch_service_client.task.add_collection(job_id, [task])
def wait_for_tasks_to_complete(batch_service_client, job_id, timeout):
    """
    Return when all tasks in the specified job reach the Completed state.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str job_id: The id of the job whose tasks should be monitored.
    :param timedelta timeout: The duration to wait for task completion. If all
        tasks in the specified job do not reach Completed state within this
        time period, an exception will be raised.
    """
    deadline = datetime.datetime.now() + timeout
    print("Monitoring all tasks for 'Completed' state, timeout in {}..."
          .format(timeout), end='')
    while datetime.datetime.now() < deadline:
        print('.', end='')
        sys.stdout.flush()
        tasks = batch_service_client.task.list(job_id)
        # Done as soon as every task has reached the completed state.
        if all(task.state == batchmodels.TaskState.completed
               for task in tasks):
            print()
            return True
        time.sleep(1)
    print()
    raise RuntimeError("ERROR: Tasks did not reach 'Completed' state within "
                       "timeout period of " + str(timeout))
def print_task_output(batch_service_client, job_id, encoding="utf-8"):
    """Prints the stdout.txt file for each selected task in the job.

    :param batch_service_client: The batch client to use.
    :type batch_service_client: `batchserviceclient.BatchServiceClient`
    :param str job_id: The id of the job with task output files to print.
    :param str encoding: Encoding used to decode the task output stream.
    """
    print('Printing task output...')
    tasks = batch_service_client.task.list(job_id)
    for task in tasks:
        # Only dump output for the tasks currently of interest.
        if task.id in ["MC-n1000-control2"]:
            node_id = batch_service_client.task.get(
                job_id, task.id).node_info.node_id
            print("Task: {}".format(task.id))
            print("Node: {}".format(node_id))
            stream = batch_service_client.file.get_from_task(
                job_id, task.id, config._STANDARD_OUT_FILE_NAME)
            file_text = _read_stream_as_string(stream, encoding)
            # Print the decoded text directly: re-encoding it would print a
            # bytes repr (b'...') under Python 3 instead of the output itself.
            print(file_text)
def _read_stream_as_string(stream, encoding):
"""Read stream as string
:param stream: input stream generator
:param str encoding: The encoding of the file. The default is utf-8.
:return: The file content.
:rtype: str
"""
output = io.BytesIO()
try:
for data in stream:
output.write(data)
if encoding is None:
encoding = 'utf-8'
return output.getvalue().decode(encoding)
finally:
output.close()
raise RuntimeError('could not write data to stream or decode bytes')
if __name__ == '__main__':
    start_time = datetime.datetime.now().replace(microsecond=0)
    print('Sample start: {}'.format(start_time))
    print()
    # Create the blob client, for use in obtaining references to
    # blob storage containers and uploading files to containers.
    blob_client = azureblob.BlockBlobService(
        account_name=config._STORAGE_ACCOUNT_NAME,
        account_key=config._STORAGE_ACCOUNT_KEY)
    # Use the blob client to create the containers in Azure Storage if they
    # don't yet exist.
    input_container_name = 'akprj-files'
    blob_client.create_container(input_container_name, fail_on_exist=False)
    # The collection of data files that are to be processed by the tasks.
    # NOTE(review): sys.path[0] is the script's own directory — these input
    # files are expected to live next to this script.
    input_file_paths = [os.path.join(sys.path[0], '000D265253258402560A7EDE0E3A3559FADACA0A2A6FBA663A8F3CD2084931D7.apk'),
                        os.path.join(sys.path[0], 'androguard-1.9-old-master.zip'),
                        os.path.join(sys.path[0], 'createCFGsFromAPKs.py'),
                        os.path.join(sys.path[0], 'setup-akprj.sh')]
    # Upload the data files.
    input_files = [
        upload_file_to_container(blob_client, input_container_name, file_path)
        for file_path in input_file_paths]
    # Create a Batch service client. We'll now be interacting with the Batch
    # service in addition to Storage
    credentials = batch_auth.SharedKeyCredentials(config._BATCH_ACCOUNT_NAME,
                                                  config._BATCH_ACCOUNT_KEY)
    batch_client = batch.BatchServiceClient(
        credentials,
        batch_url=config._BATCH_ACCOUNT_URL)
    try:
        # Create the pool that will contain the compute nodes that will execute the
        # tasks.
        #create_pool(batch_client, config._POOL_ID)
        # Create the job that will run the tasks.
        #create_job(batch_client, config._JOB_ID, config._POOL_ID)
        # Add the tasks to the job.
        # NOTE(review): pool/job creation above is deliberately commented out;
        # this run assumes the pool and job already exist in the account.
        #add_tasks(batch_client, config._JOB_ID, input_files, "SG-n100-tolerance-0_1", "bash ./setup-gamma.sh 100 smallGrid AIN-Pacman-Tolerance-0_1")
        add_tasks(batch_client, config._JOB_ID, input_files, "ak-test-13", "bash ./setup-akprj.sh")
        #add_tasks(batch_client, config._JOB_ID, input_files, "MC-n1000-control", "bash ./setup.sh 1000 mediumClassic")
        # Pause execution until tasks reach Completed state.
        #wait_for_tasks_to_complete(batch_client,
        #                           config._JOB_ID,
        #                           datetime.timedelta(minutes=30))
        #print("  Success! All tasks reached the 'Completed' state within the "
        #      "specified timeout period.")
        # Print the stdout.txt and stderr.txt files for each task to the console
        #print_task_output(batch_client, config._JOB_ID)
    except batchmodels.BatchErrorException as err:
        print(err)
        #print_batch_exception(err)
        #raise
    # Clean up storage resources
    #print('Deleting container [{}]...'.format(input_container_name))
    #blob_client.delete_container(input_container_name)
    # Print out some timing info
    end_time = datetime.datetime.now().replace(microsecond=0)
    print()
    print('Sample end: {}'.format(end_time))
    print('Elapsed time: {}'.format(end_time - start_time))
    print()
    # Clean up Batch resources (if the user so chooses).
    # NOTE(review): interactive cleanup below is disabled by wrapping it in a
    # string literal; remove the triple quotes to re-enable it.
    """if query_yes_no('Delete job?') == 'yes':
        batch_client.job.delete(config._JOB_ID)
    if query_yes_no('Delete pool?') == 'yes':
        batch_client.pool.delete(config._POOL_ID)
    print()
    input('Press ENTER to exit...')"""
|
from __future__ import print_function
import datetime
import io
import os
import sys
import time
import config
try:
input = raw_input
except NameError:
pass
import azure.batch.batch_auth as batch_auth
import azure.batch.batch_service_client as batch
import azure.batch.models as batchmodels
import azure.storage.blob as azureblob
sys.path.append('.')
sys.path.append('..')
# Update the Batch and Storage account credential strings in config.py with values
# unique to your accounts. These are used when constructing connection strings
# for the Batch and Storage client objects.
def query_yes_no(question, default="yes"):
    """
    Prompt the user with *question* and return 'yes' or 'no'.

    :param str question: The text of the prompt for input.
    :param str default: Answer assumed when the user just hits <ENTER>.
        Acceptable values are 'yes', 'no', and None (meaning no default).
    :rtype: str
    :return: 'yes' or 'no'
    """
    answers = {'y': 'yes', 'n': 'no'}
    prompts = {None: ' [y/n] ', 'yes': ' [Y/n] ', 'no': ' [y/N] '}
    if default not in prompts:
        raise ValueError("Invalid default answer: '{}'".format(default))
    prompt = prompts[default]
    while True:
        choice = input(question + prompt).lower()
        if default and not choice:
            return default
        try:
            # Only the first character matters: 'y', 'yes', 'n', 'no' all work.
            return answers[choice[0]]
        except (KeyError, IndexError):
            print("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
def print_batch_exception(batch_exception):
    """
    Print the contents of the specified Batch exception to stdout.

    :param batch_exception: The BatchErrorException to display.
    """
    separator = '-------------------------------------------'
    print(separator)
    print('Exception encountered:')
    error = batch_exception.error
    if error and error.message and error.message.value:
        print(error.message.value)
        if error.values:
            print()
            for detail in error.values:
                print('{}:\t{}'.format(detail.key, detail.value))
    print(separator)
def upload_file_to_container(block_blob_client, container_name, file_path):
    """
    Upload a local file to an Azure Blob storage container.

    :param block_blob_client: A blob service client.
    :type block_blob_client: `azure.storage.blob.BlockBlobService`
    :param str container_name: The name of the Azure Blob storage container.
    :param str file_path: The local path to the file.
    :rtype: `azure.batch.models.ResourceFile`
    :return: A ResourceFile initialized with a SAS URL appropriate for Batch
        tasks.
    """
    blob_name = os.path.basename(file_path)
    print('Uploading file {} to container [{}]...'.format(
        file_path, container_name))
    block_blob_client.create_blob_from_path(
        container_name, blob_name, file_path)
    # Grant read access for one day so Batch tasks can download the blob.
    expiry = datetime.datetime.utcnow() + datetime.timedelta(days=1)
    sas_token = block_blob_client.generate_blob_shared_access_signature(
        container_name,
        blob_name,
        permission=azureblob.BlobPermissions.READ,
        expiry=expiry)
    sas_url = block_blob_client.make_blob_url(
        container_name, blob_name, sas_token=sas_token)
    return batchmodels.ResourceFile(http_url=sas_url, file_path=blob_name)
def get_container_sas_token(block_blob_client,
                            container_name, blob_permissions):
    """
    Obtain a shared access signature granting the specified permissions to
    the container.

    :param block_blob_client: A blob service client.
    :type block_blob_client: `azure.storage.blob.BlockBlobService`
    :param str container_name: The name of the Azure Blob storage container.
    :param BlobPermissions blob_permissions: Permissions to grant.
    :rtype: str
    :return: A SAS token granting the specified permissions to the container.
    """
    # No start time is given, so the signature becomes valid immediately and
    # expires two hours from now.
    expiry = datetime.datetime.utcnow() + datetime.timedelta(hours=2)
    return block_blob_client.generate_container_shared_access_signature(
        container_name,
        permission=blob_permissions,
        expiry=expiry)
def create_pool(batch_service_client, pool_id):
    """
    Create a pool of Ubuntu 18.04 compute nodes with the configured size.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str pool_id: An ID for the new pool.
    """
    print('Creating pool [{}]...'.format(pool_id))
    # The pool is built from an Azure VM Marketplace image. For details on
    # Linux pools see:
    # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
    image = batchmodels.ImageReference(
        publisher="Canonical",
        offer="UbuntuServer",
        sku="18.04-LTS",
        version="latest")
    vm_config = batchmodels.VirtualMachineConfiguration(
        image_reference=image,
        node_agent_sku_id="batch.node.ubuntu 18.04")
    new_pool = batch.models.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=vm_config,
        vm_size=config._POOL_VM_SIZE,
        target_dedicated_nodes=config._POOL_NODE_COUNT)
    batch_service_client.pool.add(new_pool)
def create_job(batch_service_client, job_id, pool_id):
    """
    Create a job with the specified ID, bound to the specified pool.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str job_id: The ID for the job.
    :param str pool_id: The ID for the pool.
    """
    print('Creating job [{}]...'.format(job_id))
    pool_info = batch.models.PoolInformation(pool_id=pool_id)
    batch_service_client.job.add(
        batch.models.JobAddParameter(id=job_id, pool_info=pool_info))
def add_tasks(batch_service_client, job_id, input_files, task_id, command):
    """
    Add a single task running *command* to the specified job.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str job_id: The ID of the job to which to add the task.
    :param list input_files: Resource files made available to the task.
    :param str task_id: Identifier for the new task.
    :param str command: Command line the task executes on the node.
    """
    print('Adding task to job [{}]...'.format(job_id))
    # Run the task elevated (admin, task scope) so setup scripts can install
    # packages on the node.
    identity = batchmodels.UserIdentity(
        auto_user=batchmodels.AutoUserSpecification(
            elevation_level=batchmodels.ElevationLevel.admin,
            scope=batchmodels.AutoUserScope.task))
    task = batch.models.TaskAddParameter(
        id=task_id,
        command_line=command,
        resource_files=input_files,
        user_identity=identity)
    batch_service_client.task.add_collection(job_id, [task])
def wait_for_tasks_to_complete(batch_service_client, job_id, timeout):
    """
    Return when all tasks in the specified job reach the Completed state.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str job_id: The id of the job whose tasks should be monitored.
    :param timedelta timeout: The duration to wait for task completion. If all
        tasks in the specified job do not reach Completed state within this
        time period, an exception will be raised.
    """
    deadline = datetime.datetime.now() + timeout
    print("Monitoring all tasks for 'Completed' state, timeout in {}..."
          .format(timeout), end='')
    while datetime.datetime.now() < deadline:
        print('.', end='')
        sys.stdout.flush()
        tasks = batch_service_client.task.list(job_id)
        # Done as soon as every task has reached the completed state.
        if all(task.state == batchmodels.TaskState.completed
               for task in tasks):
            print()
            return True
        time.sleep(1)
    print()
    raise RuntimeError("ERROR: Tasks did not reach 'Completed' state within "
                       "timeout period of " + str(timeout))
def print_task_output(batch_service_client, job_id, encoding="utf-8"):
    """Prints the stdout.txt file for each selected task in the job.

    :param batch_service_client: The batch client to use.
    :type batch_service_client: `batchserviceclient.BatchServiceClient`
    :param str job_id: The id of the job with task output files to print.
    :param str encoding: Encoding used to decode the task output stream.
    """
    print('Printing task output...')
    tasks = batch_service_client.task.list(job_id)
    for task in tasks:
        # Only dump output for the tasks currently of interest.
        if task.id in ["MC-n1000-control2"]:
            node_id = batch_service_client.task.get(
                job_id, task.id).node_info.node_id
            print("Task: {}".format(task.id))
            print("Node: {}".format(node_id))
            stream = batch_service_client.file.get_from_task(
                job_id, task.id, config._STANDARD_OUT_FILE_NAME)
            file_text = _read_stream_as_string(stream, encoding)
            # Print the decoded text directly: re-encoding it would print a
            # bytes repr (b'...') under Python 3 instead of the output itself.
            print(file_text)
def _read_stream_as_string(stream, encoding):
"""Read stream as string
:param stream: input stream generator
:param str encoding: The encoding of the file. The default is utf-8.
:return: The file content.
:rtype: str
"""
output = io.BytesIO()
try:
for data in stream:
output.write(data)
if encoding is None:
encoding = 'utf-8'
return output.getvalue().decode(encoding)
finally:
output.close()
raise RuntimeError('could not write data to stream or decode bytes')
if __name__ == '__main__':
    start_time = datetime.datetime.now().replace(microsecond=0)
    print('Sample start: {}'.format(start_time))
    print()
    # Create the blob client, for use in obtaining references to
    # blob storage containers and uploading files to containers.
    blob_client = azureblob.BlockBlobService(
        account_name=config._STORAGE_ACCOUNT_NAME,
        account_key=config._STORAGE_ACCOUNT_KEY)
    # Use the blob client to create the containers in Azure Storage if they
    # don't yet exist.
    input_container_name = 'akprj-files'
    blob_client.create_container(input_container_name, fail_on_exist=False)
    # The collection of data files that are to be processed by the tasks.
    # NOTE(review): sys.path[0] is the script's own directory — these input
    # files are expected to live next to this script.
    input_file_paths = [os.path.join(sys.path[0], '000D265253258402560A7EDE0E3A3559FADACA0A2A6FBA663A8F3CD2084931D7.apk'),
                        os.path.join(sys.path[0], 'androguard-1.9-old-master.zip'),
                        os.path.join(sys.path[0], 'createCFGsFromAPKs.py'),
                        os.path.join(sys.path[0], 'setup-akprj.sh')]
    # Upload the data files.
    input_files = [
        upload_file_to_container(blob_client, input_container_name, file_path)
        for file_path in input_file_paths]
    # Create a Batch service client. We'll now be interacting with the Batch
    # service in addition to Storage
    credentials = batch_auth.SharedKeyCredentials(config._BATCH_ACCOUNT_NAME,
                                                  config._BATCH_ACCOUNT_KEY)
    batch_client = batch.BatchServiceClient(
        credentials,
        batch_url=config._BATCH_ACCOUNT_URL)
    try:
        # Create the pool that will contain the compute nodes that will execute the
        # tasks.
        #create_pool(batch_client, config._POOL_ID)
        # Create the job that will run the tasks.
        #create_job(batch_client, config._JOB_ID, config._POOL_ID)
        # Add the tasks to the job.
        # NOTE(review): pool/job creation above is deliberately commented out;
        # this run assumes the pool and job already exist in the account.
        #add_tasks(batch_client, config._JOB_ID, input_files, "SG-n100-tolerance-0_1", "bash ./setup-gamma.sh 100 smallGrid AIN-Pacman-Tolerance-0_1")
        add_tasks(batch_client, config._JOB_ID, input_files, "ak-test-13", "bash ./setup-akprj.sh")
        #add_tasks(batch_client, config._JOB_ID, input_files, "MC-n1000-control", "bash ./setup.sh 1000 mediumClassic")
        # Pause execution until tasks reach Completed state.
        #wait_for_tasks_to_complete(batch_client,
        #                           config._JOB_ID,
        #                           datetime.timedelta(minutes=30))
        #print("  Success! All tasks reached the 'Completed' state within the "
        #      "specified timeout period.")
        # Print the stdout.txt and stderr.txt files for each task to the console
        #print_task_output(batch_client, config._JOB_ID)
    except batchmodels.BatchErrorException as err:
        print(err)
        #print_batch_exception(err)
        #raise
    # Clean up storage resources
    #print('Deleting container [{}]...'.format(input_container_name))
    #blob_client.delete_container(input_container_name)
    # Print out some timing info
    end_time = datetime.datetime.now().replace(microsecond=0)
    print()
    print('Sample end: {}'.format(end_time))
    print('Elapsed time: {}'.format(end_time - start_time))
    print()
    # Clean up Batch resources (if the user so chooses).
    # NOTE(review): interactive cleanup below is disabled by wrapping it in a
    # string literal; remove the triple quotes to re-enable it.
    """if query_yes_no('Delete job?') == 'yes':
        batch_client.job.delete(config._JOB_ID)
    if query_yes_no('Delete pool?') == 'yes':
        batch_client.pool.delete(config._POOL_ID)
    print()
    input('Press ENTER to exit...')"""
|
en
| 0.691681
|
# Update the Batch and Storage account credential strings in config.py with values # unique to your accounts. These are used when constructing connection strings # for the Batch and Storage client objects. Prompts the user for yes/no input, displaying the specified question text. :param str question: The text of the prompt for input. :param str default: The default if the user hits <ENTER>. Acceptable values are 'yes', 'no', and None. :rtype: str :return: 'yes' or 'no' Prints the contents of the specified Batch exception. :param batch_exception: Uploads a local file to an Azure Blob storage container. :param block_blob_client: A blob service client. :type block_blob_client: `azure.storage.blob.BlockBlobService` :param str container_name: The name of the Azure Blob storage container. :param str file_path: The local path to the file. :rtype: `azure.batch.models.ResourceFile` :return: A ResourceFile initialized with a SAS URL appropriate for Batch tasks. Obtains a shared access signature granting the specified permissions to the container. :param block_blob_client: A blob service client. :type block_blob_client: `azure.storage.blob.BlockBlobService` :param str container_name: The name of the Azure Blob storage container. :param BlobPermissions blob_permissions: :rtype: str :return: A SAS token granting the specified permissions to the container. # Obtain the SAS token for the container, setting the expiry time and # permissions. In this case, no start time is specified, so the shared # access signature becomes valid immediately. Creates a pool of compute nodes with the specified OS settings. :param batch_service_client: A Batch service client. :type batch_service_client: `azure.batch.BatchServiceClient` :param str pool_id: An ID for the new pool. :param str publisher: Marketplace image publisher :param str offer: Marketplace image offer :param str sku: Marketplace image sku # Create a new pool of Linux compute nodes using an Azure Virtual Machines # Marketplace image. 
For more information about creating pools of Linux # nodes, see: # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/ Creates a job with the specified ID, associated with the specified pool. :param batch_service_client: A Batch service client. :type batch_service_client: `azure.batch.BatchServiceClient` :param str job_id: The ID for the job. :param str pool_id: The ID for the pool. Adds a task for each input file in the collection to the specified job. :param batch_service_client: A Batch service client. :type batch_service_client: `azure.batch.BatchServiceClient` :param str job_id: The ID of the job to which to add the tasks. :param list input_files: A collection of input files. One task will be created for each input file. :param output_container_sas_token: A SAS token granting write access to the specified Azure Blob storage container. #command = "bash ./setup.sh" #'Task-{}'.format(int(time.time())), Returns when all tasks in the specified job reach the Completed state. :param batch_service_client: A Batch service client. :type batch_service_client: `azure.batch.BatchServiceClient` :param str job_id: The id of the job whose tasks should be to monitored. :param timedelta timeout: The duration to wait for task completion. If all tasks in the specified job do not reach Completed state within this time period, an exception will be raised. Prints the stdout.txt file for each task in the job. :param batch_client: The batch client to use. :type batch_client: `batchserviceclient.BatchServiceClient` :param str job_id: The id of the job with task output files to print. #list.sort(tasks) #print(task.id) #, "SG-n1000-control"]): #print("Standard output:") Read stream as string :param stream: input stream generator :param str encoding: The encoding of the file. The default is utf-8. :return: The file content. :rtype: str # Create the blob client, for use in obtaining references to # blob storage containers and uploading files to containers. 
# Use the blob client to create the containers in Azure Storage if they # don't yet exist. # The collection of data files that are to be processed by the tasks. # Upload the data files. # Create a Batch service client. We'll now be interacting with the Batch # service in addition to Storage # Create the pool that will contain the compute nodes that will execute the # tasks. #create_pool(batch_client, config._POOL_ID) # Create the job that will run the tasks. #create_job(batch_client, config._JOB_ID, config._POOL_ID) # Add the tasks to the job. #add_tasks(batch_client, config._JOB_ID, input_files, "SG-n100-tolerance-0_1", "bash ./setup-gamma.sh 100 smallGrid AIN-Pacman-Tolerance-0_1") #add_tasks(batch_client, config._JOB_ID, input_files, "MC-n1000-control", "bash ./setup.sh 1000 mediumClassic") # Pause execution until tasks reach Completed state. #wait_for_tasks_to_complete(batch_client, # config._JOB_ID, # datetime.timedelta(minutes=30)) #print(" Success! All tasks reached the 'Completed' state within the " # "specified timeout period.") # Print the stdout.txt and stderr.txt files for each task to the console #print_task_output(batch_client, config._JOB_ID) #print_batch_exception(err) #raise # Clean up storage resources #print('Deleting container [{}]...'.format(input_container_name)) #blob_client.delete_container(input_container_name) # Print out some timing info # Clean up Batch resources (if the user so chooses). if query_yes_no('Delete job?') == 'yes': batch_client.job.delete(config._JOB_ID) if query_yes_no('Delete pool?') == 'yes': batch_client.pool.delete(config._POOL_ID) print() input('Press ENTER to exit...')
| 2.661802
| 3
|
poppy/manager/default/ssl_certificate.py
|
LukeRepko/poppy
| 3
|
6627724
|
# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from oslo_context import context as context_utils
from oslo_log import log
from poppy.common import errors
from poppy.common import util
from poppy.distributed_task.taskflow.flow import create_ssl_certificate
from poppy.distributed_task.taskflow.flow import delete_ssl_certificate
from poppy.distributed_task.taskflow.flow import recreate_ssl_certificate
from poppy.manager import base
from poppy.model.helpers import domain
from poppy.model import ssl_certificate
from poppy.transport.validators import helpers as validators
LOG = log.getLogger(__name__)
class DefaultSSLCertificateController(base.SSLCertificateController):
    def __init__(self, manager):
        """Initialize controller handles from the manager's driver.

        :param manager: Service manager whose driver exposes storage,
            flavor and distributed-task controllers.
        """
        super(DefaultSSLCertificateController, self).__init__(manager)
        # Taskflow controller used to submit async certificate flows.
        self.distributed_task_controller = (
            self._driver.distributed_task.services_controller
        )
        self.storage = self._driver.storage.certificates_controller
        self.service_storage = self._driver.storage.services_controller
        self.flavor_controller = self._driver.storage.flavors_controller
def create_ssl_certificate(
self, project_id, cert_obj, https_upgrade=False):
if (not validators.is_valid_domain_name(cert_obj.domain_name)) or \
(validators.is_root_domain(
domain.Domain(cert_obj.domain_name).to_dict())) or \
(not validators.is_valid_tld(cert_obj.domain_name)):
# here created a http domain object but it does not matter http or
# https
raise ValueError('%s must be a valid non-root domain' %
cert_obj.domain_name)
try:
flavor = self.flavor_controller.get(cert_obj.flavor_id)
# raise a lookup error if the flavor is not found
except LookupError as e:
raise e
try:
self.storage.create_certificate(
project_id,
cert_obj
)
# ValueError will be raised if the cert_info has already existed
except ValueError as e:
raise e
providers = [p.provider_id for p in flavor.providers]
kwargs = {
'providers_list_json': json.dumps(providers),
'project_id': project_id,
'cert_obj_json': json.dumps(cert_obj.to_dict()),
'context_dict': context_utils.get_current().to_dict()
}
if https_upgrade is True:
kwargs['https_upgrade'] = True
self.distributed_task_controller.submit_task(
create_ssl_certificate.create_ssl_certificate,
**kwargs)
return kwargs
def delete_ssl_certificate(self, project_id, domain_name, cert_type):
cert_obj = self.storage.get_certs_by_domain(
domain_name, cert_type=cert_type)
try:
flavor = self.flavor_controller.get(cert_obj.flavor_id)
# raise a lookup error if the flavor is not found
except LookupError as e:
raise e
providers = [p.provider_id for p in flavor.providers]
kwargs = {
'project_id': project_id,
'domain_name': domain_name,
'cert_type': cert_type,
'cert_obj_json': json.dumps(cert_obj.to_dict()),
'providers_list_json': json.dumps(providers),
'context_dict': context_utils.get_current().to_dict()
}
self.distributed_task_controller.submit_task(
delete_ssl_certificate.delete_ssl_certificate,
**kwargs)
return kwargs
def get_certs_info_by_domain(self, domain_name, project_id):
return self.storage.get_certs_by_domain(
domain_name=domain_name,
project_id=project_id)
def get_san_retry_list(self):
if 'akamai' in self._driver.providers:
akamai_driver = self._driver.providers['akamai'].obj
res = akamai_driver.mod_san_queue.traverse_queue()
# For other providers san_retry_list implementation goes here
else:
# if not using akamai driver just return an empty list
return []
res = [json.loads(r) for r in res]
return [
{"domain_name": r['domain_name'],
"project_id": r['project_id'],
"flavor_id": r['flavor_id'],
"cert_type": r['cert_type'],
"validate_service": r.get('validate_service', True)}
for r in res
]
def update_san_retry_list(self, queue_data_list):
    """Replace the akamai mod-san retry queue with ``queue_data_list``.

    Each incoming entry is validated first: its domain must belong to a
    service (unless ``validate_service`` is False) and must not already
    have a deployed certificate.

    :param queue_data_list: list of dicts carrying flavor_id,
        domain_name, project_id, cert_type and an optional
        validate_service flag
    :return: tuple ``(res, deleted)`` -- the new queue contents and the
        entries that were present before but are gone now
    :raises LookupError: when a domain has no service and validation is on
    :raises ValueError: when a cert for a domain is already deployed
    """
    for r in queue_data_list:
        service_obj = self.service_storage\
            .get_service_details_by_domain_name(r['domain_name'])
        if service_obj is None and r.get('validate_service', True):
            raise LookupError(u'Domain {0} does not exist on any service, '
                              'are you sure you want to proceed request, '
                              '{1}? You can set validate_service to False '
                              'to retry this san-retry request forcefully'.
                              format(r['domain_name'], r))
        cert_for_domain = None
        try:
            cert_for_domain = self.storage.get_certs_by_domain(
                r['domain_name'])
        except ValueError:
            # No cert stored yet for this domain -- acceptable here.
            LOG.info("No matching certificates found for "
                     "the domain {}".format(r['domain_name']))
        if cert_for_domain:
            if cert_for_domain.get_cert_status() == "deployed":
                raise ValueError(u'Cert on {0} already exists'.
                                 format(r['domain_name']))
    # Normalize entries into the canonical JSON shape stored in the queue.
    new_queue_data = [
        json.dumps({'flavor_id': r['flavor_id'],
                    'domain_name': r['domain_name'],
                    'project_id': r['project_id'],
                    'cert_type': r['cert_type'],
                    'validate_service': r.get('validate_service', True)})
        for r in queue_data_list
    ]
    res, deleted = [], []
    if 'akamai' in self._driver.providers:
        akamai_driver = self._driver.providers['akamai'].obj
        orig = [json.loads(r) for r in
                akamai_driver.mod_san_queue.traverse_queue()]
        res = [json.loads(r) for r in
               akamai_driver.mod_san_queue.put_queue_data(new_queue_data)]
        # Anything in the old queue but absent from the new one counts
        # as deleted.
        deleted = tuple(x for x in orig if x not in res)
    # other provider's retry-list implementation goes here
    return res, deleted
def rerun_san_retry_list(self):
    """Drain the akamai mod-san queue and re-submit valid cert requests.

    Every queued request is validated: its domain must belong to a
    service (unless queued with ``validate_service`` False), the service
    must not be operator-disabled, and no certificate may already be
    deployed for the domain. Valid requests are re-run through the
    recreate-certificate flow; invalid or failed ones are moved to the
    ignore list and, where appropriate, put back on the queue.

    :return: tuple ``(run_list, ignore_list)`` summarising the outcome
    """
    run_list = []
    ignore_list = []
    if 'akamai' in self._driver.providers:
        akamai_driver = self._driver.providers['akamai'].obj
        retry_list = []
        # Drain the queue completely before validating anything.
        while len(akamai_driver.mod_san_queue.mod_san_queue_backend) > 0:
            res = akamai_driver.mod_san_queue.dequeue_mod_san_request()
            retry_list.append(json.loads(res.decode('utf-8')))
        retry_list = util.remove_duplicates(retry_list)
        # double check in POST. This check should really be first done in
        # PUT
        for r in retry_list:
            err_state = False
            service_obj = self.service_storage\
                .get_service_details_by_domain_name(r['domain_name'])
            if service_obj is None and r.get('validate_service', True):
                err_state = True
                LOG.error(
                    u'Domain {0} does not exist on any service, are you '
                    'sure you want to proceed request, {1}? You can set '
                    'validate_service to False to retry this san-retry '
                    'request forcefully'.format(r['domain_name'], r)
                )
            elif (
                service_obj is not None and
                service_obj.operator_status.lower() == 'disabled'
            ):
                err_state = True
                LOG.error(
                    u'The service for domain {0} is disabled.'
                    'No certificates will be created for '
                    'service {1} while it remains in {2} operator_status'
                    'request forcefully'.format(
                        r['domain_name'],
                        service_obj.service_id,
                        service_obj.operator_status
                    )
                )
            try:
                cert_for_domain = self.storage.get_certs_by_domain(
                    r['domain_name'])
                if cert_for_domain.get_cert_status() == "deployed":
                    err_state = True
                    LOG.error(
                        u'Certificate on {0} has already been provisioned '
                        'successfully.'.format(r['domain_name']))
            except ValueError:
                LOG.info("No matching certificates found for "
                         "the domain {}".format(r['domain_name']))
            if err_state is False:
                run_list.append(r)
            else:
                ignore_list.append(r)
                if not r.get('validate_service', True):
                    # validation is False, send ignored retry_list
                    # object back to queue
                    akamai_driver.mod_san_queue.enqueue_mod_san_request(
                        json.dumps(r)
                    )
                LOG.warn(
                    "{0} was skipped because it failed validation.".format(
                        r['domain_name']
                    )
                )
        # BUG FIX: iterate over a snapshot. The loop body removes entries
        # from run_list, and mutating a list while iterating it makes the
        # iterator skip the element that follows each removal.
        for cert_obj_dict in list(run_list):
            try:
                cert_obj = ssl_certificate.SSLCertificate(
                    cert_obj_dict['flavor_id'],
                    cert_obj_dict['domain_name'],
                    cert_obj_dict['cert_type'],
                    project_id=cert_obj_dict['project_id']
                )
                try:
                    cert_for_domain = (
                        self.storage.get_certs_by_domain(
                            cert_obj.domain_name,
                            project_id=cert_obj.project_id,
                            flavor_id=cert_obj.flavor_id,
                            cert_type=cert_obj.cert_type))
                    # If this cert has been deployed through manual
                    # process we ignore the rerun process for this entry
                    if cert_for_domain.get_cert_status() == 'deployed':
                        run_list.remove(cert_obj_dict)
                        ignore_list.append(cert_obj_dict)
                        continue
                except ValueError:
                    LOG.info("No matching certificates found for "
                             "the domain {}".format(cert_obj.domain_name))
                # rerun the san process. A LookupError for an unknown
                # flavor propagates into the outer handler below, exactly
                # as the former ``except LookupError: raise e`` no-op did.
                flavor = self.flavor_controller.get(cert_obj.flavor_id)
                providers = [p.provider_id for p in flavor.providers]
                kwargs = {
                    'project_id': cert_obj.project_id,
                    'domain_name': cert_obj.domain_name,
                    'cert_type': cert_obj.cert_type,
                    'providers_list_json': json.dumps(providers),
                    'cert_obj_json': json.dumps(cert_obj.to_dict()),
                    'enqueue': False,
                    'context_dict': context_utils.RequestContext(
                        tenant=cert_obj.project_id
                    ).to_dict()
                }
                self.distributed_task_controller.submit_task(
                    recreate_ssl_certificate.recreate_ssl_certificate,
                    **kwargs)
            except Exception as e:
                # When exception happens we log it and re-queue this
                # request
                LOG.exception(e)
                run_list.remove(cert_obj_dict)
                ignore_list.append(cert_obj_dict)
                akamai_driver.mod_san_queue.enqueue_mod_san_request(
                    json.dumps(cert_obj_dict)
                )
        # For other providers post san_retry_list implementation goes here
    else:
        # if not using akamai driver just return summary of run list and
        # ignore list
        pass
    return run_list, ignore_list
def get_san_cert_configuration(self, san_cert_name):
    """Fetch the stored configuration for a named SAN certificate.

    :raises ValueError: if ``san_cert_name`` is not a known SAN cert
    """
    if 'akamai' not in self._driver.providers:
        # Without the akamai driver there is no SAN cert storage.
        return {}
    akamai_driver = self._driver.providers['akamai'].obj
    if san_cert_name not in akamai_driver.san_cert_cnames:
        raise ValueError(
            "%s is not a valid san cert, valid san certs are: %s" %
            (san_cert_name, akamai_driver.san_cert_cnames))
    return akamai_driver.cert_info_storage.get_cert_config(
        san_cert_name
    )
def update_san_cert_configuration(self, san_cert_name, new_cert_config):
    """Persist new configuration for a named SAN certificate.

    When the config carries an ``spsId``, the most recent ``jobId`` for
    that SPS request is fetched from the Akamai SPS API and stored
    alongside it.

    :param san_cert_name: cname of the SAN cert to update
    :param new_cert_config: dict of settings; may include 'spsId'
    :return: storage update result with akamai, ``{}`` otherwise
    :raises ValueError: if ``san_cert_name`` is not a known SAN cert
    :raises RuntimeError: if the SPS API lookup fails
    """
    if 'akamai' in self._driver.providers:
        akamai_driver = self._driver.providers['akamai'].obj
        if san_cert_name not in akamai_driver.san_cert_cnames:
            raise ValueError(
                "%s is not a valid san cert, valid san certs are: %s" %
                (san_cert_name, akamai_driver.san_cert_cnames))
        # given the spsId, determine the most recent jobId
        # and persist the jobId
        if new_cert_config.get('spsId') is not None:
            resp = akamai_driver.sps_api_client.get(
                akamai_driver.akamai_sps_api_base_url.format(
                    spsId=new_cert_config['spsId']
                ),
            )
            if resp.status_code != 200:
                raise RuntimeError(
                    'SPS GET Request failed. Exception: {0}'.format(
                        resp.text
                    )
                )
            else:
                resp_json = resp.json()
                # presumably requestList[0] is the most recent job --
                # TODO confirm against the SPS API ordering guarantees
                new_cert_config['jobId'] = (
                    resp_json['requestList'][0]['jobId']
                )
        res = akamai_driver.cert_info_storage.update_cert_config(
            san_cert_name, new_cert_config)
    else:
        # if not using akamai driver just return an empty list
        res = {}
    return res
def get_sni_cert_configuration(self, cert_name):
    """Fetch stored info for a named SNI certificate (akamai only)."""
    providers = self._driver.providers
    if 'akamai' in providers:
        akamai_driver = providers['akamai'].obj
        self._validate_sni_cert_name(akamai_driver, cert_name)
        return akamai_driver.cert_info_storage.get_sni_cert_info(cert_name)
    # No akamai driver -> nothing stored; mirror the SAN behaviour.
    return {}
def update_sni_cert_configuration(self, cert_name, new_cert_config):
    """Persist new configuration for a named SNI certificate (akamai only)."""
    providers = self._driver.providers
    if 'akamai' in providers:
        akamai_driver = providers['akamai'].obj
        self._validate_sni_cert_name(akamai_driver, cert_name)
        return akamai_driver.cert_info_storage.update_sni_cert_config(
            cert_name,
            new_cert_config,
        )
    # No akamai driver -> nothing to update; mirror the SAN behaviour.
    return {}
def get_san_cert_hostname_limit(self):
    """Return ``{'san_cert_hostname_limit': <limit>}`` (0 without akamai)."""
    limit = 0
    if 'akamai' in self._driver.providers:
        akamai_driver = self._driver.providers['akamai'].obj
        limit = akamai_driver.cert_info_storage.get_san_cert_hostname_limit()
    return {'san_cert_hostname_limit': limit}
@staticmethod
def _validate_sni_cert_name(provider_driver, cert_name):
if cert_name not in provider_driver.sni_cert_cnames:
raise ValueError(
"{0} is not a valid sni cert, "
"valid sni certs are: {1}".format(
cert_name, provider_driver.sni_cert_cnames))
def set_san_cert_hostname_limit(self, request_json):
    """Set the maximum number of hostnames per SAN certificate.

    :param request_json: payload expected to carry the key
        'san_cert_hostname_limit'
    :return: storage result with akamai, ``0`` otherwise
    :raises ValueError: when the expected setting cannot be read
    """
    if 'akamai' in self._driver.providers:
        try:
            new_limit = request_json['san_cert_hostname_limit']
        # NOTE(review): broad catch looks deliberate -- any lookup
        # failure (missing key, non-dict payload) is reported as an
        # unknown setting. Confirm before narrowing to KeyError.
        except Exception as exc:
            LOG.error("Error attempting to update san settings {0}".format(
                exc
            ))
            raise ValueError('Unknown setting!')
        akamai_driver = self._driver.providers['akamai'].obj
        res = akamai_driver.cert_info_storage.set_san_cert_hostname_limit(
            new_limit
        )
    else:
        # if not using akamai driver just return an empty list
        res = 0
    return res
def get_certs_by_status(self, status):
    """Return all stored certificates currently in ``status``."""
    return self.storage.get_certs_by_status(status)
def update_certificate_status(self, domain_name, certificate_updates):
    """Apply a JSON-patch style status update to a domain's certificate.

    Only the operation ``{'op': 'replace', 'path': 'status',
    'value': <new status>}`` is honoured, and only when the new status
    differs from the current one. The status is written into the first
    provider's cert details (comment below notes this assumes a single
    provider for now).

    :param domain_name: domain whose certificate is updated
    :param certificate_updates: dict with 'op', 'path' and 'value' keys
    :raises errors.CertificateStatusUpdateError: wraps any failure
    """
    certificate_old = self.storage.get_certs_by_domain(domain_name)
    try:
        if (
            certificate_updates.get("op") == "replace" and
            certificate_updates.get("path") == "status" and
            certificate_updates.get("value") is not None
        ):
            if (
                certificate_old.get_cert_status() !=
                certificate_updates.get("value")
            ):
                new_cert_details = certificate_old.cert_details
                # update the certificate for the first provider akamai
                # this logic changes when multiple certificate providers
                # are supported
                first_provider = list(new_cert_details.keys())[0]
                first_provider_cert_details = (
                    list(new_cert_details.values())[0]
                )
                first_provider_cert_details["extra_info"][
                    "status"] = certificate_updates.get("value")
                # Per-provider details are persisted as JSON strings.
                new_cert_details[first_provider] = json.dumps(
                    first_provider_cert_details
                )
                self.storage.update_certificate(
                    certificate_old.domain_name,
                    certificate_old.cert_type,
                    certificate_old.flavor_id,
                    new_cert_details
                )
    except Exception as e:
        # Any failure is surfaced as a domain-specific error so callers
        # only have one exception type to handle.
        LOG.error(
            "Something went wrong during certificate update: {0}".format(
                e
            )
        )
        raise errors.CertificateStatusUpdateError(e)
|
# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from oslo_context import context as context_utils
from oslo_log import log
from poppy.common import errors
from poppy.common import util
from poppy.distributed_task.taskflow.flow import create_ssl_certificate
from poppy.distributed_task.taskflow.flow import delete_ssl_certificate
from poppy.distributed_task.taskflow.flow import recreate_ssl_certificate
from poppy.manager import base
from poppy.model.helpers import domain
from poppy.model import ssl_certificate
from poppy.transport.validators import helpers as validators
LOG = log.getLogger(__name__)
class DefaultSSLCertificateController(base.SSLCertificateController):
def __init__(self, manager):
super(DefaultSSLCertificateController, self).__init__(manager)
self.distributed_task_controller = (
self._driver.distributed_task.services_controller
)
self.storage = self._driver.storage.certificates_controller
self.service_storage = self._driver.storage.services_controller
self.flavor_controller = self._driver.storage.flavors_controller
def create_ssl_certificate(
        self, project_id, cert_obj, https_upgrade=False):
    """Validate, store and provision a new SSL certificate.

    :param project_id: tenant that owns the certificate
    :param cert_obj: SSLCertificate model describing the request
    :param https_upgrade: when True, flag the flow as an HTTPS upgrade
    :return: the kwargs dict handed to the create flow (useful for tests)
    :raises ValueError: for an invalid/root domain, or when cert info
        already exists for the domain
    :raises LookupError: if the certificate's flavor does not exist
    """
    if (not validators.is_valid_domain_name(cert_obj.domain_name)) or \
            (validators.is_root_domain(
                domain.Domain(cert_obj.domain_name).to_dict())) or \
            (not validators.is_valid_tld(cert_obj.domain_name)):
        # here created a http domain object but it does not matter http or
        # https
        raise ValueError('%s must be a valid non-root domain' %
                         cert_obj.domain_name)
    # Propagates LookupError unchanged when the flavor is unknown; the
    # former ``except LookupError as e: raise e`` wrapper was a no-op.
    flavor = self.flavor_controller.get(cert_obj.flavor_id)
    # Propagates ValueError unchanged when cert info already exists for
    # this domain (same reasoning as above).
    self.storage.create_certificate(
        project_id,
        cert_obj
    )
    providers = [p.provider_id for p in flavor.providers]
    kwargs = {
        'providers_list_json': json.dumps(providers),
        'project_id': project_id,
        'cert_obj_json': json.dumps(cert_obj.to_dict()),
        'context_dict': context_utils.get_current().to_dict()
    }
    if https_upgrade is True:
        kwargs['https_upgrade'] = True
    self.distributed_task_controller.submit_task(
        create_ssl_certificate.create_ssl_certificate,
        **kwargs)
    return kwargs
def delete_ssl_certificate(self, project_id, domain_name, cert_type):
cert_obj = self.storage.get_certs_by_domain(
domain_name, cert_type=cert_type)
try:
flavor = self.flavor_controller.get(cert_obj.flavor_id)
# raise a lookup error if the flavor is not found
except LookupError as e:
raise e
providers = [p.provider_id for p in flavor.providers]
kwargs = {
'project_id': project_id,
'domain_name': domain_name,
'cert_type': cert_type,
'cert_obj_json': json.dumps(cert_obj.to_dict()),
'providers_list_json': json.dumps(providers),
'context_dict': context_utils.get_current().to_dict()
}
self.distributed_task_controller.submit_task(
delete_ssl_certificate.delete_ssl_certificate,
**kwargs)
return kwargs
def get_certs_info_by_domain(self, domain_name, project_id):
return self.storage.get_certs_by_domain(
domain_name=domain_name,
project_id=project_id)
def get_san_retry_list(self):
if 'akamai' in self._driver.providers:
akamai_driver = self._driver.providers['akamai'].obj
res = akamai_driver.mod_san_queue.traverse_queue()
# For other providers san_retry_list implementation goes here
else:
# if not using akamai driver just return an empty list
return []
res = [json.loads(r) for r in res]
return [
{"domain_name": r['domain_name'],
"project_id": r['project_id'],
"flavor_id": r['flavor_id'],
"cert_type": r['cert_type'],
"validate_service": r.get('validate_service', True)}
for r in res
]
def update_san_retry_list(self, queue_data_list):
for r in queue_data_list:
service_obj = self.service_storage\
.get_service_details_by_domain_name(r['domain_name'])
if service_obj is None and r.get('validate_service', True):
raise LookupError(u'Domain {0} does not exist on any service, '
'are you sure you want to proceed request, '
'{1}? You can set validate_service to False '
'to retry this san-retry request forcefully'.
format(r['domain_name'], r))
cert_for_domain = None
try:
cert_for_domain = self.storage.get_certs_by_domain(
r['domain_name'])
except ValueError:
LOG.info("No matching certificates found for "
"the domain {}".format(r['domain_name']))
if cert_for_domain:
if cert_for_domain.get_cert_status() == "deployed":
raise ValueError(u'Cert on {0} already exists'.
format(r['domain_name']))
new_queue_data = [
json.dumps({'flavor_id': r['flavor_id'],
'domain_name': r['domain_name'],
'project_id': r['project_id'],
'cert_type': r['cert_type'],
'validate_service': r.get('validate_service', True)})
for r in queue_data_list
]
res, deleted = [], []
if 'akamai' in self._driver.providers:
akamai_driver = self._driver.providers['akamai'].obj
orig = [json.loads(r) for r in
akamai_driver.mod_san_queue.traverse_queue()]
res = [json.loads(r) for r in
akamai_driver.mod_san_queue.put_queue_data(new_queue_data)]
deleted = tuple(x for x in orig if x not in res)
# other provider's retry-list implementation goes here
return res, deleted
def rerun_san_retry_list(self):
run_list = []
ignore_list = []
if 'akamai' in self._driver.providers:
akamai_driver = self._driver.providers['akamai'].obj
retry_list = []
while len(akamai_driver.mod_san_queue.mod_san_queue_backend) > 0:
res = akamai_driver.mod_san_queue.dequeue_mod_san_request()
retry_list.append(json.loads(res.decode('utf-8')))
retry_list = util.remove_duplicates(retry_list)
# double check in POST. This check should really be first done in
# PUT
for r in retry_list:
err_state = False
service_obj = self.service_storage\
.get_service_details_by_domain_name(r['domain_name'])
if service_obj is None and r.get('validate_service', True):
err_state = True
LOG.error(
u'Domain {0} does not exist on any service, are you '
'sure you want to proceed request, {1}? You can set '
'validate_service to False to retry this san-retry '
'request forcefully'.format(r['domain_name'], r)
)
elif (
service_obj is not None and
service_obj.operator_status.lower() == 'disabled'
):
err_state = True
LOG.error(
u'The service for domain {0} is disabled.'
'No certificates will be created for '
'service {1} while it remains in {2} operator_status'
'request forcefully'.format(
r['domain_name'],
service_obj.service_id,
service_obj.operator_status
)
)
try:
cert_for_domain = self.storage.get_certs_by_domain(
r['domain_name'])
if cert_for_domain.get_cert_status() == "deployed":
err_state = True
LOG.error(
u'Certificate on {0} has already been provisioned '
'successfully.'.format(r['domain_name']))
except ValueError:
LOG.info("No matching certificates found for "
"the domain {}".format(r['domain_name']))
if err_state is False:
run_list.append(r)
else:
ignore_list.append(r)
if not r.get('validate_service', True):
# validation is False, send ignored retry_list
# object back to queue
akamai_driver.mod_san_queue.enqueue_mod_san_request(
json.dumps(r)
)
LOG.warn(
"{0} was skipped because it failed validation.".format(
r['domain_name']
)
)
for cert_obj_dict in run_list:
try:
cert_obj = ssl_certificate.SSLCertificate(
cert_obj_dict['flavor_id'],
cert_obj_dict['domain_name'],
cert_obj_dict['cert_type'],
project_id=cert_obj_dict['project_id']
)
try:
cert_for_domain = (
self.storage.get_certs_by_domain(
cert_obj.domain_name,
project_id=cert_obj.project_id,
flavor_id=cert_obj.flavor_id,
cert_type=cert_obj.cert_type))
# If this cert has been deployed through manual
# process we ignore the rerun process for this entry
if cert_for_domain.get_cert_status() == 'deployed':
run_list.remove(cert_obj_dict)
ignore_list.append(cert_obj_dict)
continue
except ValueError:
LOG.info("No matching certificates found for "
"the domain {}".format(cert_obj.domain_name))
# rerun the san process
try:
flavor = self.flavor_controller.get(cert_obj.flavor_id)
# raise a lookup error if the flavor is not found
except LookupError as e:
raise e
providers = [p.provider_id for p in flavor.providers]
kwargs = {
'project_id': cert_obj.project_id,
'domain_name': cert_obj.domain_name,
'cert_type': cert_obj.cert_type,
'providers_list_json': json.dumps(providers),
'cert_obj_json': json.dumps(cert_obj.to_dict()),
'enqueue': False,
'context_dict': context_utils.RequestContext(
tenant=cert_obj.project_id
).to_dict()
}
self.distributed_task_controller.submit_task(
recreate_ssl_certificate.recreate_ssl_certificate,
**kwargs)
except Exception as e:
# When exception happens we log it and re-queue this
# request
LOG.exception(e)
run_list.remove(cert_obj_dict)
ignore_list.append(cert_obj_dict)
akamai_driver.mod_san_queue.enqueue_mod_san_request(
json.dumps(cert_obj_dict)
)
# For other providers post san_retry_list implementation goes here
else:
# if not using akamai driver just return summary of run list and
# ignore list
pass
return run_list, ignore_list
def get_san_cert_configuration(self, san_cert_name):
if 'akamai' in self._driver.providers:
akamai_driver = self._driver.providers['akamai'].obj
if san_cert_name not in akamai_driver.san_cert_cnames:
raise ValueError(
"%s is not a valid san cert, valid san certs are: %s" %
(san_cert_name, akamai_driver.san_cert_cnames))
res = akamai_driver.cert_info_storage.get_cert_config(
san_cert_name
)
else:
# if not using akamai driver just return an empty list
res = {}
return res
def update_san_cert_configuration(self, san_cert_name, new_cert_config):
if 'akamai' in self._driver.providers:
akamai_driver = self._driver.providers['akamai'].obj
if san_cert_name not in akamai_driver.san_cert_cnames:
raise ValueError(
"%s is not a valid san cert, valid san certs are: %s" %
(san_cert_name, akamai_driver.san_cert_cnames))
# given the spsId, determine the most recent jobId
# and persist the jobId
if new_cert_config.get('spsId') is not None:
resp = akamai_driver.sps_api_client.get(
akamai_driver.akamai_sps_api_base_url.format(
spsId=new_cert_config['spsId']
),
)
if resp.status_code != 200:
raise RuntimeError(
'SPS GET Request failed. Exception: {0}'.format(
resp.text
)
)
else:
resp_json = resp.json()
new_cert_config['jobId'] = (
resp_json['requestList'][0]['jobId']
)
res = akamai_driver.cert_info_storage.update_cert_config(
san_cert_name, new_cert_config)
else:
# if not using akamai driver just return an empty list
res = {}
return res
def get_sni_cert_configuration(self, cert_name):
if 'akamai' in self._driver.providers:
akamai_driver = self._driver.providers['akamai'].obj
self._validate_sni_cert_name(akamai_driver, cert_name)
res = akamai_driver.cert_info_storage.get_sni_cert_info(cert_name)
else:
# if not using akamai driver just return an empty list
res = {}
return res
def update_sni_cert_configuration(self, cert_name, new_cert_config):
if 'akamai' in self._driver.providers:
akamai_driver = self._driver.providers['akamai'].obj
self._validate_sni_cert_name(akamai_driver, cert_name)
res = akamai_driver.cert_info_storage.update_sni_cert_config(
cert_name,
new_cert_config
)
else:
# if not using akamai driver just return an empty list
res = {}
return res
def get_san_cert_hostname_limit(self):
if 'akamai' in self._driver.providers:
akamai_driver = self._driver.providers['akamai'].obj
res = akamai_driver.cert_info_storage.get_san_cert_hostname_limit()
res = {'san_cert_hostname_limit': res}
else:
# if not using akamai driver just return an empty list
res = {'san_cert_hostname_limit': 0}
return res
@staticmethod
def _validate_sni_cert_name(provider_driver, cert_name):
if cert_name not in provider_driver.sni_cert_cnames:
raise ValueError(
"{0} is not a valid sni cert, "
"valid sni certs are: {1}".format(
cert_name, provider_driver.sni_cert_cnames))
def set_san_cert_hostname_limit(self, request_json):
if 'akamai' in self._driver.providers:
try:
new_limit = request_json['san_cert_hostname_limit']
except Exception as exc:
LOG.error("Error attempting to update san settings {0}".format(
exc
))
raise ValueError('Unknown setting!')
akamai_driver = self._driver.providers['akamai'].obj
res = akamai_driver.cert_info_storage.set_san_cert_hostname_limit(
new_limit
)
else:
# if not using akamai driver just return an empty list
res = 0
return res
def get_certs_by_status(self, status):
certs_by_status = self.storage.get_certs_by_status(status)
return certs_by_status
def update_certificate_status(self, domain_name, certificate_updates):
certificate_old = self.storage.get_certs_by_domain(domain_name)
try:
if (
certificate_updates.get("op") == "replace" and
certificate_updates.get("path") == "status" and
certificate_updates.get("value") is not None
):
if (
certificate_old.get_cert_status() !=
certificate_updates.get("value")
):
new_cert_details = certificate_old.cert_details
# update the certificate for the first provider akamai
# this logic changes when multiple certificate providers
# are supported
first_provider = list(new_cert_details.keys())[0]
first_provider_cert_details = (
list(new_cert_details.values())[0]
)
first_provider_cert_details["extra_info"][
"status"] = certificate_updates.get("value")
new_cert_details[first_provider] = json.dumps(
first_provider_cert_details
)
self.storage.update_certificate(
certificate_old.domain_name,
certificate_old.cert_type,
certificate_old.flavor_id,
new_cert_details
)
except Exception as e:
LOG.error(
"Something went wrong during certificate update: {0}".format(
e
)
)
raise errors.CertificateStatusUpdateError(e)
|
en
| 0.752716
|
# Copyright (c) 2014 Rackspace, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # here created a http domain object but it does not matter http or # https # raise a lookup error if the flavor is not found # ValueError will be raised if the cert_info has already existed # raise a lookup error if the flavor is not found # For other providers san_retry_list implementation goes here # if not using akamai driver just return an empty list # other provider's retry-list implementation goes here # double check in POST. 
This check should really be first done in # PUT # validation is False, send ignored retry_list # object back to queue # If this cert has been deployed through manual # process we ignore the rerun process for this entry # rerun the san process # raise a lookup error if the flavor is not found # When exception happens we log it and re-queue this # request # For other providers post san_retry_list implementation goes here # if not using akamai driver just return summary of run list and # ignore list # if not using akamai driver just return an empty list # given the spsId, determine the most recent jobId # and persist the jobId # if not using akamai driver just return an empty list # if not using akamai driver just return an empty list # if not using akamai driver just return an empty list # if not using akamai driver just return an empty list # if not using akamai driver just return an empty list # update the certificate for the first provider akamai # this logic changes when multiple certificate providers # are supported
| 1.653247
| 2
|
src/cfnlint/rules/resources/UniqueNames.py
|
Adam-sHub/cfn-lint
| 1,134
|
6627725
|
<reponame>Adam-sHub/cfn-lint
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
from cfnlint.rules import CloudFormationLintRule, RuleMatch
class UniqueNames(CloudFormationLintRule):
    """Flag names used by both the Resources and Parameters sections."""
    id = 'E3007'
    shortdesc = 'Unique resource and parameter names'
    description = 'All resources and parameters must have unique names'
    source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/resources-section-structure.html'
    tags = ['parameters', 'resources']

    def match(self, cfn):
        """Return a RuleMatch for each resource name also used as a parameter."""
        parameters = cfn.template.get('Parameters', {})
        return [
            RuleMatch(
                ['Resources', name],
                'Resources and Parameters must not share name: ' + name)
            for name in cfn.get_resources()
            if name in parameters
        ]
|
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
from cfnlint.rules import CloudFormationLintRule, RuleMatch
class UniqueNames(CloudFormationLintRule):
id = 'E3007'
shortdesc = 'Unique resource and parameter names'
description = 'All resources and parameters must have unique names'
source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/resources-section-structure.html'
tags = ['parameters', 'resources']
def match(self, cfn):
matches = []
for resource in cfn.get_resources():
if resource in cfn.template.get('Parameters', {}):
matches.append(RuleMatch(['Resources', resource], 'Resources and Parameters must not share name: ' + resource))
return matches
|
en
| 0.650999
|
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
| 2.220334
| 2
|
iter/multi_process.py
|
kissf-lu/python_test
| 3
|
6627726
|
from simpy import Environment, Resource
def print_stats(res):
    """Print a one-shot snapshot of a simpy Resource's occupancy."""
    allocated, total = res.count, res.capacity
    print('%d of %d slots are allocated.' % (allocated, total))
    print(' Users:', res.users)
    print(' Queued events:', res.queue)
def user(env, res, name):
    """Request one slot of ``res``, hold it for 5 ticks, then idle 2 more.

    :param env: simpy Environment driving the simulation clock
    :param res: shared simpy Resource with limited capacity
    :param name: identifier printed in the trace output
    """
    with res.request() as req:
        # Yielding the request joins the resource's two lists (server
        # queue / waiting queue); the process resumes once the service
        # queue grants this request a slot.
        yield req
        print(name, 'uer obj is--->', res.users[0], 'at', env.now)
        # Each request delays 5 ticks before releasing its slot: a
        # timeout yielded INSIDE the ``with`` blocks while still holding
        # the resource, so waiting requests only get a slot afterwards.
        yield env.timeout(5)
        print(name, 'out with', env.now)
    # A timeout yielded OUTSIDE the ``with`` runs after the slot is
    # released, so it does not delay any waiting request.
    yield env.timeout(2)
    print(name, 'out process at',env.now)
def multi_process(env, res):
    """Spawn ten ``user`` processes (names 0-9) against the shared resource."""
    for num in range(10):
        env.process(user(env, res, num))
if __name__ == '__main__':
    # Five slots shared by ten user processes; simulate 15 ticks.
    env = Environment()
    res = Resource(env, capacity=5)
    # env.process(multi_process(env, res))
    multi_process(env, res)
    env.run(until=15)
|
from simpy import Environment, Resource
def print_stats(res):
print('%d of %d slots are allocated.' % (res.count, res.capacity))
print(' Users:', res.users)
print(' Queued events:', res.queue)
def user(env, res, name):
"""
"""
with res.request() as req:
# 每个req的yield迭代两个list: server queue, waiting queue,
# 每次服务队列迭代server queue 获取在一次
yield req
print(name, 'uer obj is--->', res.users[0], 'at', env.now)
# capacity个request都延迟1秒才能释放资源,
# with内的yield timeout 用于阻塞正在服务的request一段时间才释放,
# 释放后被阻塞的waiting request 立即获得服务资源
yield env.timeout(5)
print(name, 'out with', env.now)
# with外的yield timeout不会阻塞waiting request获得服务资源,
#
yield env.timeout(2)
print(name, 'out process at',env.now)
def multi_process(env, res):
num = 0
while True:
# yield env.timeout(1)
env.process(user(env, res, num))
num += 1
if num >= 10:
break
if __name__ == '__main__':
env = Environment()
res = Resource(env, capacity=5)
# env.process(multi_process(env, res))
multi_process(env, res)
env.run(until=15)
|
zh
| 0.448901
|
# 每个req的yield迭代两个list: server queue, waiting queue, # 每次服务队列迭代server queue 获取在一次 # capacity个request都延迟1秒才能释放资源, # with内的yield timeout 用于阻塞正在服务的request一段时间才释放, # 释放后被阻塞的waiting request 立即获得服务资源 # with外的yield timeout不会阻塞waiting request获得服务资源, # # yield env.timeout(1) # env.process(multi_process(env, res))
| 2.78621
| 3
|
trinity/components/builtin/peer_discovery/component.py
|
teotoplak/trinity
| 0
|
6627727
|
from argparse import (
ArgumentParser,
_SubParsersAction,
)
import asyncio
from typing import (
Type,
)
from lahja import EndpointAPI
from p2p.abc import ProtocolAPI
from p2p.constants import (
DISCOVERY_EVENTBUS_ENDPOINT,
)
from p2p.discovery import (
DiscoveryService,
PreferredNodeDiscoveryProtocol,
StaticDiscoveryService,
)
from p2p.kademlia import (
Address,
)
from p2p.service import (
BaseService,
)
from trinity.config import (
Eth1AppConfig,
Eth1DbMode,
TrinityConfig,
)
from trinity.events import ShutdownRequest
from trinity.extensibility import (
AsyncioIsolatedComponent,
)
from trinity.protocol.eth.proto import (
ETHProtocol,
)
from trinity.protocol.les.proto import (
LESProtocolV2,
)
from trinity._utils.shutdown import (
exit_with_services,
)
def get_protocol(trinity_config: TrinityConfig) -> Type[ProtocolAPI]:
    """Pick the p2p protocol class matching the node's database mode.

    Light-client databases speak LES; everything else speaks ETH.
    """
    # For now DiscoveryByTopicProtocol supports a single topic, so we use
    # the latest version of our supported protocols. Maybe this could be
    # more generic?
    # TODO: This needs to support the beacon protocol when we have a way to
    # check the config, if trinity is being run as a beacon node.
    eth1_config = trinity_config.get_app_config(Eth1AppConfig)
    if eth1_config.database_mode is Eth1DbMode.LIGHT:
        return LESProtocolV2
    return ETHProtocol
class DiscoveryBootstrapService(BaseService):
    """
    Bootstrap discovery to provide a parent ``CancellationToken``
    """

    def __init__(self,
                 disable_discovery: bool,
                 event_bus: EndpointAPI,
                 trinity_config: TrinityConfig) -> None:
        super().__init__()
        # When True, fall back to the static (preferred-nodes-only)
        # discovery service instead of active discovery.
        self.is_discovery_disabled = disable_discovery
        self.event_bus = event_bus
        self.trinity_config = trinity_config

    async def _run(self) -> None:
        # NOTE(review): "0.0.0.0" is used as the advertised external IP --
        # presumably resolved/overridden elsewhere; confirm.
        external_ip = "0.0.0.0"
        address = Address(external_ip, self.trinity_config.port, self.trinity_config.port)
        discovery_protocol = PreferredNodeDiscoveryProtocol(
            self.trinity_config.nodekey,
            address,
            self.trinity_config.bootstrap_nodes,
            self.trinity_config.preferred_nodes,
            self.cancel_token,
        )
        if self.is_discovery_disabled:
            # Static mode: only serve the preconfigured preferred nodes.
            discovery_service: BaseService = StaticDiscoveryService(
                self.event_bus,
                self.trinity_config.preferred_nodes,
                self.cancel_token,
            )
        else:
            discovery_service = DiscoveryService(
                discovery_protocol,
                self.trinity_config.port,
                self.event_bus,
                self.cancel_token,
            )
        try:
            await discovery_service.run()
        except Exception:
            # Discovery dying is treated as fatal: ask the node to shut down.
            await self.event_bus.broadcast(ShutdownRequest("Discovery ended unexpectedly"))
class PeerDiscoveryComponent(AsyncioIsolatedComponent):
    """
    Continuously discover other Ethereum nodes.
    """
    @property
    def name(self) -> str:
        return "Discovery"
    @property
    def normalized_name(self) -> str:
        # Fixed endpoint name so other components can address this one on the
        # event bus.
        return DISCOVERY_EVENTBUS_ENDPOINT
    def on_ready(self, manager_eventbus: EndpointAPI) -> None:
        # The component always starts; --disable-discovery only changes which
        # service DiscoveryBootstrapService runs internally.
        self.start()
    @classmethod
    def configure_parser(cls,
                         arg_parser: ArgumentParser,
                         subparser: _SubParsersAction) -> None:
        arg_parser.add_argument(
            "--disable-discovery",
            action="store_true",
            help="Disable peer discovery",
        )
    def do_start(self) -> None:
        discovery_bootstrap = DiscoveryBootstrapService(
            self.boot_info.args.disable_discovery,
            self.event_bus,
            self.boot_info.trinity_config
        )
        # Tear the process down when either the bootstrap service or the
        # event-bus service finishes.
        asyncio.ensure_future(exit_with_services(
            discovery_bootstrap,
            self._event_bus_service,
        ))
        asyncio.ensure_future(discovery_bootstrap.run())
|
from argparse import (
ArgumentParser,
_SubParsersAction,
)
import asyncio
from typing import (
Type,
)
from lahja import EndpointAPI
from p2p.abc import ProtocolAPI
from p2p.constants import (
DISCOVERY_EVENTBUS_ENDPOINT,
)
from p2p.discovery import (
DiscoveryService,
PreferredNodeDiscoveryProtocol,
StaticDiscoveryService,
)
from p2p.kademlia import (
Address,
)
from p2p.service import (
BaseService,
)
from trinity.config import (
Eth1AppConfig,
Eth1DbMode,
TrinityConfig,
)
from trinity.events import ShutdownRequest
from trinity.extensibility import (
AsyncioIsolatedComponent,
)
from trinity.protocol.eth.proto import (
ETHProtocol,
)
from trinity.protocol.les.proto import (
LESProtocolV2,
)
from trinity._utils.shutdown import (
exit_with_services,
)
def get_protocol(trinity_config: TrinityConfig) -> Type[ProtocolAPI]:
    """Pick the wire protocol class that matches the node's database mode.

    Light-client databases speak LES; every other mode speaks ETH.
    """
    # For now DiscoveryByTopicProtocol supports a single topic, so we use the
    # latest version of our supported protocols. Maybe this could be more generic?
    # TODO: This needs to support the beacon protocol when we have a way to
    # check the config, if trinity is being run as a beacon node.
    app_config = trinity_config.get_app_config(Eth1AppConfig)
    is_light_mode = app_config.database_mode is Eth1DbMode.LIGHT
    return LESProtocolV2 if is_light_mode else ETHProtocol
class DiscoveryBootstrapService(BaseService):
    """
    Bootstrap discovery to provide a parent ``CancellationToken``
    """
    def __init__(self,
                 disable_discovery: bool,
                 event_bus: EndpointAPI,
                 trinity_config: TrinityConfig) -> None:
        super().__init__()
        # True -> only the statically configured preferred nodes are used.
        self.is_discovery_disabled = disable_discovery
        self.event_bus = event_bus
        self.trinity_config = trinity_config
    async def _run(self) -> None:
        # NOTE(review): no external-IP detection happens here; the wildcard
        # address is presumably refined elsewhere -- confirm.
        external_ip = "0.0.0.0"
        address = Address(external_ip, self.trinity_config.port, self.trinity_config.port)
        discovery_protocol = PreferredNodeDiscoveryProtocol(
            self.trinity_config.nodekey,
            address,
            self.trinity_config.bootstrap_nodes,
            self.trinity_config.preferred_nodes,
            self.cancel_token,
        )
        if self.is_discovery_disabled:
            # Static mode: connect only to preferred nodes; the kademlia
            # protocol constructed above goes unused on this branch.
            discovery_service: BaseService = StaticDiscoveryService(
                self.event_bus,
                self.trinity_config.preferred_nodes,
                self.cancel_token,
            )
        else:
            discovery_service = DiscoveryService(
                discovery_protocol,
                self.trinity_config.port,
                self.event_bus,
                self.cancel_token,
            )
        try:
            await discovery_service.run()
        except Exception:
            # Any crash of discovery asks the whole node to shut down rather
            # than continue running without peer discovery.
            await self.event_bus.broadcast(ShutdownRequest("Discovery ended unexpectedly"))
class PeerDiscoveryComponent(AsyncioIsolatedComponent):
    """
    Continuously discover other Ethereum nodes.
    """
    @property
    def name(self) -> str:
        return "Discovery"
    @property
    def normalized_name(self) -> str:
        # Fixed endpoint name so other components can address this one on the
        # event bus.
        return DISCOVERY_EVENTBUS_ENDPOINT
    def on_ready(self, manager_eventbus: EndpointAPI) -> None:
        # The component always starts; --disable-discovery only changes which
        # service DiscoveryBootstrapService runs internally.
        self.start()
    @classmethod
    def configure_parser(cls,
                         arg_parser: ArgumentParser,
                         subparser: _SubParsersAction) -> None:
        arg_parser.add_argument(
            "--disable-discovery",
            action="store_true",
            help="Disable peer discovery",
        )
    def do_start(self) -> None:
        discovery_bootstrap = DiscoveryBootstrapService(
            self.boot_info.args.disable_discovery,
            self.event_bus,
            self.boot_info.trinity_config
        )
        # Tear the process down when either the bootstrap service or the
        # event-bus service finishes.
        asyncio.ensure_future(exit_with_services(
            discovery_bootstrap,
            self._event_bus_service,
        ))
        asyncio.ensure_future(discovery_bootstrap.run())
|
en
| 0.837319
|
# For now DiscoveryByTopicProtocol supports a single topic, so we use the latest # version of our supported protocols. Maybe this could be more generic? # TODO: This needs to support the beacon protocol when we have a way to # check the config, if trinity is being run as a beacon node. Bootstrap discovery to provide a parent ``CancellationToken`` Continuously discover other Ethereum nodes.
| 2.030526
| 2
|
geneparse/index/impute2.py
|
legaultmarc/geneparse
| 4
|
6627728
|
<reponame>legaultmarc/geneparse
"""IMPUTE2 index."""
# This file is part of geneparse.
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Pharmacogenomics Centre
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import io
import zlib
from os import path
import numpy as np
import pandas as pd
# This was copied from the 'genipe' module
_CHECK_STRING = b"GENIPE INDEX FILE"
try:
from Bio.bgzf import BgzfReader
HAS_BIOPYTHON = True
except ImportError:
HAS_BIOPYTHON = False
def _seek_generator(f):
    """Yield the byte offset of the start of every line in *f*.

    Args:
        f (file): the file object.
    """
    offset = 0
    yield offset
    for _ in f:
        offset = f.tell()
        yield offset
def generate_index(fn, cols=None, names=None, sep=" "):
    """Build an index for the given file.
    Args:
        fn (str): the name of the file.
        cols (list): a list containing column to keep (as int).
        names (list): the name corresponding to the column to keep (as str).
        sep (str): the field separator.
    Returns:
        pandas.DataFrame: the index.
    The index is also written to disk (see ``write_index``) as a side effect.
    """
    # Some assertions
    assert cols is not None, "'cols' was not set"
    assert names is not None, "'names' was not set"
    assert len(cols) == len(names)
    # Getting the open function
    bgzip, open_func = get_open_func(fn, return_fmt=True)
    # Reading the required columns
    data = pd.read_csv(fn, sep=sep, engine="c", usecols=cols, names=names,
                       compression="gzip" if bgzip else None)
    # Getting the seek information. The generator yields a leading 0 plus one
    # offset per line; the final offset (EOF) is dropped so the array lines
    # up one-to-one with the parsed rows.
    f = open_func(fn, "rb")
    data["seek"] = np.fromiter(_seek_generator(f), dtype=np.uint)[:-1]
    f.close()
    # Saving the index to file
    write_index(get_index_fn(fn), data)
    return data
def get_open_func(fn, return_fmt=False):
    """Get the opening function.
    Args:
        fn (str): the name of the file.
        return_fmt (bool): if the file format needs to be returned.
    Returns:
        tuple: either a tuple containing two elements: a boolean telling if the
        format is bgzip, and the opening function.
    Raises:
        ValueError: if the file is gzip-compressed but not seekable bgzip, or
        if BioPython is missing for a bgzip file.
    """
    # The file might be compressed using bgzip. The three bytes checked are
    # the gzip magic number plus the deflate method byte, which bgzip shares.
    bgzip = None
    with open(fn, "rb") as i_file:
        bgzip = i_file.read(3) == b"\x1f\x8b\x08"
    if bgzip and not HAS_BIOPYTHON:
        raise ValueError("needs BioPython to index a bgzip file")
    open_func = open
    if bgzip:
        open_func = BgzfReader
    # Trying to read; a plain (non-bgzip) gzip file presumably fails here
    # either on open or on the seekable check -- both paths are reported as
    # "use bgzip" below.
    try:
        with open_func(fn, "r") as i_file:
            if bgzip:
                if not i_file.seekable():
                    raise ValueError
            pass
    except ValueError:
        raise ValueError("{}: use bgzip for compression...".format(fn))
    if return_fmt:
        return bgzip, open_func
    return open_func
def get_index(fn, cols, names, sep):
    """Restore (or build) the index for a given file.

    Args:
        fn (str): the name of the file.
        cols (list): a list containing column to keep (as int).
        names (list): the name corresponding to the column to keep (as str).
        sep (str): the field separator.

    Returns:
        pandas.DataFrame: the index.

    If the index doesn't exist for the file, it is first created.
    """
    if not has_index(fn):
        # No saved index yet: build one from scratch (this also writes it out).
        return generate_index(fn, cols, names, sep)
    file_index = read_index(get_index_fn(fn))
    # Every requested column must be present, 'seek' aside.
    requested = set(names)
    available = set(file_index.columns) - {'seek'}
    if requested - available:
        raise ValueError("{}: missing index columns: reindex".format(fn))
    if "seek" not in file_index.columns:
        raise ValueError("{}: invalid index: reindex".format(fn))
    return file_index
def write_index(fn, index):
    """Serialize *index* to *fn* as a magic header plus zlib-compressed CSV.

    Args:
        fn (str): the name of the file that will contain the index.
        index (pandas.DataFrame): the index.
    """
    with open(fn, "wb") as o_file:
        # Magic string lets read_index reject arbitrary files.
        o_file.write(_CHECK_STRING)
        csv_text = index.to_csv(None, index=False, encoding="utf-8")
        o_file.write(zlib.compress(csv_text.encode(encoding="utf-8")))
def read_index(fn):
    """Read a previously written index back from *fn*.

    Args:
        fn (str): the name of the file containing the index.

    Returns:
        pandas.DataFrame: the index of the file.

    The file must begin with the magic check string written by
    ``write_index``; anything else is rejected.
    """
    with open(fn, "rb") as i_file:
        magic = i_file.read(len(_CHECK_STRING))
        if magic != _CHECK_STRING:
            raise ValueError("{}: not a valid index file".format(fn))
        decompressed = zlib.decompress(i_file.read()).decode(encoding="utf-8")
    return pd.read_csv(io.StringIO(decompressed))
def get_index_fn(fn):
    """Generate the index filename from the path to the indexed file.

    Args:
        fn (str): the name of the file for which we want an index.

    Returns:
        str: the name of the file containing the index.
    """
    index_name = "{}.idx".format(fn)
    return path.abspath(index_name)
def has_index(fn):
    """Check whether an index file already exists for *fn*.

    Args:
        fn (str): the name of the file for which we want the index.

    Returns:
        bool: ``True`` if the file contains an index, ``False`` otherwise.
    """
    index_path = get_index_fn(fn)
    return path.isfile(index_path)
|
"""IMPUTE2 index."""
# This file is part of geneparse.
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Pharmacogenomics Centre
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import io
import zlib
from os import path
import numpy as np
import pandas as pd
# This was copied from the 'genipe' module
_CHECK_STRING = b"GENIPE INDEX FILE"
try:
from Bio.bgzf import BgzfReader
HAS_BIOPYTHON = True
except ImportError:
HAS_BIOPYTHON = False
def _seek_generator(f):
    """Yield the byte offset of the start of every line in *f*.

    Args:
        f (file): the file object.
    """
    offset = 0
    yield offset
    for _ in f:
        offset = f.tell()
        yield offset
def generate_index(fn, cols=None, names=None, sep=" "):
    """Build an index for the given file.
    Args:
        fn (str): the name of the file.
        cols (list): a list containing column to keep (as int).
        names (list): the name corresponding to the column to keep (as str).
        sep (str): the field separator.
    Returns:
        pandas.DataFrame: the index.
    The index is also written to disk (see ``write_index``) as a side effect.
    """
    # Some assertions
    assert cols is not None, "'cols' was not set"
    assert names is not None, "'names' was not set"
    assert len(cols) == len(names)
    # Getting the open function
    bgzip, open_func = get_open_func(fn, return_fmt=True)
    # Reading the required columns
    data = pd.read_csv(fn, sep=sep, engine="c", usecols=cols, names=names,
                       compression="gzip" if bgzip else None)
    # Getting the seek information. The generator yields a leading 0 plus one
    # offset per line; the final offset (EOF) is dropped so the array lines
    # up one-to-one with the parsed rows.
    f = open_func(fn, "rb")
    data["seek"] = np.fromiter(_seek_generator(f), dtype=np.uint)[:-1]
    f.close()
    # Saving the index to file
    write_index(get_index_fn(fn), data)
    return data
def get_open_func(fn, return_fmt=False):
    """Get the opening function.
    Args:
        fn (str): the name of the file.
        return_fmt (bool): if the file format needs to be returned.
    Returns:
        tuple: either a tuple containing two elements: a boolean telling if the
        format is bgzip, and the opening function.
    Raises:
        ValueError: if the file is gzip-compressed but not seekable bgzip, or
        if BioPython is missing for a bgzip file.
    """
    # The file might be compressed using bgzip. The three bytes checked are
    # the gzip magic number plus the deflate method byte, which bgzip shares.
    bgzip = None
    with open(fn, "rb") as i_file:
        bgzip = i_file.read(3) == b"\x1f\x8b\x08"
    if bgzip and not HAS_BIOPYTHON:
        raise ValueError("needs BioPython to index a bgzip file")
    open_func = open
    if bgzip:
        open_func = BgzfReader
    # Trying to read; a plain (non-bgzip) gzip file presumably fails here
    # either on open or on the seekable check -- both paths are reported as
    # "use bgzip" below.
    try:
        with open_func(fn, "r") as i_file:
            if bgzip:
                if not i_file.seekable():
                    raise ValueError
            pass
    except ValueError:
        raise ValueError("{}: use bgzip for compression...".format(fn))
    if return_fmt:
        return bgzip, open_func
    return open_func
def get_index(fn, cols, names, sep):
    """Restore (or build) the index for a given file.

    Args:
        fn (str): the name of the file.
        cols (list): a list containing column to keep (as int).
        names (list): the name corresponding to the column to keep (as str).
        sep (str): the field separator.

    Returns:
        pandas.DataFrame: the index.

    If the index doesn't exist for the file, it is first created.
    """
    if not has_index(fn):
        # No saved index yet: build one from scratch (this also writes it out).
        return generate_index(fn, cols, names, sep)
    file_index = read_index(get_index_fn(fn))
    # Every requested column must be present, 'seek' aside.
    requested = set(names)
    available = set(file_index.columns) - {'seek'}
    if requested - available:
        raise ValueError("{}: missing index columns: reindex".format(fn))
    if "seek" not in file_index.columns:
        raise ValueError("{}: invalid index: reindex".format(fn))
    return file_index
def write_index(fn, index):
    """Serialize *index* to *fn* as a magic header plus zlib-compressed CSV.

    Args:
        fn (str): the name of the file that will contain the index.
        index (pandas.DataFrame): the index.
    """
    with open(fn, "wb") as o_file:
        # Magic string lets read_index reject arbitrary files.
        o_file.write(_CHECK_STRING)
        csv_text = index.to_csv(None, index=False, encoding="utf-8")
        o_file.write(zlib.compress(csv_text.encode(encoding="utf-8")))
def read_index(fn):
    """Read a previously written index back from *fn*.

    Args:
        fn (str): the name of the file containing the index.

    Returns:
        pandas.DataFrame: the index of the file.

    The file must begin with the magic check string written by
    ``write_index``; anything else is rejected.
    """
    with open(fn, "rb") as i_file:
        magic = i_file.read(len(_CHECK_STRING))
        if magic != _CHECK_STRING:
            raise ValueError("{}: not a valid index file".format(fn))
        decompressed = zlib.decompress(i_file.read()).decode(encoding="utf-8")
    return pd.read_csv(io.StringIO(decompressed))
def get_index_fn(fn):
    """Generate the index filename from the path to the indexed file.

    Args:
        fn (str): the name of the file for which we want an index.

    Returns:
        str: the name of the file containing the index.
    """
    index_name = "{}.idx".format(fn)
    return path.abspath(index_name)
def has_index(fn):
    """Check whether an index file already exists for *fn*.

    Args:
        fn (str): the name of the file for which we want the index.

    Returns:
        bool: ``True`` if the file contains an index, ``False`` otherwise.
    """
    index_path = get_index_fn(fn)
    return path.isfile(index_path)
|
en
| 0.786885
|
IMPUTE2 index. # This file is part of geneparse. # # The MIT License (MIT) # # Copyright (c) 2017 Pharmacogenomics Centre # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # This was copied from the 'genipe' module Yields seek position for each line. Args: f (file): the file object. Build a index for the given file. Args: fn (str): the name of the file. cols (list): a list containing column to keep (as int). names (list): the name corresponding to the column to keep (as str). sep (str): the field separator. Returns: pandas.DataFrame: the index. # Some assertions # Getting the open function # Reading the required columns # Getting the seek information # Saving the index to file Get the opening function. Args: fn (str): the name of the file. return_fmt (bool): if the file format needs to be returned. Returns: tuple: either a tuple containing two elements: a boolean telling if the format is bgzip, and the opening function. 
# The file might be compressed using bgzip # Trying to read Restores the index for a given file. Args: fn (str): the name of the file. cols (list): a list containing column to keep (as int). names (list): the name corresponding to the column to keep (as str). sep (str): the field separator. Returns: pandas.DataFrame: the index. If the index doesn't exist for the file, it is first created. # The index doesn't exists, generate it # Retrieving the index # Checking the names are there Writes the index to file. Args: fn (str): the name of the file that will contain the index. index (pandas.DataFrame): the index. Reads index from file. Args: fn (str): the name of the file containing the index. Returns: pandas.DataFrame: the index of the file. Before reading the index, we check the first couple of bytes to see if it is a valid index file. Generates the index filename from the path to the indexed file. Args: fn (str): the name of the file for which we want an index. Returns: str: the name of the file containing the index. Checks if the index exists. Args: fn (str): the name of the file for which we want the index. Returns: bool: ``True`` if the file contains an index, ``False`` otherwise.
| 2.617793
| 3
|
scrapers/cdms.py
|
nseifert/splatalogue
| 0
|
6627729
|
import urllib2
from bs4 import BeautifulSoup
import time
import numpy as np
import pandas as pd
from itertools import izip_longest
from collections import OrderedDict
import re
import MySQLdb as sqldb
import easygui as eg
from QNFormat import *
import sys
import os
class CDMSMolecule:
    def parse_cat(self, cat_url=None, local=0):
        """ This function takes a Pickett prediction file (a so-called "CAT" file) and converts it into a Pandas DataFrame.
        This code should work for any well-formed CAT file, and works for all CDMS and JPL entries, as well as custom, user-generated
        CAT files. It is unclear if there are any edge cases this misses as CAT files are fairly rigorous in their formatting.
        Args:
            cat_url: URL string of the CAT file, or an open file object when
                ``local`` is nonzero.
            local: 0 to download ``cat_url`` over HTTP (also saved to the
                working directory), nonzero to read it as a local file object.
        Returns:
            pandas.DataFrame with one row per transition and one column per
            fixed-width CAT field plus split quantum numbers.
        """
        num_qns = 0
        def l_to_idx(letter): # For when a QN > 99
            _abet = 'abcdefghijklmnopqrstuvwxyz'
            return next((z for z, _letter in enumerate(_abet) if _letter == letter.lower()), None)
        # Generates a parsing string formatter for CAT file rows
        def make_parser(fieldwidths):
            def accumulate(iterable):
                total = next(iterable)
                yield total
                for value in iterable:
                    total += value
                    yield total
            cuts = tuple(cut for cut in accumulate(abs(fw) for fw in fieldwidths))
            pads = tuple(fw < 0 for fw in fieldwidths) # bool for padding
            flds = tuple(izip_longest(pads, (0,)+cuts, cuts))[:-1] # don't need final one
            def parse(lyne): return tuple(lyne[i:j] for pad, i, j in flds if not pad)
            parse.size = sum(abs(fw) for fw in fieldwidths)
            parse.fmtstring = ' '.join('{}{}'.format(abs(fw), 'x' if fw < 0 else 's') for fw in fieldwidths)
            return parse
        widths = [13, 8, 8, 2, 10, 3, 7, 4] # Character widths for each CAT file entry, not including quantum numbers
        w_sum = sum(widths)
        parser = make_parser(tuple(widths))
        try:
            print '========\n'+ cat_url.name + '\n========\n'
        except AttributeError: # cat_url is a string:
            print '========\n'+ cat_url + '\n========\n'
        if local == 0:
            cat_inp = urllib2.urlopen(cat_url).read()
            # Save cat_inp to working directory
            with open(self.working_directory+'/'+self.tag+"_"+self.name+'.cat','wb') as otpt:
                otpt.write(cat_inp)
            # Split by line to ready CAT file for parse
            cat_inp = cat_inp.split('\n')
        else:
            cat_inp = cat_url.read().split('\n')
        initial_list = []
        j = 0
        for line in cat_inp: # Parses the CAT file into chunks
            if j == 0:
                # The quantum-number field is whatever remains after the fixed
                # columns; its width is measured from the first line only.
                qn_len = len(line)-w_sum
                widths.append(qn_len)
                parser = make_parser(widths)
            initial_list.append(parser(line))
            j += 1
        # Let's put everything together to put into a dataframe
        parsed_list = []
        qn_parser = make_parser((2,)*12)
        max_qn_length = 0 # For fitting strings into temporary numpy array
        for row in initial_list:
            if num_qns == 0: # Get number of quantum numbers per state
                try:
                    num_qns = int(row[7][-1])
                except IndexError: # You should never end up here unless there's a crazy edge case or a badly formed CAT file.
                    print row
                    raise
            # This is a really hacky way to parse the quantum numbers, but it's robust and has worked without a hitch so far.
            # Uses a series of if-else statements to iterate through the QNs in a linear fashion
            raw_qn = row[-1].rstrip()
            if len(raw_qn) > max_qn_length:
                max_qn_length = len(raw_qn)
            qns = qn_parser(row[-1]) # splits QN entry into pairs
            up_done = False # Boolean for being done with the upper state QNs
            in_middle = False # Are we in the character gap between the upper and lower state QNs?
            down_done = False # Boolean for being done with the lower state QNs
            qns_up = []
            qns_down = []
            down_idx = 0
            for i, val in enumerate(qns):
                if i == num_qns:
                    up_done = True
                    in_middle = True
                if up_done and in_middle and val.strip() == '':
                    continue
                if up_done and in_middle and val.strip() != '':
                    in_middle = False
                if down_idx == num_qns:
                    down_done = True
                if not up_done and not in_middle: # Still in the upper state
                    try:
                        qns_up.append(int(val))
                    except ValueError: # In case it isn't an integer quantum number
                        try:
                            if val.strip() == '+': # For parity symbols in CH3OH, for instance
                                qns_up.append(1)
                            elif val.strip() == '-':
                                qns_up.append(-1)
                            elif val.strip() == '': # No parity symbol?
                                qns_up.append(0)
                            elif re.search('[A-Z]', val.strip()): # QN > 99
                                temp = list(val)
                                qns_up.append((100 + (l_to_idx(temp[0]))*10) + int(temp[1]))
                            elif re.search('[a-z]', val.strip()): # QN < -9, e.g. CDMS CD3CN entry
                                temp = list(val)
                                qns_up.append((-10 - l_to_idx(temp[0])*10) - int(temp[1]))
                        except TypeError: # You shouldn't ever get here, but just in case...
                            print i, val, [x.strip() for x in qns]
                            raise
                if up_done and (not down_done and not in_middle): # Hit the beginning of the lower states
                    down_idx += 1
                    try:
                        qns_down.append(int(val))
                    except ValueError:
                        try:
                            if val.strip() == '+':
                                qns_down.append(1)
                            elif val.strip() == '-':
                                qns_down.append(-1)
                            elif val.strip() == '':
                                qns_down.append(0)
                            elif re.search('[A-Z]', val.strip()): # QN > 99
                                temp = list(val)
                                qns_down.append((100 + (l_to_idx(temp[0]))*10) + int(temp[1]))
                            elif re.search('[a-z]', val.strip()): # QN < -9, e.g. CDMS CD3CN entry
                                temp = list(val)
                                qns_down.append((-10 - l_to_idx(temp[0])*10) - int(temp[1]))
                        except TypeError:
                            print i, val, [x.strip() for x in qns]
                            raise
            try:
                parsed_list.append([float(s.strip()) for s in row[:-1]] + [raw_qn] + [qns_up, qns_down])
            except ValueError: # Get blank line or other issue?
                line = [s.strip() for s in row[:-1]]
                if not line[0]: # Blank line
                    continue
                elif any([char.isalpha() for char in line[5]]): # Upper state degeneracy > 99:
                    line[5] = 1000 + l_to_idx(line[5][0])*100 + int(line[5][1:])
                    parsed_list.append([float(col) for col in line] + [raw_qn] + [qns_up, qns_down])
        # Generates columns for dataframe that correlate with columns in main
        dtypes = [('frequency', 'f8'), ('uncertainty', 'f8'), ('intintensity', 'f8'), ('degree_freedom', 'i4'),
                  ('lower_state_energy', 'f8'), ('upper_state_degeneracy', 'i4'), ('molecule_tag', 'i4'),
                  ('qn_code', 'i4'), ('raw_qn', 'S%i'%max_qn_length)]
        dtypes.extend([('qn_up_%s' % i, 'i4') for i in range(len(parsed_list[0][-2]))])
        dtypes.extend([('qn_dwn_%s' % i, 'i4') for i in range(len(parsed_list[0][-2]))])
        final_list = []
        for row in parsed_list:
            final_list.append(tuple(row[:-2]+row[-2]+row[-1]))
        nplist = np.zeros((len(final_list),), dtype=dtypes)
        nplist[:] = final_list
        return pd.DataFrame(nplist)
# Not used but useful in case you want to append custom lines to a linelist
def add_row(self, row_name, value):
@staticmethod
def add(cat, row, val):
cat[row] = val
return cat
add(self.cat, row_name, value)
    def parse_formula(self, input_formula):
        """Convert a plain-text chemical formula into HTML markup.

        Returns:
            tuple: (formula with <sup>/<sub> HTML tags, searchable name with
            parentheses around isotope labels, searchable name without any
            parentheses). Any text after a comma (typically a state
            descriptor) is carried through unchanged on all three strings.
        """
        common_isotopes = ['13C', '15N','18O','17O','33S','34S','36S', '40Ar', '26Al','30Si','29Si','65Cu','52Cr','66Zn', '68Zn','35Cl','36Cl','37Cl','39K', '40K', '41K','46Ti','50Ti']
        # Get rid of any junk after a comma, usually some state descriptor
        if ',' in input_formula:
            output_formula = input_formula.split(',')[0]
            leftovers = ' '.join(input_formula.split(',')[1:])
        else:
            output_formula = input_formula
            leftovers = ''
        for isotope in common_isotopes:
            # Do isotopes first: the mass number becomes a superscript.
            if isotope in output_formula:
                num_part, element = re.findall(r'[^\W\d_]+|\d+', isotope)
                output_formula = output_formula.replace(isotope, '<sup>'+num_part+'</sup>'+element)
        # Replace every other number with <sub> (atom multiplicities)
        atoms_with_multiplicity = re.findall(r'[A-Z][a-z]*\d+', output_formula)
        for atom in atoms_with_multiplicity:
            element, num_part = re.findall(r'[^\W\d_]+|\d+', atom)
            output_formula = output_formula.replace(atom, element+'<sub>'+num_part+'</sub>',1)
        # Add <sub> to any parenthesized subgroup of the formula
        parenthetical_subgroups = re.findall(r'\)\d+', output_formula)
        for subgroup in parenthetical_subgroups:
            output_formula = output_formula.replace(subgroup, ')'+'<sub>'+subgroup.split(')')[1]+'</sub>')
        # Now, let's build s_name and s_name_noparens: s_name turns the
        # superscript tags back into parentheses and drops subscript tags.
        s_name = output_formula.replace('<sup>','(').replace('</sup>', ')').replace('<sub>','').replace('</sub>','')
        s_name_noparens = s_name.replace('(','').replace(')','')
        return output_formula+leftovers, s_name+leftovers, s_name_noparens+leftovers
    # Scrapes CDMS site to generate metadata
    def get_metadata(self, meta_url):
        """Scrape the CDMS documentation page for this entry.

        Returns:
            tuple: ``(parse_formula(...) result, metadata dict)`` where the
            metadata dict maps SQL column names to scraped byte strings.
        The raw HTML page is also saved to the working directory.
        """
        print self.name
        metadata = {} # Dictionary for metadata, keys are consistent with columns in SQL
        # Dictionaries to connect string values in CDMS metadata to SQL columns
        q_temps = {'2000.': 'Q_2000_', '1000.': 'Q_1000_', '500.0': 'Q_500_0', '300.0': 'Q_300_0',
                   '225.0': 'Q_225_0', '150.0': 'Q_150_0', '75.00': 'Q_75_00', '37.50': 'Q_37_50',
                   '18.75': 'Q_18_75', '9.375': 'Q_9_375', '5.000': 'Q_5_00', '2.725': 'Q_2_725'}
        q_temps_alt = {'2000.': 'Q_2000_', '1000.': 'Q_1000_', '500.': 'Q_500_0', '300.': 'Q_300_0',
                       '225.0': 'Q_225_0', '150.': 'Q_150_0', '075.': 'Q_75_00', '37.50': 'Q_37_50',
                       '18.75': 'Q_18_75', '9.375': 'Q_9_375', '5.000': 'Q_5_00', '2.725': 'Q_2_725'} # Workaround for non-standard temperature labels in some CDMS entries
        dipoles = {'a / D': 'MU_A', 'b / D': 'MU_B', 'c / D': 'MU_C'}
        # Initialize scraper
        meta_page = urllib2.urlopen(meta_url)
        meta_page_read = meta_page.read()
        with open(self.working_directory+'/'+self.tag+"_"+self.name+'.html','wb') as otpt:
            otpt.write(meta_page_read)
        soup = BeautifulSoup(meta_page_read, 'lxml')
        # Grab formula
        formula = soup.find_all('caption')[0].get_text().split('\n')[0].encode('utf-8')
        # Need to add lit data / dipoles etc
        meta_page.close()
        table = soup.find_all('tr')
        for entry in table: # Right now it seems the best way is to brute force this
            temp = entry.get_text()
            metadata['Name'] = self.name
            metadata['Date'] = time.strftime('%b. %Y', self.date)
            if 'Contributor' in temp:
                if self.ll_id == '10':
                    metadata['Contributor'] = '<NAME>'
                else:
                    metadata['Contributor'] = temp.split('Contributor')[1].encode('utf-8')
            # Pull out spin-rotation partition function values.
            # NOTE(review): zip() over two dicts relies on both iterating in
            # matching key order; this holds in CPython here because both
            # literals share the same insertion pattern, but it is fragile.
            for key,key2 in zip(q_temps,q_temps_alt):
                if 'Q(%s)' % key in temp:
                    metadata[q_temps[key].encode('utf-8')] = temp.split('Q(%s)' % key)[1].encode('utf-8')
                elif 'Q(%s)' % key2 in temp:
                    metadata[q_temps_alt[key2].encode('utf-8')] = temp.split('Q(%s)' % key2)[1].encode('utf-8')
            def value_check(x): return any(i.isdigit() for i in x)
            def pull_float(x): return re.findall(r'\d+.\d+', x)
            for key in dipoles:
                if key in temp:
                    if value_check(temp) and 'Q(' not in temp:
                        metadata[dipoles[key]] = pull_float(temp)[0].encode('utf-8')
            # Rotational constants A/B/C (rows containing '/ MHz' values)
            if ('/ MHz' in temp or re.findall(r'[A-C]\d.\d+', temp)) and 'Q(' not in temp:
                if value_check(temp):
                    if 'A' in temp:
                        metadata['A'] = pull_float(temp)[0].encode('utf-8')
                    if 'B' in temp:
                        metadata['B'] = pull_float(temp)[0].encode('utf-8')
                    if 'C' in temp:
                        metadata['C'] = pull_float(temp)[0].encode('utf-8')
        metadata['Ref1'] = str(soup.find_all('p')[0]).replace('\n', ' ')
        # Some hard-coded replace statements for weird things that don't parse correctly when displaying the metadata
        metadata['Ref1'] = metadata["Ref1"].replace('\xc2\x96','-') # Fixes long (en) dashes used in some entries
        return self.parse_formula(formula), metadata
    # Calculates all derived parameters from data in the CAT file, e.g. lower/upper state energies, sijmu2 values, etc.
    # Currently does NOT calculate sij values, because of the case-by-case, or even line-by-line, difficulty on how to identify the electric dipole to divide by
    @staticmethod
    def calc_derived_params(cat, metadata):
        """Add derived spectroscopy columns (sijmu2, aij, state energies,
        wavelength, measured/predicted frequency split) to the parsed CAT
        dataframe, using the 300 K partition function from ``metadata``.
        """
        try:
            q_spinrot = float(metadata['Q_300_0'])
        except ValueError: # in case there's multiple numbers
            q_spinrot = float(metadata['Q_300_0'].split('(')[0])
        # kT at 300 K expressed in wavenumbers (cm^-1)
        kt_300_cm1 = 208.50908
        cat['sijmu2'] = 2.40251E4 * 10**(cat['intintensity']) * q_spinrot * (1./cat['frequency']) * \
            (1./(np.exp(-1.0*cat['lower_state_energy']/kt_300_cm1) -
                 np.exp(-1.0*(cat['frequency']/29979.2458+cat['lower_state_energy'])/kt_300_cm1)))
        cat['aij'] = np.log10(1.16395E-20*cat['frequency']**3*cat['sijmu2']/cat['upper_state_degeneracy'])
        # 1.4387863 converts cm^-1 to Kelvin; 29979.2458 converts MHz to cm^-1
        cat['lower_state_energy_K'] = cat['lower_state_energy']*1.4387863
        cat['upper_state_energy'] = cat['lower_state_energy'] + cat['frequency']/29979.2458
        cat['upper_state_energy_K'] = cat['upper_state_energy']*1.4387863
        cat['error'] = cat['uncertainty']
        cat['roundedfreq'] = cat['frequency'].round(0)
        cat['line_wavelength'] = 299792458./(cat['frequency']*1.0E6)*1000
        cat['quantum_numbers'] = cat['raw_qn']
        # Add measured freqs and then ordered frequencies. Negative molecule
        # tags mark measured lines, positive tags predicted ones.
        cat['measfreq'] = np.nan
        cat['orderedfreq'] = np.nan
        cat['measerrfreq'] = np.nan
        mask_meas = (cat['molecule_tag'] < 0)
        mask_pred = (cat['molecule_tag'] > 0)
        # NOTE(review): chained indexing (cat[col][mask] = ...) triggers
        # SettingWithCopy warnings on modern pandas; cat.loc[mask, col] is
        # the safe spelling -- confirm before upgrading pandas.
        cat['measfreq'][mask_meas] = cat['frequency'][mask_meas]
        cat['frequency'][mask_meas] = np.nan
        cat['orderedfreq'][mask_meas] = cat['measfreq'][mask_meas]
        cat['measerrfreq'][mask_meas] = cat['uncertainty'][mask_meas]
        cat['orderedfreq'][mask_pred] = cat['frequency'][mask_pred]
        cat['transition_in_space'] = '0'
        return cat
    def create_directory(self):
        """Create a unique working directory for this molecule's files.

        Returns:
            str: the chosen path (suffixed with ``_N`` when the unsuffixed
            name already exists). Note the path is returned even if
            ``os.makedirs`` failed; only a message is printed in that case.
        """
        save_path = 'working_molecules/'
        folder_name = 'CDMS_'+self.tag+"_"+self.name+'_'+time.strftime('%b%Y', self.date)
        total_path = save_path+folder_name
        # Check to see if folder already exists; if so, we'll append an integer to it
        if os.path.isdir(total_path):
            # There might be more than 1, so we should add +1 to the tally if so
            dupe_idx = 1
            while os.path.isdir(total_path+'_{:d}'.format(dupe_idx)):
                dupe_idx += 1
            total_path = save_path+folder_name+'_{:d}'.format(dupe_idx)
        try:
            os.makedirs(total_path)
        except OSError:
            print('Creation of directory %s failed' %(total_path,))
        else:
            print('Created working directory for molecular information at: %s' %(total_path,))
        return total_path
def __init__(self, cdms_inp, custom=False, ll_id='10', custom_path="", write_directory=True):
base_url = "http://cdms.astro.uni-koeln.de"
self.tag = cdms_inp[0]
self.name = cdms_inp[1]
self.date = cdms_inp[2]
self.cat_url = cdms_inp[3]
self.meta_url = cdms_inp[4]
self.ll_id = ll_id
if write_directory:
self.working_directory = self.create_directory()
if custom:
self.cat = self.parse_cat(cat_url=open(custom_path, 'r'), local=1)
else:
self.cat = self.parse_cat(cat_url=base_url+self.cat_url)
(self.formula, self.s_name, self.s_name_noparens), self.metadata = self.get_metadata(base_url+self.meta_url)
self.cat = self.calc_derived_params(self.cat, self.metadata)
self.cat['ll_id'] = self.ll_id
self.cat['`v3.0`'] = '3'
# Write parsed CAT dataframe to CSV file
self.cat.to_csv(path_or_buf=self.working_directory+'/'+self.tag+"_"+self.name+'_parsed_cat.csv')
for key in self.metadata:
print key, ': ', self.metadata[key]
class SplatSpeciesResultList(list):
    """A Splatalogue species query row that pretty-prints as a table line."""
    def __new__(cls, data=None):
        return super(SplatSpeciesResultList, cls).__new__(cls, data)
    def __str__(self):
        # Zero-pad the leading index to four characters without mutating self.
        fields = list(self)
        padded_idx = str(fields[0]).rjust(4, "0")
        return "{:5} {:10} {:10} {:>25} {:>15}".format(padded_idx, fields[1], fields[5], fields[3], fields[4])
class CDMSChoiceList(list):
    """A CDMS update-table row that pretty-prints as a table line.

    Element 0 is a string index, element 3 a time.struct_time.
    """
    def __new__(cls, data=None):
        return super(CDMSChoiceList, cls).__new__(cls, data)
    def __str__(self):
        fields = list(self)
        # Zero-pad the (string) index to four characters.
        fields[0] = fields[0].rjust(4, "0")
        return "{:5} {:10} {:>25} {:>15}".format(fields[0], fields[1], fields[2], time.strftime("%B %Y", fields[3]))
def unidrop(x):
    """Collapse each run of non-ASCII characters in *x* into a single space."""
    non_ascii_run = re.compile(r'[^\x00-\x7F]+')
    return non_ascii_run.sub(' ', x)
def pretty_print(comp):
    """Format the compiled CDMS update list as an aligned, tab-separated table.

    comp: list of rows [tag, molecule, struct_time, cat URL, metadata URL].
    Returns one string with a header line and one line per row, each ending
    in a newline.
    """
    row_fmt = "{:5}\t{:45}\t{:15}\t{:40} {:40}"
    lines = [row_fmt.format('Tag', 'Molecule', 'Date', 'Cat Link', 'Metadata Link')]
    for row in comp:
        lines.append(row_fmt.format(row[0], row[1], time.strftime("%B %Y", row[2]), row[3], row[4]))
    return '\n'.join(lines) + '\n'
def pull_updates():
    """Scrape the CDMS 'entries' page and return the catalogue update table.

    Returns a list of rows, one per CDMS entry, sorted most-recent first:
    [tag, molecule name, struct_time of last update, CAT file URL,
    documentation (metadata) URL].
    """
    base_url = "http://cdms.astro.uni-koeln.de"
    page = urllib2.urlopen(base_url+"/classic/entries")
    soup = BeautifulSoup(page.read(), "lxml")
    urls = []  # URLs to CAT and documentation (metadata) files
    des = []  # Text from table entries
    for tr in soup.find_all('tr')[1:]:
        des.append([col.text for col in tr.find_all('td')])
        urls.append([a['href'] for a in tr.find_all('a')])
    page.close()  # Close HTML sock
    compiled = []  # 0 --> tag, 1 --> Molecule, 2 --> struct_time obj, 3 --> cat file, 4 --> metadata
    # CDMS is not consistent about how entry dates are written, so try a
    # list of known formats until one parses.
    date_formats = ['%b. %Y', '%B %Y', '%b %Y', '%B. %Y']
    for i, entry in enumerate(urls):
        date = des[i][6].strip()
        # BUGFIX: formatted_date was previously left unbound (NameError) when
        # no format matched an empty/odd date string, or silently reused the
        # previous row's date after printing the warning.
        formatted_date = None
        for date_fmt in date_formats:
            try:
                formatted_date = time.strptime(date, date_fmt)
            except ValueError:
                continue
            else:
                break
        if formatted_date is None:
            # Skip unparseable rows instead of crashing or mis-dating them.
            print('Date format %s is not supported in code. Please add to date_formats.'%date)
            continue
        compiled.append([unidrop(des[i][0]).encode('utf-8'), unidrop(des[i][1]).encode('utf-8'),
                         formatted_date, urls[i][1], urls[i][2]])
    compiled.sort(key=lambda x: x[2], reverse=True)  # Sorts by update time, most recent first
    return compiled
def process_update(mol, entry=None, sql_conn=None):
    """
    Update an existing Splatalogue species with a freshly parsed CDMS entry.

    Flow for process_update:
    1) Check metadata, update if needed
    2) Set QN formatting (case-by-case via format_it)
    3) Delete CDMS-related linelist from Splatalogue
    4) Push new linelist and metadata to Splatalogue

    mol:      parsed CDMSMolecule (linelist DataFrame + metadata dict)
    entry:    the `species` table row chosen by the user; entry[0] is species_id
    sql_conn: open MySQLdb connection

    Returns (final_cat, metadata_to_push, push_metadata_flag, append_lines):
    the linelist trimmed to columns present in `main`, the metadata dict to
    push, 'APPEND'/'REPLACE'/'NO' for the metadata row, and whether to append
    (True) or replace (False) the linelist.
    """
    sql_cur = sql_conn.cursor()
    # ----------------------------
    # METADATA PULL CHECK & UPDATE
    # ----------------------------
    SPECIES_ID=entry[0]
    sql_cur.execute("SHOW columns FROM species_metadata")
    db_meta_cols = [tup[0] for tup in sql_cur.fetchall()]
    sql_cur.execute("SELECT * from species_metadata WHERE species_id=%s", (SPECIES_ID,))
    results = sql_cur.fetchall()
    MetadataMalformed = False
    if len(results) == 1:
        db_meta = results[0]
        db_meta = {key:value for key, value in zip(db_meta_cols, db_meta)}
    elif len(results) > 1 and any([res[4] for res in results]): # More than one linelist is associated with the chosen species_id; ask the user which to update
        chc = ['date: %s \t list: %s \t v2.0: %s \t v3.0: %s' % (a[4], a[55], a[57], a[59]) for a in results]
        print('Linelist choices: ', chc)
        user_chc = eg.choicebox("Choose an entry to update (CDMS linelist = 10)", "Entry list", chc)
        idx = 0
        # Renamed loop variable so it no longer shadows the `entry` parameter.
        for i, chc_entry in enumerate(chc):
            if user_chc == chc_entry:
                idx = i
                break
        db_meta = results[idx]
        db_meta = {key:value for key, value in zip(db_meta_cols, db_meta)}
    else: # Species exists but there are no metadata entries, so we have to populate a new one
        db_meta = {}
        MetadataMalformed = True
        for i, col_name in enumerate(db_meta_cols):
            if col_name in mol.metadata.keys():
                db_meta[col_name] = mol.metadata[col_name]
            else:
                continue
    mol.metadata['LineList'] = mol.ll_id
    mol.metadata['species_id_noparens'] = mol.s_name_noparens
    # Ask the user what to do with the metadata row / linelist.
    if len(results) >= 1:
        metadata_push_answer = eg.buttonbox(msg='Do you want to APPEND or REPLACE a new metadata entry, or DO NOTHING? Do nothing if you are merely adding a hyperfine linelist to an existing entry.', choices=['APPEND', 'REPLACE', 'DO NOTHING'])
        if metadata_push_answer == 'APPEND':
            push_metadata_flag = 'APPEND'
        elif metadata_push_answer == 'REPLACE':
            push_metadata_flag = 'REPLACE'
        else:
            push_metadata_flag = 'NO'
    else:
        push_metadata_flag = 'APPEND'
    append_lines = eg.buttonbox(msg='Do you want to append the linelist, or replace the current linelist in the database?', choices=['Append', 'Replace'])
    if append_lines == 'Append' or not append_lines:
        append_lines = True
    elif append_lines == 'Replace':
        append_lines = False
    try:
        if db_meta['LineList'] != mol.ll_id:
            mol.metadata['LineList'] = mol.ll_id
    except KeyError: # Only catches when species exists but metadata doesn't
        # BUGFIX: previously wrote to the misspelled key 'Linelist', which is
        # not a species_metadata column and was silently dropped downstream.
        mol.metadata['LineList'] = mol.ll_id
        db_meta['LineList'] = mol.ll_id
    # Only entry in database isn't from the linelist of the entry that user wants to update
    mol.metadata['v1_0'] = '0'
    mol.metadata['v2_0'] = '0'
    mol.metadata['v3_0'] = '3'
    mol.metadata['v4_0'] = '0'
    new_name = eg.enterbox(msg="Do you want to change the descriptive metadata molecule name? "
                               "Leave blank otherwise. Current name is %s"
                               % mol.metadata['Name'], title="Metadata Name Change")
    # BUGFIX: was `new_name is not ''` -- an identity test that is True even
    # when the dialog is cancelled (returns None), clobbering Name with None.
    if new_name:
        mol.metadata['Name'] = new_name
    elif not MetadataMalformed:
        mol.metadata['Name'] = db_meta['Name']
    # Ref20/Ref19 are reserved for links back to the CDMS entry and CAT file.
    mol.metadata['Ref20'] = '<a href=' + "\"" + 'http://cdms.astro.uni-koeln.de'+mol.meta_url + "\"" + " target=\"_blank\">CDMS Entry</a>"
    mol.metadata['Ref19'] = mol.metadata['Ref20'].replace('cdmsinfo?file=e','cdmssearch?file=c').replace('Entry', 'CAT file')
    sql_cur.execute("SHOW columns FROM species")
    db_species_cols = [tup[0] for tup in sql_cur.fetchall()]
    sql_cur.execute("SELECT * from species WHERE species_id=%s", (SPECIES_ID,))
    db_species = sql_cur.fetchall()[0]
    # If the DB metadata came from a different linelist (or was missing),
    # rebuild the ISM flags from the species row instead.
    if db_meta['LineList'] != mol.ll_id or MetadataMalformed:
        species_entry_dict = {key: value for (key, value) in [(db_species_cols[i], val) for i, val
                                                              in enumerate(db_species)]}
        ism_set = ('ism_hotcore', 'ism_diffusecloud', 'comet', 'extragalactic', 'known_ast_molecules')
        ism_set_dict = {key: value for (key, value) in [(key, species_entry_dict[key]) for key in ism_set]}
        if any([val == '1' for val in ism_set_dict.values()]):
            mol.metadata['ism'] = 1
        else:
            mol.metadata['ism'] = 0
        ism_overlap_tags = ['ism_hotcore', 'comet', 'planet', 'AGB_PPN_PN', 'extragalactic']
        for tag in ism_overlap_tags:
            mol.metadata[tag] = species_entry_dict[tag]
        mol.metadata['ism_diffuse'] = species_entry_dict['ism_diffusecloud']
        mol.metadata['species_id'] = species_entry_dict['species_id']
        mol.metadata['species_id_noparens'] = mol.s_name_noparens
    if db_meta['LineList'] == mol.ll_id or not MetadataMalformed:
        metadata_to_push = {}
        for i, col_name in enumerate(db_meta_cols):
            if col_name in mol.metadata.keys():
                metadata_to_push[col_name] = mol.metadata[col_name]
            else: # Hacky fix to ensure clean columns -- fills in required columns that this routine doesn't otherwise set
                if col_name in ['ism', 'species_id', 'LineList']:
                    metadata_to_push[col_name] = db_meta[col_name]
    else:
        metadata_to_push = mol.metadata
    # Generate new unique ID for metadata entry
    try:
        sql_cur.execute('SELECT MAX(line_id) FROM species_metadata')
    except: # line_id doesn't exist in the database so just skip this step
        pass
    else:
        if push_metadata_flag == 'APPEND':
            try:
                metadata_to_push['line_id'] = str(int(sql_cur.fetchall()[0][0])+1)
            except TypeError: # Gets thrown if there are no metadata entries in the table, thus line_id should be "1".
                metadata_to_push['line_id'] = 1
        elif push_metadata_flag == 'REPLACE':
            try:
                metadata_to_push['line_id'] = str(int(sql_cur.fetchall()[0][0]))
            except TypeError:
                metadata_to_push['line_id'] = 1
    # QN formatting --- let's just do it on a case-by-case basis
    qn_fmt = mol.cat['qn_code'][0]
    fmtted_qns = []
    print('Preparing linelist...')
    # Iterate through rows and add formatted QN
    choice_idx = None
    for idx, row in mol.cat.iterrows():
        format, choice_idx = format_it(qn_fmt, row.filter(regex=re.compile('(qn_)'+'.*?'+'(_)'+'(\\d+)')),
                                       choice_idx=choice_idx)
        fmtted_qns.append(format)
    # Push formatted quantum numbers to linelist
    mol.cat['resolved_QNs'] = pd.Series(fmtted_qns, index=mol.cat.index)
    if any(mol.cat['resolved_QNs'] == ''):
        print('======================\n'+'WARNING: The parsing code did not parse the quantum numbers. This may be due to the CAT QN code not being programmed into QNParser, but also might be due to you choosing not to parse the QNs.\n Please contact your friendly code developer (Nathan) if you need help in this regard.\n'+'======================')
    # ISM detections get NRAO-recommended frequencies (Lovas_NRAO in `main`).
    if metadata_to_push['ism'] == 1:
        mol.cat['Lovas_NRAO'] = 1
    else:
        mol.cat['Lovas_NRAO'] = 0
    # Prep linelist for submission: keep only columns that exist in `main`.
    sql_cur.execute("SHOW columns FROM main")
    ll_splat_col_list = [tup[0] for tup in sql_cur.fetchall()]
    ll_col_list = mol.cat.columns.values.tolist()
    final_cat = mol.cat[[col for col in ll_splat_col_list if col in ll_col_list]]
    return final_cat, metadata_to_push, push_metadata_flag, append_lines
def new_molecule(mol, sql_conn=None):
    """Create a brand-new Splatalogue species from a parsed CDMS entry.

    Builds the species row, the species_metadata row and the formatted
    linelist, prompting the user (via easygui) for species flags and an
    optional descriptive name.

    mol:      parsed CDMSMolecule (linelist DataFrame + metadata dict)
    sql_conn: open MySQLdb connection

    Returns (final_cat, species_to_push, metadata_to_push): the linelist
    trimmed to columns present in `main`, the species row dict and the
    metadata row dict, ready for push_molecule().
    """
    sql_cur = sql_conn.cursor()
    # ----------------------------
    # METADATA ADD
    # ----------------------------
    # Generate array of all columns from species_metadata so we can fill them in as we go
    sql_cur.execute("SHOW columns FROM species_metadata")
    db_meta_cols = [tup[0] for tup in sql_cur.fetchall()]
    metadata_to_push = {}
    # Fills in metadata dictionary with the column array we generated above as a list of keys for the metadata dict
    for i, col_name in enumerate(db_meta_cols):
        if col_name in mol.metadata.keys():
            metadata_to_push[col_name] = mol.metadata[col_name]
        else:
            continue
    # Generate new species_id
    sql_cur.execute('SELECT MAX(species_id) FROM species')
    try: # species_id is +1 of the largest species_id in the species table
        metadata_to_push['species_id'] = str(int(sql_cur.fetchall()[0][0])+1)
    except TypeError: # Gets thrown if there are no species in the table; therefore species ID should be "1".
        metadata_to_push['species_id'] = "1"
    # Generate new unique ID for metadata entry
    try:
        sql_cur.execute('SELECT MAX(line_id) FROM species_metadata')
    except: # line_id doesn't exist in the database so just skip this step
        pass
    else:
        try:
            metadata_to_push['line_id'] = str(int(sql_cur.fetchall()[0][0])+1)
        except TypeError: # Gets thrown if there are no metadata entries in the table, thus line_id should be "1".
            metadata_to_push['line_id'] = 1
    # Odds and ends; we default to v4_0 for splat_2019
    metadata_to_push['v1_0'] = '0'
    metadata_to_push['v2_0'] = '0'
    metadata_to_push['v3_0'] = '3'
    # Ref20/Ref19 are reserved for links back to the CDMS entry and CAT file.
    metadata_to_push['Ref20'] = '<a href=' + "\"" + 'http://cdms.astro.uni-koeln.de'+mol.meta_url + "\"" + " target=\"_blank\">CDMS Entry</a>"
    metadata_to_push['Ref19'] = metadata_to_push['Ref20'].replace('cdmsinfo?file=e','cdmssearch?file=c').replace('Entry', 'CAT file')
    metadata_to_push['LineList'] = mol.ll_id
    metadata_to_push['species_id_noparens'] = mol.s_name_noparens
    # If you want to give the molecule a pretty new, or non-standard, name
    new_name = eg.enterbox(msg="Do you want to change the descriptive metadata molecule name?"
                               " Leave blank otherwise. Current name is %s"
                               % metadata_to_push['Name'], title="Metadata Name Change")
    # BUGFIX: was `new_name is not ''` -- an identity test that is True even
    # when the dialog is cancelled (returns None), setting Name to None.
    if new_name:
        metadata_to_push['Name'] = new_name
    # Generates new splat_id from the molecular mass
    cmd = "SELECT SPLAT_ID FROM species " \
          "WHERE SPLAT_ID LIKE '%s%%'" % str(mol.tag[:3])
    sql_cur.execute(cmd)
    splat_id_list = sql_cur.fetchall()
    # If there's more than one species in the splatalogue list with the same molecular mass, this finds the minimum value necessary to make it a unique ID
    if len(splat_id_list) > 0:
        max_id = max([int(x[0][3:]) for x in splat_id_list])
        if max_id < 9:
            splat_id = mol.tag[:3] + '0'+str(max_id+1)
        else:
            splat_id = mol.tag[:3] + str(max_id+1)
    else:
        splat_id = mol.tag[:3] + '01'
    # Self-explanatory: This is where we build the species row entry
    species_to_push = OrderedDict([('species_id', metadata_to_push['species_id']),
                                   ('name', mol.formula), ('chemical_name', None), ('s_name', mol.s_name),
                                   ('s_name_noparens', mol.s_name_noparens),
                                   ('SPLAT_ID', splat_id), ('atmos', '0'), ('potential', '1'), ('probable', '0'),
                                   ('known_ast_molecules', '0'), ('planet', '0'), ('ism_hotcore', '0'),
                                   ('ism_diffusecloud', '0'), ('comet', '0'), ('extragalactic', '0'),
                                   ('AGB_PPN_PN', '0'), ('SLiSE_IT', '0'), ('Top20', '0')])
    species_choices_fieldnames = ['%s (%s)' % (key, value) for key, value in species_to_push.items()]
    species_choices = eg.multenterbox('Set species entries', 'species entry', species_choices_fieldnames)
    # Ensures we keep a 1-1 correspondence between species_to_push and users' entries from above
    for idx, key in enumerate(species_to_push):
        if not species_choices[idx]: # If user entry is empty, we do nothing
            pass
        else:
            species_to_push[key] = species_choices[idx]
    # Interstellar values; instantiated separately so we can correlate between metadata and species ISM tags
    ism_set = ('ism_hotcore', 'ism_diffusecloud', 'comet', 'extragalactic', 'known_ast_molecules')
    ism_set_dict = {key: value for (key, value) in [(key, species_to_push[key]) for key in ism_set]}
    # If it's an ISM detection, we probably want to make the freqs NRAO recommended ('Lovas_NRAO' column in main).
    # This may need to be changed in the future, but this was decided under agreement with <NAME>
    if any([val == '1' for val in ism_set_dict.values()]):
        metadata_to_push['ism'] = 1
        mol.cat['Lovas_NRAO'] = 1 # Sets all lines in linelist to be NRAO recommended if it's a detected molecule
    else:
        metadata_to_push['ism'] = 0
    # ISM tag overlap between metadata and species tables
    ism_overlap_tags = ['ism_hotcore', 'comet', 'planet', 'AGB_PPN_PN', 'extragalactic', 'species_id',]
    for tag in ism_overlap_tags:
        metadata_to_push[tag] = species_to_push[tag]
    # Format quantum numbers
    qn_fmt = mol.cat['qn_code'][0]
    fmtted_qns = []
    # Iterate through rows and add formatted QN
    choice_idx = None
    for idx, row in mol.cat.iterrows():
        format, choice_idx = format_it(qn_fmt, row.filter(regex=re.compile('(qn_)'+'.*?'+'(_)'+'(\\d+)')),
                                       choice_idx=choice_idx) # See QNFormat.py for this function
        fmtted_qns.append(format)
    # Push formatted quantum numbers to linelist
    mol.cat['resolved_QNs'] = pd.Series(fmtted_qns, index=mol.cat.index)
    if any(mol.cat['resolved_QNs'] == ''):
        print('======================\n'+'WARNING: The parsing code did not parse the quantum numbers. This may be due to the CAT QN code not being programmed into QNParser, but also might be due to you choosing not to parse the QNs.\n Please contact your friendly code developer (Nathan) if you need help in this regard.\n'+'======================')
    # Prep linelist for submission to database
    sql_cur.execute("SHOW columns FROM main")
    ll_splat_col_list = [tup[0] for tup in sql_cur.fetchall()] # Generates a list of columns from the main table
    ll_col_list = mol.cat.columns.values.tolist() # Kick it out of the dataframe so we can throw it into a list, which mysqldb can handle
    final_cat = mol.cat[[col for col in ll_splat_col_list if col in ll_col_list]] # Gets rid of dataframe columns NOT in the main column list
    return final_cat, species_to_push, metadata_to_push
def push_molecule(db, ll, spec_dict, meta_dict, push_metadata_flag='APPEND', append_lines_flag=False, update=0):
    """Push prepared species/metadata/linelist data into the database.

    Takes all the prepared entry data from either new_molecule() or
    process_update() and inserts it into the `species`, `species_metadata`
    and `main` tables.

    db:                 open MySQLdb connection
    ll:                 linelist DataFrame, columns already trimmed to `main`
    spec_dict:          species row dict (only inserted when update=0)
    meta_dict:          species_metadata row dict
    push_metadata_flag: 'APPEND', 'REPLACE' or 'NO' -- metadata handling
    append_lines_flag:  if False on update, delete the existing linelist first
    update:             0 for a brand-new species, 1 for an update
    """
    for key in meta_dict: # For your viewing pleasure
        print('%s \t %s' % (key, meta_dict[key]))
    print('Converting linelist for SQL insertion...')
    ll['species_id'] = meta_dict['species_id'] # Copies species_id from metadata, where it's first generated, into the linelist
    ll = ll.where(pd.notnull(ll),None) # Ensures NULL values for empty cells
    ll_dict = [tuple(x) for x in ll.to_numpy()]
    num_entries = len(ll_dict)
    # Helper functions to generate well-formed SQL INSERT statements
    def placeholders(inp): return ', '.join(['%s'] * len(inp))
    def placeholders_err(inp): return ', '.join(['{}'] * len(inp))
    def columns(inp): return ', '.join(inp.keys())
    def query(table, inp): return "INSERT INTO %s ( %s ) VALUES ( %s )" % (table, columns(inp), placeholders(inp))
    # BUGFIX: this helper had no `return`, so the failure paths below crashed
    # with AttributeError on None instead of printing the offending query.
    def query_err(table, inp): return "INSERT INTO %s ( %s ) VALUES ( %s )" % \
        (table, columns(inp), placeholders_err(inp))
    # Add some last minute entries to dictionaries
    spec_dict['created'] = time.strftime('%Y-%m-%d %H:%M:%S')
    spec_dict['nlines'] = str(len(ll.index))
    spec_dict['version'] = '1'
    spec_dict['resolved'] = '1'
    if update:
        # Update a few things in species column
        print('Updating species table...')
        cursor = db.cursor()
        cursor.execute('UPDATE species SET created=%s WHERE species_id=%s',
                       (spec_dict['created'], meta_dict['species_id']))
        cursor.execute('UPDATE species SET nlines=%s WHERE species_id=%s',
                       (spec_dict['nlines'], meta_dict['species_id']))
        # Replace old NRAO recommended frequency tags so the updated data set becomes the new NRAO rec
        if meta_dict['ism'] == 1:
            print('Removing previous Lovas NRAO recommended frequencies, if necessary...')
            cursor.execute('UPDATE main SET Lovas_NRAO = 0 WHERE species_id=%s', (meta_dict['species_id'],))
        if push_metadata_flag == 'REPLACE':
            print('Removing duplicate metadata, if neeeded...') # In case of duplicate data
            cursor.execute('DELETE FROM species_metadata WHERE species_id=%s AND LineList=%s AND v3_0 = 3', (meta_dict['species_id'], meta_dict['LineList']))
        if not append_lines_flag:
            print('Removing previous current version lines if available...') # Prevents doubling of entries, such as in case of an accidental update
            cursor.execute('DELETE FROM main WHERE species_id=%s AND `v3.0`=3 AND ll_id=%s', (meta_dict['species_id'], meta_dict['LineList']))
        cursor.close()
    else:
        cursor = db.cursor()
        print('Creating new entry in species table...')
        try:
            cursor.execute(query("species", spec_dict), spec_dict.values())
        except sqldb.ProgrammingError:
            print("The following query failed: ")
            print(query_err("species", spec_dict).format(*spec_dict.values()))
        print('Finished successfully. Created entry for %s with species_id %s and SPLAT_ID %s \n'
              % (spec_dict['name'], spec_dict['species_id'], spec_dict['SPLAT_ID']))
        cursor.close()
    # Older metadata versions are preserved: new metadata rows are appended
    # (REPLACE only deletes current-version duplicates, handled above).
    if (update == 0) or (update == 1 and push_metadata_flag in ('APPEND','REPLACE')):
        cursor = db.cursor()
        print('Creating new entry in metadata table...')
        try:
            cursor.execute(query("species_metadata", meta_dict), meta_dict.values())
        except sqldb.ProgrammingError:
            print("The folllowing query failed: ")
            print(query_err("species_metadata", meta_dict).format(*meta_dict.values()))
        print('Finished successfully.\n')
        cursor.close()
    print('Pushing linelist (%s entries) to database...' % num_entries)
    cursor = db.cursor()
    # Generates a giant INSERT query for all rows in the linelist.
    # This is a MUCH faster process than having Python loop through each row and insert it manually.
    query_ll = "INSERT INTO %s ( %s ) VALUES ( %s )" \
               % ("main", ', '.join(ll.columns.values), placeholders(ll.columns.values))
    try:
        cursor.executemany(query_ll, ll_dict)
    except sqldb.ProgrammingError:
        print('Pushing linelist failed.')
    except TypeError:
        raise
    cursor.close()
    db.commit()
    print('Finished with linelist push.')
def main(db):
    """Top-level interactive loop.

    Pulls the CDMS update table, lets the user pick an entry, matches it to
    an existing Splatalogue species (or creates a new one) and pushes the
    result to the database. `db` is an open MySQLdb connection.
    """
    # ------------------
    # POPULATE CDMS LIST
    # ------------------
    pd.options.mode.chained_assignment = None
    print 'Pulling updates from CDMS...'
    update_list = pull_updates()
    choice_list = [CDMSChoiceList([str(i)]+update_list[i]) for i in range(len(update_list))]
    # RUN PROCESS
    CDMSLoop = True
    while CDMSLoop:
        cursor = db.cursor()
        choice = eg.choicebox("Choose a Molecule to Update", "Choice", choice_list)
        custom_cat_file = eg.buttonbox(msg='Would you like to supply a custom CAT file?', choices=['Yes', 'No'])
        # choice[0:5] is the zero-padded index that CDMSChoiceList.__str__
        # places in the first (5-wide) column of the displayed row.
        if custom_cat_file == 'Yes':
            custom_path = eg.fileopenbox(msg='Please select a CAT file.', title='Custom CAT file')
            cat_entry = CDMSMolecule(update_list[int(choice[0:5])], custom=True, custom_path=custom_path)
        else:
            cat_entry = CDMSMolecule(update_list[int(choice[0:5])], custom=False)
        # Queries database for all species with valid "SPLAT IDs"
        cmd = "SELECT * FROM species " \
              "WHERE SPLAT_ID LIKE '%s%%'" % str(cat_entry.tag[:3])
        cursor.execute(cmd)
        res = cursor.fetchall()
        # Hacky way to get easygui to correlate the mysql query output rows to rows in the GUI molecule list
        splatmolresults = [SplatSpeciesResultList([i]+list(x)) for i, x in enumerate(res)]
        # Sentinel row: an 'X' in the 5th display column marks "NEW MOLECULE".
        splatmolresults += [SplatSpeciesResultList([len(splatmolresults),999999999,0,'NEW MOLECULE',
                                                    'X', '', '', '', '', '', ''])]
        choice2 = eg.choicebox("Pick molecule from Splatalogue to update, or create a new molecule.\n "
                               "Current choice is:\n %s" % choice, "Splatalogue Search Results",
                               splatmolresults)
        cursor.close()
        # Index 68 is the last character of the right-justified 15-wide 5th
        # field in SplatSpeciesResultList.__str__ -- 'X' only for the sentinel.
        if choice2[68] == 'X': # New molecule
            linelist, species_final, metadata_final = new_molecule(cat_entry, db)
            push_molecule(db, linelist, species_final, metadata_final, update=0)
        else: # Molecule already exists in Splatalogue database
            linelist, metadata_final, push_metadata_flag, append_lines_flag = process_update(cat_entry, res[int(choice2[0:5])], db)
            push_molecule(db=db, ll=linelist, spec_dict={}, meta_dict=metadata_final, push_metadata_flag=push_metadata_flag, append_lines_flag=append_lines_flag, update=1)
        choice3 = eg.buttonbox(msg='Do you want to update another CDMS entry?', choices=['Yes', 'No'])
        if choice3 == 'No':
            CDMSLoop = False
|
import urllib2
from bs4 import BeautifulSoup
import time
import numpy as np
import pandas as pd
from itertools import izip_longest
from collections import OrderedDict
import re
import MySQLdb as sqldb
import easygui as eg
from QNFormat import *
import sys
import os
class CDMSMolecule:
    def parse_cat(self, cat_url=None, local=0):
        """ This function takes a Pickett prediction file (a so-called "CAT" file) and converts it into a Pandas DataFrame.
        This code should work for any well-formed CAT file, and works for all CDMS and JPL entries, as well as custom, user-generated
        CAT files. It is unclear if there are any edge cases this misses as CAT files are fairly rigorous in their formatting.

        cat_url: URL string of the CAT file (local=0) or an already-open file
            object (local=1).
        local: 0 downloads cat_url over HTTP and saves a copy into the working
            directory; 1 reads cat_url as a file object.
        Returns a DataFrame with one row per transition: the fixed-width CAT
            columns plus per-state quantum numbers in qn_up_N / qn_dwn_N.
        """
        num_qns = 0
        # Maps a letter to its alphabet index; SPCAT encodes quantum numbers
        # outside 0..99 as a letter followed by a digit.
        def l_to_idx(letter): # For when a QN > 99
            _abet = 'abcdefghijklmnopqrstuvwxyz'
            return next((z for z, _letter in enumerate(_abet) if _letter == letter.lower()), None)
        # Generates a parsing string formatter for CAT file rows
        def make_parser(fieldwidths):
            def accumulate(iterable):
                total = next(iterable)
                yield total
                for value in iterable:
                    total += value
                    yield total
            cuts = tuple(cut for cut in accumulate(abs(fw) for fw in fieldwidths))
            pads = tuple(fw < 0 for fw in fieldwidths) # bool for padding
            flds = tuple(izip_longest(pads, (0,)+cuts, cuts))[:-1]  # don't need final one
            def parse(lyne): return tuple(lyne[i:j] for pad, i, j in flds if not pad)
            parse.size = sum(abs(fw) for fw in fieldwidths)
            parse.fmtstring = ' '.join('{}{}'.format(abs(fw), 'x' if fw < 0 else 's') for fw in fieldwidths)
            return parse
        widths = [13, 8, 8, 2, 10, 3, 7, 4]  # Character widths for each CAT file entry, not including quantum numbers
        w_sum = sum(widths)
        parser = make_parser(tuple(widths))
        try:
            print '========\n'+ cat_url.name + '\n========\n'
        except AttributeError: # cat_url is a string:
            print '========\n'+ cat_url + '\n========\n'
        if local == 0:
            cat_inp = urllib2.urlopen(cat_url).read()
            # Save cat_inp to working directory
            with open(self.working_directory+'/'+self.tag+"_"+self.name+'.cat','wb') as otpt:
                otpt.write(cat_inp)
            # Split by line to ready CAT file for parse
            cat_inp = cat_inp.split('\n')
        else:
            cat_inp = cat_url.read().split('\n')
        initial_list = []
        j = 0
        for line in cat_inp: # Parses the CAT file into chunks
            if j == 0:
                # The QN field is whatever remains after the fixed-width
                # columns; its width is measured once from the first line.
                qn_len = len(line)-w_sum
                widths.append(qn_len)
                parser = make_parser(widths)
            initial_list.append(parser(line))
            j += 1
        # Let's put everything together to put into a dataframe
        parsed_list = []
        qn_parser = make_parser((2,)*12)  # QNs are packed as up-to-12 two-character fields
        max_qn_length = 0 # For fitting strings into temporary numpy array
        for row in initial_list:
            if num_qns == 0: # Get number of quantum numbers per state
                try:
                    num_qns = int(row[7][-1])
                except IndexError: # You should never end up here unless there's a crazy edge case or a badly formed CAT file.
                    print row
                    raise
            # This is a really hacky way to parse the quantum numbers, but it's robust and has worked without a hitch so far.
            # Uses a series of if-else statements to iterate through the QNs in a linear fashion
            raw_qn = row[-1].rstrip()
            if len(raw_qn) > max_qn_length:
                max_qn_length = len(raw_qn)
            qns = qn_parser(row[-1]) # splits QN entry into pairs
            up_done = False # Boolean for being done with the upper state QNs
            in_middle = False # Are we in the character gap between the upper and lower state QNs?
            down_done = False # Boolean for being down with the lower state QNs
            qns_up = []
            qns_down = []
            down_idx = 0
            for i, val in enumerate(qns):
                # The first num_qns pairs are the upper state; after that we
                # skip blank pairs until the lower-state QNs begin.
                if i == num_qns:
                    up_done = True
                    in_middle = True
                if up_done and in_middle and val.strip() == '':
                    continue
                if up_done and in_middle and val.strip() != '':
                    in_middle = False
                if down_idx == num_qns:
                    down_done = True
                if not up_done and not in_middle: # Still in the upper state
                    try:
                        qns_up.append(int(val))
                    except ValueError: # In case it isn't an integer quantum number
                        try:
                            if val.strip() == '+': # For parity symbols in CH3OH, for instance
                                qns_up.append(1)
                            elif val.strip() == '-':
                                qns_up.append(-1)
                            elif val.strip() == '': # No parity symbol?
                                qns_up.append(0)
                            elif re.search('[A-Z]', val.strip()): # QN > 99
                                temp = list(val)
                                qns_up.append((100 + (l_to_idx(temp[0]))*10) + int(temp[1]))
                            elif re.search('[a-z]', val.strip()): # QN < -9, e.g. CDMS CD3CN entry
                                temp = list(val)
                                qns_up.append((-10 - l_to_idx(temp[0])*10) - int(temp[1]))
                        except TypeError: # You shouldn't ever get here, but just in case...
                            print i, val, [x.strip() for x in qns]
                            raise
                if up_done and (not down_done and not in_middle): # Hit the beginning of the lower states
                    down_idx += 1
                    try:
                        qns_down.append(int(val))
                    except ValueError:
                        try:
                            if val.strip() == '+':
                                qns_down.append(1)
                            elif val.strip() == '-':
                                qns_down.append(-1)
                            elif val.strip() == '':
                                qns_down.append(0)
                            elif re.search('[A-Z]', val.strip()): # QN > 99
                                temp = list(val)
                                qns_down.append((100 + (l_to_idx(temp[0]))*10) + int(temp[1]))
                            elif re.search('[a-z]', val.strip()): # QN < -9, e.g. CDMS CD3CN entry
                                temp = list(val)
                                qns_down.append((-10 - l_to_idx(temp[0])*10) - int(temp[1]))
                        except TypeError:
                            print i, val, [x.strip() for x in qns]
                            raise
            try:
                parsed_list.append([float(s.strip()) for s in row[:-1]] + [raw_qn] + [qns_up, qns_down])
            except ValueError: # Get blank line or other issue?
                line = [s.strip() for s in row[:-1]]
                if not line[0]: # Blank line
                    continue
                elif any([char.isalpha() for char in line[5]]): # Upper state degeneracy > 99:
                    # Same letter-digit encoding as the QNs, base 100.
                    line[5] = 1000 + l_to_idx(line[5][0])*100 + int(line[5][1:])
                parsed_list.append([float(col) for col in line] + [raw_qn] + [qns_up, qns_down])
        # Generates columns for dataframe that correlate with columns in main
        dtypes = [('frequency', 'f8'), ('uncertainty', 'f8'), ('intintensity', 'f8'), ('degree_freedom', 'i4'),
                  ('lower_state_energy', 'f8'), ('upper_state_degeneracy', 'i4'), ('molecule_tag', 'i4'),
                  ('qn_code', 'i4'), ('raw_qn', 'S%i'%max_qn_length)]
        dtypes.extend([('qn_up_%s' % i, 'i4') for i in range(len(parsed_list[0][-2]))])
        dtypes.extend([('qn_dwn_%s' % i, 'i4') for i in range(len(parsed_list[0][-2]))])
        final_list = []
        for row in parsed_list:
            final_list.append(tuple(row[:-2]+row[-2]+row[-1]))
        # Fill a typed numpy record array first, then hand it to pandas.
        nplist = np.zeros((len(final_list),), dtype=dtypes)
        nplist[:] = final_list
        return pd.DataFrame(nplist)
# Not used but useful in case you want to append custom lines to a linelist
def add_row(self, row_name, value):
@staticmethod
def add(cat, row, val):
cat[row] = val
return cat
add(self.cat, row_name, value)
def parse_formula(self, input_formula):
common_isotopes = ['13C', '15N','18O','17O','33S','34S','36S', '40Ar', '26Al','30Si','29Si','65Cu','52Cr','66Zn', '68Zn','35Cl','36Cl','37Cl','39K', '40K', '41K','46Ti','50Ti']
# Get rid of any junk after a comma, usually some state descriptor
if ',' in input_formula:
output_formula = input_formula.split(',')[0]
leftovers = ' '.join(input_formula.split(',')[1:])
else:
output_formula = input_formula
leftovers = ''
for isotope in common_isotopes:
# Do isotopes first
if isotope in output_formula:
num_part, element = re.findall(r'[^\W\d_]+|\d+', isotope)
output_formula = output_formula.replace(isotope, '<sup>'+num_part+'</sup>'+element)
# Replace every other number with <sub>
atoms_with_multiplicity = re.findall(r'[A-Z][a-z]*\d+', output_formula)
for atom in atoms_with_multiplicity:
element, num_part = re.findall(r'[^\W\d_]+|\d+', atom)
output_formula = output_formula.replace(atom, element+'<sub>'+num_part+'</sub>',1)
# Add <sub> to any parenthesized subgroup of the formula
parenthetical_subgroups = re.findall(r'\)\d+', output_formula)
for subgroup in parenthetical_subgroups:
output_formula = output_formula.replace(subgroup, ')'+'<sub>'+subgroup.split(')')[1]+'</sub>')
# Now, let's build s_name and s_name_noparens
s_name = output_formula.replace('<sup>','(').replace('</sup>', ')').replace('<sub>','').replace('</sub>','')
s_name_noparens = s_name.replace('(','').replace(')','')
return output_formula+leftovers, s_name+leftovers, s_name_noparens+leftovers
# Scrapes CDMS site to generate metadata
def get_metadata(self, meta_url):
    """Scrape a CDMS documentation page and build the metadata record.

    Saves the raw HTML into the working directory, then brute-forces the
    page's table rows to pull out partition-function values, dipole
    moments and rotational constants.  Returns a 2-tuple:
    (parse_formula() result for the page's formula, metadata dict whose
    keys match columns of the species_metadata SQL table).
    """
    print self.name
    metadata = {}  # Dictionary for metadata, keys are consistent with columns in SQL
    # Dictionaries to connect string values in CDMS metadata to SQL columns
    q_temps = {'2000.': 'Q_2000_', '1000.': 'Q_1000_', '500.0': 'Q_500_0', '300.0': 'Q_300_0',
               '225.0': 'Q_225_0', '150.0': 'Q_150_0', '75.00': 'Q_75_00', '37.50': 'Q_37_50',
               '18.75': 'Q_18_75', '9.375': 'Q_9_375', '5.000': 'Q_5_00', '2.725': 'Q_2_725'}
    # Workaround for non-standard temperature labels used by some CDMS entries
    q_temps_alt = {'2000.': 'Q_2000_', '1000.': 'Q_1000_', '500.': 'Q_500_0', '300.': 'Q_300_0',
                   '225.0': 'Q_225_0', '150.': 'Q_150_0', '075.': 'Q_75_00', '37.50': 'Q_37_50',
                   '18.75': 'Q_18_75', '9.375': 'Q_9_375', '5.000': 'Q_5_00', '2.725': 'Q_2_725'}
    dipoles = {'a / D': 'MU_A', 'b / D': 'MU_B', 'c / D': 'MU_C'}
    # Initialize scraper and keep a local copy of the page for reference
    meta_page = urllib2.urlopen(meta_url)
    meta_page_read = meta_page.read()
    with open(self.working_directory+'/'+self.tag+"_"+self.name+'.html','wb') as otpt:
        otpt.write(meta_page_read)
    soup = BeautifulSoup(meta_page_read, 'lxml')
    # Grab formula from the first table caption on the page
    formula = soup.find_all('caption')[0].get_text().split('\n')[0].encode('utf-8')
    # Need to add lit data / dipoles etc
    meta_page.close()
    table = soup.find_all('tr')
    for entry in table:  # Right now it seems the best way is to brute force this
        temp = entry.get_text()
        metadata['Name'] = self.name
        metadata['Date'] = time.strftime('%b. %Y', self.date)
        if 'Contributor' in temp:
            if self.ll_id == '10':
                metadata['Contributor'] = '<NAME>'
            else:
                metadata['Contributor'] = temp.split('Contributor')[1].encode('utf-8')
        # Pull out spin-rotation partition function values
        # NOTE(review): zip() over two dict key sets relies on both dicts
        # iterating in matching order -- fragile, verify before changing
        for key,key2 in zip(q_temps,q_temps_alt):
            if 'Q(%s)' % key in temp:
                metadata[q_temps[key].encode('utf-8')] = temp.split('Q(%s)' % key)[1].encode('utf-8')
            elif 'Q(%s)' % key2 in temp:
                metadata[q_temps_alt[key2].encode('utf-8')] = temp.split('Q(%s)' % key2)[1].encode('utf-8')
        # Helpers: does the row contain any digit / pull all float-like tokens
        def value_check(x): return any(i.isdigit() for i in x)
        def pull_float(x): return re.findall(r'\d+.\d+', x)
        # Dipole moments (row labels like 'a / D' map to MU_A etc.)
        for key in dipoles:
            if key in temp:
                if value_check(temp) and 'Q(' not in temp:
                    metadata[dipoles[key]] = pull_float(temp)[0].encode('utf-8')
        # Rotational constants A/B/C (rows carrying '/ MHz' or an A-C + number)
        if ('/ MHz' in temp or re.findall(r'[A-C]\d.\d+', temp)) and 'Q(' not in temp:
            if value_check(temp):
                if 'A' in temp:
                    metadata['A'] = pull_float(temp)[0].encode('utf-8')
                if 'B' in temp:
                    metadata['B'] = pull_float(temp)[0].encode('utf-8')
                if 'C' in temp:
                    metadata['C'] = pull_float(temp)[0].encode('utf-8')
    # First <p> block on the page is taken as the literature reference
    metadata['Ref1'] = str(soup.find_all('p')[0]).replace('\n', ' ')
    # Some hard-coded replace statements for weird things that don't parse correctly when displaying the metadata
    metadata['Ref1'] = metadata["Ref1"].replace('\xc2\x96','-')  # Fixes long dashes occasionally used in CDMS entries
    return self.parse_formula(formula), metadata
# Calculates all derived parameters from data in the CAT file, e.g. lower/upper state energies, sijmu2 values, etc.
# Currently does NOT calculate sij values, because of the case-by-case, or even line-by-line, difficulty on how to identify the electric dipole to divide by
@staticmethod
def calc_derived_params(cat, metadata):
    """Fill in the derived spectroscopic columns of the parsed CAT DataFrame.

    cat -- DataFrame from parse_cat() (frequencies in MHz, energies in cm^-1)
    metadata -- dict from get_metadata(); only 'Q_300_0' is read here
    Returns the same DataFrame, mutated in place and with new columns added.
    """
    try:
        q_spinrot = float(metadata['Q_300_0'])
    except ValueError:  # in case there's multiple numbers, e.g. "123.4(5)"
        q_spinrot = float(metadata['Q_300_0'].split('(')[0])
    # kT at 300 K expressed in wavenumbers (cm^-1)
    kt_300_cm1 = 208.50908
    # S_ij*mu^2 from the CAT log10 integrated intensity; 29979.2458 converts MHz -> cm^-1
    cat['sijmu2'] = 2.40251E4 * 10**(cat['intintensity']) * q_spinrot * (1./cat['frequency']) * \
                    (1./(np.exp(-1.0*cat['lower_state_energy']/kt_300_cm1) -
                         np.exp(-1.0*(cat['frequency']/29979.2458+cat['lower_state_energy'])/kt_300_cm1)))
    # log10 of the Einstein A coefficient
    cat['aij'] = np.log10(1.16395E-20*cat['frequency']**3*cat['sijmu2']/cat['upper_state_degeneracy'])
    # 1.4387863 K*cm converts wavenumber energies to Kelvin (hc/k_B)
    cat['lower_state_energy_K'] = cat['lower_state_energy']*1.4387863
    cat['upper_state_energy'] = cat['lower_state_energy'] + cat['frequency']/29979.2458
    cat['upper_state_energy_K'] = cat['upper_state_energy']*1.4387863
    cat['error'] = cat['uncertainty']
    cat['roundedfreq'] = cat['frequency'].round(0)
    # Wavelength in mm from frequency in MHz
    cat['line_wavelength'] = 299792458./(cat['frequency']*1.0E6)*1000
    cat['quantum_numbers'] = cat['raw_qn']
    # Add measured freqs and then ordered frequencies:
    # a negative molecule tag marks a lab-measured line, positive a prediction
    cat['measfreq'] = np.nan
    cat['orderedfreq'] = np.nan
    cat['measerrfreq'] = np.nan
    mask_meas = (cat['molecule_tag'] < 0)
    mask_pred = (cat['molecule_tag'] > 0)
    # NOTE(review): chained assignment (cat[col][mask] = ...) raises
    # SettingWithCopy warnings on modern pandas and may stop propagating
    # under copy-on-write; .loc[mask, col] would be the safe form
    cat['measfreq'][mask_meas] = cat['frequency'][mask_meas]
    cat['frequency'][mask_meas] = np.nan
    cat['orderedfreq'][mask_meas] = cat['measfreq'][mask_meas]
    cat['measerrfreq'][mask_meas] = cat['uncertainty'][mask_meas]
    cat['orderedfreq'][mask_pred] = cat['frequency'][mask_pred]
    cat['transition_in_space'] = '0'
    return cat
def create_directory(self):
    """Create (and return the path of) a working directory for this entry.

    The directory lives under ``working_molecules/`` and is named from the
    CDMS tag, molecule name and entry date; if that name already exists an
    integer suffix is appended until the name is unique.
    """
    base = 'working_molecules/'
    stamp = time.strftime('%b%Y', self.date)
    candidate = base + 'CDMS_' + self.tag + "_" + self.name + '_' + stamp
    # Disambiguate against previously created directories with a numeric suffix
    if os.path.isdir(candidate):
        suffix = 1
        while os.path.isdir(candidate + '_{:d}'.format(suffix)):
            suffix += 1
        candidate = candidate + '_{:d}'.format(suffix)
    try:
        os.makedirs(candidate)
    except OSError:
        print('Creation of directory %s failed' %(candidate,))
    else:
        print('Created working directory for molecular information at: %s' %(candidate,))
    return candidate
def __init__(self, cdms_inp, custom=False, ll_id='10', custom_path="", write_directory=True):
    """Build a molecule entry from one row of the CDMS update list.

    cdms_inp -- [tag, name, struct_time date, CAT-file URL, metadata URL]
    custom -- if True, parse the local CAT file at *custom_path* instead
              of downloading it from CDMS
    ll_id -- Splatalogue linelist id ('10' == CDMS)
    write_directory -- create a per-entry working directory for output
        files.  NOTE(review): parse_cat/get_metadata/to_csv all write into
        self.working_directory, so passing False likely breaks them.
    """
    base_url = "http://cdms.astro.uni-koeln.de"
    self.tag = cdms_inp[0]
    self.name = cdms_inp[1]
    self.date = cdms_inp[2]
    self.cat_url = cdms_inp[3]
    self.meta_url = cdms_inp[4]
    self.ll_id = ll_id
    if write_directory:
        self.working_directory = self.create_directory()
    # Parse the CAT file: local file handle for custom entries, URL otherwise
    if custom:
        self.cat = self.parse_cat(cat_url=open(custom_path, 'r'), local=1)
    else:
        self.cat = self.parse_cat(cat_url=base_url+self.cat_url)
    # Scrape metadata, then fill in the derived spectroscopic columns
    (self.formula, self.s_name, self.s_name_noparens), self.metadata = self.get_metadata(base_url+self.meta_url)
    self.cat = self.calc_derived_params(self.cat, self.metadata)
    self.cat['ll_id'] = self.ll_id
    # Version flag; the backticked column name matches the SQL `v3.0` column
    self.cat['`v3.0`'] = '3'
    # Write parsed CAT dataframe to CSV file for inspection
    self.cat.to_csv(path_or_buf=self.working_directory+'/'+self.tag+"_"+self.name+'_parsed_cat.csv')
    for key in self.metadata:
        print key, ': ', self.metadata[key]
class SplatSpeciesResultList(list):
    """One `species`-table query row prefixed with a selection index,
    rendered as an aligned fixed-width line for the chooser GUI."""

    def __new__(cls, data=None):
        return super(SplatSpeciesResultList, cls).__new__(cls, data)

    def __str__(self):
        fields = list(self)
        # Zero-pad the selection index to four characters
        fields[0] = "0" * (4 - len(str(fields[0]))) + str(fields[0])
        return "{:5} {:10} {:10} {:>25} {:>15}".format(
            fields[0], fields[1], fields[5], fields[3], fields[4])
class CDMSChoiceList(list):
    """A CDMS update-list row plus a leading index string, rendered as a
    fixed-width line; the struct_time date is shown as e.g. 'January 2020'."""

    def __new__(cls, data=None):
        return super(CDMSChoiceList, cls).__new__(cls, data)

    def __str__(self):
        fields = list(self)
        # Zero-pad the (string) selection index to four characters
        fields[0] = "0" * (4 - len(fields[0])) + fields[0]
        stamp = time.strftime("%B %Y", fields[3])
        return "{:5} {:10} {:>25} {:>15}".format(fields[0], fields[1], fields[2], stamp)
def unidrop(x):  # Strips any non-ASCII unicode text from string
    """Replace each maximal run of non-ASCII characters in *x* with one space."""
    ascii_only = re.sub(r'[^\x00-\x7F]+', ' ', x)
    return ascii_only
def pretty_print(comp):
    """Format the compiled CDMS update list as an aligned, tab-separated
    text table with a header row; every row (header included) ends in a
    newline."""
    template = "{:5}\t{:45}\t{:15}\t{:40} {:40}"
    lines = [template.format('Tag', 'Molecule', 'Date', 'Cat Link', 'Metadata Link')]
    for entry in comp:
        lines.append(template.format(entry[0], entry[1], time.strftime("%B %Y", entry[2]),
                                     entry[3], entry[4]))
    return '\n'.join(lines) + '\n'
def pull_updates():
    """Scrape the CDMS 'entries' page and return the catalog update list.

    Returns a list of rows ``[tag, molecule, struct_time, cat_url,
    meta_url]``, sorted by entry date with the most recent first.

    Bug fix: rows whose date string matches none of the known formats are
    now reported and skipped.  Previously the `assert formatted_date`
    pattern raised an uncaught NameError when the *first* row failed
    (`formatted_date` unbound), and later failing rows silently inherited
    the previous row's date.
    """
    base_url = "http://cdms.astro.uni-koeln.de"
    page = urllib2.urlopen(base_url+"/classic/entries")
    soup = BeautifulSoup(page.read(), "lxml")
    urls = []  # URLs to CAT and documentation (metadata) files
    des = []   # Text from table entries
    for tr in soup.find_all('tr')[1:]:
        des.append([col.text for col in tr.find_all('td')])
        urls.append([a['href'] for a in tr.find_all('a')])
    page.close()  # Close HTML socket
    compiled = []  # 0 --> tag, 1 --> Molecule, 2 --> struct_time obj, 3 --> cat file, 4 --> metadata
    # CDMS date strings are not consistently formatted, so try each known format
    date_formats = ['%b. %Y', '%B %Y', '%b %Y', '%B. %Y']
    for i, entry in enumerate(urls):
        date = des[i][6].strip()
        formatted_date = None
        for date_fmt in date_formats:
            try:
                formatted_date = time.strptime(date, date_fmt)
            except ValueError:
                continue
            else:
                break
        if formatted_date is None:
            # Unknown date format: report and skip this row rather than
            # crashing or reusing the previous row's date.
            print('Date format %s is not supported in code. Please add to date_formats.'%date)
            continue
        compiled.append([unidrop(des[i][0]).encode('utf-8'), unidrop(des[i][1]).encode('utf-8'),
                         formatted_date, urls[i][1], urls[i][2]])
    compiled.sort(key=lambda x: x[2], reverse=True)  # Sorts by update time, most recent first
    return compiled
def process_update(mol, entry=None, sql_conn=None):
    """
    Flow for process_update:
    1) Check metadata, update if needed
    2) Set QN formatting
    3) Delete CDMS-related linelist from Splatalogue
    4) Push new linelist and metadata to Splatalogue

    mol -- a parsed CDMS molecule object
    entry -- the matching row from the `species` table (column 0 = species_id)
    sql_conn -- open MySQL connection to the Splatalogue database
    Returns (linelist DataFrame, metadata dict, push_metadata_flag, append_lines).
    """
    sql_cur = sql_conn.cursor()
    # ----------------------------
    # METADATA PULL CHECK & UPDATE
    # ----------------------------
    SPECIES_ID=entry[0]
    # Column names of species_metadata, used to map result tuples to dicts
    sql_cur.execute("SHOW columns FROM species_metadata")
    db_meta_cols = [tup[0] for tup in sql_cur.fetchall()]
    sql_cur.execute("SELECT * from species_metadata WHERE species_id=%s", (SPECIES_ID,))
    results = sql_cur.fetchall()
    MetadataMalformed = False
    if len(results) == 1:
        # Exactly one metadata row: use it directly
        db_meta = results[0]
        db_meta = {key:value for key, value in zip(db_meta_cols, db_meta)}
    elif len(results) > 1 and any([res[4] for res in results]):  # There's more than one linelist associated with the chosen species_id and linelist ID
        # Let the user pick which metadata entry to update
        chc = ['date: %s \t list: %s \t v2.0: %s \t v3.0: %s' % (a[4], a[55], a[57], a[59]) for a in results]
        print('Linelist choices: ', chc)
        user_chc = eg.choicebox("Choose an entry to update (CDMS linelist = 10)", "Entry list", chc)
        idx = 0
        # NOTE(review): this loop shadows the `entry` function parameter
        for i, entry in enumerate(chc):
            if user_chc == entry:
                idx = i
                break
        db_meta = results[idx]
        db_meta = {key:value for key, value in zip(db_meta_cols, db_meta)}
    else:  # Species exists but there are no metadata entries, so we have to populate a new one
        db_meta = {}
        MetadataMalformed = True
    # Overlay freshly scraped values onto the database metadata record
    for i, col_name in enumerate(db_meta_cols):
        if col_name in mol.metadata.keys():
            db_meta[col_name] = mol.metadata[col_name]
        else:
            continue
    mol.metadata['LineList'] = mol.ll_id
    mol.metadata['species_id_noparens'] = mol.s_name_noparens
    # Ask how to handle the metadata row: append a new one, replace, or skip
    if len(results) >= 1:
        metadata_push_answer = eg.buttonbox(msg='Do you want to APPEND or REPLACE a new metadata entry, or DO NOTHING? Do nothing if you are merely adding a hyperfine linelist to an existing entry.', choices=['APPEND', 'REPLACE', 'DO NOTHING'])
        if metadata_push_answer == 'APPEND':
            push_metadata_flag = 'APPEND'
        elif metadata_push_answer == 'REPLACE':
            push_metadata_flag = 'REPLACE'
        else:
            push_metadata_flag = 'NO'
    else:
        push_metadata_flag = 'APPEND'
    # Append vs replace the actual transition list (append if dialog dismissed)
    append_lines = eg.buttonbox(msg='Do you want to append the linelist, or replace the current linelist in the database?', choices=['Append', 'Replace'])
    if append_lines == 'Append' or not append_lines:
        append_lines = True
    elif append_lines == 'Replace':
        append_lines = False
    try:
        if db_meta['LineList'] != mol.ll_id:
            mol.metadata['LineList'] = mol.ll_id
    except KeyError:  # Only catches when species exists but metadata doesn't
        # NOTE(review): 'Linelist' (lower-case l) looks like a typo for
        # 'LineList' -- every other use of this key is 'LineList'
        mol.metadata['Linelist'] = mol.ll_id
        db_meta['LineList'] = mol.ll_id
    # Only entry in database isn't from the linelist of the entry that user wants to update
    # Version flags: mark this entry as belonging to v3.0 only
    mol.metadata['v1_0'] = '0'
    mol.metadata['v2_0'] = '0'
    mol.metadata['v3_0'] = '3'
    mol.metadata['v4_0'] = '0'
    new_name = eg.enterbox(msg="Do you want to change the descriptive metadata molecule name? "
                               "Leave blank otherwise. Current name is %s"
                               % mol.metadata['Name'], title="Metadata Name Change")
    # NOTE(review): `is not ''` tests identity, not equality; `!= ''` is intended
    if new_name is not '':
        mol.metadata['Name'] = new_name
    elif not MetadataMalformed:
        mol.metadata['Name'] = db_meta['Name']
    # Check to see first column to place reference info
    # ref_idx = 1
    # while True:
    #     if not db_meta['Ref%s'%ref_idx]:
    #         break
    #     ref_idx += 1
    #mol.metadata['Ref%s'%ref_idx] = mol.metadata.pop('Ref1')
    # Ref20/Ref19 always point back at the CDMS entry and its CAT file
    mol.metadata['Ref20'] = '<a href=' + "\"" + 'http://cdms.astro.uni-koeln.de'+mol.meta_url + "\"" + " target=\"_blank\">CDMS Entry</a>"
    mol.metadata['Ref19'] = mol.metadata['Ref20'].replace('cdmsinfo?file=e','cdmssearch?file=c').replace('Entry', 'CAT file')
    # meta_fields = ['%s \t %s' %(a[0],a[1]) for a in zip(db_meta_cols, db_meta) if 'Ref' not in a[0]]
    # Pull the full species-table row so ISM flags can be mirrored into metadata
    sql_cur.execute("SHOW columns FROM species")
    db_species_cols = [tup[0] for tup in sql_cur.fetchall()]
    sql_cur.execute("SELECT * from species WHERE species_id=%s", (SPECIES_ID,))
    db_species = sql_cur.fetchall()[0]
    if db_meta['LineList'] != mol.ll_id or MetadataMalformed:
        species_entry_dict = {key: value for (key, value) in [(db_species_cols[i], val) for i, val
                                                              in enumerate(db_species)]}
        # Any astronomical-detection flag on the species marks the whole entry 'ism'
        ism_set = ('ism_hotcore', 'ism_diffusecloud', 'comet', 'extragalactic', 'known_ast_molecules')
        ism_set_dict = {key: value for (key, value) in [(key, species_entry_dict[key]) for key in ism_set]}
        if any([val == '1' for val in ism_set_dict.values()]):
            mol.metadata['ism'] = 1
        else:
            mol.metadata['ism'] = 0
        # Mirror the flags that exist in both species and species_metadata
        ism_overlap_tags = ['ism_hotcore', 'comet', 'planet', 'AGB_PPN_PN', 'extragalactic']
        for tag in ism_overlap_tags:
            mol.metadata[tag] = species_entry_dict[tag]
        mol.metadata['ism_diffuse'] = species_entry_dict['ism_diffusecloud']
        mol.metadata['species_id'] = species_entry_dict['species_id']
        mol.metadata['species_id_noparens'] = mol.s_name_noparens
    # for row in zip(db_meta_cols, db_meta):
    #     print row[0],'\t',row[1]
    # sql_cur.execute("SELECT * from species_metadata WHERE species_id=%s and v1_0=%s and v2_0=%s",
    #                 (db_meta[0], mol.ll_id, db_meta[53], db_meta[54]))
    if db_meta['LineList'] == mol.ll_id or not MetadataMalformed:
        # Keep only the scraped values that map onto real metadata columns
        metadata_to_push = {}
        for i, col_name in enumerate(db_meta_cols):
            if col_name in mol.metadata.keys():
                metadata_to_push[col_name] = mol.metadata[col_name]
            #elif db_meta[col_name] is not None:
            #    metadata_to_push[col_name] = db_meta[col_name]
            else:  # Hacky fix to ensure clean columns -- this cleans up columns with no default values that don't allow NULL or are values that aren't otherwise filled in by this routine
                if col_name in ['ism', 'species_id', 'LineList']:
                    metadata_to_push[col_name] = db_meta[col_name]
    else:
        metadata_to_push = mol.metadata
    # Generate new unique ID for metadata entry
    try:
        sql_cur.execute('SELECT MAX(line_id) FROM species_metadata')
    except:  # line_id doesn't exist in the database so just skip this step
        pass
    else:
        if push_metadata_flag == 'APPEND':
            try:
                metadata_to_push['line_id'] = str(int(sql_cur.fetchall()[0][0])+1)
            except TypeError:  # Gets thrown if there are no metadata entries in the table, thus line_id should be "1".
                metadata_to_push['line_id'] = 1
        elif push_metadata_flag == 'REPLACE':
            # Re-use the current maximum line_id instead of allocating a new one
            try:
                metadata_to_push['line_id'] = str(int(sql_cur.fetchall()[0][0]))
            except TypeError:
                metadata_to_push['line_id'] = 1
    # for key in metadata_to_push:
    #     print '%s: %s' %(key, metadata_to_push[key])
    # QN formatting --- let's just do it on a case-by-case basis
    qn_fmt = mol.cat['qn_code'][0]
    fmtted_qns = []
    print 'Preparing linelist...'
    # Iterate through rows and add formatted QN
    choice_idx = None
    for idx, row in mol.cat.iterrows():
        format, choice_idx = format_it(qn_fmt, row.filter(regex=re.compile('(qn_)'+'.*?'+'(_)'+'(\\d+)')),
                                       choice_idx=choice_idx)
        fmtted_qns.append(format)
    # Push formatted quantum numbers to linelist
    mol.cat['resolved_QNs'] = pd.Series(fmtted_qns, index=mol.cat.index)
    if any(mol.cat['resolved_QNs'] == ''):
        print '======================\n'+'WARNING: The parsing code did not parse the quantum numbers. This may be due to the CAT QN code not being programmed into QNParser, but also might be due to you choosing not to parse the QNs.\n Please contact your friendly code developer (Nathan) if you need help in this regard.\n'+'======================'
    # Detected (ISM) species get the NRAO-recommended flag on every line
    if metadata_to_push['ism'] == 1:
        mol.cat['Lovas_NRAO'] = 1
    else:
        mol.cat['Lovas_NRAO'] = 0
    # mol.cat['Lovas_NRAO'] = pd.Series(np.ones(len(mol.cat.index)), index=mol.cat.index)
    # Prep linelist for submission: keep only columns that exist in `main`
    sql_cur.execute("SHOW columns FROM main")
    ll_splat_col_list = [tup[0] for tup in sql_cur.fetchall()]
    ll_col_list = mol.cat.columns.values.tolist()
    final_cat = mol.cat[[col for col in ll_splat_col_list if col in ll_col_list]]
    return final_cat, metadata_to_push, push_metadata_flag, append_lines
def new_molecule(mol, sql_conn=None):
    """Create a brand-new species + metadata entry for *mol*.

    Builds the species_metadata record from the scraped CDMS metadata,
    allocates new species_id / line_id / SPLAT_ID values, asks the user
    (via GUI dialogs) for the species-table flags, formats the quantum
    numbers, and returns (linelist DataFrame, species dict, metadata dict)
    ready for push_molecule().
    """
    sql_cur = sql_conn.cursor()
    # ----------------------------
    # METADATA ADD
    # ----------------------------
    # Generate array of all columns from species_metadata so we can fill them in as we go
    sql_cur.execute("SHOW columns FROM species_metadata")
    db_meta_cols = [tup[0] for tup in sql_cur.fetchall()]
    metadata_to_push = {}
    # Fills in metadata dictionary with the column array we generated above as a list of keys for the metadata dict
    for i, col_name in enumerate(db_meta_cols):
        if col_name in mol.metadata.keys():
            metadata_to_push[col_name] = mol.metadata[col_name]
        else:
            continue
    # Generate new species_id
    sql_cur.execute('SELECT MAX(species_id) FROM species')
    try:  # species_id is +1 of the largest species_id in the species table
        metadata_to_push['species_id'] = str(int(sql_cur.fetchall()[0][0])+1)
    except TypeError:  # Gets thrown if there are no species in the table; therefore species ID should be "1".
        metadata_to_push['species_id'] = "1"
    # Generate new unique ID for metadata entry
    try:
        sql_cur.execute('SELECT MAX(line_id) FROM species_metadata')
    except:  # line_id doesn't exist in the database so just skip this step
        pass
    else:
        try:
            metadata_to_push['line_id'] = str(int(sql_cur.fetchall()[0][0])+1)
        except TypeError:  # Gets thrown if there are no metadata entries in the table, thus line_id should be "1".
            metadata_to_push['line_id'] = 1
    # Odds and ends; version flags mark this as a v3.0 entry
    metadata_to_push['v1_0'] = '0'
    metadata_to_push['v2_0'] = '0'
    metadata_to_push['v3_0'] = '3'
    #metadata_to_push['v4_0'] = '4'
    # Ref20/Ref19 always point back at the CDMS entry and its CAT file
    metadata_to_push['Ref20'] = '<a href=' + "\"" + 'http://cdms.astro.uni-koeln.de'+mol.meta_url + "\"" + " target=\"_blank\">CDMS Entry</a>"
    metadata_to_push['Ref19'] = metadata_to_push['Ref20'].replace('cdmsinfo?file=e','cdmssearch?file=c').replace('Entry', 'CAT file')
    metadata_to_push['LineList'] = mol.ll_id
    metadata_to_push['species_id_noparens'] = mol.s_name_noparens
    # If you want to give the molecule a pretty new, or non-standard, name
    new_name = eg.enterbox(msg="Do you want to change the descriptive metadata molecule name?"
                               " Leave blank otherwise. Current name is %s"
                               % metadata_to_push['Name'], title="Metadata Name Change")
    # NOTE(review): `is not ''` tests identity, not equality; `!= ''` is intended
    if new_name is not '':
        metadata_to_push['Name'] = new_name
    # Generates new splat_id from the molecular mass (first three digits of the CDMS tag)
    cmd = "SELECT SPLAT_ID FROM species " \
          "WHERE SPLAT_ID LIKE '%s%%'" % str(mol.tag[:3])
    sql_cur.execute(cmd)
    splat_id_list = sql_cur.fetchall()
    # If there's more than one species in the splatalogue list with the same molecular mass, this finds the minimum value necessary to make it a unique ID
    if len(splat_id_list) > 0:
        max_id = max([int(x[0][3:]) for x in splat_id_list])
        if max_id < 9:
            splat_id = mol.tag[:3] + '0'+str(max_id+1)
        else:
            splat_id = mol.tag[:3] + str(max_id+1)
    else:
        splat_id = mol.tag[:3] + '01'
    # This is where we build the species row entry (defaults below are user-editable)
    species_to_push = OrderedDict([('species_id', metadata_to_push['species_id']),
                                   ('name', mol.formula), ('chemical_name', None), ('s_name', mol.s_name),
                                   ('s_name_noparens', mol.s_name_noparens),
                                   ('SPLAT_ID', splat_id), ('atmos', '0'), ('potential', '1'), ('probable', '0'),
                                   ('known_ast_molecules', '0'), ('planet', '0'), ('ism_hotcore', '0'),
                                   ('ism_diffusecloud', '0'), ('comet', '0'), ('extragalactic', '0'),
                                   ('AGB_PPN_PN', '0'), ('SLiSE_IT', '0'), ('Top20', '0')])
    species_choices_fieldnames = ['%s (%s)' % (key, value) for key, value in species_to_push.items()]
    species_choices = eg.multenterbox('Set species entries', 'species entry', species_choices_fieldnames)
    # Ensures we keep a 1-1 correspondence between species_to_push and users' entries from above
    for idx, key in enumerate(species_to_push):
        if not species_choices[idx]:  # If user entry is empty, we do nothing
            pass
        else:
            species_to_push[key] = species_choices[idx]
    # Interstellar values; instantiated separately so we can correlate between metadata and species ISM tags
    ism_set = ('ism_hotcore', 'ism_diffusecloud', 'comet', 'extragalactic', 'known_ast_molecules')
    ism_set_dict = {key: value for (key, value) in [(key, species_to_push[key]) for key in ism_set]}
    # If it's an ISM detection, we probably want to make the freqs NRAO recommended ('Lovas_NRAO' column in main).
    # This may need to be changed in the future; decided under agreement with the Splatalogue maintainers.
    if any([val == '1' for val in ism_set_dict.values()]):
        metadata_to_push['ism'] = 1
        mol.cat['Lovas_NRAO'] = 1  # Sets all lines in linelist to be NRAO recommended if it's a detected molecule
    else:
        metadata_to_push['ism'] = 0
    # ISM tag overlap between metadata and species tables
    ism_overlap_tags = ['ism_hotcore', 'comet', 'planet', 'AGB_PPN_PN', 'extragalactic', 'species_id',]
    for tag in ism_overlap_tags:
        metadata_to_push[tag] = species_to_push[tag]
    # Format quantum numbers
    qn_fmt = mol.cat['qn_code'][0]
    fmtted_qns = []
    # Iterate through rows and add formatted QN
    choice_idx = None
    for idx, row in mol.cat.iterrows():
        format, choice_idx = format_it(qn_fmt, row.filter(regex=re.compile('(qn_)'+'.*?'+'(_)'+'(\\d+)')),
                                       choice_idx=choice_idx)  # See QNFormat.py for this function
        fmtted_qns.append(format)
    # Push formatted quantum numbers to linelist
    mol.cat['resolved_QNs'] = pd.Series(fmtted_qns, index=mol.cat.index)
    if any(mol.cat['resolved_QNs'] == ''):
        print '======================\n'+'WARNING: The parsing code did not parse the quantum numbers. This may be due to the CAT QN code not being programmed into QNParser, but also might be due to you choosing not to parse the QNs.\n Please contact your friendly code developer (Nathan) if you need help in this regard.\n'+'======================'
    # Prep linelist for submission to database
    sql_cur.execute("SHOW columns FROM main")
    ll_splat_col_list = [tup[0] for tup in sql_cur.fetchall()]  # Generates a list of columns from the main table
    ll_col_list = mol.cat.columns.values.tolist()  # Kick it out of the dataframe so we can throw it into a list, which mysqldb can handle
    final_cat = mol.cat[[col for col in ll_splat_col_list if col in ll_col_list]]  # Gets rid of dataframe columns NOT in the main column list
    return final_cat, species_to_push, metadata_to_push
def push_molecule(db, ll, spec_dict, meta_dict, push_metadata_flag='APPEND', append_lines_flag=False, update=0):
    """Write the prepared species row, metadata row and linelist to the database.

    Takes the output of either new_molecule() or process_update().

    db -- open MySQL connection
    ll -- linelist DataFrame whose columns match the `main` table
    spec_dict -- species-table row (only inserted when update == 0)
    meta_dict -- species_metadata row
    push_metadata_flag -- 'APPEND' (new metadata row), 'REPLACE' (delete the
        current-version row first) or anything else to skip metadata
    append_lines_flag -- when updating, keep existing lines instead of
        deleting the current-version linelist first
    update -- 0 for a brand-new molecule, 1 for updating an existing one
    """
    for key in meta_dict:  # For your viewing pleasure
        print key, '\t', meta_dict[key]
    print 'Converting linelist for SQL insertion...'
    ll['species_id'] = meta_dict['species_id']  # Copies species_id from metadata, where it's first generated, into the linelist
    ll = ll.where(pd.notnull(ll),None)  # NaN -> None so MySQL receives NULLs
    ll_dict = [tuple(x) for x in ll.to_numpy()]
    num_entries = len(ll_dict)
    #ll_dict = [(None if pd.isnull(y) else y for y in x) for x in ll.to_numpy()]  # Ensures NULL values for empty rows
    # Create new species entry in database
    # Helper functions to generate well-formed SQL INSERT statements
    def placeholders(inp): return ', '.join(['%s'] * len(inp))
    def placeholders_err(inp): return ', '.join(['{}'] * len(inp))
    def columns(inp): return ', '.join(inp.keys())
    def query(table, inp): return "INSERT INTO %s ( %s ) VALUES ( %s )" % (table, columns(inp), placeholders(inp))
    # NOTE(review): query_err is missing a `return`, so it always yields None
    # and the failure-path prints below will raise on .format(None) instead
    # of showing the failed query
    def query_err(table, inp): "INSERT INTO %s ( %s ) VALUES ( %s )" % \
                               (table, columns(inp), placeholders_err(inp))
    # Add some last minute entries to dictionaries
    spec_dict['created'] = time.strftime('%Y-%m-%d %H:%M:%S')
    spec_dict['nlines'] = str(len(ll.index))
    spec_dict['version'] = '1'
    spec_dict['resolved'] = '1'
    if update:
        # Update a few things in species column
        print 'Updating species table...'
        cursor = db.cursor()
        cursor.execute('UPDATE species SET created=%s WHERE species_id=%s',
                       (spec_dict['created'], meta_dict['species_id']))
        cursor.execute('UPDATE species SET nlines=%s WHERE species_id=%s',
                       (spec_dict['nlines'], meta_dict['species_id']))
        # Replace old NRAO recommended frequency tags so the updated data set becomes the new NRAO rec
        if meta_dict['ism'] == 1:
            print 'Removing previous Lovas NRAO recommended frequencies, if necessary...'
            cursor.execute('UPDATE main SET Lovas_NRAO = 0 WHERE species_id=%s', (meta_dict['species_id'],))
        if push_metadata_flag == 'REPLACE':
            print 'Removing duplicate metadata, if neeeded...'  # In case of duplicate data
            cursor.execute('DELETE FROM species_metadata WHERE species_id=%s AND LineList=%s AND v3_0 = 3', (meta_dict['species_id'], meta_dict['LineList']))
        if not append_lines_flag:
            print 'Removing previous current version lines if available...'  # Prevents doubling of entries, such as in case of an accidental update
            cursor.execute('DELETE FROM main WHERE species_id=%s AND `v3.0`=3 AND ll_id=%s', (meta_dict['species_id'], meta_dict['LineList']))
        cursor.close()
    else:
        cursor = db.cursor()
        print 'Creating new entry in species table...'
        try:
            cursor.execute(query("species", spec_dict), spec_dict.values())
        except sqldb.ProgrammingError:
            print "The following query failed: "
            print query_err("species", spec_dict).format(*spec_dict.values())
        print 'Finished successfully. Created entry for %s with species_id %s and SPLAT_ID %s \n' \
              % (spec_dict['name'], spec_dict['species_id'], spec_dict['SPLAT_ID'])
        cursor.close()
    ### This commented block of code enables replacement of older metadata content in species_metadata if updating an entry;
    ### current behavior is to merely append the new metadata content to species_metadata and preserve older versions.
    # if update:
    #     cursor = db.cursor()
    #     print 'Removing original metadata entry for replacing with new data...'
    #     cursor.execute('DELETE from species_metadata WHERE species_id=%s AND v1_0=%s AND v2_0=%s AND LineList=%s',
    #                    (meta_dict['species_id'], meta_dict['v1_0'], meta_dict['v2_0'], meta_dict['LineList']))
    #     print 'Removing original linelist for replacement...'
    #     cursor.execute('DELETE from main WHERE species_id=%s AND ll_id=%s',
    #                    (meta_dict['species_id'], meta_dict['LineList']))
    #     cursor.close()
    # Create new metadata entry in database
    if (update == 0) or (update == 1 and push_metadata_flag in ('APPEND','REPLACE')):
        cursor = db.cursor()
        print 'Creating new entry in metadata table...'
        try:
            cursor.execute(query("species_metadata", meta_dict), meta_dict.values())
        except sqldb.ProgrammingError:
            print "The folllowing query failed: "
            print query_err("species_metadata", meta_dict).format(*meta_dict.values())
        print 'Finished successfully.\n'
        cursor.close()
    print 'Pushing linelist (%s entries) to database...' % num_entries
    cursor = db.cursor()
    # Generates a giant INSERT query for all rows in the linelist.
    # This is a MUCH faster process than having Python loop through each row and insert it manually.
    query_ll = "INSERT INTO %s ( %s ) VALUES ( %s )" \
               % ("main", ', '.join(ll.columns.values), placeholders(ll.columns.values))
    try:
        cursor.executemany(query_ll, ll_dict)
    except sqldb.ProgrammingError:
        print 'Pushing linelist failed.'
    except TypeError:
        raise
    cursor.close()
    db.commit()
    print 'Finished with linelist push.'
def main(db):
    """Interactive driver: pick CDMS entries and push them into the database.

    Loops over GUI dialogs: choose a CDMS update (optionally supplying a
    local CAT file), match it to an existing Splatalogue species or create
    a new one, then push linelist + metadata via push_molecule().
    """
    # ------------------
    # POPULATE CDMS LIST
    # ------------------
    pd.options.mode.chained_assignment = None  # Silence pandas SettingWithCopy warnings
    print 'Pulling updates from CDMS...'
    update_list = pull_updates()
    # Prepend a zero-padded selection index to each row for the chooser GUI
    choice_list = [CDMSChoiceList([str(i)]+update_list[i]) for i in range(len(update_list))]
    # RUN PROCESS
    CDMSLoop = True
    while CDMSLoop:
        cursor = db.cursor()
        choice = eg.choicebox("Choose a Molecule to Update", "Choice", choice_list)
        custom_cat_file = eg.buttonbox(msg='Would you like to supply a custom CAT file?', choices=['Yes', 'No'])
        # choice[0:5] recovers the zero-padded selection index from the display string
        if custom_cat_file == 'Yes':
            custom_path = eg.fileopenbox(msg='Please select a CAT file.', title='Custom CAT file')
            cat_entry = CDMSMolecule(update_list[int(choice[0:5])], custom=True, custom_path=custom_path)
        else:
            cat_entry = CDMSMolecule(update_list[int(choice[0:5])], custom=False)
        # Queries database for all species with valid "SPLAT IDs" sharing this molecular mass
        cmd = "SELECT * FROM species " \
              "WHERE SPLAT_ID LIKE '%s%%'" % str(cat_entry.tag[:3])
        cursor.execute(cmd)
        res = cursor.fetchall()
        # Hacky way to get easygui to correlate the mysql query output rows to rows in the GUI molecule list
        splatmolresults = [SplatSpeciesResultList([i]+list(x)) for i, x in enumerate(res)]
        # Sentinel "NEW MOLECULE" row; its 'X' marker is detected by position below
        splatmolresults += [SplatSpeciesResultList([len(splatmolresults),999999999,0,'NEW MOLECULE',
                                                    'X', '', '', '', '', '', ''])]
        choice2 = eg.choicebox("Pick molecule from Splatalogue to update, or create a new molecule.\n "
                               "Current choice is:\n %s" % choice, "Splatalogue Search Results",
                               splatmolresults)
        cursor.close()
        # NOTE(review): choice2[68] relies on the fixed-width __str__ layout of
        # SplatSpeciesResultList -- fragile if that format string ever changes
        if choice2[68] == 'X':  # New molecule
            linelist, species_final, metadata_final = new_molecule(cat_entry, db)
            push_molecule(db, linelist, species_final, metadata_final, update=0)
        else:  # Molecule already exists in Splatalogue database
            linelist, metadata_final, push_metadata_flag, append_lines_flag = process_update(cat_entry, res[int(choice2[0:5])], db)
            push_molecule(db=db, ll=linelist, spec_dict={}, meta_dict=metadata_final, push_metadata_flag=push_metadata_flag, append_lines_flag=append_lines_flag, update=1)
        choice3 = eg.buttonbox(msg='Do you want to update another CDMS entry?', choices=['Yes', 'No'])
        if choice3 == 'No':
            CDMSLoop = False
|
en
| 0.747823
|
This function takes a Pickett prediction file (a so-called "CAT" file) and converts it into a Pandas DataFrame. This code should work for any well-formed CAT file, and works for all CDMS and JPL entries, as well as custom, user-generated CAT files. It is unclear if there are any edge cases this misses as CAT files are fairly rigorous in their formatting. # For when a QN > 99 # Generates a parsing string formatter for CAT file rows # bool for padding # don't need final one # Character widths for each CAT file entry, not including quantum numbers # cat_url is a string: # Save cat_inp to working directory # Split by line to ready CAT file for parse # Parses the CAT file into chunks # Let's put everything together to put into a dataframe # For fitting strings into temporary numpy array # Get number of quantum numbers per state # You should never end up here unless there's a crazy edge case or a badly formed CAT file. # This is a really hacky way to parse the quantum numbers, but it's robust and has worked without a hitch so far. # Uses a series of if-else statements to iterate through the QNs in a linear fashion # splits QN entry into pairs # Boolean for being done with the upper state QNs # Are we in the character gap between the upper and lower state QNs? # Boolean for being down with the lower state QNs # Still in the upper state # In case it isn't an integer quantum number # For parity symbols in CH3OH, for instance # No parity symbol? # QN > 99 # QN < -9, e.g. CDMS CD3CN entry # You shouldn't ever get here, but just in case... # Hit the beginning of the lower states # QN > 99 # QN < -9, e.g. CDMS CD3CN entry # Get blank line or other issue? 
# Blank line # Upper state degeneracy > 99: # Generates columns for dataframe that correlate with columns in main # Not used but useful in case you want to append custom lines to a linelist # Get rid of any junk after a comma, usually some state descriptor # Do isotopes first # Replace every other number with <sub> # Add <sub> to any parenthesized subgroup of the formula # Now, let's build s_name and s_name_noparens # Scrapes CDMS site to generate metadata # Dictionary for metadata, keys are consistent with columns in SQL # Dictionaries to connect string values in CDMS metadata to SQL columns # Bullshit workaround to fix some non-standard stuff in Holger's entries # Initialize scraper # Grab formula # Need to add lit data / dipoles etc # Right now it seems the best way is to brute force this # Pull out spin-rotation partition function values # Some hard-coded replace statements for weird things that don't parse correctly when displaying the metadata # Fixes long dashes that Holger sometimes likes to use # Calculates all derived parameters from data in the CAT file, e.g. lower/upper state energies, sijmu2 values, etc. # Currently does NOT calculate sij values, because of the case-by-case, or even line-by-line, difficulty on how to identify the electric dipole to divide by # in case there's multiple numbers # Add measured freqs and then ordered frequencies # Check to see if folder already exists; if so, we'll append an integer to it # There might be more than 1, so we should add +1 to the tally if so # Write parsed CAT dataframe to CSV file # Strips any non-ASCII unicode text from string # URLs to CAT and Documentation (metadata) files # Text from table entries # Close HTML sock # 0 --> tag, 1 --> Molecule, 2 --> struct_time obj, 3 --> cat file, 4 --> metadata # try: # Because Holger isn't consistent with his date formatting # formatted_date = time.strptime(date, "%b. 
%Y") # except ValueError: # try: # formatted_date = time.strptime(date, "%B %Y") # except ValueError: # formatted_date = time.strptime(date, "%b %Y") # except ValueError: # formatted_date = time.strptime(date, "%B. %Y") # Sorts by update time, most recent first Flow for process_update: 1) Check metadata, update if needed 2) Set QN formatting (????) 3) Delete CDMS-related linelist from Splatalogue 4) Push new linelist and metadata to Splatalogue # ---------------------------- # METADATA PULL CHECK & UPDATE # ---------------------------- # There's more than one linelist associated with the chosen species_id and linelist ID # Species exists but there are no metadata entries, so we can have to populate a new one # Only catches when species exists but metadata doesn't # Only entry in database isn't from the linelist of the entry that user wants to update # Check to see first column to place reference info # ref_idx = 1 # while True: # if not db_meta['Ref%s'%ref_idx]: # break # ref_idx += 1 #mol.metadata['Ref%s'%ref_idx] = mol.metadata.pop('Ref1') # meta_fields = ['%s \t %s' %(a[0],a[1]) for a in zip(db_meta_cols, db_meta) if 'Ref' not in a[0]] # for row in zip(db_meta_cols, db_meta): # print row[0],'\t',row[1] # sql_cur.execute("SELECT * from species_metadata WHERE species_id=%s and v1_0=%s and v2_0=%s", # (db_meta[0], mol.ll_id, db_meta[53], db_meta[54])) #elif db_meta[col_name] is not None: # metadata_to_push[col_name] = db_meta[col_name] # Hacky fix to ensure clean columns -- this cleans up columns with no default values that don't allow NULL or are values that aren't otherwise filled in by this routine # Generate new unique ID for metadata entry # line_id doesn't exist in the database so just skip this step # Gets thrown if there are no metadata entries in the table, thus line_id should be "1". 
# for key in metadata_to_push: # print '%s: %s' %(key, metadata_to_push[key]) # QN formatting --- let's just do it on a case-by-case basis # Iterate through rows and add formatted QN # Push formatted quantum numbers to linelist # mol.cat['Lovas_NRAO'] = pd.Series(np.ones(len(mol.cat.index)), index=mol.cat.index) # Prep linelist for submission to # ---------------------------- # METADATA ADD # ---------------------------- # Generate array of all columns from species_metadata so we can fill them in as we go # Fills in metadata dictionary with the column array we generated above as a list of keys for the metadata dict # Generate new species_id # species_id is +1 of the largest species_id in the species table # Gets thrown if there are no species in the table; therefore species ID should be "1". # Generate new unique ID for metadata entry # line_id doesn't exist in the database so just skip this step # Gets thrown if there are no metadata entries in the table, thus line_id should be "1". # Odds and ends; we default to v4_0 for splat_2019 #metadata_to_push['v4_0'] = '4' # If you want to give the molecule a pretty new, or non-standard, name # Generates new splat_id from the molecular mass # If there's more than one species in the splatalogue list with the same molecular mass, this finds the minimum value necessary to make it a unique ID # Self-explanatory: This is where we build the species row entry # Ensures we keep a 1-1 correspondence between species_to_push and users' entries from above # If user entry is empty, we do nothing # Interstellar values; instantiated separately so we can correlate between metadata and species ISM tags # If it's an ISM detection, we probably want to make the freqs NRAO recommended ('Lovas_NRAO' column in main). 
# This may need to be changed in the future, but this was decided under agreement with <NAME> # Sets all lines in linelist to be NRAO recommended if it's a detected molecule # ISM tag overlap between metadata and species tables # Format quantum numbers # Iterate through rows and add formatted QN # See QNFormat.py for this function # Push formatted quantum numbers to linelist # Prep linelist for submission to database # Generates a list of columns from the main table # Kick it out of the dataframe so we can throw it into a list, which mysqldb can handle # Gets rid of dataframe columns NOT in the main column list # push_molecule() takes all the prepared entry data from either new_molecule() or process_update() and pushes it to the database # For your viewing pleasure # Copies species_id from metadata, where it's first generated, into the linelist #ll_dict = [(None if pd.isnull(y) else y for y in x) for x in ll.to_numpy()] # Ensures NULL values for empty rows # Create new species entry in database # Helper functions to generate well-formed SQL INSERT statements # Add some last minute entries to dictionaries # Update a few things in species column # Replace old NRAO recommended frequency tags so the updated data set becomes the new NRAO rec # In case of duplicate data # Prevents doubling of entries, such as in case of an accidental update ### This commented block of code enables replacement of older metadata content in species_metadata if updating an entry; ### current behavior is to merely append the new metadata content to species_metadata and preserve older versions. # if update: # cursor = db.cursor() # print 'Removing original metadata entry for replacing with new data...' # cursor.execute('DELETE from species_metadata WHERE species_id=%s AND v1_0=%s AND v2_0=%s AND LineList=%s', # (meta_dict['species_id'], meta_dict['v1_0'], meta_dict['v2_0'], meta_dict['LineList'])) # print 'Removing original linelist for replacement...' 
# cursor.execute('DELETE from main WHERE species_id=%s AND ll_id=%s', # (meta_dict['species_id'], meta_dict['LineList'])) # cursor.close() # Create new metadata entry in database # Generates a giant INSERT query for all rows in the linelist. # This is a MUCH faster process than having Python loop through each row and insert it manually. # ------------------ # POPULATE CDMS LIST # ------------------ # RUN PROCESS # Queries database for all species with valid "SPLAT IDs" # Hacky way to get easygui to correlate the mysql query output rows to rows in the GUI molecule list # New molecule # Molecule already exists in Splatalogue database
| 2.550103
| 3
|
fab_support/__init__.py
|
drummonds/fab_support
| 3
|
6627730
|
<filename>fab_support/__init__.py
# -*- coding: utf-8 -*-
"""Top-level package for fab_support."""
from ._version import *
from .utils import * # Import first as also imported by platform_support
from .env_support import *
from .platform_support import fab_support_function
from .stages_support import list_stages
|
<filename>fab_support/__init__.py
# -*- coding: utf-8 -*-
"""Top-level package for fab_support."""
from ._version import *
from .utils import * # Import first as also imported by platform_support
from .env_support import *
from .platform_support import fab_support_function
from .stages_support import list_stages
|
en
| 0.922239
|
# -*- coding: utf-8 -*- Top-level package for fab_support. # Import first as also imported by platform_support
| 1.224474
| 1
|
paramtools/tests/test_parameters.py
|
PSLmodels/ParamTools
| 18
|
6627731
|
import copy
import os
import json
import datetime
from collections import OrderedDict
from random import shuffle
import pytest
import numpy as np
import marshmallow as ma
from paramtools import (
ParamToolsError,
ValidationError,
SparseValueObjectsException,
InconsistentLabelsException,
collision_list,
ParameterNameCollisionException,
register_custom_type,
Parameters,
Values,
Slice,
)
from paramtools.contrib import Bool_
# Directory containing this test module; used to locate the JSON fixture files.
CURRENT_PATH = os.path.abspath(os.path.dirname(__file__))
@pytest.fixture
def defaults_spec_path():
    """Absolute path to the shared defaults.json spec used by most tests."""
    spec_name = "defaults.json"
    return os.path.join(CURRENT_PATH, spec_name)
@pytest.fixture
def extend_ex_path():
    """Absolute path to the extend_ex.json spec used by the extend tests."""
    spec_name = "extend_ex.json"
    return os.path.join(CURRENT_PATH, spec_name)
@pytest.fixture
def array_first_defaults(defaults_spec_path):
    """The defaults spec with its list-valued parameters removed.

    The removed parameters are the ones the array-first tests do not support.
    """
    with open(defaults_spec_path) as f:
        spec = json.load(f)
    for dropped in (
        "float_list_param",
        "simple_int_list_param",
        "float_list_when_param",
        "when_array_param",
    ):
        spec.pop(dropped)
    return spec
@pytest.fixture
def TestParams(defaults_spec_path):
    """Return a fresh Parameters subclass (not an instance) bound to defaults.json."""
    class _TestParams(Parameters):
        defaults = defaults_spec_path
    return _TestParams
@pytest.fixture(scope="function")
def af_params(array_first_defaults):
class AFParams(Parameters):
defaults = array_first_defaults
_af_params = AFParams(
initial_state={"label0": "zero", "label1": 1}, array_first=True
)
return _af_params
def test_init(TestParams):
    """Smoke test: constructing TestParams populates data, attributes, and grids."""
    params = TestParams()
    assert params
    assert params._data
    # Every parameter in the internal data store is exposed as an attribute.
    assert all(getattr(params, name) for name in params._data)
    assert params.label_grid
    assert params.label_grid == params._stateless_label_grid
class TestSchema:
    """Parsing and validation of the "schema" member of a defaults specification."""

    def test_empty_schema(self):
        """A spec with no "schema" member loads and yields an empty label grid."""
        class Params(Parameters):
            array_first = True
            defaults = {
                "hello_world": {
                    "title": "Hello, World!",
                    "description": "Simplest config possible.",
                    "type": "str",
                    "value": "hello world",
                }
            }
        params = Params()
        assert params.hello_world == "hello world"
        assert params.label_grid == {}

    def test_schema_just_labels(self):
        """A "labels" entry produces a label grid of the allowed label values."""
        class Params(Parameters):
            array_first = True
            defaults = {
                "schema": {
                    "labels": {
                        "somelabel": {
                            "type": "int",
                            "validators": {"range": {"min": 0, "max": 2}},
                        }
                    }
                },
                "hello_world": {
                    "title": "Hello, World!",
                    "description": "Simplest config possible.",
                    "type": "str",
                    "value": "hello world",
                },
            }
        params = Params()
        assert params.hello_world == "hello world"
        assert params.label_grid == {"somelabel": [0, 1, 2]}

    def test_schema_just_additional_members(self):
        """Fields declared in "additional_members" are accepted on parameters."""
        class Params(Parameters):
            array_first = True
            defaults = {
                "schema": {
                    "additional_members": {"additional": {"type": "str"}}
                },
                "hello_world": {
                    "title": "Hello, World!",
                    "description": "Simplest config possible.",
                    "additional": "I'm extra",
                    "type": "str",
                    "value": "hello world",
                },
            }
        params = Params()
        assert params.hello_world == "hello world"
        assert params.label_grid == {}

    def test_schema_not_dropped(self, defaults_spec_path):
        """Loading a spec dict must not mutate it: the "schema" key survives."""
        with open(defaults_spec_path, "r") as f:
            defaults_ = json.loads(f.read())
        class TestParams(Parameters):
            defaults = defaults_
        TestParams()
        assert defaults_["schema"]

    def test_schema_with_errors(self):
        """Bad member types or unknown schema keys raise ma.ValidationError."""
        class Params1(Parameters):
            array_first = True
            defaults = {
                "schema": {
                    "additional_members": {"additional": {"type": 1234}}
                }
            }
        with pytest.raises(ma.ValidationError):
            Params1()
        class Params2(Parameters):
            array_first = True
            defaults = {
                "schema": {
                    "additional_members_123": {"additional": {"type": "str"}}
                }
            }
        with pytest.raises(ma.ValidationError):
            Params2()

    def test_operators_spec(self):
        """"operators" in the spec set defaults; constructor kwargs and class
        attributes override them, and dump() echoes the effective operators."""
        class Params1(Parameters):
            array_first = False
            defaults = {
                "schema": {
                    "labels": {
                        "mylabel": {
                            "type": "int",
                            "validators": {"range": {"min": 0, "max": 10}},
                        },
                        "somelabel": {
                            "type": "int",
                            "validators": {"range": {"min": 0, "max": 10}},
                        },
                    },
                    "operators": {
                        "array_first": False,
                        "label_to_extend": "somelabel",
                    },
                }
            }
        # Constructor kwargs take precedence over the spec's operators.
        params = Params1(array_first=True, label_to_extend="mylabel")
        assert params.array_first
        assert params.label_to_extend == "mylabel"
        assert params.operators == {
            "array_first": True,
            "label_to_extend": "mylabel",
            "uses_extend_func": False,
        }
        assert params.dump()["schema"]["operators"] == params.operators
        # Class attribute overrides array_first; label_to_extend falls back to spec.
        Params1.array_first = True
        params = Params1()
        assert params.array_first
        assert params.label_to_extend == "somelabel"
        assert params.operators == {
            "array_first": True,
            "label_to_extend": "somelabel",
            "uses_extend_func": False,
        }
        assert params.dump()["schema"]["operators"] == params.operators
        class Params2(Parameters):
            defaults = {"schema": {"operators": {"array_first": True}}}
        params = Params2()
        assert params.array_first
        assert params.label_to_extend is None
        assert params.operators == {
            "array_first": True,
            "label_to_extend": None,
            "uses_extend_func": False,
        }
        assert params.dump()["schema"]["operators"] == params.operators
        class Params3(Parameters):
            array_first = True
            label_to_extend = "hello"
            defaults = {"schema": {"operators": {"array_first": True}}}
        # Explicit constructor kwargs beat both class attrs and the spec.
        params = Params3(array_first=False, label_to_extend=None)
        assert params.operators == {
            "array_first": False,
            "label_to_extend": None,
            "uses_extend_func": False,
        }
        assert params.dump()["schema"]["operators"] == params.operators
        params.array_first = True
        assert params.dump()["schema"]["operators"] == params.operators

    def test_when_schema(self):
        """A malformed "when" validator spec raises ma.ValidationError on load."""
        class Params(Parameters):
            defaults = {
                "param": {
                    "title": "",
                    "description": "",
                    "type": "int",
                    "value": 0,
                    "validators": {
                        "when": {
                            "param": "default",
                            "is": {"less_than": 0, "greater_than": 1},
                            "then": {"range": {"min": 0}},
                            "otherwise": {"range": {"min": "default"}},
                        }
                    },
                }
            }
        with pytest.raises(ma.ValidationError):
            Params()

    def test_custom_fields(self):
        """register_custom_type makes a custom marshmallow field usable for
        labels and additional members; a bad payload raises ValidationError."""
        class Custom(ma.Schema):
            hello = ma.fields.Boolean()
            world = Bool_()  # Tests data is serialized.
        register_custom_type("custom_type", ma.fields.Nested(Custom()))
        class Params(Parameters):
            defaults = {
                "schema": {
                    "labels": {"custom_label": {"type": "custom_type"}},
                    "additional_members": {"custom": {"type": "custom_type"}},
                },
                "param": {
                    "title": "",
                    "description": "",
                    "type": "int",
                    "value": [{"custom_label": {"hello": True}, "value": 0}],
                    "custom": {"hello": True, "world": True},
                },
            }
        params = Params()
        assert params
        assert params._data["param"]["custom"] == {
            "hello": True,
            "world": True,
        }
        assert params.adjust(
            {
                "param": [
                    {
                        "custom_label": {"hello": True, "world": True},
                        "value": 1,
                    }
                ]
            }
        )
        assert params.sel["param"].isel[:] == [
            {"custom_label": {"hello": True}, "value": 0},
            {"custom_label": {"hello": True, "world": True}, "value": 1},
        ]
        class BadSpec(Parameters):
            field_map = {"custom_type": ma.fields.Nested(Custom)}
            defaults = {
                "schema": {
                    "additional_members": {"custom": {"type": "custom_type"}}
                },
                "param": {
                    "title": "",
                    "description": "",
                    "type": "int",
                    "value": 0,
                    "custom": {"hello": 123, "world": "whoops"},
                },
            }
        with pytest.raises(ma.ValidationError):
            BadSpec()
class TestValues:
    """The .sel accessor wraps parameter data in Values/Slice objects."""

    def test(self, TestParams, defaults_spec_path):
        """sel[...] yields Values, label access yields Slice; direct indexing raises."""
        params = TestParams()
        assert isinstance(params.sel["min_int_param"], Values)
        assert isinstance(params.sel["min_int_param"]["label0"], Slice)
        # Parameters instances do not support subscript access.
        with pytest.raises(AttributeError):
            params["min_int_param"]
class TestAccess:
    """Reading data back out: specification(), dump(), iteration, and sorting."""

    def test_specification(self, TestParams, defaults_spec_path):
        """specification() mirrors the keys and values of the raw defaults file."""
        params = TestParams()
        spec1 = params.specification()
        with open(defaults_spec_path) as f:
            exp = json.loads(f.read())
        exp.pop("schema")
        assert set(spec1.keys()) == set(exp.keys())
        assert spec1["min_int_param"] == exp["min_int_param"]["value"]

    def test_is_ordered(self, TestParams):
        """specification() returns an OrderedDict in both plain and serializable modes."""
        params = TestParams()
        spec1 = params.specification()
        assert isinstance(spec1, OrderedDict)
        spec2 = params.specification(meta_data=True, serializable=True)
        assert isinstance(spec2, OrderedDict)

    def test_specification_query(self, TestParams):
        """Label keyword arguments filter both specification() and set_state reads."""
        params = TestParams()
        spec1 = params.specification()
        exp = {
            "min_int_param": [{"label0": "one", "label1": 2, "value": 2}],
            "max_int_param": [{"label0": "one", "label1": 2, "value": 4}],
        }
        spec2 = params.specification(label0="one")
        # check that specification method got only the value item with label0="one"
        assert spec2["min_int_param"] == exp["min_int_param"]
        assert spec2["max_int_param"] == exp["max_int_param"]
        # check that get method got only value item with label0="one"
        params.set_state(label0="one")
        assert params.min_int_param == exp["min_int_param"]
        assert params.max_int_param == exp["max_int_param"]
        # check that specification method gets other data, not containing a label0
        # label.
        for param, data in spec1.items():
            if all("label0" not in val_item for val_item in data):
                assert spec2[param] == data
        # Deleted params are hidden unless include_empty=True is passed.
        params.delete({"str_choice_param": None})
        assert "str_choice_param" not in params.specification()
        assert "str_choice_param" in params.specification(include_empty=True)

    def test_serializable(self, TestParams, defaults_spec_path):
        """serializable=True output is JSON-dumpable and round-trips through the schema."""
        params = TestParams()
        assert json.dumps(params.specification(serializable=True))
        assert json.dumps(
            params.specification(serializable=True, meta_data=True)
        )
        spec = params.specification(serializable=True)
        # Make sure "value" is removed when meta_data is False
        for value in spec.values():
            assert "value" not in value
        with open(defaults_spec_path) as f:
            exp = json.loads(f.read())
        exp.pop("schema")
        spec = params.specification(serializable=True, meta_data=True)
        assert spec == params._defaults_schema.dump(
            params._defaults_schema.load(exp)
        )

    def test_dump(self, TestParams, defaults_spec_path):
        """dump() equals the serialized spec plus the schema, and round-trips."""
        params1 = TestParams()
        spec = params1.specification(serializable=True, meta_data=True)
        schema = params1._schema
        dumped = params1.dump()
        assert dumped == {**spec, **{"schema": schema}}
        # dump() output is itself a valid defaults spec.
        class TestParams2(Parameters):
            defaults = dumped
        params2 = TestParams2()
        assert params2.dump() == dumped

    def test_dump_with_labels(self, TestParams, defaults_spec_path):
        """dump() respects an active label state and still round-trips."""
        params1 = TestParams()
        spec = params1.specification(
            serializable=True,
            include_empty=True,
            meta_data=True,
            label0="one",
            sort_values=True,
        )
        schema = params1._schema
        params1.set_state(label0="one")
        dumped = params1.dump(sort_values=True)
        assert dumped == {**spec, **{"schema": schema}}
        class TestParams2(Parameters):
            defaults = dumped
        params2 = TestParams2()
        params2.set_state(label0="one")
        assert params2.dump() == dumped

    def test_iterable(self, TestParams):
        """Iterating a Parameters instance yields its parameter names."""
        params = TestParams()
        act = set([])
        for param in params:
            act.add(param)
        assert set(params._data.keys()) == act
        assert set(params._data.keys()) == set(params.keys())
        for param, data in params.items():
            np.testing.assert_equal(data, getattr(params, param))

    def test_sort_values(self, TestParams):
        """Ensure sort runs and is stable"""
        sorted_tp = TestParams()
        sorted_tp.sort_values()
        assert sorted_tp.dump(sort_values=False) == TestParams().dump(
            sort_values=False
        )
        shuffled_tp = TestParams()
        for param in shuffled_tp:
            shuffle(shuffled_tp._data[param]["value"])
        # Clear the sel cache so it is rebuilt from the shuffled data.
        shuffled_tp.sel._cache = {}
        assert sorted_tp.dump(sort_values=False) != shuffled_tp.dump(
            sort_values=False
        )
        shuffled_tp.sort_values()
        assert sorted_tp.dump(sort_values=False) == shuffled_tp.dump(
            sort_values=False
        )
        # Test attribute is updated, too.
        for param in sorted_tp:
            assert getattr(sorted_tp, param) == getattr(shuffled_tp, param)

    def test_sort_values_no_labels(self):
        """sort_values works for a spec whose value objects carry no labels."""
        class Params(Parameters):
            defaults = {
                "test": {
                    "title": "test",
                    "description": "",
                    "type": "int",
                    "value": 2,
                }
            }
        params = Params()
        assert params.sort_values() == params._data
        assert params.sort_values({"test": params.test})
        assert params.dump()

    def test_sort_values_correctness(self):
        """Ensure sort is correct"""
        # Expected order: unlabeled first, then ascending by (label0, label1).
        exp = [
            {"value": 1},
            {"label0": 1, "label1": "one", "value": 1},
            {"label0": 1, "label1": "two", "value": 1},
            {"label0": 2, "label1": "one", "value": 1},
            {"label0": 2, "label1": "two", "value": 1},
            {"label0": 3, "label1": "one", "value": 1},
        ]
        shuffled = copy.deepcopy(exp)
        shuffle(shuffled)
        class Params(Parameters):
            defaults = {
                "schema": {
                    "labels": {
                        "label0": {
                            "type": "int",
                            "validators": {"range": {"min": 0, "max": 3}},
                        },
                        "label1": {
                            "type": "str",
                            "validators": {
                                "choice": {"choices": ["one", "two"]}
                            },
                        },
                    }
                },
                "param": {
                    "title": "test",
                    "description": "",
                    "type": "int",
                    "value": shuffled,
                },
            }
        params = Params(sort_values=False)
        assert params.param != exp and params.param == shuffled
        params.sort_values()
        assert params.param == exp
        # test passing in a data object
        params = Params(sort_values=False)
        assert params.param != exp and params.param == shuffled
        data1 = {"param": params.param}
        params.sort_values(data1, has_meta_data=False)
        data1 = copy.deepcopy(data1)
        data2 = {"param": {"value": params.param}}
        params.sort_values(data2, has_meta_data=True)
        data2 = copy.deepcopy(data2)
        params.sort_values()
        assert data1["param"] == data2["param"]["value"] == params.param
        # has_meta_data=False without a data argument is an error.
        with pytest.raises(ParamToolsError):
            params.sort_values(has_meta_data=False)

    def test_dump_sort_values(self, TestParams):
        """Test sort_values keyword in dump()"""
        tp = TestParams()
        for param in tp:
            shuffle(tp._data[param]["value"])
        tp.sel._cache = {}
        shuffled_dump = tp.dump(sort_values=False)
        sorted_dump = tp.dump(sort_values=True)
        assert sorted_dump != shuffled_dump
        sorted_tp = TestParams()
        sorted_tp.sort_values()
        assert sorted_tp.dump(sort_values=False) == sorted_dump
        # Test that sort works when state is activated
        state_tp = TestParams()
        # NOTE(review): iterates `tp` but shuffles `state_tp` — both instances
        # share the same parameter names, so the effect is the same; confirm
        # whether `state_tp` was intended here.
        for param in tp:
            shuffle(state_tp._data[param]["value"])
        state_tp.set_state(label0="zero", label2=1)
        state_dump = copy.deepcopy(state_tp.dump(sort_values=False))
        class NoStateParams(Parameters):
            defaults = state_dump
        nostate_tp = NoStateParams(sort_values=False)
        assert nostate_tp.dump(sort_values=False) == state_dump
        assert not nostate_tp.view_state()
        assert state_tp.view_state()
        assert nostate_tp.dump(sort_values=True) == state_tp.dump(
            sort_values=True
        )

    def test_sort_values_w_array(self, extend_ex_path):
        """Test sort values with array first config"""
        class ExtParams(Parameters):
            defaults = extend_ex_path
            label_to_extend = "d0"
            array_first = True
        # Test that param attributes are not updated when
        # array first is True
        params = ExtParams()
        params.extend_param = "don't update me"
        params.sort_values()
        assert params.extend_param == "don't update me"

    def test_sort_values_with_state(self, extend_ex_path):
        """sort_values respects an active state over an extended label."""
        class ExtParams(Parameters):
            defaults = extend_ex_path
            label_to_extend = "d0"
            array_first = False
        params = ExtParams()
        params.set_state(d0=[6, 7, 8, 9])
        params.sort_values()
        # "_auto" marks value objects generated by label extension.
        assert params.extend_param == [
            {"d0": 6, "d1": "c1", "value": 5, "_auto": True},
            {"d0": 6, "d1": "c2", "value": 6, "_auto": True},
            {"d0": 7, "d1": "c1", "value": 7},
            {"d0": 7, "d1": "c2", "value": 8},
            {"d0": 8, "d1": "c1", "value": 7, "_auto": True},
            {"d0": 8, "d1": "c2", "value": 8, "_auto": True},
            {"d0": 9, "d1": "c1", "value": 7, "_auto": True},
            {"d0": 9, "d1": "c2", "value": 8, "_auto": True},
        ]
class TestAdjust:
    """Behavior of adjust()/delete() for updating and removing parameter values."""

    def test_adjust_int_param(self, TestParams):
        """A labeled int value can be adjusted and read back under that state."""
        params = TestParams()
        params.set_state(label0="one", label1=2)
        adjustment = {
            "min_int_param": [{"label0": "one", "label1": 2, "value": 3}]
        }
        params.adjust(adjustment)
        assert params.min_int_param == adjustment["min_int_param"]

    def test_simultaneous_adjust(self, TestParams):
        """
        Adjust min_int_param above original max_int_param value at same time as
        max_int_param value is adjusted up. This tests that the new param is
        compared against the adjusted reference param if the reference param is
        specified.
        """
        params = TestParams()
        params.set_state(label0="zero", label1=1)
        adjustment = {
            "min_int_param": [{"label0": "zero", "label1": 1, "value": 4}],
            "max_int_param": [{"label0": "zero", "label1": 1, "value": 5}],
        }
        params.adjust(adjustment)
        assert params.min_int_param == adjustment["min_int_param"]
        assert params.max_int_param == adjustment["max_int_param"]

    def test_transaction(self, TestParams):
        """
        Use transaction manager to defer schema level validation until all adjustments
        are complete.
        """
        params = TestParams()
        params.set_state(label0="zero", label1=1)
        adjustment = {
            "min_int_param": [{"label0": "zero", "label1": 1, "value": 4}],
            "max_int_param": [{"label0": "zero", "label1": 1, "value": 5}],
        }
        with params.transaction(defer_validation=True):
            params.adjust({"min_int_param": adjustment["min_int_param"]})
            params.adjust({"max_int_param": adjustment["max_int_param"]})
        assert params.min_int_param == adjustment["min_int_param"]
        assert params.max_int_param == adjustment["max_int_param"]

    def test_transaction_with_when_parameter(self, TestParams):
        """
        When validator returns None when validate_schema is False for performance
        reasons.
        """
        params = TestParams()
        with params.transaction(defer_validation=True):
            params.adjust({"when_param": 2, "str_choice_param": "value1"})
        assert params.when_param == [{"value": 2}]

    def test_adjust_many_labels(self, TestParams):
        """
        Adjust min_int_param above original max_int_param value at same time as
        max_int_param value is adjusted up. This tests that the new param is
        compared against the adjusted reference param if the reference param is
        specified.
        """
        params = TestParams()
        params.set_state(label0="zero", label1=1)
        adjustment = {
            "min_int_param": [{"label0": "one", "label1": 2, "value": 2}],
            "int_default_param": 5,
            "date_param": [
                {"label0": "zero", "label1": 1, "value": "2018-01-17"}
            ],
        }
        params.adjust(adjustment)
        # min_int_param is adjusted in the _data attribute but the instance
        # attribute min_int_param is not.
        spec = params.specification(use_state=False, label0="one", label1=2)
        assert spec["min_int_param"] == adjustment["min_int_param"]
        assert params.min_int_param == [
            {"label0": "zero", "label1": 1, "value": 1}
        ]
        assert params.int_default_param == [
            {"value": adjustment["int_default_param"]}
        ]
        # Date strings are deserialized to datetime.date objects.
        assert params.date_param == [
            {
                "value": datetime.date(2018, 1, 17),
                "label1": 1,
                "label0": "zero",
            }
        ]

    def test_adjust_none_basic(self, TestParams):
        """Setting a value to None removes the matching value objects."""
        params = TestParams()
        adj = {
            "min_int_param": [{"label0": "one", "label1": 2, "value": None}],
            "str_choice_param": [{"value": None}],
        }
        params.adjust(adj)
        print(params.str_choice_param)
        assert len(params.min_int_param) == 1
        assert len(params.str_choice_param) == 0

    def test_adjust_none_many_values(self, TestParams):
        """None deletion can clear all values or only those matching a label."""
        params = TestParams()
        adj = {"int_dense_array_param": [{"value": None}]}
        params.adjust(adj)
        assert len(params._data["int_dense_array_param"]["value"]) == 0
        assert len(params.int_dense_array_param) == 0
        params = TestParams()
        adj = {"int_dense_array_param": [{"label0": "zero", "value": None}]}
        params.adjust(adj)
        # Only the label0="zero" items are removed; 18 remain.
        assert len(params._data["int_dense_array_param"]["value"]) == 18
        assert len(params.int_dense_array_param) == 18
        assert (
            len(
                params.specification(
                    use_state=False, include_empty=True, label0="zero"
                )["int_dense_array_param"]
            )
            == 0
        )
        assert (
            len(
                params.specification(
                    use_state=False, include_empty=True, label0="one"
                )["int_dense_array_param"]
            )
            == 18
        )

    def test_delete(self, TestParams):
        """delete() removes value objects; any "value" given in the adj is irrelevant."""
        params = TestParams()
        adj = {
            "min_int_param": [{"label0": "one", "label1": 2, "value": 2}],
            "str_choice_param": None,
        }
        params.delete(adj)
        assert len(params.min_int_param) == 1
        assert len(params.str_choice_param) == 0
        params = TestParams()
        adj = {"int_dense_array_param": None}
        params.delete(adj)
        assert len(params._data["int_dense_array_param"]["value"]) == 0
        assert len(params.int_dense_array_param) == 0
        params = TestParams()
        adj = {"int_dense_array_param": [{"label0": "zero", "value": 2}]}
        params.delete(adj)
        # Only the label0="zero" items are removed; 18 remain.
        assert len(params._data["int_dense_array_param"]["value"]) == 18
        assert len(params.int_dense_array_param) == 18
        assert (
            len(
                params.specification(
                    use_state=False, include_empty=True, label0="zero"
                )["int_dense_array_param"]
            )
            == 0
        )
        assert (
            len(
                params.specification(
                    use_state=False, include_empty=True, label0="one"
                )["int_dense_array_param"]
            )
            == 18
        )

    def test_adjust_when_param(self, TestParams):
        """A param guarded by a "when" validator accepts valid adjustments."""
        params = TestParams()
        params.adjust({"when_param": 2, "str_choice_param": "value1"})
        assert params.when_param == [{"value": 2}]
        params = TestParams()
        params.adjust({"when_param": 0})
        assert params.when_param == [{"value": 0}]

    def test_adjust_when_array_param(self, TestParams):
        """Array-valued "when" params accept list adjustments."""
        params = TestParams()
        params.adjust({"when_array_param": [0, 1, 0, 0]})
        assert params.when_array_param == [{"value": [0, 1, 0, 0]}]

    def test_adjust_float_list_when_param(self, TestParams):
        """Float-list "when" params accept list adjustments and keep their labels."""
        params = TestParams()
        params.adjust({"float_list_when_param": [0, 2.0, 2.0, 2.0]})
        assert params.float_list_when_param == [
            {"label0": "zero", "value": [0, 2.0, 2.0, 2.0]}
        ]
class TestValidationMessages:
def test_attributes(self, TestParams):
params = TestParams()
assert params.errors == {}
assert params.warnings == {}
def test_errors(self, TestParams):
params = TestParams()
adj = {"min_int_param": [{"value": "abc"}]}
with pytest.raises(ValidationError) as excinfo:
params.adjust(adj)
exp_user_message = {"min_int_param": ["Not a valid integer: abc."]}
assert json.loads(excinfo.value.args[0]) == {
"errors": exp_user_message
}
exp_internal_message = {
"min_int_param": [["Not a valid integer: abc."]]
}
assert excinfo.value.messages["errors"] == exp_internal_message
exp_labels = {"min_int_param": [{}]}
assert excinfo.value.labels["errors"] == exp_labels
params = TestParams()
adj = {"min_int_param": "abc"}
with pytest.raises(ValidationError) as excinfo:
params.adjust(adj)
def test_label_errors(self, TestParams):
params = TestParams()
params.adjust(
{"min_int_param": [{"value": 2, "label1": 6}]}, raise_errors=False
)
assert params.errors["min_int_param"] == [
"Input 6 must be less than 5."
]
params = TestParams()
params.adjust(
{"min_int_param": [{"value": 2, "label1": -1}]}, raise_errors=False
)
assert params.errors["min_int_param"] == [
"Input -1 must be greater than 0."
]
def test_errors_choice_param(self, TestParams):
params = TestParams()
adjustment = {"str_choice_param": [{"value": "not a valid choice"}]}
with pytest.raises(ValidationError) as excinfo:
params.adjust(adjustment)
msg = [
'str_choice_param "not a valid choice" must be in list of choices value0, '
"value1."
]
assert (
json.loads(excinfo.value.args[0])["errors"]["str_choice_param"]
== msg
)
params = TestParams()
adjustment = {"str_choice_param": [{"value": 4}]}
params = TestParams()
with pytest.raises(ValidationError) as excinfo:
params.adjust(adjustment)
msg = ["Not a valid string."]
assert (
json.loads(excinfo.value.args[0])["errors"]["str_choice_param"]
== msg
)
params = TestParams()
params.adjust(adjustment, raise_errors=False)
msg = ["Not a valid string."]
assert params.errors["str_choice_param"] == msg
params = TestParams()
with pytest.raises(ValidationError) as excinfo:
params.adjust(adjustment)
msg = ["Not a valid string."]
assert (
json.loads(excinfo.value.args[0])["errors"]["str_choice_param"]
== msg
)
params = TestParams()
params.adjust(adjustment, raise_errors=False)
params.errors["str_choice_param"] == ["Not a valid string."]
def test_errors_default_reference_param(self, TestParams):
params = TestParams()
params.set_state(label0="zero", label1=1)
# value under the default.
curr = params.int_default_param[0]["value"]
adjustment = {"int_default_param": [{"value": curr - 1}]}
params.adjust(adjustment, raise_errors=False)
exp = [f"int_default_param {curr-1} < min 2 default"]
assert params.errors["int_default_param"] == exp
def test_errors_int_param(self, TestParams):
params = TestParams()
adjustment = {
"min_int_param": [{"label0": "zero", "label1": 1, "value": 2.5}]
}
params.adjust(adjustment, raise_errors=False)
exp = {"min_int_param": ["Not a valid integer: 2.5."]}
assert params.errors == exp
def test_errors_multiple_params(self, TestParams):
params = TestParams()
adjustment = {
"min_int_param": [
{"label0": "zero", "label1": 1, "value": "not a number"},
{"label0": "one", "label1": 2, "value": "still not a number"},
],
"date_param": [
{"label0": "zero", "label1": 1, "value": "not a date"}
],
}
params.adjust(adjustment, raise_errors=False)
exp = {
"min_int_param": [
"Not a valid integer: not a number.",
"Not a valid integer: still not a number.",
],
"date_param": ["Not a valid date: not a date."],
}
assert params.errors == exp
def test_list_type_errors(self, TestParams):
params = TestParams()
adj = {
"float_list_param": [
{"value": ["abc", 0, "def", 1], "label0": "zero", "label1": 1},
{"value": [-1, "ijk"], "label0": "one", "label1": 2},
]
}
with pytest.raises(ValidationError) as excinfo:
params.adjust(adj)
exp_user_message = {
"float_list_param": [
"Not a valid number: abc.",
"Not a valid number: def.",
"Not a valid number: ijk.",
]
}
assert json.loads(excinfo.value.args[0]) == {
"errors": exp_user_message
}
exp_internal_message = {
"float_list_param": [
["Not a valid number: abc.", "Not a valid number: def."],
["Not a valid number: ijk."],
]
}
assert excinfo.value.messages["errors"] == exp_internal_message
exp_labels = {
"float_list_param": [
{"label0": "zero", "label1": 1},
{"label0": "one", "label1": 2},
]
}
assert excinfo.value.labels["errors"] == exp_labels
params = TestParams()
adjustment = {"float_param": [2.5]}
params.adjust(adjustment, raise_errors=False)
exp = {"float_param": ["Not a valid number: [2.5]."]}
assert params.errors == exp
params = TestParams()
adjustment = {"bool_param": [False]}
params.adjust(adjustment, raise_errors=False)
exp = {"bool_param": ["Not a valid boolean: [False]."]}
assert params.errors == exp
    def test_range_validation_on_list_param(self, TestParams):
        """Range validators apply to every element of a list-valued param."""
        params = TestParams()
        adj = {
            "float_list_param": [
                {"value": [-1, 1], "label0": "zero", "label1": 1}
            ]
        }
        params.adjust(adj, raise_errors=False)
        exp = ["float_list_param[label0=zero, label1=1] [-1.0, 1.0] < min 0 "]
        assert params.errors["float_list_param"] == exp
    def test_warnings(self, TestParams):
        """Warning-level validators populate ``.warnings`` (not ``.errors``)
        but still raise ValidationError by default."""
        params = TestParams()
        with pytest.raises(ValidationError) as excinfo:
            params.adjust({"str_choice_warn_param": "not a valid choice"})
        assert params.warnings
        assert not params.errors
        msg = [
            'str_choice_warn_param "not a valid choice" must be in list of choices value0, '
            "value1."
        ]
        assert (
            json.loads(excinfo.value.args[0])["warnings"][
                "str_choice_warn_param"
            ]
            == msg
        )
        # Same behavior for a numeric warn-range validator.
        params = TestParams()
        with pytest.raises(ValidationError) as excinfo:
            params.adjust({"int_warn_param": -1})
        assert params.warnings
        assert not params.errors
        msg = ["int_warn_param -1 < min 0 "]
        assert (
            json.loads(excinfo.value.args[0])["warnings"]["int_warn_param"]
            == msg
        )
    def test_ignore_warnings(self, TestParams):
        """ignore_warnings=True applies the value despite warnings; genuine
        errors (wrong type) still raise."""
        params = TestParams()
        params.adjust({"int_warn_param": -2}, ignore_warnings=True)
        assert params.int_warn_param == [{"value": -2}]
        assert not params.errors
        assert not params.warnings
        with pytest.raises(ValidationError):
            params.adjust({"int_warn_param": "abc"}, ignore_warnings=True)
    def test_when_validation(self):
        """'when' validator: pick 'then' vs 'otherwise' rules based on the
        value of another parameter."""
        class Params(Parameters):
            defaults = {
                "param": {
                    "title": "",
                    "description": "",
                    "type": "int",
                    "value": 0,
                    "validators": {
                        "when": {
                            "param": "when_param",
                            "is": {"less_than": 0},
                            "then": {"range": {"min": 0}},
                            "otherwise": {
                                # only valid for ndim = 0
                                "range": {"min": "when_param"}
                            },
                        }
                    },
                },
                "when_param": {
                    "title": "",
                    "description": "",
                    "type": "int",
                    "value": 2,
                },
            }
        params = Params(array_first=True)
        params.adjust({"param": 3})
        assert params.param == 3.0
        params.adjust({"when_param": -2, "param": 0})
        # when_param < 0 activates the 'then' branch: param must be >= 0.
        with pytest.raises(ValidationError) as excinfo:
            params.adjust({"when_param": -2, "param": -1})
        msg = json.loads(excinfo.value.args[0])
        assert msg["errors"]["param"] == [
            "When when_param is less than 0, param value is invalid: param -1 < min 0 "
        ]
    def test_when_validation_limitations(self):
        """
        When validation prohibits child validators from doing referential validation
        when the other parameter is an array type (number_dims > 0).
        """
        # Case 1: referenced parameter ("when_param") is itself an array.
        class Params(Parameters):
            defaults = {
                "param": {
                    "title": "",
                    "description": "",
                    "type": "int",
                    "number_dims": 1,
                    "value": [0, 0],
                    "validators": {
                        "when": {
                            "param": "when_param",
                            "is": {"less_than": 0},
                            "then": {"range": {"min": 0}},
                            "otherwise": {
                                # only valid for ndim = 0
                                "range": {"min": "when_param"}
                            },
                        }
                    },
                },
                "when_param": {
                    "title": "",
                    "description": "",
                    "type": "int",
                    "number_dims": 1,
                    "value": [3, 5],
                },
            }
        with pytest.raises(ParamToolsError) as excinfo:
            Params(array_first=True)
        cause = excinfo.value.__cause__
        msg = cause.args[0]
        assert (
            msg
            == "param is validated against when_param in an invalid context."
        )
        # Case 2: self-referential ("default") validation on an array param.
        class Params(Parameters):
            defaults = {
                "param": {
                    "title": "",
                    "description": "",
                    "type": "int",
                    "number_dims": 1,
                    "value": [0, 0],
                    "validators": {
                        "when": {
                            "param": "default",
                            "is": {"less_than": 0},
                            "then": {"range": {"min": 0}},
                            "otherwise": {
                                # only valid for ndim = 0
                                "range": {"min": "default"}
                            },
                        }
                    },
                }
            }
        with pytest.raises(ParamToolsError) as excinfo:
            Params(array_first=True)
        cause = excinfo.value.__cause__
        msg = cause.args[0]
        assert (
            msg == "param is validated against default in an invalid context."
        )
    def test_when_validation_examples(self, TestParams):
        """Smoke-test the 'when' validator params declared in the test
        defaults spec (scalar, array, and list-typed variants)."""
        params = TestParams()
        with pytest.raises(ValidationError) as excinfo:
            params.adjust({"when_param": 2})
        params = TestParams()
        with pytest.raises(ValidationError):
            params.adjust({"when_array_param": [0, 2, 0, 0]})
        params = TestParams()
        with pytest.raises(ValidationError):
            params.adjust({"when_array_param": [0, 1, 0]})
        params = TestParams()
        with pytest.raises(ValidationError) as excinfo:
            params.adjust({"float_list_when_param": [-1, 0, 0, 0]})
        msg = json.loads(excinfo.value.args[0])
        # One error per element of the 4-element list value.
        assert len(msg["errors"]["float_list_when_param"]) == 4
    def test_when_validation_referential(self):
        """
        Test referential validation with when validator.
        Check limitations to referential validation with when validator
        in test test_when_validation_limitations
        """
        class Params(Parameters):
            defaults = {
                "param": {
                    "title": "",
                    "description": "",
                    "type": "int",
                    "value": 3,
                    "validators": {
                        "when": {
                            "param": "when_param",
                            "is": {"less_than": 0},
                            "then": {"range": {"min": 0}},
                            "otherwise": {
                                # only valid for ndim = 0
                                "range": {"min": "when_param"}
                            },
                        }
                    },
                },
                "when_param": {
                    "title": "",
                    "description": "",
                    "type": "int",
                    "value": 3,
                },
            }
        params = Params(array_first=True)
        params.adjust({"param": 4})
        params.adjust({"param": 0, "when_param": -1})
        # 'then' branch: when_param < 0 requires param >= 0.
        params = Params(array_first=True)
        with pytest.raises(ValidationError):
            params.adjust({"param": -1, "when_param": -2})
        # 'otherwise' branch: param must be >= when_param.
        params = Params(array_first=True)
        with pytest.raises(ValidationError):
            params.adjust({"param": params.when_param - 1})
    def test_deserialized(self, TestParams):
        """_adjust(deserialized=True) skips deserialization but still runs
        validation on the provided value objects."""
        params = TestParams()
        params._adjust({"min_int_param": [{"value": 1}]}, deserialized=True)
        assert params.min_int_param == [
            {"label0": "zero", "label1": 1, "value": 1},
            {"label0": "one", "label1": 2, "value": 1},
        ]
        params._adjust(
            {"min_int_param": [{"value": -1}]},
            raise_errors=False,
            deserialized=True,
        )
        assert params.errors["min_int_param"] == ["min_int_param -1 < min 0 "]
class TestArray:
    """Tests for to_array/from_array conversion and label-order resolution."""
    def test_to_array(self, TestParams):
        """Round-trip dense value objects through an ndarray; deleting a
        value makes the param sparse and to_array must then fail."""
        params = TestParams()
        res = params.to_array("int_dense_array_param")
        exp = [
            [
                [1, 2, 3],
                [4, 5, 6],
                [7, 8, 9],
                [10, 11, 12],
                [13, 14, 15],
                [16, 17, 18],
            ],
            [
                [19, 20, 21],
                [22, 23, 24],
                [25, 26, 27],
                [28, 29, 30],
                [31, 32, 33],
                [34, 35, 36],
            ],
        ]
        assert res.tolist() == exp
        exp = params.int_dense_array_param
        assert params.from_array("int_dense_array_param", res) == exp
        # Remove one value object so the grid has a hole.
        val = params.sel["int_dense_array_param"].isel[0]
        labels = {lab: val for lab, val in val.items() if lab != "value"}
        params.delete({"int_dense_array_param": [dict(labels, value=None)]})
        with pytest.raises(SparseValueObjectsException):
            params.to_array("int_dense_array_param")
    def test_from_array(self, TestParams):
        """from_array without an array argument is a TypeError."""
        params = TestParams()
        with pytest.raises(TypeError):
            params.from_array("min_int_param")
    def test_resolve_order(self, TestParams):
        """_resolve_order returns (used labels, per-label value order) and
        rejects value objects with inconsistent label sets."""
        exp_label_order = ["label0", "label2"]
        exp_value_order = {"label0": ["zero", "one"], "label2": [0, 1, 2]}
        vi = [
            {"label0": "zero", "label2": 0, "value": 1},
            {"label0": "zero", "label2": 1, "value": 1},
            {"label0": "zero", "label2": 2, "value": 1},
            {"label0": "one", "label2": 0, "value": 1},
            {"label0": "one", "label2": 1, "value": 1},
            {"label0": "one", "label2": 2, "value": 1},
        ]
        params = TestParams()
        params.madeup = vi
        params._data["madeup"] = {"value": vi, "type": "int"}
        value_items = params.select_eq("madeup", False, **params._state)
        assert params._resolve_order(
            "madeup", value_items, params.label_grid
        ) == (exp_label_order, exp_value_order)
        # test with specified state.
        exp_value_order = {"label0": ["zero", "one"], "label2": [0, 1]}
        params.set_state(label2=[0, 1])
        value_items = params.select_eq("madeup", False, **params._state)
        assert params._resolve_order(
            "madeup", value_items, params.label_grid
        ) == (exp_label_order, exp_value_order)
        # Adding a label to only one value object breaks consistency.
        params.madeup[0]["label1"] = 0
        value_items = params.select_eq("madeup", False, **params._state)
        with pytest.raises(InconsistentLabelsException):
            params._resolve_order("madeup", value_items, params.label_grid)
    def test_to_array_with_state1(self, TestParams):
        """to_array respects both set_state and inline label kwargs."""
        params = TestParams()
        params.set_state(label0="zero")
        res = params.to_array("int_dense_array_param")
        exp = [
            [
                [1, 2, 3],
                [4, 5, 6],
                [7, 8, 9],
                [10, 11, 12],
                [13, 14, 15],
                [16, 17, 18],
            ]
        ]
        assert res.tolist() == exp
        assert (
            params.from_array("int_dense_array_param", res)
            == params.int_dense_array_param
        )
        # Same result passing the label directly instead of via set_state.
        params = TestParams()
        res = params.to_array("int_dense_array_param", label0="zero")
        assert res.tolist() == exp
        act = copy.deepcopy(
            params.from_array("int_dense_array_param", res, label0="zero")
        )
        params.set_state(label0="zero")
        assert act == params.int_dense_array_param
    def test_to_array_with_state2(self, TestParams):
        """State can drop interior label values; the array shrinks to match."""
        params = TestParams()
        # Drop values 3 and 4 from label1
        params.set_state(label1=[0, 1, 2, 5])
        res = params.to_array("int_dense_array_param")
        # Values 3 and 4 were removed from label1.
        exp = [
            [
                [1, 2, 3],
                [4, 5, 6],
                [7, 8, 9],
                # [10, 11, 12],
                # [13, 14, 15],
                [16, 17, 18],
            ],
            [
                [19, 20, 21],
                [22, 23, 24],
                [25, 26, 27],
                # [28, 29, 30],
                # [31, 32, 33],
                [34, 35, 36],
            ],
        ]
        assert res.tolist() == exp
        assert (
            params.from_array("int_dense_array_param", res)
            == params.int_dense_array_param
        )
        params = TestParams()
        res = params.to_array("int_dense_array_param", label1=[0, 1, 2, 5])
        assert res.tolist() == exp
        act = copy.deepcopy(
            params.from_array(
                "int_dense_array_param", res, label1=[0, 1, 2, 5]
            )
        )
        params.set_state(label1=[0, 1, 2, 5])
        assert act == params.int_dense_array_param
class TestState:
    """Tests for set_state/clear_state/view_state and the label grid."""
    def test_basic_set_state(self, TestParams):
        """set_state merges new labels into existing state; empty calls
        leave it unchanged; lists replace prior values."""
        params = TestParams()
        assert params.view_state() == {}
        params.set_state(label0="zero")
        assert params.view_state() == {"label0": ["zero"]}
        params.set_state(label1=0)
        assert params.view_state() == {"label0": ["zero"], "label1": [0]}
        params.set_state(label0="one", label2=1)
        assert params.view_state() == {
            "label0": ["one"],
            "label1": [0],
            "label2": [1],
        }
        params.set_state(**{})
        assert params.view_state() == {
            "label0": ["one"],
            "label1": [0],
            "label2": [1],
        }
        params.set_state()
        assert params.view_state() == {
            "label0": ["one"],
            "label1": [0],
            "label2": [1],
        }
        params.set_state(label1=[1, 2, 3])
        assert params.view_state() == {
            "label0": ["one"],
            "label1": [1, 2, 3],
            "label2": [1],
        }
    def test_label_grid(self, TestParams):
        """label_grid reflects the current state restrictions."""
        params = TestParams()
        exp = {
            "label0": ["zero", "one"],
            "label1": [0, 1, 2, 3, 4, 5],
            "label2": [0, 1, 2],
        }
        assert params.label_grid == exp
        params.set_state(label0="one")
        exp = {
            "label0": ["one"],
            "label1": [0, 1, 2, 3, 4, 5],
            "label2": [0, 1, 2],
        }
        assert params.label_grid == exp
        params.set_state(label0="one", label2=1)
        exp = {"label0": ["one"], "label1": [0, 1, 2, 3, 4, 5], "label2": [1]}
        assert params.label_grid == exp
        params.set_state(label1=[0, 1, 2, 5])
        exp = {"label0": ["one"], "label1": [0, 1, 2, 5], "label2": [1]}
        assert params.label_grid == {
            "label0": ["one"],
            "label1": [0, 1, 2, 5],
            "label2": [1],
        }
    def test_set_state_updates_values(self, TestParams):
        """Setting state filters parameter values; clear_state restores."""
        params = TestParams()
        defaultexp = [
            {"label0": "zero", "label1": 1, "value": 1},
            {"label0": "one", "label1": 2, "value": 2},
        ]
        assert params.min_int_param == defaultexp
        params.set_state(label0="zero")
        assert params.min_int_param == [
            {"label0": "zero", "label1": 1, "value": 1}
        ]
        # makes sure parameter that doesn't use label0 is unaffected
        assert params.str_choice_param == [{"value": "value0"}]
        params.clear_state()
        assert params.view_state() == {}
        assert params.min_int_param == defaultexp
        assert params.label_grid == params._stateless_label_grid
    def test_set_state_errors(self, TestParams):
        """Unknown label values and unknown label names both raise."""
        params = TestParams()
        with pytest.raises(ValidationError):
            params.set_state(label0="notalabel")
        params = TestParams()
        with pytest.raises(ValidationError):
            params.set_state(notalabel="notalabel")
    def test_state_with_list(self, TestParams):
        """List-valued state keeps every matching value object."""
        params = TestParams()
        params.set_state(label0="zero", label1=[0, 1])
        exp = [
            {"label0": "zero", "label1": 0, "label2": 0, "value": 1},
            {"label0": "zero", "label1": 0, "label2": 1, "value": 2},
            {"label0": "zero", "label1": 0, "label2": 2, "value": 3},
            {"label0": "zero", "label1": 1, "label2": 0, "value": 4},
            {"label0": "zero", "label1": 1, "label2": 1, "value": 5},
            {"label0": "zero", "label1": 1, "label2": 2, "value": 6},
        ]
        assert params.int_dense_array_param == exp
class TestArrayFirst:
    """Tests for array_first mode: attributes are ndarrays/scalars."""
    def test_basic(self, af_params):
        """Attributes come back as arrays (or scalars for 0-dim params)."""
        assert af_params
        assert af_params.min_int_param.tolist() == [[1]]
        assert af_params.date_max_param.tolist() == [
            [datetime.date(2018, 1, 15)]
        ]
        assert af_params.int_dense_array_param.tolist() == [[[4, 5, 6]]]
        assert af_params.str_choice_param == "value0"
    def test_from_array(self, af_params):
        """from_array works with and without an explicit array argument."""
        exp = [
            {"label0": "zero", "label1": 1, "label2": 0, "value": 4},
            {"label0": "zero", "label1": 1, "label2": 1, "value": 5},
            {"label0": "zero", "label1": 1, "label2": 2, "value": 6},
        ]
        assert af_params.from_array("int_dense_array_param") == exp
        assert (
            af_params.from_array(
                "int_dense_array_param", af_params.int_dense_array_param
            )
            == exp
        )
    def test_to_array_with_nd_lists(self):
        """number_dims > 0 params adjust from plain nested lists; mixing a
        label into a list-typed adjustment is an error."""
        class ArrayAdjust(Parameters):
            defaults = {
                "schema": {
                    "labels": {
                        "label1": {
                            "type": "int",
                            "validators": {"range": {"min": 0, "max": 5}},
                        }
                    }
                },
                "arr": {
                    "title": "Array param",
                    "description": "",
                    "type": "float",
                    "number_dims": 1,
                    "value": [1, 2, 3, 4],
                },
                "arr_2D": {
                    "title": "2D Array Param",
                    "description": "",
                    "type": "int",
                    "number_dims": 2,
                    "value": [[1, 2, 3], [4, 5, 6]],
                },
            }
            array_first = True
        params = ArrayAdjust()
        assert params
        assert isinstance(params.arr, np.ndarray)
        assert params.arr.tolist() == [1, 2, 3, 4]
        assert isinstance(params.arr_2D, np.ndarray)
        assert params.arr_2D.tolist() == [[1, 2, 3], [4, 5, 6]]
        params.adjust({"arr": [4, 6, 8], "arr_2D": [[7, 8, 9], [1, 5, 7]]})
        assert isinstance(params.arr, np.ndarray)
        assert isinstance(params.arr_2D, np.ndarray)
        np.testing.assert_allclose(params.arr, [4, 6, 8])
        np.testing.assert_allclose(params.arr_2D, [[7, 8, 9], [1, 5, 7]])
        with pytest.raises(ParamToolsError):
            params.adjust({"arr": [{"label1": 1, "value": [4, 5, 6]}]})
    def test_array_first_with_zero_dim(self):
        """0-dim params become numpy/plain scalars under array_first."""
        class ZeroDim(Parameters):
            defaults = {
                "myint": {
                    "title": "my int",
                    "description": "",
                    "type": "int",
                    "value": 2,
                },
                "mystring": {
                    "title": "my string",
                    "description": "",
                    "type": "str",
                    "value": "hello world",
                },
            }
            array_first = True
        params = ZeroDim()
        assert params.myint == 2.0
        assert isinstance(params.myint, np.int64)
        assert params.mystring == "hello world"
        assert isinstance(params.mystring, str)
class TestCollisions:
    """Guards against parameter names colliding with Parameters API names."""
    def test_collision_list(self):
        """collision_list must exactly track the Parameters attribute set."""
        class CollisionParams(Parameters):
            defaults = {"schema": {"labels": {}, "additional_members": {}}}
        params = CollisionParams()
        # check to make sure that the collisionlist does not need to be updated.
        # Note: dir(obj) lists out all class or instance attributes and methods.
        assert set(collision_list) == {
            name for name in dir(params) if not name.startswith("__")
        }
    def test_collision(self):
        """Declaring a param named 'errors' raises a collision exception."""
        defaults_dict = {
            "schema": {"labels": {}, "additional_members": {}},
            "errors": {
                "title": "Collides with 'errors'",
                "description": "",
                "notes": "",
                "type": "int",
                "value": [{"value": 0}],
                "validators": {"range": {"min": 0, "max": 10}},
            },
        }
        class CollisionParams(Parameters):
            defaults = defaults_dict
        with pytest.raises(ParameterNameCollisionException) as excinfo:
            CollisionParams()
        # NOTE(review): "paramter" typo matches the library's actual message
        # text; fixing it here would break the assertion.
        exp_msg = (
            "The paramter name, 'errors', is already used by the "
            "Parameters object."
        )
        assert excinfo.value.args[0] == exp_msg
class TestExtend:
    """Tests for extending values along label_to_extend: automatic fill,
    '_auto' tagging, clobber vs. no-clobber adjustments, interaction with
    set_state, and the explicit extend() method."""
    def test_extend_num(self, array_first_defaults):
        """Missing label1 values are filled forward from the last known
        value object; filled values are tagged with _auto."""
        array_first_defaults = {
            "schema": array_first_defaults["schema"],
            "int_dense_array_param": array_first_defaults[
                "int_dense_array_param"
            ],
        }
        new_vos = []
        for vo in array_first_defaults["int_dense_array_param"]["value"]:
            if vo["label1"] not in (2, 4, 5):
                new_vos.append(vo)
        array_first_defaults["int_dense_array_param"]["value"] = new_vos
        # Where label 1 is 2, 4, and 5, the value is set to the last
        # known value, given the value object's label values.
        exp = [
            {"label0": "zero", "label1": 0, "label2": 0, "value": 1},
            {"label0": "zero", "label1": 0, "label2": 1, "value": 2},
            {"label0": "zero", "label1": 0, "label2": 2, "value": 3},
            {"label0": "zero", "label1": 1, "label2": 0, "value": 4},
            {"label0": "zero", "label1": 1, "label2": 1, "value": 5},
            {"label0": "zero", "label1": 1, "label2": 2, "value": 6},
            {"label0": "zero", "label1": 2, "label2": 0, "value": 4},
            {"label0": "zero", "label1": 2, "label2": 1, "value": 5},
            {"label0": "zero", "label1": 2, "label2": 2, "value": 6},
            {"label0": "zero", "label1": 3, "label2": 0, "value": 10},
            {"label0": "zero", "label1": 3, "label2": 1, "value": 11},
            {"label0": "zero", "label1": 3, "label2": 2, "value": 12},
            {"label0": "zero", "label1": 4, "label2": 0, "value": 10},
            {"label0": "zero", "label1": 4, "label2": 1, "value": 11},
            {"label0": "zero", "label1": 4, "label2": 2, "value": 12},
            {"label0": "zero", "label1": 5, "label2": 0, "value": 10},
            {"label0": "zero", "label1": 5, "label2": 1, "value": 11},
            {"label0": "zero", "label1": 5, "label2": 2, "value": 12},
            {"label0": "one", "label1": 0, "label2": 0, "value": 19},
            {"label0": "one", "label1": 0, "label2": 1, "value": 20},
            {"label0": "one", "label1": 0, "label2": 2, "value": 21},
            {"label0": "one", "label1": 1, "label2": 0, "value": 22},
            {"label0": "one", "label1": 1, "label2": 1, "value": 23},
            {"label0": "one", "label1": 1, "label2": 2, "value": 24},
            {"label0": "one", "label1": 2, "label2": 0, "value": 22},
            {"label0": "one", "label1": 2, "label2": 1, "value": 23},
            {"label0": "one", "label1": 2, "label2": 2, "value": 24},
            {"label0": "one", "label1": 3, "label2": 0, "value": 28},
            {"label0": "one", "label1": 3, "label2": 1, "value": 29},
            {"label0": "one", "label1": 3, "label2": 2, "value": 30},
            {"label0": "one", "label1": 4, "label2": 0, "value": 28},
            {"label0": "one", "label1": 4, "label2": 1, "value": 29},
            {"label0": "one", "label1": 4, "label2": 2, "value": 30},
            {"label0": "one", "label1": 5, "label2": 0, "value": 28},
            {"label0": "one", "label1": 5, "label2": 1, "value": 29},
            {"label0": "one", "label1": 5, "label2": 2, "value": 30},
        ]
        class AFParams(Parameters):
            defaults = array_first_defaults
            label_to_extend = "label1"
            array_first = True
        params = AFParams()
        assert isinstance(params.int_dense_array_param, np.ndarray)
        assert params.from_array("int_dense_array_param") == exp
        for val in params._data["int_dense_array_param"]["value"]:
            if val["label1"] in (2, 4, 5):
                assert val["_auto"] is True
            else:
                assert "_auto" not in val
        assert params.dump()["int_dense_array_param"]["value"] == exp
        # Extension also works without array_first.
        class AFParams(Parameters):
            defaults = array_first_defaults
            label_to_extend = "label1"
            array_first = False
        params = AFParams()
        assert isinstance(params.int_dense_array_param, list)
    def test_extend_categorical(self, array_first_defaults):
        """Extending along a categorical (string) label fills the missing
        category from the provided one."""
        array_first_defaults = {
            "schema": array_first_defaults["schema"],
            "int_dense_array_param": array_first_defaults[
                "int_dense_array_param"
            ],
        }
        new_vos = []
        for vo in array_first_defaults["int_dense_array_param"]["value"]:
            if vo["label0"] == "one":
                vo.update({"value": vo["value"] - 18})
                new_vos.append(vo)
        array_first_defaults["int_dense_array_param"]["value"] = new_vos
        class AFParams(Parameters):
            defaults = array_first_defaults
            label_to_extend = "label0"
            array_first = True
        params = AFParams()
        assert params.int_dense_array_param.tolist()
        exp = [
            {"label0": "one", "label1": 0, "label2": 0, "value": 1},
            {"label0": "one", "label1": 0, "label2": 1, "value": 2},
            {"label0": "one", "label1": 0, "label2": 2, "value": 3},
            {"label0": "one", "label1": 1, "label2": 0, "value": 4},
            {"label0": "one", "label1": 1, "label2": 1, "value": 5},
            {"label0": "one", "label1": 1, "label2": 2, "value": 6},
            {"label0": "one", "label1": 2, "label2": 0, "value": 7},
            {"label0": "one", "label1": 2, "label2": 1, "value": 8},
            {"label0": "one", "label1": 2, "label2": 2, "value": 9},
            {"label0": "one", "label1": 3, "label2": 0, "value": 10},
            {"label0": "one", "label1": 3, "label2": 1, "value": 11},
            {"label0": "one", "label1": 3, "label2": 2, "value": 12},
            {"label0": "one", "label1": 4, "label2": 0, "value": 13},
            {"label0": "one", "label1": 4, "label2": 1, "value": 14},
            {"label0": "one", "label1": 4, "label2": 2, "value": 15},
            {"label0": "one", "label1": 5, "label2": 0, "value": 16},
            {"label0": "one", "label1": 5, "label2": 1, "value": 17},
            {"label0": "one", "label1": 5, "label2": 2, "value": 18},
        ]
        assert params.from_array("int_dense_array_param", label0="one") == exp
        for val in params._data["int_dense_array_param"]["value"]:
            if val["label0"] == "zero":
                assert val["_auto"] is True
            else:
                assert "_auto" not in val
    def test_extend_w_array(self, extend_ex_path):
        """Array-typed (number_dims=1) params extend along d0 too."""
        class ExtParams(Parameters):
            defaults = extend_ex_path
            label_to_extend = "d0"
            array_first = True
        params = ExtParams()
        assert params.extend_param.tolist() == [
            [1, 2],
            [1, 2],
            [1, 2],
            [3, 4],
            [3, 4],
            [5, 6],
            [5, 6],
            [7, 8],
            [7, 8],
            [7, 8],
            [7, 8],
        ]
    def test_extend_adj(self, extend_ex_path):
        """Adjustments clobber: a value at d0=k replaces everything from k
        forward, including previously explicit values."""
        class ExtParams(Parameters):
            defaults = extend_ex_path
            label_to_extend = "d0"
            array_first = True
        params = ExtParams()
        params.adjust({"extend_param": [{"d0": 3, "value": -1}]})
        assert params.extend_param.tolist() == [
            [1, 2],
            [1, 2],
            [1, 2],
            [-1, -1],
            [-1, -1],
            [-1, -1],
            [-1, -1],
            [-1, -1],
            [-1, -1],
            [-1, -1],
            [-1, -1],
        ]
        for val in params._data["extend_param"]["value"]:
            # 0, 1 extended at the beginning.
            if val["d0"] > 3 or val["d0"] in (0, 1):
                assert val["_auto"] is True
            else:
                assert "_auto" not in val
        # Per-d1 adjustments propagate forward independently.
        params = ExtParams()
        params.adjust(
            {
                "extend_param": [
                    {"d0": 3, "d1": "c1", "value": -1},
                    {"d0": 3, "d1": "c2", "value": 1},
                ]
            }
        )
        assert params.extend_param.tolist() == [
            [1, 2],
            [1, 2],
            [1, 2],
            [-1, 1],
            [-1, 1],
            [-1, 1],
            [-1, 1],
            [-1, 1],
            [-1, 1],
            [-1, 1],
            [-1, 1],
        ]
        # Later adjustments override earlier extended values.
        params = ExtParams()
        params.adjust(
            {
                "extend_param": [
                    {"d0": 3, "value": -1},
                    {"d0": 5, "d1": "c1", "value": 0},
                    {"d0": 5, "d1": "c2", "value": 1},
                    {"d0": 8, "d1": "c1", "value": 22},
                    {"d0": 8, "d1": "c2", "value": 23},
                ]
            }
        )
        assert params.extend_param.tolist() == [
            [1, 2],
            [1, 2],
            [1, 2],
            [-1, -1],
            [-1, -1],
            [0, 1],
            [0, 1],
            [0, 1],
            [22, 23],
            [22, 23],
            [22, 23],
        ]
        # Mixed full/partial adjustments at different d0 values.
        params = ExtParams()
        params.adjust(
            {
                "extend_param": [
                    {"d0": 3, "value": -1},
                    {"d0": 5, "d1": "c1", "value": 0},
                    {"d0": 6, "d1": "c2", "value": 1},
                ]
            }
        )
        assert params.extend_param.tolist() == [
            [1, 2],
            [1, 2],
            [1, 2],
            [-1, -1],
            [-1, -1],
            [0, -1],
            [0, 1],
            [0, 1],
            [0, 1],
            [0, 1],
            [0, 1],
        ]
        # Adjusting at the first d0 value clobbers the whole range.
        params = ExtParams()
        params.adjust({"extend_param": [{"d0": 0, "value": 1}]})
        assert params.extend_param.tolist() == [[1, 1]] * 11
    def test_extend_adj_without_clobber(self, extend_ex_path):
        """clobber=False only overwrites values that were auto-extended;
        user-specified defaults are preserved."""
        class ExtParams(Parameters):
            defaults = extend_ex_path
            label_to_extend = "d0"
            array_first = True
        params = ExtParams()
        params.adjust(
            {"extend_param": [{"d0": 3, "value": -1}]}, clobber=False
        )
        assert params.extend_param.tolist() == [
            [1, 2],
            [1, 2],
            [1, 2],
            [-1, -1],
            [-1, -1],
            [5, 6],
            [5, 6],
            [7, 8],
            [7, 8],
            [7, 8],
            [7, 8],
        ]
        params = ExtParams()
        params.adjust(
            {
                "extend_param": [
                    {"d0": 3, "d1": "c1", "value": -1},
                    {"d0": 3, "d1": "c2", "value": 1},
                ]
            },
            clobber=False,
        )
        assert params.extend_param.tolist() == [
            [1, 2],
            [1, 2],
            [1, 2],
            [-1, 1],
            [-1, 1],
            [5, 6],
            [5, 6],
            [7, 8],
            [7, 8],
            [7, 8],
            [7, 8],
        ]
        params = ExtParams()
        params.adjust(
            {
                "extend_param": [
                    {"d0": 3, "value": -1},
                    {"d0": 5, "d1": "c1", "value": 0},
                    {"d0": 5, "d1": "c2", "value": 1},
                    {"d0": 8, "d1": "c1", "value": 22},
                    {"d0": 8, "d1": "c2", "value": 23},
                ]
            },
            clobber=False,
        )
        assert params.extend_param.tolist() == [
            [1, 2],
            [1, 2],
            [1, 2],
            [-1, -1],
            [-1, -1],
            [0, 1],
            [0, 1],
            [7, 8],
            [22, 23],
            [22, 23],
            [22, 23],
        ]
        params = ExtParams()
        params.adjust(
            {
                "extend_param": [
                    {"d0": 3, "value": -1},
                    {"d0": 5, "d1": "c1", "value": 0},
                    {"d0": 6, "d1": "c2", "value": 1},
                ]
            },
            clobber=False,
        )
        assert params.extend_param.tolist() == [
            [1, 2],
            [1, 2],
            [1, 2],
            [-1, -1],
            [-1, -1],
            [0, 6],
            [0, 1],
            [7, 8],
            [7, 8],
            [7, 8],
            [7, 8],
        ]
        # d0=0,1 were auto-extended, so only they are overwritten here.
        params = ExtParams()
        params.adjust({"extend_param": [{"d0": 0, "value": 1}]}, clobber=False)
        assert params.extend_param.tolist() == [
            [1, 1],
            [1, 1],
            [1, 2],
            [3, 4],
            [3, 4],
            [5, 6],
            [5, 6],
            [7, 8],
            [7, 8],
            [7, 8],
            [7, 8],
        ]
    def test_extend_adj_w_errors(self, extend_ex_path):
        """Validation errors on extended values are reported with the label
        value at which they occur and leave the param unchanged."""
        class ExtParams(Parameters):
            defaults = extend_ex_path
            label_to_extend = "d0"
            array_first = True
        params = ExtParams()
        with pytest.raises(ValidationError):
            params.adjust({"extend_param": 102})
        params = ExtParams()
        with pytest.raises(ValidationError) as excinfo:
            params.adjust({"extend_param": [{"value": 70, "d0": 5}]})
        emsg = json.loads(excinfo.value.args[0])
        # do=7 is when the 'releated_value' is set to 50, which is
        # less than 70 ==> causes range error
        assert "d0=7" in emsg["errors"]["extend_param"][0]
        params = ExtParams()
        before = copy.deepcopy(params.extend_param)
        params.adjust(
            {"extend_param": [{"value": 70, "d0": 5}]}, raise_errors=False
        )
        assert params.errors["extend_param"] == emsg["errors"]["extend_param"]
        assert np.allclose(params.extend_param, before)
    def test_extend_adj_nonextend_param(self, extend_ex_path):
        """Params that don't use label_to_extend adjust normally."""
        class ExtParams(Parameters):
            defaults = extend_ex_path
            label_to_extend = "d0"
            array_first = True
        params = ExtParams()
        params.adjust({"nonextend_param": 3})
        assert params.nonextend_param == 3
    def test_extend_adj_w_set_state(self, extend_ex_path):
        """Extension respects the active state window over d0."""
        class ExtParams(Parameters):
            defaults = extend_ex_path
            label_to_extend = "d0"
            array_first = True
        params = ExtParams()
        params.set_state(d0=list(range(2, 11)))
        params.adjust({"extend_param": [{"d0": 2, "value": -1}]})
        assert params.extend_param.tolist() == [
            # [1, 2],
            # [1, 2],
            [-1, -1],
            [-1, -1],
            [-1, -1],
            [-1, -1],
            [-1, -1],
            [-1, -1],
            [-1, -1],
            [-1, -1],
            [-1, -1],
        ]
        params = ExtParams()
        params.set_state(d0=list(range(2, 11)))
        params.adjust({"extend_param": [{"d0": 3, "value": -1}]})
        assert params.extend_param.tolist() == [
            # [1, 2],
            # [1, 2],
            [1, 2],
            [-1, -1],
            [-1, -1],
            [-1, -1],
            [-1, -1],
            [-1, -1],
            [-1, -1],
            [-1, -1],
            [-1, -1],
        ]
        # Adjusting outside the state window: nothing visible in-state,
        # but the adjustment took effect on the full grid.
        params = ExtParams()
        params.set_state(d0=list(range(2, 11)))
        params.adjust({"extend_param": [{"d0": 1, "value": -1}]})
        assert params.extend_param.tolist() == []
        params.array_first = False
        params.clear_state()
        params.extend()
        params.array_first = True
        params.set_state()
        assert params.extend_param.tolist() == [
            [1, 2],
            [-1, -1],
            [-1, -1],
            [-1, -1],
            [-1, -1],
            [-1, -1],
            [-1, -1],
            [-1, -1],
            [-1, -1],
            [-1, -1],
            [-1, -1],
        ]
    def test_extend_method(self, extend_ex_path):
        """extend() can be called explicitly with a label, target label
        values, and a subset of params."""
        class ExtParams(Parameters):
            defaults = extend_ex_path
            # label_to_extend = "d0"
            array_first = False
        params = ExtParams()
        params.adjust({"extend_param": [{"value": None}]})
        params.adjust(
            {
                "extend_param": [
                    {"d0": 2, "d1": "c1", "value": 1},
                    {"d0": 2, "d1": "c2", "value": 2},
                ]
            }
        )
        params.extend(
            label="d0", label_values=[2, 4, 7], params="extend_param"
        )
        params.sort_values()
        assert params.extend_param == [
            {"d0": 2, "d1": "c1", "value": 1},
            {"d0": 2, "d1": "c2", "value": 2},
            {"d0": 4, "d1": "c1", "value": 1, "_auto": True},
            {"d0": 4, "d1": "c2", "value": 2, "_auto": True},
            {"d0": 7, "d1": "c1", "value": 1, "_auto": True},
            {"d0": 7, "d1": "c2", "value": 2, "_auto": True},
        ]
        # Empty label_values is a no-op.
        params = ExtParams()
        init = params.select_eq("extend_param")
        params.extend(label="d0", label_values=[])
        assert init == params.select_eq("extend_param")
        # Extending forward past the last known value.
        params = ExtParams()
        params.extend(label="d0", label_values=[8, 9, 10])
        params.sort_values()
        assert params.extend_param == [
            {"d0": 2, "d1": "c1", "value": 1},
            {"d0": 2, "d1": "c2", "value": 2},
            {"d0": 3, "d1": "c1", "value": 3},
            {"d0": 3, "d1": "c2", "value": 4},
            {"d0": 5, "d1": "c1", "value": 5},
            {"d0": 5, "d1": "c2", "value": 6},
            {"d0": 7, "d1": "c1", "value": 7},
            {"d0": 7, "d1": "c2", "value": 8},
            {"d0": 8, "d1": "c1", "value": 7, "_auto": True},
            {"d0": 8, "d1": "c2", "value": 8, "_auto": True},
            {"d0": 9, "d1": "c1", "value": 7, "_auto": True},
            {"d0": 9, "d1": "c2", "value": 8, "_auto": True},
            {"d0": 10, "d1": "c1", "value": 7, "_auto": True},
            {"d0": 10, "d1": "c2", "value": 8, "_auto": True},
        ]
        # Extending backward (d0=0) uses the first known value.
        params = ExtParams()
        params.extend(label="d0", label_values=[0, 8, 9, 10])
        params.sort_values()
        assert params.extend_param == [
            {"d0": 0, "d1": "c1", "value": 1, "_auto": True},
            {"d0": 0, "d1": "c2", "value": 2, "_auto": True},
            {"d0": 2, "d1": "c1", "value": 1},
            {"d0": 2, "d1": "c2", "value": 2},
            {"d0": 3, "d1": "c1", "value": 3},
            {"d0": 3, "d1": "c2", "value": 4},
            {"d0": 5, "d1": "c1", "value": 5},
            {"d0": 5, "d1": "c2", "value": 6},
            {"d0": 7, "d1": "c1", "value": 7},
            {"d0": 7, "d1": "c2", "value": 8},
            {"d0": 8, "d1": "c1", "value": 7, "_auto": True},
            {"d0": 8, "d1": "c2", "value": 8, "_auto": True},
            {"d0": 9, "d1": "c1", "value": 7, "_auto": True},
            {"d0": 9, "d1": "c2", "value": 8, "_auto": True},
            {"d0": 10, "d1": "c1", "value": 7, "_auto": True},
            {"d0": 10, "d1": "c2", "value": 8, "_auto": True},
        ]
def grow(n, r, t):
    """Apply ``abs(t)`` rounds of 2-decimal compounding at rate *r* to *n*.

    Grows when ``t >= 0`` and discounts (inverse factor) when ``t < 0``,
    rounding to two decimal places after every period.
    """
    direction = 1 if t >= 0 else -1
    factor = (1 + r) ** direction
    for _ in range(abs(t)):
        n = round(n * factor, 2)
    return n
class TestIndex:
    """Tests for uses_extend_func + index_rates: extended values are grown
    (or discounted, going backward) by the per-period index rate."""
    def test_index_simple(self, extend_ex_path):
        """Zero rates extend flat; nonzero rates compound each period."""
        class IndexParams1(Parameters):
            defaults = extend_ex_path
            label_to_extend = "d0"
            array_first = True
            uses_extend_func = True
            index_rates = {lte: 0 for lte in range(10)}
        params = IndexParams1()
        params.adjust({"indexed_param": [{"d0": 3, "value": 3}]})
        assert params.indexed_param.tolist() == [
            [1, 2],
            [1, 2],
            [1, 2],
            [3, 3],
            [3, 3],
            [3, 3],
            [3, 3],
            [3, 3],
            [3, 3],
            [3, 3],
            [3, 3],
        ]
        for val in params._data["indexed_param"]["value"]:
            # 0, 1 extended at the beginning.
            if val["d0"] > 3 or val["d0"] in (0, 1):
                assert val["_auto"] is True
            else:
                assert "_auto" not in val
        # With a 2% rate: values compound forward and discount backward.
        class IndexParams2(Parameters):
            defaults = extend_ex_path
            label_to_extend = "d0"
            array_first = True
            uses_extend_func = True
            index_rates = {lte: 0.02 for lte in range(10)}
        params = IndexParams2()
        params.adjust({"indexed_param": [{"d0": 3, "value": 3}]})
        exp = [
            [grow(1, 0.02, -2), grow(2, 0.02, -2)],
            [grow(1, 0.02, -1), grow(2, 0.02, -1)],
            [grow(1, 0.02, 0), grow(2, 0.02, 0)],
            [grow(3, 0.02, 0)] * 2,
            [grow(3, 0.02, 1)] * 2,
            [grow(3, 0.02, 2)] * 2,
            [grow(3, 0.02, 3)] * 2,
            [grow(3, 0.02, 4)] * 2,
            [grow(3, 0.02, 5)] * 2,
            [grow(3, 0.02, 6)] * 2,
            [grow(3, 0.02, 7)] * 2,
        ]
        np.testing.assert_allclose(params.indexed_param.tolist(), exp)
    def test_related_param_errors(self, extend_ex_path):
        """Referential validation still applies to indexed/extended values."""
        class IndexParams2(Parameters):
            defaults = extend_ex_path
            label_to_extend = "d0"
            array_first = True
            uses_extend_func = True
            index_rates = {lte: 0.02 for lte in range(10)}
        params = IndexParams2()
        with pytest.raises(ValidationError):
            params.adjust(
                {
                    "related_param": [{"value": 8.1, "d0": 4}],
                    "indexed_param": [{"d0": 3, "value": 8}],
                }
            )
|
import copy
import os
import json
import datetime
from collections import OrderedDict
from random import shuffle
import pytest
import numpy as np
import marshmallow as ma
from paramtools import (
ParamToolsError,
ValidationError,
SparseValueObjectsException,
InconsistentLabelsException,
collision_list,
ParameterNameCollisionException,
register_custom_type,
Parameters,
Values,
Slice,
)
from paramtools.contrib import Bool_
CURRENT_PATH = os.path.abspath(os.path.dirname(__file__))
@pytest.fixture
def defaults_spec_path():
    """Path to the main test defaults spec (defaults.json)."""
    return os.path.join(CURRENT_PATH, "defaults.json")
@pytest.fixture
def extend_ex_path():
    """Path to the extend/index example defaults spec (extend_ex.json)."""
    return os.path.join(CURRENT_PATH, "extend_ex.json")
@pytest.fixture
def array_first_defaults(defaults_spec_path):
    """Defaults spec with list-typed params removed, for array_first tests."""
    with open(defaults_spec_path) as f:
        r = json.loads(f.read())
    r.pop("float_list_param")
    r.pop("simple_int_list_param")
    r.pop("float_list_when_param")
    r.pop("when_array_param")
    return r
@pytest.fixture
def TestParams(defaults_spec_path):
    """Parameters subclass bound to the test defaults spec."""
    class _TestParams(Parameters):
        defaults = defaults_spec_path
    return _TestParams
@pytest.fixture(scope="function")
def af_params(array_first_defaults):
    """Array-first Parameters instance pinned to label0="zero", label1=1."""
    class AFParams(Parameters):
        defaults = array_first_defaults
    _af_params = AFParams(
        initial_state={"label0": "zero", "label1": 1}, array_first=True
    )
    return _af_params
def test_init(TestParams):
    """Instantiation populates _data, per-param attributes, and label grid."""
    params = TestParams()
    assert params
    assert params._data
    for param in params._data:
        assert getattr(params, param)
    assert params.label_grid
    assert params.label_grid == params._stateless_label_grid
class TestSchema:
    def test_empty_schema(self):
        """A defaults dict without a "schema" member is valid."""
        class Params(Parameters):
            array_first = True
            defaults = {
                "hello_world": {
                    "title": "Hello, World!",
                    "description": "Simplest config possible.",
                    "type": "str",
                    "value": "hello world",
                }
            }
        params = Params()
        assert params.hello_world == "hello world"
        assert params.label_grid == {}
    def test_schema_just_labels(self):
        """A schema with only labels populates the label grid."""
        class Params(Parameters):
            array_first = True
            defaults = {
                "schema": {
                    "labels": {
                        "somelabel": {
                            "type": "int",
                            "validators": {"range": {"min": 0, "max": 2}},
                        }
                    }
                },
                "hello_world": {
                    "title": "Hello, World!",
                    "description": "Simplest config possible.",
                    "type": "str",
                    "value": "hello world",
                },
            }
        params = Params()
        assert params.hello_world == "hello world"
        assert params.label_grid == {"somelabel": [0, 1, 2]}
def test_schema_just_additional_members(self):
class Params(Parameters):
array_first = True
defaults = {
"schema": {
"additional_members": {"additional": {"type": "str"}}
},
"hello_world": {
"title": "Hello, World!",
"description": "Simplest config possible.",
"additional": "I'm extra",
"type": "str",
"value": "hello world",
},
}
params = Params()
assert params.hello_world == "hello world"
assert params.label_grid == {}
def test_schema_not_dropped(self, defaults_spec_path):
with open(defaults_spec_path, "r") as f:
defaults_ = json.loads(f.read())
class TestParams(Parameters):
defaults = defaults_
TestParams()
assert defaults_["schema"]
def test_schema_with_errors(self):
class Params1(Parameters):
array_first = True
defaults = {
"schema": {
"additional_members": {"additional": {"type": 1234}}
}
}
with pytest.raises(ma.ValidationError):
Params1()
class Params2(Parameters):
array_first = True
defaults = {
"schema": {
"additional_members_123": {"additional": {"type": "str"}}
}
}
with pytest.raises(ma.ValidationError):
Params2()
def test_operators_spec(self):
class Params1(Parameters):
array_first = False
defaults = {
"schema": {
"labels": {
"mylabel": {
"type": "int",
"validators": {"range": {"min": 0, "max": 10}},
},
"somelabel": {
"type": "int",
"validators": {"range": {"min": 0, "max": 10}},
},
},
"operators": {
"array_first": False,
"label_to_extend": "somelabel",
},
}
}
params = Params1(array_first=True, label_to_extend="mylabel")
assert params.array_first
assert params.label_to_extend == "mylabel"
assert params.operators == {
"array_first": True,
"label_to_extend": "mylabel",
"uses_extend_func": False,
}
assert params.dump()["schema"]["operators"] == params.operators
Params1.array_first = True
params = Params1()
assert params.array_first
assert params.label_to_extend == "somelabel"
assert params.operators == {
"array_first": True,
"label_to_extend": "somelabel",
"uses_extend_func": False,
}
assert params.dump()["schema"]["operators"] == params.operators
class Params2(Parameters):
defaults = {"schema": {"operators": {"array_first": True}}}
params = Params2()
assert params.array_first
assert params.label_to_extend is None
assert params.operators == {
"array_first": True,
"label_to_extend": None,
"uses_extend_func": False,
}
assert params.dump()["schema"]["operators"] == params.operators
class Params3(Parameters):
array_first = True
label_to_extend = "hello"
defaults = {"schema": {"operators": {"array_first": True}}}
params = Params3(array_first=False, label_to_extend=None)
assert params.operators == {
"array_first": False,
"label_to_extend": None,
"uses_extend_func": False,
}
assert params.dump()["schema"]["operators"] == params.operators
params.array_first = True
assert params.dump()["schema"]["operators"] == params.operators
def test_when_schema(self):
class Params(Parameters):
defaults = {
"param": {
"title": "",
"description": "",
"type": "int",
"value": 0,
"validators": {
"when": {
"param": "default",
"is": {"less_than": 0, "greater_than": 1},
"then": {"range": {"min": 0}},
"otherwise": {"range": {"min": "default"}},
}
},
}
}
with pytest.raises(ma.ValidationError):
Params()
def test_custom_fields(self):
class Custom(ma.Schema):
hello = ma.fields.Boolean()
world = Bool_() # Tests data is serialized.
register_custom_type("custom_type", ma.fields.Nested(Custom()))
class Params(Parameters):
defaults = {
"schema": {
"labels": {"custom_label": {"type": "custom_type"}},
"additional_members": {"custom": {"type": "custom_type"}},
},
"param": {
"title": "",
"description": "",
"type": "int",
"value": [{"custom_label": {"hello": True}, "value": 0}],
"custom": {"hello": True, "world": True},
},
}
params = Params()
assert params
assert params._data["param"]["custom"] == {
"hello": True,
"world": True,
}
assert params.adjust(
{
"param": [
{
"custom_label": {"hello": True, "world": True},
"value": 1,
}
]
}
)
assert params.sel["param"].isel[:] == [
{"custom_label": {"hello": True}, "value": 0},
{"custom_label": {"hello": True, "world": True}, "value": 1},
]
class BadSpec(Parameters):
field_map = {"custom_type": ma.fields.Nested(Custom)}
defaults = {
"schema": {
"additional_members": {"custom": {"type": "custom_type"}}
},
"param": {
"title": "",
"description": "",
"type": "int",
"value": 0,
"custom": {"hello": 123, "world": "whoops"},
},
}
with pytest.raises(ma.ValidationError):
BadSpec()
class TestValues:
    """Tests for the selection API types returned by ``Parameters.sel``."""
    def test(self, TestParams, defaults_spec_path):
        """sel returns Values; label access yields Slice; item access is banned."""
        params = TestParams()
        values = params.sel["min_int_param"]
        assert isinstance(values, Values)
        sliced = values["label0"]
        assert isinstance(sliced, Slice)
        with pytest.raises(AttributeError):
            params["min_int_param"]
class TestAccess:
    """Tests for reading data out: specification(), dump(), iteration, sorting."""
    def test_specification(self, TestParams, defaults_spec_path):
        """specification() returns every parameter keyed to its value list."""
        params = TestParams()
        spec1 = params.specification()
        with open(defaults_spec_path) as f:
            exp = json.loads(f.read())
        exp.pop("schema")
        assert set(spec1.keys()) == set(exp.keys())
        assert spec1["min_int_param"] == exp["min_int_param"]["value"]
    def test_is_ordered(self, TestParams):
        """specification() preserves parameter order via OrderedDict."""
        params = TestParams()
        spec1 = params.specification()
        assert isinstance(spec1, OrderedDict)
        spec2 = params.specification(meta_data=True, serializable=True)
        assert isinstance(spec2, OrderedDict)
    def test_specification_query(self, TestParams):
        """Label keyword arguments filter specification() and the attributes."""
        params = TestParams()
        spec1 = params.specification()
        exp = {
            "min_int_param": [{"label0": "one", "label1": 2, "value": 2}],
            "max_int_param": [{"label0": "one", "label1": 2, "value": 4}],
        }
        spec2 = params.specification(label0="one")
        # check that specification method got only the value item with label0="one"
        assert spec2["min_int_param"] == exp["min_int_param"]
        assert spec2["max_int_param"] == exp["max_int_param"]
        # check that get method got only value item with label0="one"
        params.set_state(label0="one")
        assert params.min_int_param == exp["min_int_param"]
        assert params.max_int_param == exp["max_int_param"]
        # check that specification method gets other data, not containing a label0
        # label.
        for param, data in spec1.items():
            if all("label0" not in val_item for val_item in data):
                assert spec2[param] == data
        # Deleted params are hidden unless include_empty is requested.
        params.delete({"str_choice_param": None})
        assert "str_choice_param" not in params.specification()
        assert "str_choice_param" in params.specification(include_empty=True)
    def test_serializable(self, TestParams, defaults_spec_path):
        """serializable=True output round-trips through json.dumps."""
        params = TestParams()
        assert json.dumps(params.specification(serializable=True))
        assert json.dumps(
            params.specification(serializable=True, meta_data=True)
        )
        spec = params.specification(serializable=True)
        # Make sure "value" is removed when meta_data is False
        for value in spec.values():
            assert "value" not in value
        with open(defaults_spec_path) as f:
            exp = json.loads(f.read())
        exp.pop("schema")
        spec = params.specification(serializable=True, meta_data=True)
        # Serialized spec matches a load/dump round trip of the raw defaults.
        assert spec == params._defaults_schema.dump(
            params._defaults_schema.load(exp)
        )
    def test_dump(self, TestParams, defaults_spec_path):
        """dump() is the meta-data spec plus the schema, and is re-loadable."""
        params1 = TestParams()
        spec = params1.specification(serializable=True, meta_data=True)
        schema = params1._schema
        dumped = params1.dump()
        assert dumped == {**spec, **{"schema": schema}}
        # A dump can be used directly as the defaults of a new class.
        class TestParams2(Parameters):
            defaults = dumped
        params2 = TestParams2()
        assert params2.dump() == dumped
    def test_dump_with_labels(self, TestParams, defaults_spec_path):
        """dump() respects the active label state and stays re-loadable."""
        params1 = TestParams()
        spec = params1.specification(
            serializable=True,
            include_empty=True,
            meta_data=True,
            label0="one",
            sort_values=True,
        )
        schema = params1._schema
        params1.set_state(label0="one")
        dumped = params1.dump(sort_values=True)
        assert dumped == {**spec, **{"schema": schema}}
        class TestParams2(Parameters):
            defaults = dumped
        params2 = TestParams2()
        params2.set_state(label0="one")
        assert params2.dump() == dumped
    def test_iterable(self, TestParams):
        """Iterating a Parameters instance yields its parameter names."""
        params = TestParams()
        act = set([])
        for param in params:
            act.add(param)
        assert set(params._data.keys()) == act
        assert set(params._data.keys()) == set(params.keys())
        # items() pairs each name with the attribute's value objects.
        for param, data in params.items():
            np.testing.assert_equal(data, getattr(params, param))
    def test_sort_values(self, TestParams):
        """Ensure sort runs and is stable"""
        sorted_tp = TestParams()
        sorted_tp.sort_values()
        assert sorted_tp.dump(sort_values=False) == TestParams().dump(
            sort_values=False
        )
        # Shuffle the underlying value lists and drop the selection cache.
        shuffled_tp = TestParams()
        for param in shuffled_tp:
            shuffle(shuffled_tp._data[param]["value"])
        shuffled_tp.sel._cache = {}
        assert sorted_tp.dump(sort_values=False) != shuffled_tp.dump(
            sort_values=False
        )
        shuffled_tp.sort_values()
        assert sorted_tp.dump(sort_values=False) == shuffled_tp.dump(
            sort_values=False
        )
        # Test attribute is updated, too.
        for param in sorted_tp:
            assert getattr(sorted_tp, param) == getattr(shuffled_tp, param)
    def test_sort_values_no_labels(self):
        """sort_values works on a spec whose parameters carry no labels."""
        class Params(Parameters):
            defaults = {
                "test": {
                    "title": "test",
                    "description": "",
                    "type": "int",
                    "value": 2,
                }
            }
        params = Params()
        assert params.sort_values() == params._data
        assert params.sort_values({"test": params.test})
        assert params.dump()
    def test_sort_values_correctness(self):
        """Ensure sort is correct"""
        exp = [
            {"value": 1},
            {"label0": 1, "label1": "one", "value": 1},
            {"label0": 1, "label1": "two", "value": 1},
            {"label0": 2, "label1": "one", "value": 1},
            {"label0": 2, "label1": "two", "value": 1},
            {"label0": 3, "label1": "one", "value": 1},
        ]
        shuffled = copy.deepcopy(exp)
        shuffle(shuffled)
        class Params(Parameters):
            defaults = {
                "schema": {
                    "labels": {
                        "label0": {
                            "type": "int",
                            "validators": {"range": {"min": 0, "max": 3}},
                        },
                        "label1": {
                            "type": "str",
                            "validators": {
                                "choice": {"choices": ["one", "two"]}
                            },
                        },
                    }
                },
                "param": {
                    "title": "test",
                    "description": "",
                    "type": "int",
                    "value": shuffled,
                },
            }
        params = Params(sort_values=False)
        assert params.param != exp and params.param == shuffled
        params.sort_values()
        assert params.param == exp
        # test passing in a data object
        params = Params(sort_values=False)
        assert params.param != exp and params.param == shuffled
        data1 = {"param": params.param}
        params.sort_values(data1, has_meta_data=False)
        data1 = copy.deepcopy(data1)
        data2 = {"param": {"value": params.param}}
        params.sort_values(data2, has_meta_data=True)
        data2 = copy.deepcopy(data2)
        params.sort_values()
        assert data1["param"] == data2["param"]["value"] == params.param
        # has_meta_data=False without a data object is an error.
        with pytest.raises(ParamToolsError):
            params.sort_values(has_meta_data=False)
    def test_dump_sort_values(self, TestParams):
        """Test sort_values keyword in dump()"""
        tp = TestParams()
        for param in tp:
            shuffle(tp._data[param]["value"])
        tp.sel._cache = {}
        shuffled_dump = tp.dump(sort_values=False)
        sorted_dump = tp.dump(sort_values=True)
        assert sorted_dump != shuffled_dump
        sorted_tp = TestParams()
        sorted_tp.sort_values()
        assert sorted_tp.dump(sort_values=False) == sorted_dump
        # Test that sort works when state is activated
        state_tp = TestParams()
        for param in tp:
            shuffle(state_tp._data[param]["value"])
        state_tp.set_state(label0="zero", label2=1)
        state_dump = copy.deepcopy(state_tp.dump(sort_values=False))
        # A dump taken under state loads into a stateless instance.
        class NoStateParams(Parameters):
            defaults = state_dump
        nostate_tp = NoStateParams(sort_values=False)
        assert nostate_tp.dump(sort_values=False) == state_dump
        assert not nostate_tp.view_state()
        assert state_tp.view_state()
        assert nostate_tp.dump(sort_values=True) == state_tp.dump(
            sort_values=True
        )
    def test_sort_values_w_array(self, extend_ex_path):
        """Test sort values with array first config"""
        class ExtParams(Parameters):
            defaults = extend_ex_path
            label_to_extend = "d0"
            array_first = True
        # Test that param attributes are not updated when
        # array first is True
        params = ExtParams()
        params.extend_param = "don't update me"
        params.sort_values()
        assert params.extend_param == "don't update me"
    def test_sort_values_with_state(self, extend_ex_path):
        """sort_values under an active state sorts extended value objects."""
        class ExtParams(Parameters):
            defaults = extend_ex_path
            label_to_extend = "d0"
            array_first = False
        params = ExtParams()
        params.set_state(d0=[6, 7, 8, 9])
        params.sort_values()
        # "_auto" marks value objects generated by the extend mechanism.
        assert params.extend_param == [
            {"d0": 6, "d1": "c1", "value": 5, "_auto": True},
            {"d0": 6, "d1": "c2", "value": 6, "_auto": True},
            {"d0": 7, "d1": "c1", "value": 7},
            {"d0": 7, "d1": "c2", "value": 8},
            {"d0": 8, "d1": "c1", "value": 7, "_auto": True},
            {"d0": 8, "d1": "c2", "value": 8, "_auto": True},
            {"d0": 9, "d1": "c1", "value": 7, "_auto": True},
            {"d0": 9, "d1": "c2", "value": 8, "_auto": True},
        ]
class TestAdjust:
    """Tests for Parameters.adjust and Parameters.delete."""
    def test_adjust_int_param(self, TestParams):
        """A labeled adjustment is reflected by the parameter attribute."""
        params = TestParams()
        params.set_state(label0="one", label1=2)
        adjustment = {
            "min_int_param": [{"label0": "one", "label1": 2, "value": 3}]
        }
        params.adjust(adjustment)
        assert params.min_int_param == adjustment["min_int_param"]
    def test_simultaneous_adjust(self, TestParams):
        """
        Adjust min_int_param above original max_int_param value at same time as
        max_int_param value is adjusted up. This tests that the new param is
        compared against the adjusted reference param if the reference param is
        specified.
        """
        params = TestParams()
        params.set_state(label0="zero", label1=1)
        adjustment = {
            "min_int_param": [{"label0": "zero", "label1": 1, "value": 4}],
            "max_int_param": [{"label0": "zero", "label1": 1, "value": 5}],
        }
        params.adjust(adjustment)
        assert params.min_int_param == adjustment["min_int_param"]
        assert params.max_int_param == adjustment["max_int_param"]
    def test_transaction(self, TestParams):
        """
        Use transaction manager to defer schema level validation until all adjustments
        are complete.
        """
        params = TestParams()
        params.set_state(label0="zero", label1=1)
        adjustment = {
            "min_int_param": [{"label0": "zero", "label1": 1, "value": 4}],
            "max_int_param": [{"label0": "zero", "label1": 1, "value": 5}],
        }
        with params.transaction(defer_validation=True):
            params.adjust({"min_int_param": adjustment["min_int_param"]})
            params.adjust({"max_int_param": adjustment["max_int_param"]})
        assert params.min_int_param == adjustment["min_int_param"]
        assert params.max_int_param == adjustment["max_int_param"]
    def test_transaction_with_when_parameter(self, TestParams):
        """
        When validator returns None when validate_schema is False for performance
        reasons.
        """
        params = TestParams()
        with params.transaction(defer_validation=True):
            params.adjust({"when_param": 2, "str_choice_param": "value1"})
        assert params.when_param == [{"value": 2}]
    def test_adjust_many_labels(self, TestParams):
        """
        Adjust several parameters at once, mixing fully-labeled value objects,
        a scalar shorthand, and a date string; only the queried label
        combination is updated while the active state's view is unchanged.
        """
        params = TestParams()
        params.set_state(label0="zero", label1=1)
        adjustment = {
            "min_int_param": [{"label0": "one", "label1": 2, "value": 2}],
            "int_default_param": 5,
            "date_param": [
                {"label0": "zero", "label1": 1, "value": "2018-01-17"}
            ],
        }
        params.adjust(adjustment)
        # min_int_param is adjusted in the _data attribute but the instance
        # attribute min_int_param is not.
        spec = params.specification(use_state=False, label0="one", label1=2)
        assert spec["min_int_param"] == adjustment["min_int_param"]
        assert params.min_int_param == [
            {"label0": "zero", "label1": 1, "value": 1}
        ]
        assert params.int_default_param == [
            {"value": adjustment["int_default_param"]}
        ]
        # Date strings are deserialized to datetime.date objects.
        assert params.date_param == [
            {
                "value": datetime.date(2018, 1, 17),
                "label1": 1,
                "label0": "zero",
            }
        ]
    def test_adjust_none_basic(self, TestParams):
        """Adjusting with value=None removes the matching value objects."""
        params = TestParams()
        adj = {
            "min_int_param": [{"label0": "one", "label1": 2, "value": None}],
            "str_choice_param": [{"value": None}],
        }
        params.adjust(adj)
        assert len(params.min_int_param) == 1
        assert len(params.str_choice_param) == 0
    def test_adjust_none_many_values(self, TestParams):
        """value=None removal respects label filters on dense parameters."""
        params = TestParams()
        adj = {"int_dense_array_param": [{"value": None}]}
        params.adjust(adj)
        assert len(params._data["int_dense_array_param"]["value"]) == 0
        assert len(params.int_dense_array_param) == 0
        # Removing only label0="zero" leaves the label0="one" half intact.
        params = TestParams()
        adj = {"int_dense_array_param": [{"label0": "zero", "value": None}]}
        params.adjust(adj)
        assert len(params._data["int_dense_array_param"]["value"]) == 18
        assert len(params.int_dense_array_param) == 18
        assert (
            len(
                params.specification(
                    use_state=False, include_empty=True, label0="zero"
                )["int_dense_array_param"]
            )
            == 0
        )
        assert (
            len(
                params.specification(
                    use_state=False, include_empty=True, label0="one"
                )["int_dense_array_param"]
            )
            == 18
        )
    def test_delete(self, TestParams):
        """delete() removes value objects; None deletes all of a parameter."""
        params = TestParams()
        adj = {
            "min_int_param": [{"label0": "one", "label1": 2, "value": 2}],
            "str_choice_param": None,
        }
        params.delete(adj)
        assert len(params.min_int_param) == 1
        assert len(params.str_choice_param) == 0
        params = TestParams()
        adj = {"int_dense_array_param": None}
        params.delete(adj)
        assert len(params._data["int_dense_array_param"]["value"]) == 0
        assert len(params.int_dense_array_param) == 0
        # Deleting by label leaves the other label's values intact.
        params = TestParams()
        adj = {"int_dense_array_param": [{"label0": "zero", "value": 2}]}
        params.delete(adj)
        assert len(params._data["int_dense_array_param"]["value"]) == 18
        assert len(params.int_dense_array_param) == 18
        assert (
            len(
                params.specification(
                    use_state=False, include_empty=True, label0="zero"
                )["int_dense_array_param"]
            )
            == 0
        )
        assert (
            len(
                params.specification(
                    use_state=False, include_empty=True, label0="one"
                )["int_dense_array_param"]
            )
            == 18
        )
    def test_adjust_when_param(self, TestParams):
        """Adjusting a "when"-validated scalar works on both branches."""
        params = TestParams()
        params.adjust({"when_param": 2, "str_choice_param": "value1"})
        assert params.when_param == [{"value": 2}]
        params = TestParams()
        params.adjust({"when_param": 0})
        assert params.when_param == [{"value": 0}]
    def test_adjust_when_array_param(self, TestParams):
        """Adjusting a "when"-validated array parameter with a valid value."""
        params = TestParams()
        params.adjust({"when_array_param": [0, 1, 0, 0]})
        assert params.when_array_param == [{"value": [0, 1, 0, 0]}]
    def test_adjust_float_list_when_param(self, TestParams):
        """Adjusting a "when"-validated float list preserves its label."""
        params = TestParams()
        params.adjust({"float_list_when_param": [0, 2.0, 2.0, 2.0]})
        assert params.float_list_when_param == [
            {"label0": "zero", "value": [0, 2.0, 2.0, 2.0]}
        ]
class TestValidationMessages:
    """Tests for the errors/warnings surfaces raised by adjust()."""
    def test_attributes(self, TestParams):
        """A fresh instance has empty errors and warnings."""
        params = TestParams()
        assert params.errors == {}
        assert params.warnings == {}
    def test_errors(self, TestParams):
        """Errors expose user messages, internal messages, and labels."""
        params = TestParams()
        adj = {"min_int_param": [{"value": "abc"}]}
        with pytest.raises(ValidationError) as excinfo:
            params.adjust(adj)
        exp_user_message = {"min_int_param": ["Not a valid integer: abc."]}
        assert json.loads(excinfo.value.args[0]) == {
            "errors": exp_user_message
        }
        exp_internal_message = {
            "min_int_param": [["Not a valid integer: abc."]]
        }
        assert excinfo.value.messages["errors"] == exp_internal_message
        exp_labels = {"min_int_param": [{}]}
        assert excinfo.value.labels["errors"] == exp_labels
        # A scalar where a list of value objects is expected also raises.
        params = TestParams()
        adj = {"min_int_param": "abc"}
        with pytest.raises(ValidationError):
            params.adjust(adj)
    def test_label_errors(self, TestParams):
        """Out-of-range label values are reported as errors."""
        params = TestParams()
        params.adjust(
            {"min_int_param": [{"value": 2, "label1": 6}]}, raise_errors=False
        )
        assert params.errors["min_int_param"] == [
            "Input 6 must be less than 5."
        ]
        params = TestParams()
        params.adjust(
            {"min_int_param": [{"value": 2, "label1": -1}]}, raise_errors=False
        )
        assert params.errors["min_int_param"] == [
            "Input -1 must be greater than 0."
        ]
    def test_errors_choice_param(self, TestParams):
        """Choice and type violations on a choice param are reported."""
        params = TestParams()
        adjustment = {"str_choice_param": [{"value": "not a valid choice"}]}
        with pytest.raises(ValidationError) as excinfo:
            params.adjust(adjustment)
        msg = [
            'str_choice_param "not a valid choice" must be in list of choices value0, '
            "value1."
        ]
        assert (
            json.loads(excinfo.value.args[0])["errors"]["str_choice_param"]
            == msg
        )
        # A non-string value fails type validation before choice validation.
        adjustment = {"str_choice_param": [{"value": 4}]}
        params = TestParams()
        with pytest.raises(ValidationError) as excinfo:
            params.adjust(adjustment)
        msg = ["Not a valid string."]
        assert (
            json.loads(excinfo.value.args[0])["errors"]["str_choice_param"]
            == msg
        )
        params = TestParams()
        params.adjust(adjustment, raise_errors=False)
        msg = ["Not a valid string."]
        assert params.errors["str_choice_param"] == msg
        params = TestParams()
        with pytest.raises(ValidationError) as excinfo:
            params.adjust(adjustment)
        msg = ["Not a valid string."]
        assert (
            json.loads(excinfo.value.args[0])["errors"]["str_choice_param"]
            == msg
        )
        params = TestParams()
        params.adjust(adjustment, raise_errors=False)
        # Bug fix: this comparison previously lacked the assert keyword.
        assert params.errors["str_choice_param"] == ["Not a valid string."]
    def test_errors_default_reference_param(self, TestParams):
        """Values below a "default"-referencing minimum are rejected."""
        params = TestParams()
        params.set_state(label0="zero", label1=1)
        # value under the default.
        curr = params.int_default_param[0]["value"]
        adjustment = {"int_default_param": [{"value": curr - 1}]}
        params.adjust(adjustment, raise_errors=False)
        exp = [f"int_default_param {curr-1} < min 2 default"]
        assert params.errors["int_default_param"] == exp
    def test_errors_int_param(self, TestParams):
        """A float value for an int param is a type error."""
        params = TestParams()
        adjustment = {
            "min_int_param": [{"label0": "zero", "label1": 1, "value": 2.5}]
        }
        params.adjust(adjustment, raise_errors=False)
        exp = {"min_int_param": ["Not a valid integer: 2.5."]}
        assert params.errors == exp
    def test_errors_multiple_params(self, TestParams):
        """Errors are collected across parameters and value objects."""
        params = TestParams()
        adjustment = {
            "min_int_param": [
                {"label0": "zero", "label1": 1, "value": "not a number"},
                {"label0": "one", "label1": 2, "value": "still not a number"},
            ],
            "date_param": [
                {"label0": "zero", "label1": 1, "value": "not a date"}
            ],
        }
        params.adjust(adjustment, raise_errors=False)
        exp = {
            "min_int_param": [
                "Not a valid integer: not a number.",
                "Not a valid integer: still not a number.",
            ],
            "date_param": ["Not a valid date: not a date."],
        }
        assert params.errors == exp
    def test_list_type_errors(self, TestParams):
        """List-type params report per-element errors with their labels."""
        params = TestParams()
        adj = {
            "float_list_param": [
                {"value": ["abc", 0, "def", 1], "label0": "zero", "label1": 1},
                {"value": [-1, "ijk"], "label0": "one", "label1": 2},
            ]
        }
        with pytest.raises(ValidationError) as excinfo:
            params.adjust(adj)
        # User messages are flattened across value objects...
        exp_user_message = {
            "float_list_param": [
                "Not a valid number: abc.",
                "Not a valid number: def.",
                "Not a valid number: ijk.",
            ]
        }
        assert json.loads(excinfo.value.args[0]) == {
            "errors": exp_user_message
        }
        # ...while internal messages keep one list per value object.
        exp_internal_message = {
            "float_list_param": [
                ["Not a valid number: abc.", "Not a valid number: def."],
                ["Not a valid number: ijk."],
            ]
        }
        assert excinfo.value.messages["errors"] == exp_internal_message
        exp_labels = {
            "float_list_param": [
                {"label0": "zero", "label1": 1},
                {"label0": "one", "label1": 2},
            ]
        }
        assert excinfo.value.labels["errors"] == exp_labels
        # A list where a scalar is expected is also a type error.
        params = TestParams()
        adjustment = {"float_param": [2.5]}
        params.adjust(adjustment, raise_errors=False)
        exp = {"float_param": ["Not a valid number: [2.5]."]}
        assert params.errors == exp
        params = TestParams()
        adjustment = {"bool_param": [False]}
        params.adjust(adjustment, raise_errors=False)
        exp = {"bool_param": ["Not a valid boolean: [False]."]}
        assert params.errors == exp
    def test_range_validation_on_list_param(self, TestParams):
        """Range validation applies element-wise to list-type parameters."""
        params = TestParams()
        adj = {
            "float_list_param": [
                {"value": [-1, 1], "label0": "zero", "label1": 1}
            ]
        }
        params.adjust(adj, raise_errors=False)
        exp = ["float_list_param[label0=zero, label1=1] [-1.0, 1.0] < min 0 "]
        assert params.errors["float_list_param"] == exp
    def test_warnings(self, TestParams):
        """Warning-level validators populate warnings, not errors."""
        params = TestParams()
        with pytest.raises(ValidationError) as excinfo:
            params.adjust({"str_choice_warn_param": "not a valid choice"})
        assert params.warnings
        assert not params.errors
        msg = [
            'str_choice_warn_param "not a valid choice" must be in list of choices value0, '
            "value1."
        ]
        assert (
            json.loads(excinfo.value.args[0])["warnings"][
                "str_choice_warn_param"
            ]
            == msg
        )
        params = TestParams()
        with pytest.raises(ValidationError) as excinfo:
            params.adjust({"int_warn_param": -1})
        assert params.warnings
        assert not params.errors
        msg = ["int_warn_param -1 < min 0 "]
        assert (
            json.loads(excinfo.value.args[0])["warnings"]["int_warn_param"]
            == msg
        )
    def test_ignore_warnings(self, TestParams):
        """ignore_warnings suppresses warnings but never errors."""
        params = TestParams()
        params.adjust({"int_warn_param": -2}, ignore_warnings=True)
        assert params.int_warn_param == [{"value": -2}]
        assert not params.errors
        assert not params.warnings
        with pytest.raises(ValidationError):
            params.adjust({"int_warn_param": "abc"}, ignore_warnings=True)
    def test_when_validation(self):
        """The "when" validator applies then/otherwise branches correctly."""
        class Params(Parameters):
            defaults = {
                "param": {
                    "title": "",
                    "description": "",
                    "type": "int",
                    "value": 0,
                    "validators": {
                        "when": {
                            "param": "when_param",
                            "is": {"less_than": 0},
                            "then": {"range": {"min": 0}},
                            "otherwise": {
                                # only valid for ndim = 0
                                "range": {"min": "when_param"}
                            },
                        }
                    },
                },
                "when_param": {
                    "title": "",
                    "description": "",
                    "type": "int",
                    "value": 2,
                },
            }
        params = Params(array_first=True)
        params.adjust({"param": 3})
        assert params.param == 3.0
        params.adjust({"when_param": -2, "param": 0})
        with pytest.raises(ValidationError) as excinfo:
            params.adjust({"when_param": -2, "param": -1})
        msg = json.loads(excinfo.value.args[0])
        assert msg["errors"]["param"] == [
            "When when_param is less than 0, param value is invalid: param -1 < min 0 "
        ]
    def test_when_validation_limitations(self):
        """
        When validation prohibits child validators from doing referential validation
        when the other parameter is an array type (number_dims > 0).
        """
        class Params(Parameters):
            defaults = {
                "param": {
                    "title": "",
                    "description": "",
                    "type": "int",
                    "number_dims": 1,
                    "value": [0, 0],
                    "validators": {
                        "when": {
                            "param": "when_param",
                            "is": {"less_than": 0},
                            "then": {"range": {"min": 0}},
                            "otherwise": {
                                # only valid for ndim = 0
                                "range": {"min": "when_param"}
                            },
                        }
                    },
                },
                "when_param": {
                    "title": "",
                    "description": "",
                    "type": "int",
                    "number_dims": 1,
                    "value": [3, 5],
                },
            }
        with pytest.raises(ParamToolsError) as excinfo:
            Params(array_first=True)
        cause = excinfo.value.__cause__
        msg = cause.args[0]
        assert (
            msg
            == "param is validated against when_param in an invalid context."
        )
        # Referencing "default" on an array param fails the same way.
        class Params(Parameters):
            defaults = {
                "param": {
                    "title": "",
                    "description": "",
                    "type": "int",
                    "number_dims": 1,
                    "value": [0, 0],
                    "validators": {
                        "when": {
                            "param": "default",
                            "is": {"less_than": 0},
                            "then": {"range": {"min": 0}},
                            "otherwise": {
                                # only valid for ndim = 0
                                "range": {"min": "default"}
                            },
                        }
                    },
                }
            }
        with pytest.raises(ParamToolsError) as excinfo:
            Params(array_first=True)
        cause = excinfo.value.__cause__
        msg = cause.args[0]
        assert (
            msg == "param is validated against default in an invalid context."
        )
    def test_when_validation_examples(self, TestParams):
        """Invalid adjustments against "when"-validated defaults raise."""
        params = TestParams()
        with pytest.raises(ValidationError):
            params.adjust({"when_param": 2})
        params = TestParams()
        with pytest.raises(ValidationError):
            params.adjust({"when_array_param": [0, 2, 0, 0]})
        params = TestParams()
        with pytest.raises(ValidationError):
            params.adjust({"when_array_param": [0, 1, 0]})
        params = TestParams()
        with pytest.raises(ValidationError) as excinfo:
            params.adjust({"float_list_when_param": [-1, 0, 0, 0]})
        msg = json.loads(excinfo.value.args[0])
        assert len(msg["errors"]["float_list_when_param"]) == 4
    def test_when_validation_referential(self):
        """
        Test referential validation with when validator.
        Check limitations to referential validation with when validator
        in test test_when_validation_limitations
        """
        class Params(Parameters):
            defaults = {
                "param": {
                    "title": "",
                    "description": "",
                    "type": "int",
                    "value": 3,
                    "validators": {
                        "when": {
                            "param": "when_param",
                            "is": {"less_than": 0},
                            "then": {"range": {"min": 0}},
                            "otherwise": {
                                # only valid for ndim = 0
                                "range": {"min": "when_param"}
                            },
                        }
                    },
                },
                "when_param": {
                    "title": "",
                    "description": "",
                    "type": "int",
                    "value": 3,
                },
            }
        params = Params(array_first=True)
        params.adjust({"param": 4})
        params.adjust({"param": 0, "when_param": -1})
        params = Params(array_first=True)
        with pytest.raises(ValidationError):
            params.adjust({"param": -1, "when_param": -2})
        params = Params(array_first=True)
        with pytest.raises(ValidationError):
            params.adjust({"param": params.when_param - 1})
    def test_deserialized(self, TestParams):
        """_adjust with deserialized=True skips marshmallow deserialization."""
        params = TestParams()
        params._adjust({"min_int_param": [{"value": 1}]}, deserialized=True)
        assert params.min_int_param == [
            {"label0": "zero", "label1": 1, "value": 1},
            {"label0": "one", "label1": 2, "value": 1},
        ]
        params._adjust(
            {"min_int_param": [{"value": -1}]},
            raise_errors=False,
            deserialized=True,
        )
        assert params.errors["min_int_param"] == ["min_int_param -1 < min 0 "]
class TestArray:
    """Tests for to_array/from_array conversion between value objects and arrays."""
    def test_to_array(self, TestParams):
        """to_array builds the dense array; from_array inverts it."""
        params = TestParams()
        res = params.to_array("int_dense_array_param")
        exp = [
            [
                [1, 2, 3],
                [4, 5, 6],
                [7, 8, 9],
                [10, 11, 12],
                [13, 14, 15],
                [16, 17, 18],
            ],
            [
                [19, 20, 21],
                [22, 23, 24],
                [25, 26, 27],
                [28, 29, 30],
                [31, 32, 33],
                [34, 35, 36],
            ],
        ]
        assert res.tolist() == exp
        exp = params.int_dense_array_param
        assert params.from_array("int_dense_array_param", res) == exp
        # Deleting one value object makes the data sparse, which to_array
        # rejects.  NOTE(review): "val" is reused as the comprehension
        # variable below; harmless in Python 3 since comprehensions scope
        # their loop variable, but confusing to read.
        val = params.sel["int_dense_array_param"].isel[0]
        labels = {lab: val for lab, val in val.items() if lab != "value"}
        params.delete({"int_dense_array_param": [dict(labels, value=None)]})
        with pytest.raises(SparseValueObjectsException):
            params.to_array("int_dense_array_param")
    def test_from_array(self, TestParams):
        """from_array without an array argument is a TypeError."""
        params = TestParams()
        with pytest.raises(TypeError):
            params.from_array("min_int_param")
    def test_resolve_order(self, TestParams):
        """_resolve_order infers label order and per-label value order."""
        exp_label_order = ["label0", "label2"]
        exp_value_order = {"label0": ["zero", "one"], "label2": [0, 1, 2]}
        vi = [
            {"label0": "zero", "label2": 0, "value": 1},
            {"label0": "zero", "label2": 1, "value": 1},
            {"label0": "zero", "label2": 2, "value": 1},
            {"label0": "one", "label2": 0, "value": 1},
            {"label0": "one", "label2": 1, "value": 1},
            {"label0": "one", "label2": 2, "value": 1},
        ]
        params = TestParams()
        # Install a synthetic parameter directly for this test.
        params.madeup = vi
        params._data["madeup"] = {"value": vi, "type": "int"}
        value_items = params.select_eq("madeup", False, **params._state)
        assert params._resolve_order(
            "madeup", value_items, params.label_grid
        ) == (exp_label_order, exp_value_order)
        # test with specified state.
        exp_value_order = {"label0": ["zero", "one"], "label2": [0, 1]}
        params.set_state(label2=[0, 1])
        value_items = params.select_eq("madeup", False, **params._state)
        assert params._resolve_order(
            "madeup", value_items, params.label_grid
        ) == (exp_label_order, exp_value_order)
        # Value objects with inconsistent label sets are rejected.
        params.madeup[0]["label1"] = 0
        value_items = params.select_eq("madeup", False, **params._state)
        with pytest.raises(InconsistentLabelsException):
            params._resolve_order("madeup", value_items, params.label_grid)
    def test_to_array_with_state1(self, TestParams):
        """State filters (or explicit label kwargs) shrink the array."""
        params = TestParams()
        params.set_state(label0="zero")
        res = params.to_array("int_dense_array_param")
        exp = [
            [
                [1, 2, 3],
                [4, 5, 6],
                [7, 8, 9],
                [10, 11, 12],
                [13, 14, 15],
                [16, 17, 18],
            ]
        ]
        assert res.tolist() == exp
        assert (
            params.from_array("int_dense_array_param", res)
            == params.int_dense_array_param
        )
        # Passing the label directly is equivalent to setting state.
        params = TestParams()
        res = params.to_array("int_dense_array_param", label0="zero")
        assert res.tolist() == exp
        act = copy.deepcopy(
            params.from_array("int_dense_array_param", res, label0="zero")
        )
        params.set_state(label0="zero")
        assert act == params.int_dense_array_param
    def test_to_array_with_state2(self, TestParams):
        """Non-contiguous label subsets drop the corresponding array rows."""
        params = TestParams()
        # Drop values 3 and 4 from label1
        params.set_state(label1=[0, 1, 2, 5])
        res = params.to_array("int_dense_array_param")
        # Values 3 and 4 were removed from label1.
        exp = [
            [
                [1, 2, 3],
                [4, 5, 6],
                [7, 8, 9],
                # [10, 11, 12],
                # [13, 14, 15],
                [16, 17, 18],
            ],
            [
                [19, 20, 21],
                [22, 23, 24],
                [25, 26, 27],
                # [28, 29, 30],
                # [31, 32, 33],
                [34, 35, 36],
            ],
        ]
        assert res.tolist() == exp
        assert (
            params.from_array("int_dense_array_param", res)
            == params.int_dense_array_param
        )
        params = TestParams()
        res = params.to_array("int_dense_array_param", label1=[0, 1, 2, 5])
        assert res.tolist() == exp
        act = copy.deepcopy(
            params.from_array(
                "int_dense_array_param", res, label1=[0, 1, 2, 5]
            )
        )
        params.set_state(label1=[0, 1, 2, 5])
        assert act == params.int_dense_array_param
class TestState:
    def test_basic_set_state(self, TestParams):
        """set_state merges new label filters into the existing state."""
        params = TestParams()
        assert params.view_state() == {}
        params.set_state(label0="zero")
        assert params.view_state() == {"label0": ["zero"]}
        params.set_state(label1=0)
        assert params.view_state() == {"label0": ["zero"], "label1": [0]}
        params.set_state(label0="one", label2=1)
        assert params.view_state() == {
            "label0": ["one"],
            "label1": [0],
            "label2": [1],
        }
        # Calling with no arguments leaves the state unchanged.
        params.set_state(**{})
        assert params.view_state() == {
            "label0": ["one"],
            "label1": [0],
            "label2": [1],
        }
        params.set_state()
        assert params.view_state() == {
            "label0": ["one"],
            "label1": [0],
            "label2": [1],
        }
        # A list replaces the previous filter for that label.
        params.set_state(label1=[1, 2, 3])
        assert params.view_state() == {
            "label0": ["one"],
            "label1": [1, 2, 3],
            "label2": [1],
        }
def test_label_grid(self, TestParams):
params = TestParams()
exp = {
"label0": ["zero", "one"],
"label1": [0, 1, 2, 3, 4, 5],
"label2": [0, 1, 2],
}
assert params.label_grid == exp
params.set_state(label0="one")
exp = {
"label0": ["one"],
"label1": [0, 1, 2, 3, 4, 5],
"label2": [0, 1, 2],
}
assert params.label_grid == exp
params.set_state(label0="one", label2=1)
exp = {"label0": ["one"], "label1": [0, 1, 2, 3, 4, 5], "label2": [1]}
assert params.label_grid == exp
params.set_state(label1=[0, 1, 2, 5])
exp = {"label0": ["one"], "label1": [0, 1, 2, 5], "label2": [1]}
assert params.label_grid == {
"label0": ["one"],
"label1": [0, 1, 2, 5],
"label2": [1],
}
def test_set_state_updates_values(self, TestParams):
params = TestParams()
defaultexp = [
{"label0": "zero", "label1": 1, "value": 1},
{"label0": "one", "label1": 2, "value": 2},
]
assert params.min_int_param == defaultexp
params.set_state(label0="zero")
assert params.min_int_param == [
{"label0": "zero", "label1": 1, "value": 1}
]
# makes sure parameter that doesn't use label0 is unaffected
assert params.str_choice_param == [{"value": "value0"}]
params.clear_state()
assert params.view_state() == {}
assert params.min_int_param == defaultexp
assert params.label_grid == params._stateless_label_grid
def test_set_state_errors(self, TestParams):
params = TestParams()
with pytest.raises(ValidationError):
params.set_state(label0="notalabel")
params = TestParams()
with pytest.raises(ValidationError):
params.set_state(notalabel="notalabel")
def test_state_with_list(self, TestParams):
params = TestParams()
params.set_state(label0="zero", label1=[0, 1])
exp = [
{"label0": "zero", "label1": 0, "label2": 0, "value": 1},
{"label0": "zero", "label1": 0, "label2": 1, "value": 2},
{"label0": "zero", "label1": 0, "label2": 2, "value": 3},
{"label0": "zero", "label1": 1, "label2": 0, "value": 4},
{"label0": "zero", "label1": 1, "label2": 1, "value": 5},
{"label0": "zero", "label1": 1, "label2": 2, "value": 6},
]
assert params.int_dense_array_param == exp
class TestArrayFirst:
def test_basic(self, af_params):
assert af_params
assert af_params.min_int_param.tolist() == [[1]]
assert af_params.date_max_param.tolist() == [
[datetime.date(2018, 1, 15)]
]
assert af_params.int_dense_array_param.tolist() == [[[4, 5, 6]]]
assert af_params.str_choice_param == "value0"
def test_from_array(self, af_params):
exp = [
{"label0": "zero", "label1": 1, "label2": 0, "value": 4},
{"label0": "zero", "label1": 1, "label2": 1, "value": 5},
{"label0": "zero", "label1": 1, "label2": 2, "value": 6},
]
assert af_params.from_array("int_dense_array_param") == exp
assert (
af_params.from_array(
"int_dense_array_param", af_params.int_dense_array_param
)
== exp
)
def test_to_array_with_nd_lists(self):
class ArrayAdjust(Parameters):
defaults = {
"schema": {
"labels": {
"label1": {
"type": "int",
"validators": {"range": {"min": 0, "max": 5}},
}
}
},
"arr": {
"title": "Array param",
"description": "",
"type": "float",
"number_dims": 1,
"value": [1, 2, 3, 4],
},
"arr_2D": {
"title": "2D Array Param",
"description": "",
"type": "int",
"number_dims": 2,
"value": [[1, 2, 3], [4, 5, 6]],
},
}
array_first = True
params = ArrayAdjust()
assert params
assert isinstance(params.arr, np.ndarray)
assert params.arr.tolist() == [1, 2, 3, 4]
assert isinstance(params.arr_2D, np.ndarray)
assert params.arr_2D.tolist() == [[1, 2, 3], [4, 5, 6]]
params.adjust({"arr": [4, 6, 8], "arr_2D": [[7, 8, 9], [1, 5, 7]]})
assert isinstance(params.arr, np.ndarray)
assert isinstance(params.arr_2D, np.ndarray)
np.testing.assert_allclose(params.arr, [4, 6, 8])
np.testing.assert_allclose(params.arr_2D, [[7, 8, 9], [1, 5, 7]])
with pytest.raises(ParamToolsError):
params.adjust({"arr": [{"label1": 1, "value": [4, 5, 6]}]})
def test_array_first_with_zero_dim(self):
class ZeroDim(Parameters):
defaults = {
"myint": {
"title": "my int",
"description": "",
"type": "int",
"value": 2,
},
"mystring": {
"title": "my string",
"description": "",
"type": "str",
"value": "hello world",
},
}
array_first = True
params = ZeroDim()
assert params.myint == 2.0
assert isinstance(params.myint, np.int64)
assert params.mystring == "hello world"
assert isinstance(params.mystring, str)
class TestCollisions:
def test_collision_list(self):
class CollisionParams(Parameters):
defaults = {"schema": {"labels": {}, "additional_members": {}}}
params = CollisionParams()
# check to make sure that the collisionlist does not need to be updated.
# Note: dir(obj) lists out all class or instance attributes and methods.
assert set(collision_list) == {
name for name in dir(params) if not name.startswith("__")
}
def test_collision(self):
defaults_dict = {
"schema": {"labels": {}, "additional_members": {}},
"errors": {
"title": "Collides with 'errors'",
"description": "",
"notes": "",
"type": "int",
"value": [{"value": 0}],
"validators": {"range": {"min": 0, "max": 10}},
},
}
class CollisionParams(Parameters):
defaults = defaults_dict
with pytest.raises(ParameterNameCollisionException) as excinfo:
CollisionParams()
exp_msg = (
"The paramter name, 'errors', is already used by the "
"Parameters object."
)
assert excinfo.value.args[0] == exp_msg
class TestExtend:
def test_extend_num(self, array_first_defaults):
array_first_defaults = {
"schema": array_first_defaults["schema"],
"int_dense_array_param": array_first_defaults[
"int_dense_array_param"
],
}
new_vos = []
for vo in array_first_defaults["int_dense_array_param"]["value"]:
if vo["label1"] not in (2, 4, 5):
new_vos.append(vo)
array_first_defaults["int_dense_array_param"]["value"] = new_vos
# Where label 1 is 2, 4, and 5, the value is set to the last
# known value, given the value object's label values.
exp = [
{"label0": "zero", "label1": 0, "label2": 0, "value": 1},
{"label0": "zero", "label1": 0, "label2": 1, "value": 2},
{"label0": "zero", "label1": 0, "label2": 2, "value": 3},
{"label0": "zero", "label1": 1, "label2": 0, "value": 4},
{"label0": "zero", "label1": 1, "label2": 1, "value": 5},
{"label0": "zero", "label1": 1, "label2": 2, "value": 6},
{"label0": "zero", "label1": 2, "label2": 0, "value": 4},
{"label0": "zero", "label1": 2, "label2": 1, "value": 5},
{"label0": "zero", "label1": 2, "label2": 2, "value": 6},
{"label0": "zero", "label1": 3, "label2": 0, "value": 10},
{"label0": "zero", "label1": 3, "label2": 1, "value": 11},
{"label0": "zero", "label1": 3, "label2": 2, "value": 12},
{"label0": "zero", "label1": 4, "label2": 0, "value": 10},
{"label0": "zero", "label1": 4, "label2": 1, "value": 11},
{"label0": "zero", "label1": 4, "label2": 2, "value": 12},
{"label0": "zero", "label1": 5, "label2": 0, "value": 10},
{"label0": "zero", "label1": 5, "label2": 1, "value": 11},
{"label0": "zero", "label1": 5, "label2": 2, "value": 12},
{"label0": "one", "label1": 0, "label2": 0, "value": 19},
{"label0": "one", "label1": 0, "label2": 1, "value": 20},
{"label0": "one", "label1": 0, "label2": 2, "value": 21},
{"label0": "one", "label1": 1, "label2": 0, "value": 22},
{"label0": "one", "label1": 1, "label2": 1, "value": 23},
{"label0": "one", "label1": 1, "label2": 2, "value": 24},
{"label0": "one", "label1": 2, "label2": 0, "value": 22},
{"label0": "one", "label1": 2, "label2": 1, "value": 23},
{"label0": "one", "label1": 2, "label2": 2, "value": 24},
{"label0": "one", "label1": 3, "label2": 0, "value": 28},
{"label0": "one", "label1": 3, "label2": 1, "value": 29},
{"label0": "one", "label1": 3, "label2": 2, "value": 30},
{"label0": "one", "label1": 4, "label2": 0, "value": 28},
{"label0": "one", "label1": 4, "label2": 1, "value": 29},
{"label0": "one", "label1": 4, "label2": 2, "value": 30},
{"label0": "one", "label1": 5, "label2": 0, "value": 28},
{"label0": "one", "label1": 5, "label2": 1, "value": 29},
{"label0": "one", "label1": 5, "label2": 2, "value": 30},
]
class AFParams(Parameters):
defaults = array_first_defaults
label_to_extend = "label1"
array_first = True
params = AFParams()
assert isinstance(params.int_dense_array_param, np.ndarray)
assert params.from_array("int_dense_array_param") == exp
for val in params._data["int_dense_array_param"]["value"]:
if val["label1"] in (2, 4, 5):
assert val["_auto"] is True
else:
assert "_auto" not in val
assert params.dump()["int_dense_array_param"]["value"] == exp
class AFParams(Parameters):
defaults = array_first_defaults
label_to_extend = "label1"
array_first = False
params = AFParams()
assert isinstance(params.int_dense_array_param, list)
def test_extend_categorical(self, array_first_defaults):
array_first_defaults = {
"schema": array_first_defaults["schema"],
"int_dense_array_param": array_first_defaults[
"int_dense_array_param"
],
}
new_vos = []
for vo in array_first_defaults["int_dense_array_param"]["value"]:
if vo["label0"] == "one":
vo.update({"value": vo["value"] - 18})
new_vos.append(vo)
array_first_defaults["int_dense_array_param"]["value"] = new_vos
class AFParams(Parameters):
defaults = array_first_defaults
label_to_extend = "label0"
array_first = True
params = AFParams()
assert params.int_dense_array_param.tolist()
exp = [
{"label0": "one", "label1": 0, "label2": 0, "value": 1},
{"label0": "one", "label1": 0, "label2": 1, "value": 2},
{"label0": "one", "label1": 0, "label2": 2, "value": 3},
{"label0": "one", "label1": 1, "label2": 0, "value": 4},
{"label0": "one", "label1": 1, "label2": 1, "value": 5},
{"label0": "one", "label1": 1, "label2": 2, "value": 6},
{"label0": "one", "label1": 2, "label2": 0, "value": 7},
{"label0": "one", "label1": 2, "label2": 1, "value": 8},
{"label0": "one", "label1": 2, "label2": 2, "value": 9},
{"label0": "one", "label1": 3, "label2": 0, "value": 10},
{"label0": "one", "label1": 3, "label2": 1, "value": 11},
{"label0": "one", "label1": 3, "label2": 2, "value": 12},
{"label0": "one", "label1": 4, "label2": 0, "value": 13},
{"label0": "one", "label1": 4, "label2": 1, "value": 14},
{"label0": "one", "label1": 4, "label2": 2, "value": 15},
{"label0": "one", "label1": 5, "label2": 0, "value": 16},
{"label0": "one", "label1": 5, "label2": 1, "value": 17},
{"label0": "one", "label1": 5, "label2": 2, "value": 18},
]
assert params.from_array("int_dense_array_param", label0="one") == exp
for val in params._data["int_dense_array_param"]["value"]:
if val["label0"] == "zero":
assert val["_auto"] is True
else:
assert "_auto" not in val
def test_extend_w_array(self, extend_ex_path):
class ExtParams(Parameters):
defaults = extend_ex_path
label_to_extend = "d0"
array_first = True
params = ExtParams()
assert params.extend_param.tolist() == [
[1, 2],
[1, 2],
[1, 2],
[3, 4],
[3, 4],
[5, 6],
[5, 6],
[7, 8],
[7, 8],
[7, 8],
[7, 8],
]
def test_extend_adj(self, extend_ex_path):
class ExtParams(Parameters):
defaults = extend_ex_path
label_to_extend = "d0"
array_first = True
params = ExtParams()
params.adjust({"extend_param": [{"d0": 3, "value": -1}]})
assert params.extend_param.tolist() == [
[1, 2],
[1, 2],
[1, 2],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
]
for val in params._data["extend_param"]["value"]:
# 0, 1 extended at the beginning.
if val["d0"] > 3 or val["d0"] in (0, 1):
assert val["_auto"] is True
else:
assert "_auto" not in val
params = ExtParams()
params.adjust(
{
"extend_param": [
{"d0": 3, "d1": "c1", "value": -1},
{"d0": 3, "d1": "c2", "value": 1},
]
}
)
assert params.extend_param.tolist() == [
[1, 2],
[1, 2],
[1, 2],
[-1, 1],
[-1, 1],
[-1, 1],
[-1, 1],
[-1, 1],
[-1, 1],
[-1, 1],
[-1, 1],
]
params = ExtParams()
params.adjust(
{
"extend_param": [
{"d0": 3, "value": -1},
{"d0": 5, "d1": "c1", "value": 0},
{"d0": 5, "d1": "c2", "value": 1},
{"d0": 8, "d1": "c1", "value": 22},
{"d0": 8, "d1": "c2", "value": 23},
]
}
)
assert params.extend_param.tolist() == [
[1, 2],
[1, 2],
[1, 2],
[-1, -1],
[-1, -1],
[0, 1],
[0, 1],
[0, 1],
[22, 23],
[22, 23],
[22, 23],
]
params = ExtParams()
params.adjust(
{
"extend_param": [
{"d0": 3, "value": -1},
{"d0": 5, "d1": "c1", "value": 0},
{"d0": 6, "d1": "c2", "value": 1},
]
}
)
assert params.extend_param.tolist() == [
[1, 2],
[1, 2],
[1, 2],
[-1, -1],
[-1, -1],
[0, -1],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
]
params = ExtParams()
params.adjust({"extend_param": [{"d0": 0, "value": 1}]})
assert params.extend_param.tolist() == [[1, 1]] * 11
def test_extend_adj_without_clobber(self, extend_ex_path):
class ExtParams(Parameters):
defaults = extend_ex_path
label_to_extend = "d0"
array_first = True
params = ExtParams()
params.adjust(
{"extend_param": [{"d0": 3, "value": -1}]}, clobber=False
)
assert params.extend_param.tolist() == [
[1, 2],
[1, 2],
[1, 2],
[-1, -1],
[-1, -1],
[5, 6],
[5, 6],
[7, 8],
[7, 8],
[7, 8],
[7, 8],
]
params = ExtParams()
params.adjust(
{
"extend_param": [
{"d0": 3, "d1": "c1", "value": -1},
{"d0": 3, "d1": "c2", "value": 1},
]
},
clobber=False,
)
assert params.extend_param.tolist() == [
[1, 2],
[1, 2],
[1, 2],
[-1, 1],
[-1, 1],
[5, 6],
[5, 6],
[7, 8],
[7, 8],
[7, 8],
[7, 8],
]
params = ExtParams()
params.adjust(
{
"extend_param": [
{"d0": 3, "value": -1},
{"d0": 5, "d1": "c1", "value": 0},
{"d0": 5, "d1": "c2", "value": 1},
{"d0": 8, "d1": "c1", "value": 22},
{"d0": 8, "d1": "c2", "value": 23},
]
},
clobber=False,
)
assert params.extend_param.tolist() == [
[1, 2],
[1, 2],
[1, 2],
[-1, -1],
[-1, -1],
[0, 1],
[0, 1],
[7, 8],
[22, 23],
[22, 23],
[22, 23],
]
params = ExtParams()
params.adjust(
{
"extend_param": [
{"d0": 3, "value": -1},
{"d0": 5, "d1": "c1", "value": 0},
{"d0": 6, "d1": "c2", "value": 1},
]
},
clobber=False,
)
assert params.extend_param.tolist() == [
[1, 2],
[1, 2],
[1, 2],
[-1, -1],
[-1, -1],
[0, 6],
[0, 1],
[7, 8],
[7, 8],
[7, 8],
[7, 8],
]
params = ExtParams()
params.adjust({"extend_param": [{"d0": 0, "value": 1}]}, clobber=False)
assert params.extend_param.tolist() == [
[1, 1],
[1, 1],
[1, 2],
[3, 4],
[3, 4],
[5, 6],
[5, 6],
[7, 8],
[7, 8],
[7, 8],
[7, 8],
]
def test_extend_adj_w_errors(self, extend_ex_path):
class ExtParams(Parameters):
defaults = extend_ex_path
label_to_extend = "d0"
array_first = True
params = ExtParams()
with pytest.raises(ValidationError):
params.adjust({"extend_param": 102})
params = ExtParams()
with pytest.raises(ValidationError) as excinfo:
params.adjust({"extend_param": [{"value": 70, "d0": 5}]})
emsg = json.loads(excinfo.value.args[0])
# do=7 is when the 'releated_value' is set to 50, which is
# less than 70 ==> causes range error
assert "d0=7" in emsg["errors"]["extend_param"][0]
params = ExtParams()
before = copy.deepcopy(params.extend_param)
params.adjust(
{"extend_param": [{"value": 70, "d0": 5}]}, raise_errors=False
)
assert params.errors["extend_param"] == emsg["errors"]["extend_param"]
assert np.allclose(params.extend_param, before)
def test_extend_adj_nonextend_param(self, extend_ex_path):
class ExtParams(Parameters):
defaults = extend_ex_path
label_to_extend = "d0"
array_first = True
params = ExtParams()
params.adjust({"nonextend_param": 3})
assert params.nonextend_param == 3
def test_extend_adj_w_set_state(self, extend_ex_path):
class ExtParams(Parameters):
defaults = extend_ex_path
label_to_extend = "d0"
array_first = True
params = ExtParams()
params.set_state(d0=list(range(2, 11)))
params.adjust({"extend_param": [{"d0": 2, "value": -1}]})
assert params.extend_param.tolist() == [
# [1, 2],
# [1, 2],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
]
params = ExtParams()
params.set_state(d0=list(range(2, 11)))
params.adjust({"extend_param": [{"d0": 3, "value": -1}]})
assert params.extend_param.tolist() == [
# [1, 2],
# [1, 2],
[1, 2],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
]
params = ExtParams()
params.set_state(d0=list(range(2, 11)))
params.adjust({"extend_param": [{"d0": 1, "value": -1}]})
assert params.extend_param.tolist() == []
params.array_first = False
params.clear_state()
params.extend()
params.array_first = True
params.set_state()
assert params.extend_param.tolist() == [
[1, 2],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
]
def test_extend_method(self, extend_ex_path):
class ExtParams(Parameters):
defaults = extend_ex_path
# label_to_extend = "d0"
array_first = False
params = ExtParams()
params.adjust({"extend_param": [{"value": None}]})
params.adjust(
{
"extend_param": [
{"d0": 2, "d1": "c1", "value": 1},
{"d0": 2, "d1": "c2", "value": 2},
]
}
)
params.extend(
label="d0", label_values=[2, 4, 7], params="extend_param"
)
params.sort_values()
assert params.extend_param == [
{"d0": 2, "d1": "c1", "value": 1},
{"d0": 2, "d1": "c2", "value": 2},
{"d0": 4, "d1": "c1", "value": 1, "_auto": True},
{"d0": 4, "d1": "c2", "value": 2, "_auto": True},
{"d0": 7, "d1": "c1", "value": 1, "_auto": True},
{"d0": 7, "d1": "c2", "value": 2, "_auto": True},
]
params = ExtParams()
init = params.select_eq("extend_param")
params.extend(label="d0", label_values=[])
assert init == params.select_eq("extend_param")
params = ExtParams()
params.extend(label="d0", label_values=[8, 9, 10])
params.sort_values()
assert params.extend_param == [
{"d0": 2, "d1": "c1", "value": 1},
{"d0": 2, "d1": "c2", "value": 2},
{"d0": 3, "d1": "c1", "value": 3},
{"d0": 3, "d1": "c2", "value": 4},
{"d0": 5, "d1": "c1", "value": 5},
{"d0": 5, "d1": "c2", "value": 6},
{"d0": 7, "d1": "c1", "value": 7},
{"d0": 7, "d1": "c2", "value": 8},
{"d0": 8, "d1": "c1", "value": 7, "_auto": True},
{"d0": 8, "d1": "c2", "value": 8, "_auto": True},
{"d0": 9, "d1": "c1", "value": 7, "_auto": True},
{"d0": 9, "d1": "c2", "value": 8, "_auto": True},
{"d0": 10, "d1": "c1", "value": 7, "_auto": True},
{"d0": 10, "d1": "c2", "value": 8, "_auto": True},
]
params = ExtParams()
params.extend(label="d0", label_values=[0, 8, 9, 10])
params.sort_values()
assert params.extend_param == [
{"d0": 0, "d1": "c1", "value": 1, "_auto": True},
{"d0": 0, "d1": "c2", "value": 2, "_auto": True},
{"d0": 2, "d1": "c1", "value": 1},
{"d0": 2, "d1": "c2", "value": 2},
{"d0": 3, "d1": "c1", "value": 3},
{"d0": 3, "d1": "c2", "value": 4},
{"d0": 5, "d1": "c1", "value": 5},
{"d0": 5, "d1": "c2", "value": 6},
{"d0": 7, "d1": "c1", "value": 7},
{"d0": 7, "d1": "c2", "value": 8},
{"d0": 8, "d1": "c1", "value": 7, "_auto": True},
{"d0": 8, "d1": "c2", "value": 8, "_auto": True},
{"d0": 9, "d1": "c1", "value": 7, "_auto": True},
{"d0": 9, "d1": "c2", "value": 8, "_auto": True},
{"d0": 10, "d1": "c1", "value": 7, "_auto": True},
{"d0": 10, "d1": "c2", "value": 8, "_auto": True},
]
def grow(n, r, t):
mult = 1 if t >= 0 else -1
for _ in range(0, abs(t)):
n = round(n * (1 + r) ** mult, 2)
return n
class TestIndex:
def test_index_simple(self, extend_ex_path):
class IndexParams1(Parameters):
defaults = extend_ex_path
label_to_extend = "d0"
array_first = True
uses_extend_func = True
index_rates = {lte: 0 for lte in range(10)}
params = IndexParams1()
params.adjust({"indexed_param": [{"d0": 3, "value": 3}]})
assert params.indexed_param.tolist() == [
[1, 2],
[1, 2],
[1, 2],
[3, 3],
[3, 3],
[3, 3],
[3, 3],
[3, 3],
[3, 3],
[3, 3],
[3, 3],
]
for val in params._data["indexed_param"]["value"]:
# 0, 1 extended at the beginning.
if val["d0"] > 3 or val["d0"] in (0, 1):
assert val["_auto"] is True
else:
assert "_auto" not in val
class IndexParams2(Parameters):
defaults = extend_ex_path
label_to_extend = "d0"
array_first = True
uses_extend_func = True
index_rates = {lte: 0.02 for lte in range(10)}
params = IndexParams2()
params.adjust({"indexed_param": [{"d0": 3, "value": 3}]})
exp = [
[grow(1, 0.02, -2), grow(2, 0.02, -2)],
[grow(1, 0.02, -1), grow(2, 0.02, -1)],
[grow(1, 0.02, 0), grow(2, 0.02, 0)],
[grow(3, 0.02, 0)] * 2,
[grow(3, 0.02, 1)] * 2,
[grow(3, 0.02, 2)] * 2,
[grow(3, 0.02, 3)] * 2,
[grow(3, 0.02, 4)] * 2,
[grow(3, 0.02, 5)] * 2,
[grow(3, 0.02, 6)] * 2,
[grow(3, 0.02, 7)] * 2,
]
np.testing.assert_allclose(params.indexed_param.tolist(), exp)
def test_related_param_errors(self, extend_ex_path):
class IndexParams2(Parameters):
defaults = extend_ex_path
label_to_extend = "d0"
array_first = True
uses_extend_func = True
index_rates = {lte: 0.02 for lte in range(10)}
params = IndexParams2()
with pytest.raises(ValidationError):
params.adjust(
{
"related_param": [{"value": 8.1, "d0": 4}],
"indexed_param": [{"d0": 3, "value": 8}],
}
)
|
en
| 0.828933
|
# Tests data is serialized. # check that specification method got only the value item with label0="one" # check that get method got only value item with label0="one" # check that specification method gets other data, not containing a label0 # label. # Make sure "value" is removed when meta_data is False Ensure sort runs and is stable # Test attribute is updated, too. Ensure sort is correct # test passing in a data object Test sort_values keyword in dump() # Test that sort works when state is activated Test sort values with array first config # Test that param attributes are not updated when # array first is True Adjust min_int_param above original max_int_param value at same time as max_int_param value is adjusted up. This tests that the new param is compared against the adjusted reference param if the reference param is specified. Use transaction manager to defer schema level validation until all adjustments are complete. When validator returns None when validate_schema is False for performance reasons. Adjust min_int_param above original max_int_param value at same time as max_int_param value is adjusted up. This tests that the new param is compared against the adjusted reference param if the reference param is specified. # min_int_param is adjusted in the _data attribute but the instance # attribute min_int_param is not. # value under the default. # only valid for ndim = 0 When validation prohibits child validators from doing referential validation when the other parameter is an array type (number_dims > 0). # only valid for ndim = 0 # only valid for ndim = 0 Test referential validation with when validator. Check limitations to referential validation with when validator in test test_when_validation_limitations # only valid for ndim = 0 # test with specified state. # Drop values 3 and 4 from label1 # Values 3 and 4 were removed from label1. 
# [10, 11, 12], # [13, 14, 15], # [28, 29, 30], # [31, 32, 33], # makes sure parameter that doesn't use label0 is unaffected # check to make sure that the collisionlist does not need to be updated. # Note: dir(obj) lists out all class or instance attributes and methods. # Where label 1 is 2, 4, and 5, the value is set to the last # known value, given the value object's label values. # 0, 1 extended at the beginning. # do=7 is when the 'releated_value' is set to 50, which is # less than 70 ==> causes range error # [1, 2], # [1, 2], # [1, 2], # [1, 2], # label_to_extend = "d0" # 0, 1 extended at the beginning.
| 2.06913
| 2
|
mistralclient/api/v2/executions.py
|
mail2nsrajesh/python-mistralclient
| 0
|
6627732
|
<gh_stars>0
# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from oslo_utils import uuidutils
import six
from mistralclient.api import base
urlparse = six.moves.urllib.parse
class Execution(base.Resource):
resource_name = 'Execution'
class ExecutionManager(base.ResourceManager):
resource_class = Execution
def create(self, workflow_identifier, workflow_input=None, description='',
**params):
self._ensure_not_empty(workflow_identifier=workflow_identifier)
data = {
'description': description
}
if uuidutils.is_uuid_like(workflow_identifier):
data.update({'workflow_id': workflow_identifier})
else:
data.update({'workflow_name': workflow_identifier})
if workflow_input:
if isinstance(workflow_input, six.string_types):
data.update({'input': workflow_input})
else:
data.update({'input': json.dumps(workflow_input)})
if params:
data.update({'params': json.dumps(params)})
return self._create('/executions', data)
def update(self, id, state, description=None, env=None):
data = {}
if state:
data['state'] = state
if description:
data['description'] = description
if env:
data['params'] = {'env': env}
return self._update('/executions/%s' % id, data)
def list(self, task=None, marker='', limit=None, sort_keys='',
sort_dirs='', **filters):
qparams = {}
if task:
qparams['task_execution_id'] = task
if marker:
qparams['marker'] = marker
if limit:
qparams['limit'] = limit
if sort_keys:
qparams['sort_keys'] = sort_keys
if sort_dirs:
qparams['sort_dirs'] = sort_dirs
for name, val in filters.items():
qparams[name] = val
query_string = ("?%s" % urlparse.urlencode(list(qparams.items()))
if qparams else "")
return self._list(
'/executions%s' % query_string,
response_key='executions',
)
def get(self, id):
self._ensure_not_empty(id=id)
return self._get('/executions/%s' % id)
def delete(self, id):
self._ensure_not_empty(id=id)
self._delete('/executions/%s' % id)
|
# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from oslo_utils import uuidutils
import six
from mistralclient.api import base
urlparse = six.moves.urllib.parse
class Execution(base.Resource):
resource_name = 'Execution'
class ExecutionManager(base.ResourceManager):
resource_class = Execution
def create(self, workflow_identifier, workflow_input=None, description='',
**params):
self._ensure_not_empty(workflow_identifier=workflow_identifier)
data = {
'description': description
}
if uuidutils.is_uuid_like(workflow_identifier):
data.update({'workflow_id': workflow_identifier})
else:
data.update({'workflow_name': workflow_identifier})
if workflow_input:
if isinstance(workflow_input, six.string_types):
data.update({'input': workflow_input})
else:
data.update({'input': json.dumps(workflow_input)})
if params:
data.update({'params': json.dumps(params)})
return self._create('/executions', data)
def update(self, id, state, description=None, env=None):
data = {}
if state:
data['state'] = state
if description:
data['description'] = description
if env:
data['params'] = {'env': env}
return self._update('/executions/%s' % id, data)
def list(self, task=None, marker='', limit=None, sort_keys='',
sort_dirs='', **filters):
qparams = {}
if task:
qparams['task_execution_id'] = task
if marker:
qparams['marker'] = marker
if limit:
qparams['limit'] = limit
if sort_keys:
qparams['sort_keys'] = sort_keys
if sort_dirs:
qparams['sort_dirs'] = sort_dirs
for name, val in filters.items():
qparams[name] = val
query_string = ("?%s" % urlparse.urlencode(list(qparams.items()))
if qparams else "")
return self._list(
'/executions%s' % query_string,
response_key='executions',
)
def get(self, id):
self._ensure_not_empty(id=id)
return self._get('/executions/%s' % id)
def delete(self, id):
self._ensure_not_empty(id=id)
self._delete('/executions/%s' % id)
|
en
| 0.836925
|
# Copyright 2014 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
| 1.858267
| 2
|
mixin.py
|
yupeng0921/pymixin
| 6
|
6627733
|
<reponame>yupeng0921/pymixin
#!/usr/bin/env python
import sys
import types
__all__ = ['mixin', 'Mixin', 'InstantiationMixinError', 'InvalidMixinError', 'InheritMixinError']
# class_types and add_metaclass were copied from six
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
class_types = type,
else:
class_types = (type, types.ClassType)
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
class InstantiationMixinError(Exception):
pass
class InvalidMixinError(Exception):
pass
class InheritMixinError(Exception):
pass
def mixin_new(cls, *args, **kwargs):
raise InstantiationMixinError(cls)
MIXIN_CLASS = None
class MixinMeta(type):
def __new__(cls, clsname, bases, dct):
valid_mixin = False
if MIXIN_CLASS == None and clsname == 'Mixin' and bases == (object,):
valid_mixin = True
elif bases == (MIXIN_CLASS,):
valid_mixin = True
elif '__mixin__' in dct:
dct.pop('__mixin__')
valid_mixin = True
if not valid_mixin:
raise InheritMixinError(clsname)
dct['__new__'] = mixin_new
return super(MixinMeta, cls).__new__(cls, clsname, bases, dct)
@add_metaclass(MixinMeta)
class Mixin(object): pass
MIXIN_CLASS = Mixin
def copy_cls_vars(cls):
cls_vars = cls.__dict__.copy()
slots = cls_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
cls_vars.pop(slots_var)
cls_vars.pop('__dict__', None)
cls_vars.pop('__weakref__', None)
return cls_vars
def copy_mixin(cls):
cls_vars = copy_cls_vars(cls)
cls_vars.pop('__new__')
cls_bases = list(cls.__bases__)
if Mixin in cls_bases:
cls_bases.remove(Mixin)
cls_bases.append(object)
return type(cls.__name__, tuple(cls_bases), cls_vars)
def mixin(*clses):
copied_clses = []
for cls in clses:
if type(cls) != MixinMeta:
raise InvalidMixinError(cls)
copied_cls = copy_mixin(cls)
copied_clses.append(copied_cls)
def generate_mixin(orig_cls):
orig_vars = copy_cls_vars(orig_cls)
orig_bases = list(orig_cls.__bases__)
orig_type = type(orig_cls)
if orig_type == MixinMeta:
orig_vars['__mixin__'] = True
if Mixin in orig_bases:
orig_bases.remove(Mixin)
return orig_type(orig_cls.__name__,
tuple(copied_clses) + tuple(orig_bases),
orig_vars)
return generate_mixin
|
#!/usr/bin/env python
import sys
import types
__all__ = ['mixin', 'Mixin', 'InstantiationMixinError', 'InvalidMixinError', 'InheritMixinError']
# class_types and add_metaclass were copied from six
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
class_types = type,
else:
class_types = (type, types.ClassType)
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
class InstantiationMixinError(Exception):
pass
class InvalidMixinError(Exception):
pass
class InheritMixinError(Exception):
pass
def mixin_new(cls, *args, **kwargs):
raise InstantiationMixinError(cls)
MIXIN_CLASS = None
class MixinMeta(type):
    """Metaclass for mixin classes.

    Permits class creation in exactly three cases: the canonical ``Mixin``
    base itself (bootstrap), a direct subclass of ``Mixin``, or a class
    explicitly flagged with ``__mixin__`` in its namespace (set by
    ``mixin()``). Every class it creates gets a ``__new__`` that forbids
    direct instantiation.

    Raises:
        InheritMixinError: for any other inheritance pattern.
    """
    def __new__(cls, clsname, bases, dct):
        valid_mixin = False
        # Identity test against the sentinel (PEP 8: compare to None
        # with `is`, not `==`).
        if MIXIN_CLASS is None and clsname == 'Mixin' and bases == (object,):
            # Bootstrapping: the canonical Mixin base is being created.
            valid_mixin = True
        elif bases == (MIXIN_CLASS,):
            # A user-defined mixin deriving directly from Mixin.
            valid_mixin = True
        elif '__mixin__' in dct:
            # A class generated by mixin(); consume the marker flag so it
            # does not leak into the final class namespace.
            dct.pop('__mixin__')
            valid_mixin = True
        if not valid_mixin:
            raise InheritMixinError(clsname)
        # Mixins must never be instantiated directly.
        dct['__new__'] = mixin_new
        return super(MixinMeta, cls).__new__(cls, clsname, bases, dct)
# The canonical mixin base class: inherit from Mixin to declare a mixin
# that cannot be instantiated directly (enforced by MixinMeta).
@add_metaclass(MixinMeta)
class Mixin(object): pass
# Record the canonical class so MixinMeta can recognise direct subclasses;
# must happen immediately after Mixin is created.
MIXIN_CLASS = Mixin
def copy_cls_vars(cls):
    """Return a copy of *cls*'s namespace suitable for rebuilding the class.

    Slot entries plus the implicit ``__dict__``/``__weakref__`` descriptors
    are stripped, since the recreated class regenerates them itself.
    """
    cls_vars = dict(cls.__dict__)
    slots = cls_vars.get('__slots__')
    if slots is not None:
        # A bare string names a single slot; normalise to an iterable.
        slot_names = (slots,) if isinstance(slots, str) else slots
        for name in slot_names:
            del cls_vars[name]
    for implicit in ('__dict__', '__weakref__'):
        cls_vars.pop(implicit, None)
    return cls_vars
def copy_mixin(cls):
    """Return a plain (instantiable, non-metaclass) copy of a mixin class."""
    cls_vars = copy_cls_vars(cls)
    # Drop the instantiation guard installed by MixinMeta.
    cls_vars.pop('__new__')
    cls_bases = list(cls.__bases__)
    # Replace the Mixin base with plain object so the copy escapes
    # MixinMeta's inheritance restrictions.
    if Mixin in cls_bases:
        cls_bases.remove(Mixin)
        cls_bases.append(object)
    return type(cls.__name__, tuple(cls_bases), cls_vars)
def mixin(*clses):
    """Class decorator factory that mixes the given mixin classes into the
    decorated class's base list.

    Each argument must be a class built through MixinMeta; a plain copy of
    each is prepended to the decorated class's bases.

    Raises:
        InvalidMixinError: if any argument is not a MixinMeta class.
    """
    copied_clses = []
    for cls in clses:
        if type(cls) != MixinMeta:
            raise InvalidMixinError(cls)
        copied_cls = copy_mixin(cls)
        copied_clses.append(copied_cls)
    def generate_mixin(orig_cls):
        # Rebuild the decorated class with the mixin copies injected at the
        # front of its base-class list.
        orig_vars = copy_cls_vars(orig_cls)
        orig_bases = list(orig_cls.__bases__)
        orig_type = type(orig_cls)
        if orig_type == MixinMeta:
            # The decorated class is itself a mixin: flag it so MixinMeta
            # accepts the rebuilt class, and drop the redundant Mixin base.
            orig_vars['__mixin__'] = True
            if Mixin in orig_bases:
                orig_bases.remove(Mixin)
        return orig_type(orig_cls.__name__,
                         tuple(copied_clses) + tuple(orig_bases),
                         orig_vars)
    return generate_mixin
|
en
| 0.885408
|
#!/usr/bin/env python # class_types and add_metaclass were copied from six Class decorator for creating a class with a metaclass.
| 2.397293
| 2
|
Exercices/chapter_03/stack.py
|
joelwembo/expert-python-dts-algorithms
| 0
|
6627734
|
class Stack:
    """LIFO stack backed by a Python list; the top is the end of the list."""

    def __init__(self):
        self.items = []

    def is_empty(self):
        """Return True when the stack holds no items."""
        return not self.items

    def push(self, item):
        """Place *item* on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item (IndexError when empty)."""
        return self.items.pop()

    def peek(self):
        """Return the top item without removing it, or None when empty."""
        return self.items[-1] if self.items else None

    def __len__(self):
        # Supporting len() is more idiomatic in Python than a size() method.
        return len(self.items)

    def __bool__(self):
        # Allows `if stack:` to mean "stack is non-empty".
        return bool(self.items)
|
class Stack:
    """A simple LIFO stack backed by a list; the top is the list's end."""
    def __init__(self):
        # Underlying storage; index -1 is the top of the stack.
        self.items = []
    def is_empty(self):
        """Return True when no items are stored."""
        return len(self.items) == 0
    def push(self, item):
        """Add *item* to the top of the stack."""
        self.items.append(item)
    def pop(self):
        """Remove and return the top item; raises IndexError when empty."""
        return self.items.pop()
    def peek(self):
        """Return the top item without removing it, or None when empty."""
        if self.items:
            return self.items[-1]
        return None
    def __len__(self):
        # in python the `len` function is preferred to `size` methods
        return len(self.items)
    def __bool__(self):
        # lets us use the stack as a conditional
        return bool(self.items)
|
en
| 0.853619
|
# in python the `len` function is preferred to `size` methods # lets us use the stack as a conditional
| 3.920035
| 4
|
src/utils/beam_search.py
|
r39ashmi/LastMileRoutingResearchChallenge
| 0
|
6627735
|
<reponame>r39ashmi/LastMileRoutingResearchChallenge
import torch
class CachedLookup(object):
    """Memoises the most recent tensor-index lookup into a data structure.

    A repeated ``__getitem__`` with a key tensor equal to the previous one
    returns the cached result instead of re-indexing the underlying data.
    """
    def __init__(self, data):
        self.orig = data      # the structure being indexed
        self.key = None       # last key tensor used, or None before first use
        self.current = None   # cached result of self.orig[self.key]
    def __getitem__(self, key):
        assert not isinstance(key, slice), "CachedLookup does not support slicing, " \
            "you can slice the result of an index operation instead"
        assert torch.is_tensor(key)  # If tensor, idx all tensors by this tensor:
        # Cache hit only when a previous key exists and is elementwise equal
        # (length checked first so the comparison cannot broadcast-fail).
        cache_hit = (
            self.key is not None
            and len(key) == len(self.key)
            and bool((key == self.key).all())
        )
        if not cache_hit:
            self.key = key
            self.current = self.orig[key]
        return self.current
|
import torch
class CachedLookup(object):
    """Caches the result of the most recent tensor-index lookup.

    Indexing with a tensor equal to the previous key returns the cached
    result instead of re-indexing ``self.orig``.
    """
    def __init__(self, data):
        # data: the underlying structure to be indexed by tensors.
        self.orig = data
        # Last key tensor and the cached result of self.orig[key].
        self.key = None
        self.current = None
    def __getitem__(self, key):
        assert not isinstance(key, slice), "CachedLookup does not support slicing, " \
            "you can slice the result of an index operation instead"
        assert torch.is_tensor(key)  # If tensor, idx all tensors by this tensor:
        if self.key is None:
            # First lookup: populate the cache.
            self.key = key
            self.current = self.orig[key]
        elif len(key) != len(self.key) or (key != self.key).any():
            # Key changed (different length or any differing element):
            # refresh the cache.
            self.key = key
            self.current = self.orig[key]
        return self.current
|
en
| 0.774541
|
# If tensor, idx all tensors by this tensor:
| 2.673756
| 3
|
vi/search.py
|
uri/Vintageous
| 0
|
6627736
|
import sublime
def find_in_range(view, term, start, end, flags=0):
    """Search forward for `term` from `start`; return the match region only
    if it ends at or before `end` (otherwise implicitly return None)."""
    found = view.find(term, start, flags)
    if found and found.b <= end:
        return found
def find_wrapping(view, term, start, end, flags=0, times=1):
    """Return the `times`-th forward occurrence of `term`, wrapping once
    from the buffer end back to the top; None when not found."""
    current_sel = view.sel()[0]
    # Search wrapping around the end of the buffer.
    for x in range(times):
        match = find_in_range(view, term, start, end, flags)
        # Start searching in the upper half of the buffer if we aren't doing it yet.
        if not match and start > current_sel.b:
            start = 0
            end = current_sel.a
            match = find_in_range(view, term, start, end, flags)
            if not match:
                return
        # No luck in the whole buffer.
        elif not match:
            return
        # Continue the next iteration just past this match.
        start = match.b
    return match
def reverse_find_wrapping(view, term, start, end, flags=0, times=1):
    """Return the `times`-th backward occurrence of `term`, wrapping once
    to the lower half of the buffer; None when not found."""
    current_sel = view.sel()[0]
    # Search wrapping around the end of the buffer.
    for x in range(times):
        match = reverse_search(view, term, start, end, flags)
        # Start searching in the lower half of the buffer if we aren't doing it yet.
        if not match and start < current_sel.b:
            start = current_sel.b
            end = view.size()
            match = reverse_search(view, term, start, end, flags)
            if not match:
                return
        # No luck in the whole buffer.
        elif not match:
            return
        # Continue the next iteration before this match.
        end = match.a
    return match
def find_last_in_range(view, term, start, end, flags=0):
    """Return the last occurrence of `term` within [start, end], or None."""
    found = find_in_range(view, term, start, end, flags)
    last_found = found
    while found:
        # Keep scanning forward from the end of the previous match; the
        # last in-range match seen wins.
        found = find_in_range(view, term, found.b, end, flags)
        if not found or found.b > end:
            break
        last_found = found if found else last_found
    return last_found
# reverse search
def reverse_search(view, term, start, end, flags=0):
    """Find the last occurrence of `term` in [start, end] via a line-wise
    binary search; returns the match region or None."""
    assert isinstance(start, int) or start is None
    assert isinstance(end, int) or end is None
    # Default to the whole buffer when either bound is omitted.
    start = start if (start is not None) else 0
    end = end if (end is not None) else view.size()
    if start < 0 or end > view.size():
        return None
    lo_line = view.full_line(start)
    hi_line = view.full_line(end)
    while True:
        low_row, hi_row = view.rowcol(lo_line.a)[0], view.rowcol(hi_line.a)[0]
        middle_row = (low_row + hi_row) // 2
        middle_line = view.full_line(view.text_point(middle_row, 0))
        lo_region = sublime.Region(lo_line.a, middle_line.b)
        hi_region = sublime.Region(middle_line.b, min(hi_line.b, end))
        # Prefer the upper half: if it contains any match, the *last*
        # match must live there; otherwise fall back to the lower half.
        if find_in_range(view, term, hi_region.a, hi_region.b, flags):
            lo_line = view.full_line(middle_line.b)
        elif find_in_range(view, term, lo_region.a, lo_region.b, flags):
            hi_line = view.full_line(middle_line.a)
        else:
            return None
        if lo_line == hi_line:
            # we found the line we were looking for, now extract the match.
            return find_last_in_range(view, term, hi_line.a, min(hi_line.b, end), flags)
def reverse_search_by_pt(view, term, start, end, flags=0):
    """Like reverse_search, but clamps the final extraction to `start`.

    NOTE(review): near-duplicate of reverse_search except for the
    ``max(hi_line.a, start)`` clamp on the last line — a candidate for
    consolidation into one helper.
    """
    assert isinstance(start, int) or start is None
    assert isinstance(end, int) or end is None
    # Default to the whole buffer when either bound is omitted.
    start = start if (start is not None) else 0
    end = end if (end is not None) else view.size()
    if start < 0 or end > view.size():
        return None
    lo_line = view.full_line(start)
    hi_line = view.full_line(end)
    while True:
        low_row, hi_row = view.rowcol(lo_line.a)[0], view.rowcol(hi_line.a)[0]
        middle_row = (low_row + hi_row) // 2
        middle_line = view.full_line(view.text_point(middle_row, 0))
        lo_region = sublime.Region(lo_line.a, middle_line.b)
        hi_region = sublime.Region(middle_line.b, min(hi_line.b, end))
        # Prefer the upper half: if it contains any match, the *last*
        # match must live there; otherwise fall back to the lower half.
        if find_in_range(view, term, hi_region.a, hi_region.b, flags):
            lo_line = view.full_line(middle_line.b)
        elif find_in_range(view, term, lo_region.a, lo_region.b, flags):
            hi_line = view.full_line(middle_line.a)
        else:
            return None
        if lo_line == hi_line:
            # we found the line we were looking for, now extract the match.
            return find_last_in_range(view, term, max(hi_line.a, start), min(hi_line.b, end), flags)
|
import sublime
def find_in_range(view, term, start, end, flags=0):
found = view.find(term, start, flags)
if found and found.b <= end:
return found
def find_wrapping(view, term, start, end, flags=0, times=1):
current_sel = view.sel()[0]
# Search wrapping around the end of the buffer.
for x in range(times):
match = find_in_range(view, term, start, end, flags)
# Start searching in the upper half of the buffer if we aren't doing it yet.
if not match and start > current_sel.b:
start = 0
end = current_sel.a
match = find_in_range(view, term, start, end, flags)
if not match:
return
# No luck in the whole buffer.
elif not match:
return
start = match.b
return match
def reverse_find_wrapping(view, term, start, end, flags=0, times=1):
current_sel = view.sel()[0]
# Search wrapping around the end of the buffer.
for x in range(times):
match = reverse_search(view, term, start, end, flags)
# Start searching in the lower half of the buffer if we aren't doing it yet.
if not match and start < current_sel.b:
start = current_sel.b
end = view.size()
match = reverse_search(view, term, start, end, flags)
if not match:
return
# No luck in the whole buffer.
elif not match:
return
end = match.a
return match
def find_last_in_range(view, term, start, end, flags=0):
found = find_in_range(view, term, start, end, flags)
last_found = found
while found:
found = find_in_range(view, term, found.b, end, flags)
if not found or found.b > end:
break
last_found = found if found else last_found
return last_found
# reverse search
def reverse_search(view, term, start, end, flags=0):
assert isinstance(start, int) or start is None
assert isinstance(end, int) or end is None
start = start if (start is not None) else 0
end = end if (end is not None) else view.size()
if start < 0 or end > view.size():
return None
lo_line = view.full_line(start)
hi_line = view.full_line(end)
while True:
low_row, hi_row = view.rowcol(lo_line.a)[0], view.rowcol(hi_line.a)[0]
middle_row = (low_row + hi_row) // 2
middle_line = view.full_line(view.text_point(middle_row, 0))
lo_region = sublime.Region(lo_line.a, middle_line.b)
hi_region = sublime.Region(middle_line.b, min(hi_line.b, end))
if find_in_range(view, term, hi_region.a, hi_region.b, flags):
lo_line = view.full_line(middle_line.b)
elif find_in_range(view, term, lo_region.a, lo_region.b, flags):
hi_line = view.full_line(middle_line.a)
else:
return None
if lo_line == hi_line:
# we found the line we were looking for, now extract the match.
return find_last_in_range(view, term, hi_line.a, min(hi_line.b, end), flags)
def reverse_search_by_pt(view, term, start, end, flags=0):
assert isinstance(start, int) or start is None
assert isinstance(end, int) or end is None
start = start if (start is not None) else 0
end = end if (end is not None) else view.size()
if start < 0 or end > view.size():
return None
lo_line = view.full_line(start)
hi_line = view.full_line(end)
while True:
low_row, hi_row = view.rowcol(lo_line.a)[0], view.rowcol(hi_line.a)[0]
middle_row = (low_row + hi_row) // 2
middle_line = view.full_line(view.text_point(middle_row, 0))
lo_region = sublime.Region(lo_line.a, middle_line.b)
hi_region = sublime.Region(middle_line.b, min(hi_line.b, end))
if find_in_range(view, term, hi_region.a, hi_region.b, flags):
lo_line = view.full_line(middle_line.b)
elif find_in_range(view, term, lo_region.a, lo_region.b, flags):
hi_line = view.full_line(middle_line.a)
else:
return None
if lo_line == hi_line:
# we found the line we were looking for, now extract the match.
return find_last_in_range(view, term, max(hi_line.a, start), min(hi_line.b, end), flags)
|
en
| 0.937762
|
# Search wrapping around the end of the buffer. # Start searching in the upper half of the buffer if we aren't doing it yet. # No luck in the whole buffer. # Search wrapping around the end of the buffer. # Start searching in the lower half of the buffer if we aren't doing it yet. # No luck in the whole buffer. # reverse search # we found the line we were looking for, now extract the match. # we found the line we were looking for, now extract the match.
| 3.135974
| 3
|
rx/core/operators/groupbyuntil.py
|
daliclass/RxPY
| 0
|
6627737
|
from typing import Callable, Optional
from collections import OrderedDict
from rx import operators as ops
from rx.core import Observable, GroupedObservable
from rx.core.typing import Mapper
from rx.subjects import Subject
from rx.disposable import CompositeDisposable, RefCountDisposable, SingleAssignmentDisposable
from rx.internal.basic import identity
def _group_by_until(key_mapper: Mapper,
                    element_mapper: Optional[Mapper],
                    duration_mapper: Callable[[GroupedObservable], Observable]
                    ) -> Callable[[Observable], Observable]:
    """Groups the elements of an observable sequence according to a
    specified key mapper function. A duration mapper function is used
    to control the lifetime of groups. When a group expires, it receives
    an OnCompleted notification. When a new element with the same key
    value as a reclaimed group occurs, the group will be reborn with a
    new lifetime request.
    Examples:
        >>> group_by_until(lambda x: x.id, None, lambda : rx.never())
        >>> group_by_until(lambda x: x.id,lambda x: x.name, lambda grp: rx.never())
    Args:
        key_mapper: A function to extract the key for each element.
        duration_mapper: A function to signal the expiration of a group.
    Returns: a sequence of observable groups, each of which corresponds to
    a unique key value, containing all elements that share that same key
    value. If a group's lifetime expires, a new group with the same key
    value can be created once an element with such a key value is
    encountered.
    """
    element_mapper = element_mapper or identity
    def group_by_until(source: Observable) -> Observable:
        def subscribe(observer, scheduler=None):
            # Open groups keyed by key_mapper output; insertion order kept.
            writers = OrderedDict()
            group_disposable = CompositeDisposable()
            # Keeps the source subscription alive while any group
            # observable still has subscribers.
            ref_count_disposable = RefCountDisposable(group_disposable)
            def on_next(x):
                writer = None
                key = None
                try:
                    key = key_mapper(x)
                except Exception as e:
                    # A mapper failure tears down every open group.
                    for wrt in writers.values():
                        wrt.on_error(e)
                    observer.on_error(e)
                    return
                fire_new_map_entry = False
                writer = writers.get(key)
                if not writer:
                    # First element for this key: open a new group.
                    writer = Subject()
                    writers[key] = writer
                    fire_new_map_entry = True
                if fire_new_map_entry:
                    group = GroupedObservable(key, writer, ref_count_disposable)
                    duration_group = GroupedObservable(key, writer)
                    try:
                        duration = duration_mapper(duration_group)
                    except Exception as e:
                        for wrt in writers.values():
                            wrt.on_error(e)
                        observer.on_error(e)
                        return
                    observer.on_next(group)
                    sad = SingleAssignmentDisposable()
                    group_disposable.add(sad)
                    def expire():
                        # Close and forget the group when its duration fires.
                        if writers[key]:
                            del writers[key]
                            writer.on_completed()
                        group_disposable.remove(sad)
                    # NOTE: these inner handlers deliberately shadow the
                    # outer on_next/on_error — they observe the duration
                    # observable, not the source.
                    def on_next(value):
                        pass
                    def on_error(exn):
                        for wrt in writers.values():
                            wrt.on_error(exn)
                        observer.on_error(exn)
                    def on_completed():
                        expire()
                    # Only the first duration event matters.
                    sad.disposable = duration.pipe(ops.take(1)).subscribe_(on_next, on_error, on_completed, scheduler)
                try:
                    element = element_mapper(x)
                except Exception as error:
                    for wrt in writers.values():
                        wrt.on_error(error)
                    observer.on_error(error)
                    return
                writer.on_next(element)
            def on_error(ex):
                # Propagate source errors to every open group, then out.
                for wrt in writers.values():
                    wrt.on_error(ex)
                observer.on_error(ex)
            def on_completed():
                # Source completion closes every open group, then the outer.
                for wrt in writers.values():
                    wrt.on_completed()
                observer.on_completed()
            group_disposable.add(source.subscribe_(on_next, on_error, on_completed, scheduler))
            return ref_count_disposable
        return Observable(subscribe)
    return group_by_until
|
from typing import Callable, Optional
from collections import OrderedDict
from rx import operators as ops
from rx.core import Observable, GroupedObservable
from rx.core.typing import Mapper
from rx.subjects import Subject
from rx.disposable import CompositeDisposable, RefCountDisposable, SingleAssignmentDisposable
from rx.internal.basic import identity
def _group_by_until(key_mapper: Mapper,
element_mapper: Optional[Mapper],
duration_mapper: Callable[[GroupedObservable], Observable]
) -> Callable[[Observable], Observable]:
"""Groups the elements of an observable sequence according to a
specified key mapper function. A duration mapper function is used
to control the lifetime of groups. When a group expires, it receives
an OnCompleted notification. When a new element with the same key
value as a reclaimed group occurs, the group will be reborn with a
new lifetime request.
Examples:
>>> group_by_until(lambda x: x.id, None, lambda : rx.never())
>>> group_by_until(lambda x: x.id,lambda x: x.name, lambda grp: rx.never())
Args:
key_mapper: A function to extract the key for each element.
duration_mapper: A function to signal the expiration of a group.
Returns: a sequence of observable groups, each of which corresponds to
a unique key value, containing all elements that share that same key
value. If a group's lifetime expires, a new group with the same key
value can be created once an element with such a key value is
encountered.
"""
element_mapper = element_mapper or identity
def group_by_until(source: Observable) -> Observable:
def subscribe(observer, scheduler=None):
writers = OrderedDict()
group_disposable = CompositeDisposable()
ref_count_disposable = RefCountDisposable(group_disposable)
def on_next(x):
writer = None
key = None
try:
key = key_mapper(x)
except Exception as e:
for wrt in writers.values():
wrt.on_error(e)
observer.on_error(e)
return
fire_new_map_entry = False
writer = writers.get(key)
if not writer:
writer = Subject()
writers[key] = writer
fire_new_map_entry = True
if fire_new_map_entry:
group = GroupedObservable(key, writer, ref_count_disposable)
duration_group = GroupedObservable(key, writer)
try:
duration = duration_mapper(duration_group)
except Exception as e:
for wrt in writers.values():
wrt.on_error(e)
observer.on_error(e)
return
observer.on_next(group)
sad = SingleAssignmentDisposable()
group_disposable.add(sad)
def expire():
if writers[key]:
del writers[key]
writer.on_completed()
group_disposable.remove(sad)
def on_next(value):
pass
def on_error(exn):
for wrt in writers.values():
wrt.on_error(exn)
observer.on_error(exn)
def on_completed():
expire()
sad.disposable = duration.pipe(ops.take(1)).subscribe_(on_next, on_error, on_completed, scheduler)
try:
element = element_mapper(x)
except Exception as error:
for wrt in writers.values():
wrt.on_error(error)
observer.on_error(error)
return
writer.on_next(element)
def on_error(ex):
for wrt in writers.values():
wrt.on_error(ex)
observer.on_error(ex)
def on_completed():
for wrt in writers.values():
wrt.on_completed()
observer.on_completed()
group_disposable.add(source.subscribe_(on_next, on_error, on_completed, scheduler))
return ref_count_disposable
return Observable(subscribe)
return group_by_until
|
en
| 0.721188
|
Groups the elements of an observable sequence according to a specified key mapper function. A duration mapper function is used to control the lifetime of groups. When a group expires, it receives an OnCompleted notification. When a new element with the same key value as a reclaimed group occurs, the group will be reborn with a new lifetime request. Examples: >>> group_by_until(lambda x: x.id, None, lambda : rx.never()) >>> group_by_until(lambda x: x.id,lambda x: x.name, lambda grp: rx.never()) Args: key_mapper: A function to extract the key for each element. duration_mapper: A function to signal the expiration of a group. Returns: a sequence of observable groups, each of which corresponds to a unique key value, containing all elements that share that same key value. If a group's lifetime expires, a new group with the same key value can be created once an element with such a key value is encountered.
| 2.612405
| 3
|
herbie/models/hrrr.py
|
WToma/Herbie
| 0
|
6627738
|
## Added by <NAME>
## July 26, 2021
"""
A Herbie template for the HRRR model.
Because the file path to GRIB2 model data is predictable, we can
template the download URL for model output. Follow this template for
writing your own template file for any model with GRIB2 files available
via https.
Requirements
------------
1. Model GRIB2 file must be available via https
2. Preferably, an .idx file should be available.
3. URL must be consistent across time and products.
Properties
----------
DESCRIPTION : str
A description of the model. Give the full name and the
domain, if relevant. Just infor for the user.
DETAILS : dict
Some additional details about the model. Provide links
to web documentation. Just info for the user.
PRODUCTS : dict
Models usually have different product types. The keys are
used in building the GRIB2 source URL.
ORDER MATTERS -- If product is None, then Herbie uses the first
as default.
*ONLY ONE IS USED (FIRST IS USED IF NOT SET)*
SOURCES : dict
Build the URL for the GRIB2 file for different sources.
The parameters are from arguments passed into the
``herbie.archive.Herbie()`` class.
ORDER MATTERS -- If priority is None, then Herbie searches the
sources in the order given here.
*LOOP THROUGH ALL SOURCES*
LOCALFILE : str
The local file to save the model output. The file will be saved in
``save_dir/model/YYYYmmdd/localFile.grib2``
It is sometimes necessary to add details to maintain unique
filenames (e.g., rrfs needs to have the member number in LOCALFILE).
Optional
--------
IDX_SUFFIX : list
Default value is ["grib.idx"], which is pretty standard.
But for some, like RAP, the idx files are messy and could be a few
different styles.
self.IDX_SUFFIX = [".grb2.inv", ".inv", ".grb.inv"]
*LOOP THROUGH ALL SUFFIXES TO FIND AN INDEX FILE*
IDX_STYLE : {'wgrib2', 'eccodes'}
This defines how the index will be interpreted.
- NCEP products use ``wgrib2`` to create index files.
- ECMWF products use ``eccodes`` to create index files.
"""
from datetime import datetime
class hrrr:
    """Herbie template for the HRRR CONUS model (GRIB2 download sources)."""
    def template(self):
        # Populates the Herbie template attributes; `self` is the Herbie
        # object, which supplies date, product, fxx, model, etc.
        self.DESCRIPTION = "High-Resolution Rapid Refresh - CONUS"
        self.DETAILS = {
            "NOMADS product description": "https://www.nco.ncep.noaa.gov/pmb/products/hrrr/",
            "University of Utah HRRR archive": "http://hrrr.chpc.utah.edu/",
        }
        self.PRODUCTS = {
            "sfc": "2D surface level fields; 3-km resolution",
            "prs": "3D pressure level fields; 3-km resolution",
            "nat": "Native level fields; 3-km resolution",
            "subh": "Subhourly grids; 3-km resolution",
        }
        # Candidate download URLs, tried in this order by default.
        self.SOURCES = {
            "aws": f"https://noaa-hrrr-bdp-pds.s3.amazonaws.com/hrrr.{self.date:%Y%m%d}/conus/hrrr.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}.grib2",
            "nomads": f"https://nomads.ncep.noaa.gov/pub/data/nccf/com/hrrr/prod/hrrr.{self.date:%Y%m%d}/conus/hrrr.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}.grib2",
            "google": f"https://storage.googleapis.com/high-resolution-rapid-refresh/hrrr.{self.date:%Y%m%d}/conus/hrrr.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}.grib2",
            "azure": f"https://noaahrrr.blob.core.windows.net/hrrr/hrrr.{self.date:%Y%m%d}/conus/hrrr.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}.grib2",
            "pando": f"https://pando-rgw01.chpc.utah.edu/{self.model}/{self.product}/{self.date:%Y%m%d}/{self.model}.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}.grib2",
            "pando2": f"https://pando-rgw02.chpc.utah.edu/{self.model}/{self.product}/{self.date:%Y%m%d}/{self.model}.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}.grib2",
        }
        self.EXPECT_IDX_FILE = "remote"
        self.LOCALFILE = f"{self.get_remoteFileName}"
        # ----------
        # CONDITIONS
        # ----------
        # Fix Issue #34 (not pretty, but gets the job done for now)
        # TODO: Allow Herbie to specify the format of the SOURCE manually
        if self.product == "subh" and self.date <= datetime(2018, 9, 16):
            # The subhourly filenames are different for older files.
            # prepend the self.SOURCES dict with the old filename format.
            # This requires an additional arg for `fxx_subh` when calling Herbie
            self.SOURCES = {
                "aws_old_subh": f"https://noaa-hrrr-bdp-pds.s3.amazonaws.com/hrrr.{self.date:%Y%m%d}/conus/hrrr.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}{self.fxx_subh:02d}.grib2",
                **self.SOURCES
            }
class hrrrak:
    """Herbie template for the HRRR Alaska model (GRIB2 download sources)."""
    def template(self):
        # Populates the Herbie template attributes; `self` is the Herbie
        # object, which supplies date, product, fxx, model, etc.
        self.DESCRIPTION = "High-Resolution Rapid Refresh - Alaska"
        self.DETAILS = {
            "nomads product description": "https://www.nco.ncep.noaa.gov/pmb/products/hrrr",
        }
        self.PRODUCTS = {
            "prs": "3D pressure level fields; 3-km resolution",
            "sfc": "2D surface level fields; 3-km resolution",
            "nat": "Native level fields; 3-km resolution",
            "subh": "Subhourly grids; 3-km resolution",
        }
        # Candidate download URLs, tried in this order by default.
        self.SOURCES = {
            "nomads": f"https://nomads.ncep.noaa.gov/pub/data/nccf/com/hrrr/prod/hrrr.{self.date:%Y%m%d}/alaska/hrrr.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}.ak.grib2",
            "aws": f"https://noaa-hrrr-bdp-pds.s3.amazonaws.com/hrrr.{self.date:%Y%m%d}/alaska/hrrr.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}.ak.grib2",
            "google": f"https://storage.googleapis.com/high-resolution-rapid-refresh/hrrr.{self.date:%Y%m%d}/alaska/hrrr.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}.ak.grib2",
            "azure": f"https://noaahrrr.blob.core.windows.net/hrrr/hrrr.{self.date:%Y%m%d}/alaska/hrrr.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}.ak.grib2",
            "pando": f"https://pando-rgw01.chpc.utah.edu/{self.model}/{self.product}/{self.date:%Y%m%d}/{self.model}.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}.grib2",
            "pando2": f"https://pando-rgw02.chpc.utah.edu/{self.model}/{self.product}/{self.date:%Y%m%d}/{self.model}.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}.grib2",
        }
        self.EXPECT_IDX_FILE = "remote"
        self.LOCALFILE = f"{self.get_remoteFileName}"
|
## Added by <NAME>
## July 26, 2021
"""
A Herbie template for the HRRR model.
Because the file path to GRIB2 model data is predictable, we can
template the download URL for model output. Follow this template for
writing your own template file for any model with GRIB2 files available
via https.
Requirements
------------
1. Model GRIB2 file must be available via https
2. Preferably, an .idx file should be available.
3. URL must be consistent across time and products.
Properties
----------
DESCRIPTION : str
A description of the model. Give the full name and the
domain, if relevant. Just infor for the user.
DETAILS : dict
Some additional details about the model. Provide links
to web documentation. Just info for the user.
PRODUCTS : dict
Models usually have different product types. The keys are
used in building the GRIB2 source URL.
ORDER MATTERS -- If product is None, then Herbie uses the first
as default.
*ONLY ONE IS USED (FIRST IS USED IF NOT SET)*
SOURCES : dict
Build the URL for the GRIB2 file for different sources.
The parameters are from arguments passed into the
``herbie.archive.Herbie()`` class.
ORDER MATTERS -- If priority is None, then Herbie searches the
sources in the order given here.
*LOOP THROUGH ALL SOURCES*
LOCALFILE : str
The local file to save the model output. The file will be saved in
``save_dir/model/YYYYmmdd/localFile.grib2``
It is sometimes necessary to add details to maintain unique
filenames (e.g., rrfs needs to have the member number in LOCALFILE).
Optional
--------
IDX_SUFFIX : list
Default value is ["grib.idx"], which is pretty standard.
But for some, like RAP, the idx files are messy and could be a few
different styles.
self.IDX_SUFFIX = [".grb2.inv", ".inv", ".grb.inv"]
*LOOP THROUGH ALL SUFFIXES TO FIND AN INDEX FILE*
IDX_STYLE : {'wgrib2', 'eccodes'}
This defines how the index will be interpreted.
- NCEP products use ``wgrib2`` to create index files.
- ECMWF products use ``eccodes`` to create index files.
"""
from datetime import datetime
class hrrr:
def template(self):
self.DESCRIPTION = "High-Resolution Rapid Refresh - CONUS"
self.DETAILS = {
"NOMADS product description": "https://www.nco.ncep.noaa.gov/pmb/products/hrrr/",
"University of Utah HRRR archive": "http://hrrr.chpc.utah.edu/",
}
self.PRODUCTS = {
"sfc": "2D surface level fields; 3-km resolution",
"prs": "3D pressure level fields; 3-km resolution",
"nat": "Native level fields; 3-km resolution",
"subh": "Subhourly grids; 3-km resolution",
}
self.SOURCES = {
"aws": f"https://noaa-hrrr-bdp-pds.s3.amazonaws.com/hrrr.{self.date:%Y%m%d}/conus/hrrr.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}.grib2",
"nomads": f"https://nomads.ncep.noaa.gov/pub/data/nccf/com/hrrr/prod/hrrr.{self.date:%Y%m%d}/conus/hrrr.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}.grib2",
"google": f"https://storage.googleapis.com/high-resolution-rapid-refresh/hrrr.{self.date:%Y%m%d}/conus/hrrr.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}.grib2",
"azure": f"https://noaahrrr.blob.core.windows.net/hrrr/hrrr.{self.date:%Y%m%d}/conus/hrrr.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}.grib2",
"pando": f"https://pando-rgw01.chpc.utah.edu/{self.model}/{self.product}/{self.date:%Y%m%d}/{self.model}.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}.grib2",
"pando2": f"https://pando-rgw02.chpc.utah.edu/{self.model}/{self.product}/{self.date:%Y%m%d}/{self.model}.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}.grib2",
}
self.EXPECT_IDX_FILE = "remote"
self.LOCALFILE = f"{self.get_remoteFileName}"
# ----------
# CONDITIONS
# ----------
# Fix Issue #34 (not pretty, but gets the job done for now)
# TODO: Allow Herbie to specify the format of the SOURCE manually
if self.product == "subh" and self.date <= datetime(2018, 9, 16):
# The subhourly filenames are different for older files.
# prepend the self.SOURCES dict with the old filename format.
# This requires an additional arg for `fxx_subh` when calling Herbie
self.SOURCES = {
"aws_old_subh": f"https://noaa-hrrr-bdp-pds.s3.amazonaws.com/hrrr.{self.date:%Y%m%d}/conus/hrrr.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}{self.fxx_subh:02d}.grib2",
**self.SOURCES
}
class hrrrak:
def template(self):
self.DESCRIPTION = "High-Resolution Rapid Refresh - Alaska"
self.DETAILS = {
"nomads product description": "https://www.nco.ncep.noaa.gov/pmb/products/hrrr",
}
self.PRODUCTS = {
"prs": "3D pressure level fields; 3-km resolution",
"sfc": "2D surface level fields; 3-km resolution",
"nat": "Native level fields; 3-km resolution",
"subh": "Subhourly grids; 3-km resolution",
}
self.SOURCES = {
"nomads": f"https://nomads.ncep.noaa.gov/pub/data/nccf/com/hrrr/prod/hrrr.{self.date:%Y%m%d}/alaska/hrrr.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}.ak.grib2",
"aws": f"https://noaa-hrrr-bdp-pds.s3.amazonaws.com/hrrr.{self.date:%Y%m%d}/alaska/hrrr.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}.ak.grib2",
"google": f"https://storage.googleapis.com/high-resolution-rapid-refresh/hrrr.{self.date:%Y%m%d}/alaska/hrrr.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}.ak.grib2",
"azure": f"https://noaahrrr.blob.core.windows.net/hrrr/hrrr.{self.date:%Y%m%d}/alaska/hrrr.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}.ak.grib2",
"pando": f"https://pando-rgw01.chpc.utah.edu/{self.model}/{self.product}/{self.date:%Y%m%d}/{self.model}.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}.grib2",
"pando2": f"https://pando-rgw02.chpc.utah.edu/{self.model}/{self.product}/{self.date:%Y%m%d}/{self.model}.t{self.date:%H}z.wrf{self.product}f{self.fxx:02d}.grib2",
}
self.EXPECT_IDX_FILE = "remote"
self.LOCALFILE = f"{self.get_remoteFileName}"
|
en
| 0.735256
|
## Added by <NAME> ## July 26, 2021 A Herbie template for the HRRR model. Because the file path to GRIB2 model data is predictable, we can template the download URL for model output. Follow this template for writing your own template file for any model with GRIB2 files available via https. Requirements ------------ 1. Model GRIB2 file must be available via https 2. Preferably, an .idx file should be available. 3. URL must be consistent across time and products. Properties ---------- DESCRIPTION : str A description of the model. Give the full name and the domain, if relevant. Just infor for the user. DETAILS : dict Some additional details about the model. Provide links to web documentation. Just info for the user. PRODUCTS : dict Models usually have different product types. The keys are used in building the GRIB2 source URL. ORDER MATTERS -- If product is None, then Herbie uses the first as default. *ONLY ONE IS USED (FIRST IS USED IF NOT SET)* SOURCES : dict Build the URL for the GRIB2 file for different sources. The parameters are from arguments passed into the ``herbie.archive.Herbie()`` class. ORDER MATTERS -- If priority is None, then Herbie searches the sources in the order given here. *LOOP THROUGH ALL SOURCES* LOCALFILE : str The local file to save the model output. The file will be saved in ``save_dir/model/YYYYmmdd/localFile.grib2`` It is sometimes necessary to add details to maintain unique filenames (e.g., rrfs needs to have the member number in LOCALFILE). Optional -------- IDX_SUFFIX : list Default value is ["grib.idx"], which is pretty standard. But for some, like RAP, the idx files are messy and could be a few different styles. self.IDX_SUFFIX = [".grb2.inv", ".inv", ".grb.inv"] *LOOP THROUGH ALL SUFFIXES TO FIND AN INDEX FILE* IDX_STYLE : {'wgrib2', 'eccodes'} This defines how the index will be interpreted. - NCEP products use ``wgrib2`` to create index files. - ECMWF products use ``eccodes`` to create index files. 
# ---------- # CONDITIONS # ---------- # Fix Issue #34 (not pretty, but gets the job done for now) # TODO: Allow Herbie to specify the format of the SOURCE manually # The subhourly filenames are different for older files. # prepend the self.SOURCES dict with the old filename format. # This requires an additional arg for `fxx_subh` when calling Herbie
| 2.699209
| 3
|
hypha/apply/activity/migrations/0032_migrate_submission_to_generic_event.py
|
maxpearl/hypha
| 20
|
6627739
|
<filename>hypha/apply/activity/migrations/0032_migrate_submission_to_generic_event.py
# Generated by Django 2.0.13 on 2019-07-10 22:36
from django.db import migrations
from django.db.models import F
def submission_to_source(apps, schema_editor):
    """Forward migration: copy each Event's submission FK into the generic
    (content_type, object_id) pair added in migration 0031."""
    Event = apps.get_model('activity', 'Event')
    if not Event.objects.exists():
        # Nothing to migrate; also avoids the ContentType lookup on a fresh
        # database where the content type row may not exist yet.
        return
    ContentType = apps.get_model('contenttypes', 'ContentType')
    submission_type = ContentType.objects.get(
        model='applicationsubmission', app_label='funds'
    )
    Event.objects.update(
        object_id=F('submission_id'),
        content_type=submission_type,
    )
def source_to_submission(apps, schema_editor):
    """Reverse migration: restore submission_id from the generic object_id."""
    event_model = apps.get_model('activity', 'Event')
    event_model.objects.update(submission_id=F('object_id'))
class Migration(migrations.Migration):
    # Reversible data migration: populate the generic FK (added in activity
    # migration 0031) from the old submission FK, and back again.

    dependencies = [
        ('activity', '0031_add_generic_fk_to_event'),
        ('funds', '0065_applicationsubmission_meta_categories'),
        ('contenttypes', '0002_remove_content_type_name'),
    ]
    operations = [
        migrations.RunPython(submission_to_source, source_to_submission)
    ]
|
<filename>hypha/apply/activity/migrations/0032_migrate_submission_to_generic_event.py
# Generated by Django 2.0.13 on 2019-07-10 22:36
from django.db import migrations
from django.db.models import F
def submission_to_source(apps, schema_editor):
Event = apps.get_model('activity', 'Event')
if Event.objects.exists():
ContentType = apps.get_model('contenttypes', 'ContentType')
content_type = ContentType.objects.get(model='applicationsubmission', app_label='funds')
Event.objects.update(
object_id=F('submission_id'),
content_type=content_type,
)
def source_to_submission(apps, schema_editor):
Event = apps.get_model('activity', 'Event')
Event.objects.update(submission_id=F('object_id'))
class Migration(migrations.Migration):
dependencies = [
('activity', '0031_add_generic_fk_to_event'),
('funds', '0065_applicationsubmission_meta_categories'),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.RunPython(submission_to_source, source_to_submission)
]
|
en
| 0.78738
|
# Generated by Django 2.0.13 on 2019-07-10 22:36
| 1.540578
| 2
|
app/core/middleware.py
|
elifesciences/digests
| 0
|
6627740
|
from logging import getLogger
from typing import Callable
from django.conf import settings
from django.http.request import HttpRequest as Request
from django.http.response import HttpResponse as Response
from django.views.decorators.cache import patch_cache_control
LOGGER = getLogger(__name__)
def _set_can_modify(request: Request, state: bool) -> Request:
    # Stash the "may modify digests" decision on request.META under a single
    # well-known key so views and later middleware can read it.
    request.META[settings.AUTHORIZATION_MODIFICATION_HEADER] = state
    return request
def _set_can_preview(request: Request, state: bool) -> Request:
    # Stash the "may view unpublished content" decision on request.META.
    request.META[settings.AUTHORIZATION_PREVIEW_HEADER] = state
    return request
def kong_authentication(get_response: Callable[[Request], Response]) \
        -> Callable[[Request], Response]:
    """Middleware factory: derive preview/modify permissions from the Kong
    consumer-groups header and annotate the request with the results."""
    def middleware(request: Request):
        can_preview, can_modify = False, False
        groups_header = request.META.get(settings.CONSUMER_GROUPS_HEADER, None)
        if groups_header:
            groups = [group.strip() for group in groups_header.split(',')]
            LOGGER.debug('user groups: %s', groups)
            can_preview = 'view-unpublished-content' in groups
            if not can_preview:
                LOGGER.debug('setting request as user cannot view '
                             'unpublished content/cannot preview')
            can_modify = 'edit-digests' in groups
            if not can_modify:
                LOGGER.debug('setting request as user cannot modify digests')
        annotated = _set_can_preview(_set_can_modify(request, can_modify), can_preview)
        return get_response(annotated)
    return middleware
def downstream_caching(get_response: Callable[[Request], Response]) \
        -> Callable[[Request], Response]:
    """Middleware factory: apply default Cache-Control headers when the view
    has not set any — private/no-cache for previewers, public otherwise."""
    # Hoisted out of the per-request closure; both dicts are only ever read.
    public_headers = {
        'public': True,
        'max-age': 60 * 5,
        'stale-while-revalidate': 60 * 5,
        'stale-if-error': (60 * 60) * 24,
    }
    private_headers = {
        'private': True,
        'max-age': 0,
        'must-revalidate': True,
    }

    def middleware(request: Request):
        response = get_response(request)
        if not response.get('Cache-Control'):
            is_preview = request.META.get(settings.AUTHORIZATION_PREVIEW_HEADER, False)
            cache_headers = private_headers if is_preview else public_headers
            patch_cache_control(response, **cache_headers)
        return response
    return middleware
|
from logging import getLogger
from typing import Callable
from django.conf import settings
from django.http.request import HttpRequest as Request
from django.http.response import HttpResponse as Response
from django.views.decorators.cache import patch_cache_control
LOGGER = getLogger(__name__)
def _set_can_modify(request: Request, state: bool) -> Request:
request.META[settings.AUTHORIZATION_MODIFICATION_HEADER] = state
return request
def _set_can_preview(request: Request, state: bool) -> Request:
request.META[settings.AUTHORIZATION_PREVIEW_HEADER] = state
return request
def kong_authentication(get_response: Callable[[Request], Response]) \
-> Callable[[Request], Response]:
def middleware(request: Request):
can_preview = False
can_modify = False
groups_header = request.META.get(settings.CONSUMER_GROUPS_HEADER, None)
if groups_header:
groups = [group.strip() for group in groups_header.split(',')]
LOGGER.debug('user groups: %s', groups)
if 'view-unpublished-content' in groups:
can_preview = True
else:
LOGGER.debug('setting request as user cannot view '
'unpublished content/cannot preview')
if 'edit-digests' in groups:
can_modify = True
else:
LOGGER.debug('setting request as user cannot modify digests')
request = _set_can_preview(_set_can_modify(request, can_modify), can_preview)
return get_response(request)
return middleware
def downstream_caching(get_response: Callable[[Request], Response]) \
-> Callable[[Request], Response]:
def middleware(request: Request):
public_headers = {
'public': True,
'max-age': 60 * 5,
'stale-while-revalidate': 60 * 5,
'stale-if-error': (60 * 60) * 24,
}
private_headers = {
'private': True,
'max-age': 0,
'must-revalidate': True,
}
response = get_response(request)
if not response.get('Cache-Control'):
if request.META.get(settings.AUTHORIZATION_PREVIEW_HEADER, False):
cache_headers = private_headers
else:
cache_headers = public_headers
patch_cache_control(response, **cache_headers)
return response
return middleware
|
none
| 1
| 2.019236
| 2
|
|
EvalBox/Attack/AdvAttack/rfgsm.py
|
Yzx835/AISafety
| 32
|
6627741
|
#!/usr/bin/env python
# coding=UTF-8
"""
@Author: <NAME>
@LastEditors: <NAME>
@Description:
@Date: 2019-03-27 09:40:58
@LastEditTime: 2019-04-15 09:23:19
"""
import numpy as np
import torch
from torch.autograd import Variable
from EvalBox.Attack.AdvAttack.attack import Attack
class RFGSM(Attack):
    """Random FGSM (R+FGSM): a random-sign perturbation of magnitude
    ``alpha * epsilon`` followed by one FGSM gradient-sign step using the
    remaining budget ``(1 - alpha) * epsilon``."""

    def __init__(self, model=None, device=None, IsTargeted=None, **kwargs):
        """
        @description: Random FGSM
        @param {
            model: model under attack; called as model(x) to produce logits
            device: torch device used for the forward/backward pass
            IsTargeted: True for a targeted attack (the loss is negated)
            kwargs: hyper-parameters (epsilon, alpha) parsed by _parse_params
        }
        @return: None
        """
        super(RFGSM, self).__init__(model, device, IsTargeted)
        # Cross-entropy on logits; the sign of its gradient drives the step.
        self.criterion = torch.nn.CrossEntropyLoss()
        self._parse_params(**kwargs)

    def _parse_params(self, **kwargs):
        """
        @description: read attack hyper-parameters from kwargs
        @param {
            epsilon: total perturbation budget (default 0.1)
            alpha: fraction of the budget spent on the random step (default 0.5)
        }
        @return: None
        """
        self.eps = float(kwargs.get("epsilon", 0.1))
        self.alp = float(kwargs.get("alpha", 0.5))

    def generate(self, xs=None, ys=None):
        """
        @description: craft adversarial examples for one batch
        @param {
            xs: inputs with values in [0, 1]; assumed to be a CPU tensor
                since xs.numpy() is called directly -- TODO confirm
            ys: labels (ground truth, or target labels when IsTargeted)
        }
        @return: adv_xs
        """
        device = self.device
        targeted = self.IsTargeted
        copy_xs = np.copy(xs.numpy())
        # Random step: move alpha*eps in a uniformly random sign direction.
        copy_xs = copy_xs + self.alp * self.eps * np.sign(
            np.float32(np.random.randn(*copy_xs.shape))
        )
        copy_xs = np.clip(copy_xs, 0.0, 1.0)
        # Budget remaining for the gradient step.
        eps = (1.0 - self.alp) * self.eps
        var_xs = torch.tensor(
            copy_xs, dtype=torch.float, device=device, requires_grad=True
        )
        var_ys = torch.tensor(ys, device=device)
        outputs = self.model(var_xs)
        loss = self.criterion(outputs, var_ys)
        if targeted:
            # Targeted attack descends the loss toward the target label.
            loss = -self.criterion(outputs, var_ys)
        loss.backward()
        grad_sign = var_xs.grad.data.sign().cpu().numpy()
        # FGSM step, then project back into the valid [0, 1] range.
        copy_xs = np.clip(copy_xs + eps * grad_sign, 0.0, 1.0)
        adv_xs = torch.from_numpy(copy_xs)
        return adv_xs
|
#!/usr/bin/env python
# coding=UTF-8
"""
@Author: <NAME>
@LastEditors: <NAME>
@Description:
@Date: 2019-03-27 09:40:58
@LastEditTime: 2019-04-15 09:23:19
"""
import numpy as np
import torch
from torch.autograd import Variable
from EvalBox.Attack.AdvAttack.attack import Attack
class RFGSM(Attack):
def __init__(self, model=None, device=None, IsTargeted=None, **kwargs):
"""
@description: Random FGSM
@param {
model:
device:
kwargs:
}
@return: None
"""
super(RFGSM, self).__init__(model, device, IsTargeted)
self.criterion = torch.nn.CrossEntropyLoss()
self._parse_params(**kwargs)
def _parse_params(self, **kwargs):
"""
@description:
@param {
epsilon:
alpha:
}
@return: None
"""
self.eps = float(kwargs.get("epsilon", 0.1))
self.alp = float(kwargs.get("alpha", 0.5))
def generate(self, xs=None, ys=None):
"""
@description:
@param {
xs:
ys:
}
@return: adv_xs
"""
device = self.device
targeted = self.IsTargeted
copy_xs = np.copy(xs.numpy())
copy_xs = copy_xs + self.alp * self.eps * np.sign(
np.float32(np.random.randn(*copy_xs.shape))
)
copy_xs = np.clip(copy_xs, 0.0, 1.0)
eps = (1.0 - self.alp) * self.eps
var_xs = torch.tensor(
copy_xs, dtype=torch.float, device=device, requires_grad=True
)
var_ys = torch.tensor(ys, device=device)
outputs = self.model(var_xs)
loss = self.criterion(outputs, var_ys)
if targeted:
loss = -self.criterion(outputs, var_ys)
loss.backward()
grad_sign = var_xs.grad.data.sign().cpu().numpy()
copy_xs = np.clip(copy_xs + eps * grad_sign, 0.0, 1.0)
adv_xs = torch.from_numpy(copy_xs)
return adv_xs
|
en
| 0.359954
|
#!/usr/bin/env python # coding=UTF-8 @Author: <NAME> @LastEditors: <NAME> @Description: @Date: 2019-03-27 09:40:58 @LastEditTime: 2019-04-15 09:23:19 @description: Random FGSM @param { model: device: kwargs: } @return: None @description: @param { epsilon: alpha: } @return: None @description: @param { xs: ys: } @return: adv_xs
| 2.286981
| 2
|
src/services/Schedule/test.py
|
IAPark/PITherm
| 0
|
6627742
|
<reponame>IAPark/PITherm
import subprocess
import unittest
from datetime import datetime
import requests
import time
from pymongo import MongoClient
class TestSchedule(unittest.TestCase):
    """End-to-end tests for the Schedule HTTP service.

    A single Schedule.py server process is started for the whole class and
    exercised over HTTP on localhost:5003; the MongoDB collections backing it
    are wiped before every test so each test starts from an empty schedule.
    """

    url = "http://localhost:5003"
    # BUG FIX: Popen takes a flat list of strings; the original passed a
    # nested list (["python", ["Schedule.py"]]), which raises TypeError as
    # soon as this class body executes.
    schedule = subprocess.Popen(["python", "Schedule.py"])

    # ---- helpers -----------------------------------------------------

    @staticmethod
    def _state(ac_target, heater_target, fan):
        """Build a thermostat state payload."""
        return {"AC_target": ac_target, "heater_target": heater_target, "fan": fan}

    @staticmethod
    def _week_time_now():
        """Seconds since the start of the current UTC week, minute resolution."""
        now = datetime.utcnow()
        return now.weekday() * 24 * 60 ** 2 + (now.hour * 60 + now.minute) * 60

    def _post_schedule(self, start, end, state):
        """POST a one-off schedule entry."""
        requests.post(self.url + "/schedule/", json={
            "start": start,
            "end": end,
            "state": state,
        })

    def _post_weekly(self, week_time, state):
        """POST a weekly recurring schedule entry."""
        requests.post(self.url + "/schedule/weekly/", json={
            "week_time": week_time,
            "state": state,
        })

    # ---- tests -------------------------------------------------------

    def test_add_to_schedule(self):
        state = self._state(100, 0, False)
        self._post_schedule(10, 10, state)
        data = requests.get(self.url + "/schedule/").json()["data"]
        self.assertEqual(data[0]["start"], 10)
        self.assertEqual(data[0]["end"], 10)
        self.assertEqual(data[0]["state"], state)
        self.assertEqual(len(data), 1)

    def test_add_to_schedule_weekly(self):
        state = self._state(100, 0, False)
        self._post_weekly(10, state)
        data = requests.get(self.url + "/schedule/weekly/").json()["data"]
        self.assertEqual(data[0]["week_time"], 10)
        self.assertEqual(data[0]["state"], state)
        self.assertEqual(len(data), 1)

    def test_current(self):
        now = int(time.time())
        active = self._state(100, 0, False)
        self._post_schedule(now, now + 10, active)                          # currently active
        self._post_schedule(now - 10, now, self._state(100, 0, True))       # just ended
        self._post_schedule(now + 3, now + 100, self._state(100, 0, True))  # upcoming
        data = requests.get(self.url + "/schedule/current").json()["data"]
        self.assertEqual(data["start"], now)
        self.assertEqual(data["end"], now + 10)
        self.assertEqual(data["state"], active)

    def test_current_weekly(self):
        now = self._week_time_now()
        active = self._state(100, 0, False)
        self._post_weekly(now + 10, self._state(0, 0, True))    # in the future
        self._post_weekly(now - 10, active)                     # most recent past
        self._post_weekly(now - 11, self._state(120, 0, True))  # older past
        time.sleep(1)  # give the service a moment to persist the entries
        data = requests.get(self.url + "/schedule/weekly/current").json()["data"]
        self.assertEqual(data["week_time"], now - 10)
        self.assertEqual(data["state"], active)

    def test_get_state_defaults_to_non_weekly(self):
        unix_time = int(time.time())
        one_off = self._state(160, 0, False)
        self._post_schedule(unix_time, unix_time + 10, one_off)
        # A weekly entry also matches, but the one-off entry must win.
        self._post_weekly(self._week_time_now() - 10, self._state(100, 0, True))
        r = requests.get(self.url + "/state/" + str(unix_time + 3))
        self.assertEqual(r.json()["data"], one_off)

    def test_get_state_use_weekly(self):
        unix_time = int(time.time())
        weekly = self._state(100, 0, True)
        # With no one-off entry, the weekly schedule supplies the state.
        self._post_weekly(self._week_time_now() - 10, weekly)
        r = requests.get(self.url + "/state/" + str(unix_time + 3))
        self.assertEqual(r.json()["data"], weekly)

    # ---- fixtures ----------------------------------------------------

    def setUp(self):
        # Start each test from empty schedule collections.
        client = MongoClient()
        client.PITherm.state_changes_weekly.remove()
        client.PITherm.state_changes.remove()

    @classmethod
    def tearDownClass(cls):
        # BUG FIX: the original also killed this shared server process in a
        # per-test tearDown, so every test after the first ran against a dead
        # server.  The process is now shut down exactly once, here.
        cls.schedule.kill()
        cls.schedule.terminate()
|
import subprocess
import unittest
from datetime import datetime
import requests
import time
from pymongo import MongoClient
class TestSchedule(unittest.TestCase):
url = "http://localhost:5003"
schedule = subprocess.Popen(["python", ["Schedule.py"]])
def test_add_to_schedule(self):
requests.post(self.url + "/schedule/", json={
"start": 10,
"end": 10,
"state": {
"AC_target": 100,
"heater_target": 0,
"fan": False
}
})
r = requests.get(self.url + "/schedule/")
self.assertEquals(r.json()["data"][0]["start"], 10)
self.assertEquals(r.json()["data"][0]["end"], 10)
self.assertEquals(r.json()["data"][0]["state"],
{
"AC_target": 100,
"heater_target": 0,
"fan": False
})
self.assertEquals(len(r.json()["data"]), 1)
def test_add_to_schedule_weekly(self):
requests.post(self.url + "/schedule/weekly/", json={
"week_time": 10,
"state": {
"AC_target": 100,
"heater_target": 0,
"fan": False
}
})
r = requests.get(self.url + "/schedule/weekly/")
self.assertEquals(r.json()["data"][0]["week_time"], 10)
self.assertEquals(r.json()["data"][0]["state"],
{
"AC_target": 100,
"heater_target": 0,
"fan": False
})
self.assertEquals(len(r.json()["data"]), 1)
def test_current(self):
now = int(time.time())
requests.post(self.url + "/schedule/", json={
"start": now,
"end": now + 10,
"state": {
"AC_target": 100,
"heater_target": 0,
"fan": False
}
})
requests.post(self.url + "/schedule/", json={
"start": now - 10,
"end": now,
"state": {
"AC_target": 100,
"heater_target": 0,
"fan": True
}
})
requests.post(self.url + "/schedule/", json={
"start": now + 3,
"end": now + 100,
"state": {
"AC_target": 100,
"heater_target": 0,
"fan": True
}
})
r = requests.get(self.url + "/schedule/current")
self.assertEquals(r.json()["data"]["start"], now)
self.assertEquals(r.json()["data"]["end"], now + 10)
self.assertEquals(r.json()["data"]["state"],
{
"AC_target": 100,
"heater_target": 0,
"fan": False
})
def test_current_weekly(self):
now = datetime.utcnow().weekday() * 24 * 60 ** 2 + (datetime.utcnow().hour * 60 + datetime.utcnow().minute) * 60
requests.post(self.url + "/schedule/weekly/", json={
"week_time": now + 10,
"state": {
"AC_target": 0,
"heater_target": 0,
"fan": True
}
})
requests.post(self.url + "/schedule/weekly/", json={
"week_time": now - 10,
"state": {
"AC_target": 100,
"heater_target": 0,
"fan": False
}
})
requests.post(self.url + "/schedule/weekly/", json={
"week_time": now - 11,
"state": {
"AC_target": 120,
"heater_target": 0,
"fan": True
}
})
time.sleep(1)
r = requests.get(self.url + "/schedule/weekly/current")
self.assertEquals(r.json()["data"]["week_time"], now - 10)
self.assertEquals(r.json()["data"]["state"],
{
"AC_target": 100,
"heater_target": 0,
"fan": False
})
def test_get_state_defaults_to_non_weekly(self):
unix_time = int(time.time())
requests.post(self.url + "/schedule/", json={
"start": unix_time,
"end": unix_time + 10,
"state": {
"AC_target": 160,
"heater_target": 0,
"fan": False
}
})
now = datetime.utcnow().weekday() * 24 * 60 ** 2 + (datetime.utcnow().hour * 60 + datetime.utcnow().minute) * 60
requests.post(self.url + "/schedule/weekly/", json={
"week_time": now - 10,
"state": {
"AC_target": 100,
"heater_target": 0,
"fan": True
}
})
r = requests.get(self.url + "/state/" + str(unix_time + 3))
self.assertEquals(r.json()["data"],
{
"AC_target": 160,
"heater_target": 0,
"fan": False
})
def test_get_state_use_weekly(self):
unix_time = int(time.time())
now = datetime.utcnow().weekday() * 24 * 60 ** 2 + (datetime.utcnow().hour * 60 + datetime.utcnow().minute) * 60
requests.post(self.url + "/schedule/weekly/", json={
"week_time": now - 10,
"state": {
"AC_target": 100,
"heater_target": 0,
"fan": True
}
})
r = requests.get(self.url + "/state/" + str(unix_time + 3))
self.assertEquals(r.json()["data"],
{
"AC_target": 100,
"heater_target": 0,
"fan": True
})
@classmethod
def tearDownClass(cls):
cls.schedule.kill()
cls.schedule.terminate()
def setUp(self):
client = MongoClient()
client.PITherm.state_changes_weekly.remove()
client.PITherm.state_changes.remove()
def tearDown(self):
self.schedule.kill()
self.schedule.terminate()
|
none
| 1
| 2.580853
| 3
|
|
test/test_customer_api.py
|
MPW1412/kimai-python
| 6
|
6627743
|
# coding: utf-8
"""
Kimai 2 - API Docs
JSON API for the Kimai 2 time-tracking software. Read more about its usage in the [API documentation](https://www.kimai.org/documentation/rest-api.html) and then download a [Swagger file](doc.json) for import e.g. in Postman. Be aware: it is not yet considered stable and BC breaks might happen. # noqa: E501
OpenAPI spec version: 0.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import kimai_python
from kimai_python.api.customer_api import CustomerApi # noqa: E501
from kimai_python.rest import ApiException
class TestCustomerApi(unittest.TestCase):
    """CustomerApi unit test stubs"""
    # NOTE: generated by swagger-codegen; the test bodies are intentionally
    # empty stubs, to be filled in by hand against a running Kimai instance.

    def setUp(self):
        # A real CustomerApi client; requires configured API credentials.
        self.api = kimai_python.api.customer_api.CustomerApi()  # noqa: E501

    def tearDown(self):
        pass

    def test_api_customers_get(self):
        """Test case for api_customers_get

        Returns a collection of customers  # noqa: E501
        """
        pass

    def test_api_customers_id_get(self):
        """Test case for api_customers_id_get

        Returns one customer  # noqa: E501
        """
        pass

    def test_api_customers_id_meta_patch(self):
        """Test case for api_customers_id_meta_patch

        Sets the value of a meta-field for an existing customer  # noqa: E501
        """
        pass

    def test_api_customers_id_patch(self):
        """Test case for api_customers_id_patch

        Update an existing customer  # noqa: E501
        """
        pass

    def test_api_customers_id_rates_get(self):
        """Test case for api_customers_id_rates_get

        Returns a collection of all rates for one customer  # noqa: E501
        """
        pass

    def test_api_customers_id_rates_post(self):
        """Test case for api_customers_id_rates_post

        Adds a new rate to a customer  # noqa: E501
        """
        pass

    def test_api_customers_id_rates_rate_id_delete(self):
        """Test case for api_customers_id_rates_rate_id_delete

        Deletes one rate for an customer  # noqa: E501
        """
        pass

    def test_api_customers_post(self):
        """Test case for api_customers_post

        Creates a new customer  # noqa: E501
        """
        pass
# Allow running this test module directly: ``python test_customer_api.py``.
if __name__ == '__main__':
    unittest.main()
|
# coding: utf-8
"""
Kimai 2 - API Docs
JSON API for the Kimai 2 time-tracking software. Read more about its usage in the [API documentation](https://www.kimai.org/documentation/rest-api.html) and then download a [Swagger file](doc.json) for import e.g. in Postman. Be aware: it is not yet considered stable and BC breaks might happen. # noqa: E501
OpenAPI spec version: 0.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import kimai_python
from kimai_python.api.customer_api import CustomerApi # noqa: E501
from kimai_python.rest import ApiException
class TestCustomerApi(unittest.TestCase):
"""CustomerApi unit test stubs"""
def setUp(self):
self.api = kimai_python.api.customer_api.CustomerApi() # noqa: E501
def tearDown(self):
pass
def test_api_customers_get(self):
"""Test case for api_customers_get
Returns a collection of customers # noqa: E501
"""
pass
def test_api_customers_id_get(self):
"""Test case for api_customers_id_get
Returns one customer # noqa: E501
"""
pass
def test_api_customers_id_meta_patch(self):
"""Test case for api_customers_id_meta_patch
Sets the value of a meta-field for an existing customer # noqa: E501
"""
pass
def test_api_customers_id_patch(self):
"""Test case for api_customers_id_patch
Update an existing customer # noqa: E501
"""
pass
def test_api_customers_id_rates_get(self):
"""Test case for api_customers_id_rates_get
Returns a collection of all rates for one customer # noqa: E501
"""
pass
def test_api_customers_id_rates_post(self):
"""Test case for api_customers_id_rates_post
Adds a new rate to a customer # noqa: E501
"""
pass
def test_api_customers_id_rates_rate_id_delete(self):
"""Test case for api_customers_id_rates_rate_id_delete
Deletes one rate for an customer # noqa: E501
"""
pass
def test_api_customers_post(self):
"""Test case for api_customers_post
Creates a new customer # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
|
en
| 0.694738
|
# coding: utf-8 Kimai 2 - API Docs JSON API for the Kimai 2 time-tracking software. Read more about its usage in the [API documentation](https://www.kimai.org/documentation/rest-api.html) and then download a [Swagger file](doc.json) for import e.g. in Postman. Be aware: it is not yet considered stable and BC breaks might happen. # noqa: E501 OpenAPI spec version: 0.5 Generated by: https://github.com/swagger-api/swagger-codegen.git # noqa: E501 CustomerApi unit test stubs # noqa: E501 Test case for api_customers_get Returns a collection of customers # noqa: E501 Test case for api_customers_id_get Returns one customer # noqa: E501 Test case for api_customers_id_meta_patch Sets the value of a meta-field for an existing customer # noqa: E501 Test case for api_customers_id_patch Update an existing customer # noqa: E501 Test case for api_customers_id_rates_get Returns a collection of all rates for one customer # noqa: E501 Test case for api_customers_id_rates_post Adds a new rate to a customer # noqa: E501 Test case for api_customers_id_rates_rate_id_delete Deletes one rate for an customer # noqa: E501 Test case for api_customers_post Creates a new customer # noqa: E501
| 2.032381
| 2
|
tests/test_validators.py
|
papaemmelab/toil_example
| 0
|
6627744
|
<gh_stars>0
"""toil_example validators tests."""
from os.path import join
import pytest
from toil_example import exceptions
from toil_example import validators
def test_validate_patterns_are_files(tmpdir):
    """Create empty and non-empty files and exercise validate_patterns_are_files."""
    tmpdir_path = str(tmpdir)
    # Five zero-byte files matching "empty*"...
    for i in range(5):
        with open(join(tmpdir_path, "empty" + str(i)), "w"):
            pass
    # ...and four files with content matching "not_empty*".
    for i in range(11, 15):
        with open(join(tmpdir_path, "not_empty" + str(i)), "w") as f:
            f.write("I'm not empty.")
    empty = [join(tmpdir_path, "empty*")]
    not_empty = [join(tmpdir_path, "not_empty*")]
    none_existing = ["florentino", "ariza*"]
    none_file = [tmpdir_path]
    # check empty files exist
    assert validators.validate_patterns_are_files(empty, check_size=False)
    # check files exist and are not empty  (typo "amd" fixed)
    assert validators.validate_patterns_are_files(not_empty, check_size=True)
    # check that empty files raise error with default setting
    with pytest.raises(exceptions.ValidationError):
        validators.validate_patterns_are_files(empty)
    # check that empty files raise error with flag
    with pytest.raises(exceptions.ValidationError):
        validators.validate_patterns_are_files(empty, check_size=True)
    # check that pattern is not file
    with pytest.raises(exceptions.ValidationError):
        validators.validate_patterns_are_files(none_file, check_size=True)
    # NOTE: a copy-pasted duplicate of the empty+check_size check was removed.
    # check that invalid patterns raise validationerror error
    with pytest.raises(exceptions.ValidationError):
        validators.validate_patterns_are_files(none_existing, check_size=True)
def test_validate_patterns_are_dirs(tmpdir):
    """test_validate_patterns_are_dirs."""
    base = str(tmpdir)
    file_pattern = join(base, "a_file")
    # Touch an empty file so one pattern resolves to a non-directory.
    with open(file_pattern, "w"):
        pass
    # An existing directory validates successfully.
    assert validators.validate_patterns_are_dirs([base])
    # Patterns that match nothing are rejected.
    with pytest.raises(exceptions.ValidationError):
        validators.validate_patterns_are_dirs(["florentino", "ariza*"])
    # Patterns that match a file (not a directory) are rejected.
    with pytest.raises(exceptions.ValidationError):
        validators.validate_patterns_are_dirs([file_pattern])
|
"""toil_example validators tests."""
from os.path import join
import pytest
from toil_example import exceptions
from toil_example import validators
def test_validate_patterns_are_files(tmpdir):
"""Create multiple files and test test_validate_patterns_are_files."""
tmpdir_path = str(tmpdir)
for i in range(5):
with open(join(tmpdir_path, "empty" + str(i)), "w"):
pass
for i in range(11, 15):
with open(join(tmpdir_path, "not_empty" + str(i)), "w") as f:
f.write("I'm not empty.")
empty = [join(tmpdir_path, "empty*")]
not_empty = [join(tmpdir_path, "not_empty*")]
none_existing = ["florentino", "ariza*"]
none_file = [tmpdir_path]
# check empty files exist
assert validators.validate_patterns_are_files(empty, check_size=False)
# check files exist amd are not empty
assert validators.validate_patterns_are_files(not_empty, check_size=True)
# check that empty files raise error with default setting
with pytest.raises(exceptions.ValidationError):
validators.validate_patterns_are_files(empty)
# check that empty files raise error with flag
with pytest.raises(exceptions.ValidationError):
validators.validate_patterns_are_files(empty, check_size=True)
# check that pattern is not file
with pytest.raises(exceptions.ValidationError):
validators.validate_patterns_are_files(none_file, check_size=True)
# check that empty files raise error with flag
with pytest.raises(exceptions.ValidationError):
validators.validate_patterns_are_files(empty, check_size=True)
# check that invalid patterns raise validationerror error
with pytest.raises(exceptions.ValidationError):
validators.validate_patterns_are_files(none_existing, check_size=True)
def test_validate_patterns_are_dirs(tmpdir):
"""test_validate_patterns_are_dirs."""
tmpdir_path = str(tmpdir)
file_patterns = [join(tmpdir_path, "a_file")]
existing_patterns = [tmpdir_path]
none_existing_patterns = ["florentino", "ariza*"]
# touch the file
with open(file_patterns[0], "w"):
pass
# check empty files exist
assert validators.validate_patterns_are_dirs(existing_patterns)
# check that empty files raise error with default setting
with pytest.raises(exceptions.ValidationError):
validators.validate_patterns_are_dirs(none_existing_patterns)
# check that empty files raise error with flag
with pytest.raises(exceptions.ValidationError):
validators.validate_patterns_are_dirs(file_patterns)
|
en
| 0.526105
|
toil_example validators tests. Create multiple files and test test_validate_patterns_are_files. # check empty files exist # check files exist amd are not empty # check that empty files raise error with default setting # check that empty files raise error with flag # check that pattern is not file # check that empty files raise error with flag # check that invalid patterns raise validationerror error test_validate_patterns_are_dirs. # touch the file # check empty files exist # check that empty files raise error with default setting # check that empty files raise error with flag
| 2.72209
| 3
|
tests/test_brew_list.py
|
locriandev/art-bot
| 4
|
6627745
|
<reponame>locriandev/art-bot
import flexmock
import pytest
from unittest.mock import patch, MagicMock
from artbotlib import brew_list
@pytest.mark.parametrize(
    "params, expected",
    [
        [("4.5",), f"{brew_list.RHCOS_BASE_URL}/rhcos-4.5"],
        [("4.5", "s390x"), f"{brew_list.RHCOS_BASE_URL}/rhcos-4.5-s390x"],
    ],
)
def test_rhcos_release_url(params, expected):
    """The release-browser URL embeds the version and any non-default arch."""
    url = brew_list._rhcos_release_url(*params)
    assert url == expected
@pytest.mark.parametrize(
    "params, expected",
    [
        [("4.2", "spam"), f"{brew_list.RHCOS_BASE_URL}/rhcos-4.2/spam"],
        [("4.5", "eggs"), f"{brew_list.RHCOS_BASE_URL}/rhcos-4.5/eggs/x86_64"],
        [("4.5", "bacon", "s390x"), f"{brew_list.RHCOS_BASE_URL}/rhcos-4.5-s390x/bacon/s390x"],
    ],
)
def test_rhcos_build_url(params, expected):
    """Build URLs default the arch path segment to x86_64 for 4.3+ layouts."""
    url = brew_list._rhcos_build_url(*params)
    assert url == expected
@pytest.mark.parametrize(
    "param, expected",
    [
        ("3.11", ["rhaos-3.11-rhel-7-candidate"]),
        ("4.1", ["rhaos-4.1-rhel-7-candidate", "rhaos-4.1-rhel-8-candidate"]),
        ("spam", ["rhaos-spam-rhel-7-candidate", "rhaos-spam-rhel-8-candidate"]),
    ],
)
def test_tags_for_version(param, expected):
    """Only 3.11 is rhel-7-only; every other version gets rhel-7 and rhel-8 tags."""
    assert brew_list._tags_for_version(param) == expected
def _urlopen_cm(mock_urlopen, content, rc=200):
    """Configure *mock_urlopen* so calling it yields a context manager whose
    read()/getcode() return *content*/*rc* (mimics urllib.request.urlopen)."""
    response = MagicMock()
    response.getcode.return_value = rc
    response.read.return_value = content
    # `with urlopen(...) as resp:` must hand back the same mock response.
    response.__enter__.return_value = response
    mock_urlopen.return_value = response
@patch('urllib.request.urlopen')
@pytest.mark.parametrize(
    "content, expected",
    [
        [b'{ "builds": [] }', None],
        [b'{ "builds": ["spam", "eggs", "bacon"] }', "spam"],
        [b'{ "builds": [ {"id": "cleese"} ] }', "cleese"],
    ],
)
def test_find_latest_rhcos_build_id(mock_urlopen, content, expected):
    """The first entry of the builds list wins; dict entries contribute their id."""
    slack_output = MagicMock()
    _urlopen_cm(mock_urlopen, content)
    assert brew_list._find_latest_rhcos_build_id(slack_output, "dummy") == expected
@patch('urllib.request.urlopen')
@pytest.mark.parametrize("content, expected",
                         [
                             [b'{ }', set()],
                             [
                                 b'''{
                                 "rpmostree.rpmdb.pkglist" : [
                                     [
                                         "NetworkManager",
                                         "1",
                                         "1.20.0",
                                         "5.el8_1",
                                         "x86_64"
                                     ],
                                     [
                                         "NetworkManager-libnm",
                                         "1",
                                         "1.20.0",
                                         "5.el8_1",
                                         "x86_64"
                                     ]
                                 ]
                                 }''',
                                 set(["NetworkManager-1.20.0-5.el8_1", "NetworkManager-libnm-1.20.0-5.el8_1"]),
                             ],
                         ]
                         )
def test_find_latest_rhcos_build_rpms(mock_urlopen, content, expected):
    """rpm (name, epoch, version, release, arch) tuples from the commitmeta
    pkglist are folded into name-version-release strings; empty meta -> empty set."""
    so = MagicMock()
    # Short-circuit build-id discovery; only the meta fetch goes through urlopen.
    flexmock(brew_list, _find_latest_rhcos_build_id="dummy")
    _urlopen_cm(mock_urlopen, content)
    assert expected == brew_list._find_rhcos_build_rpms(so, "m_m")
@pytest.mark.parametrize("pkg_name, tag1_builds, tag2_builds, tag1_rpms, tag2_rpms, expected",
                         [
                             (
                                 "spam",
                                 [dict(build_id="id1")],
                                 [dict(build_id="id2")],
                                 [dict(name="spam"), dict(name="spam-devel")],
                                 [dict(name="python3-spam")],
                                 dict(spam=set(["spam", "spam-devel", "python3-spam"])),
                             ),
                             (
                                 "spam", [], [],
                                 [dict(name="spam"), dict(name="spam-devel")], [],
                                 dict(spam=set(["spam", "spam-devel"])),
                             ),
                         ]
                         )
def test_find_rpms_in_packages(pkg_name, tag1_builds, tag2_builds, tag1_rpms, tag2_rpms, expected):
    """Rpm names are gathered from the latest tagged builds across both tags;
    when no builds are tagged (second case), the getPackage/listBuilds
    fallback supplies a build to list rpms from."""
    koji_api = flexmock()
    koji_api.should_receive("getLatestBuilds").and_return(tag1_builds).and_return(tag2_builds)
    koji_api.should_receive("listBuildRPMs").and_return(tag1_rpms).and_return(tag2_rpms)
    # in the case where no builds are tagged, these will be hit to find a build
    koji_api.should_receive("getPackage").and_return(dict(id="dummy"))
    koji_api.should_receive("listBuilds").and_return([dict(build_id="dummy")])
    assert expected == brew_list._find_rpms_in_packages(koji_api, [pkg_name], "4.3")
@pytest.mark.parametrize(
    "rpm_nvrs, rpms_search, expected_rpms4img, expected_rpms",
    [
        (
            ["spam-1.0-1.el8", "bacon-eggs-2.3-4.el7"],  # rpms from rhcos build
            {"spam", "bacon"},                           # rpms we're looking for
            {"RHCOS": {"spam-1.0-1.el8"}},               # filtered first by second
            {"spam"},                                    # rpms we saw
        ),
    ],
)
def test_index_rpms_in_rhcos(rpm_nvrs, rpms_search, expected_rpms4img, expected_rpms):
    """Only searched-for rpms are indexed, under the synthetic RHCOS image key."""
    indexed = {}
    seen = set()
    brew_list._index_rpms_in_rhcos(rpm_nvrs, rpms_search, indexed, seen)
    assert indexed == expected_rpms4img
    assert seen == expected_rpms
@pytest.mark.parametrize("rpms_for_image_nvr, rpms_search, expected_rpms4img, expected_rpms",
                         [
                             (
                                 dict(  # images and the rpms that are in them
                                     image1=["SpAm-1.0-1.el8.noarch", "bacon-eggs-2.3-4.el7.noarch"],
                                     image2=["SpAm-2.0-1.el8.noarch", "eggs-3.4-5.el7.noarch"],
                                     image3=["john-2.0-1.el8.noarch", "cleese-3.4-5.el7.noarch"],
                                 ),
                                 set(["spam", "bacon", "eggs"]),  # rpms we're looking for, lowercase
                                 dict(  # filtered by search set, arch removed
                                     image1=set(["SpAm-1.0-1.el8"]),
                                     image2=set(["SpAm-2.0-1.el8", "eggs-3.4-5.el7"]),
                                 ),
                                 set(["spam", "eggs"]),  # rpms we saw
                             ),
                         ]
                         )
def test_index_rpms_in_images(rpms_for_image_nvr, rpms_search, expected_rpms4img, expected_rpms):
    """rpms in each image are matched case-insensitively and indexed with arch stripped."""
    rpms_for_image = {}
    rpms_seen = set()
    image_nvrs = rpms_for_image_nvr.keys()
    # one_by_one makes each successive call to brew_list_components yield the
    # rpm list for the next image nvr from the generator
    (
        flexmock(brew_list).should_receive("brew_list_components")
        .and_return(rpms_for_image_nvr[nvr] for nvr in image_nvrs).one_by_one()
    )
    brew_list._index_rpms_in_images(image_nvrs, rpms_search, rpms_for_image, rpms_seen)
    assert expected_rpms4img == rpms_for_image
    assert expected_rpms == rpms_seen
class MockSlackOutput:
    """Test double for the bot's Slack output; records everything it is told to say."""
    def __init__(self):
        # accumulated text for the main channel and the monitoring channel
        self.said = ""
        self.said_monitoring = ""
    def say(self, msg):
        """Record a message destined for the main channel."""
        self.said = self.said + msg
    def monitoring_say(self, msg):
        """Record a message destined for the monitoring channel."""
        self.said_monitoring = self.said_monitoring + msg
    def snippet(self, payload, intro, filename):
        """Record a snippet upload as its intro line followed by the payload."""
        self.said = self.said + "{}\n{}".format(intro, payload)
@pytest.fixture
def so():
    """Provide a fresh MockSlackOutput for each test so recorded text is isolated."""
    output = MockSlackOutput()
    return output
def test_list_uses_of_rpms_invalid_name(so):
    """A name made only of separators is rejected before any brew lookup."""
    bogus_name = ",,,,"
    brew_list.list_uses_of_rpms(so, bogus_name, "4", "0", search_type="RPM")
    assert "Invalid RPM name" in so.said
def test_list_uses_of_rpms_brew_failure(so):
    """A brew connection error is surfaced to the user and the monitoring channel."""
    error = Exception("bork")
    flexmock(brew_list.util).should_receive("koji_client_session").and_raise(error)
    brew_list.list_uses_of_rpms(so, "spam", "4", "0")
    assert "bork" in so.said_monitoring
    assert "Failed to connect to brew" in so.said
def test_list_uses_of_rpms_unknown_packages(so):
    """A package search that resolves to no packages is reported to the user."""
    no_rpms = {}
    flexmock(brew_list.util).should_receive("koji_client_session").and_return(object())
    flexmock(brew_list).should_receive("_find_rpms_in_packages").and_return(no_rpms)
    brew_list.list_uses_of_rpms(so, "spam", "4", "0", "package")
    assert "Could not find any package" in so.said
@pytest.mark.parametrize("names, rpms_for_package, rpms_for_image, rhcos_rpms, expect_to_say, expect_not_to_say",
                         [
                             (  # basic search by package
                                 "spam,eggs",  # names the user is searching for
                                 dict(spam=["spam-eggs", "spam-sausage"], eggs=["eggs"]),  # rpms built for pkgs (for a package search)
                                 dict(imgspam=["sausage-4.0-1.el8.noarch"]),  # images containing rpms
                                 ["sausage-4.0-1.el8.noarch"],  # rpms in rhcos
                                 ["nothing in 4.0 uses that"],  # should see this (none of the search rpms were present)
                                 ["sausage"],  # should not see
                             ),
                             (  # package search where some but not all are missing
                                 "spam,eggs,bacon",  # names the user is searching for
                                 dict(spam=["spam-eggs", "spam-sausage"], bacon=["bacon"]),  # rpms built for pkgs (for a package search)
                                 dict(imgspam=["spam-eggs-4.0-1.el8.noarch"]),  # images containing rpms
                                 ["sausage-4.0-1.el8.noarch"],  # rpms in rhcos
                                 [  # should see
                                     "Could not find package(s) ['eggs'] in brew",
                                     "package spam includes rpm(s): {'spam-eggs'}",
                                     "imgspam uses {'spam-eggs",
                                 ],
                                 ["spam-sausage"],  # should not see
                             ),
                             (  # basic search by rpm
                                 "spam,eggs",  # names the user is searching for
                                 None,  # not a pkg search, names are rpm names
                                 dict(imgspam=["spam-4.0-1.el8.noarch"]),  # images containing rpms
                                 ["eggs-4.0-1.el8", "baked-beans-4-1.el8"],  # rpms in rhcos
                                 ["imgspam uses {'spam-4.0-1.el8'}", "RHCOS uses {'eggs-4.0-1.el8'}"],  # should see these
                                 ["baked-beans"],  # should not see
                             ),
                         ]
                         )
def test_list_uses_of_pkgs(so, names, rpms_for_package, rpms_for_image, rhcos_rpms, expect_to_say, expect_not_to_say):
    """End-to-end list_uses_of_rpms with brew, image, and rhcos lookups all stubbed."""
    major, minor = "4", "0"
    search_type = "package" if rpms_for_package else "rpm"
    flexmock(brew_list.util).should_receive("koji_client_session").and_return(object())
    flexmock(brew_list).should_receive("_find_rpms_in_packages").and_return(rpms_for_package)
    flexmock(brew_list).should_receive("latest_images_for_version").and_return(rpms_for_image.keys())
    # each image nvr resolves to its canned component list
    flexmock(brew_list, brew_list_components=lambda nvr: rpms_for_image[nvr])
    flexmock(brew_list).should_receive("_find_rhcos_build_rpms").and_return(rhcos_rpms)
    brew_list.list_uses_of_rpms(so, names, major, minor, search_type)
    for phrase in expect_to_say:
        assert phrase in so.said
    for phrase in expect_not_to_say:
        assert phrase not in so.said
|
import flexmock
import pytest
from unittest.mock import patch, MagicMock
from artbotlib import brew_list
@pytest.mark.parametrize("params, expected",
                         [
                             [("4.5",), f"{brew_list.RHCOS_BASE_URL}/rhcos-4.5"],
                             [("4.5", "s390x"), f"{brew_list.RHCOS_BASE_URL}/rhcos-4.5-s390x"],
                         ]
                         )
def test_rhcos_release_url(params, expected):
    """Release URLs embed the version, with the arch suffixed when given."""
    actual = brew_list._rhcos_release_url(*params)
    assert actual == expected
@pytest.mark.parametrize("params, expected",
                         [
                             [("4.2", "spam"), f"{brew_list.RHCOS_BASE_URL}/rhcos-4.2/spam"],
                             [("4.5", "eggs"), f"{brew_list.RHCOS_BASE_URL}/rhcos-4.5/eggs/x86_64"],
                             [("4.5", "bacon", "s390x"), f"{brew_list.RHCOS_BASE_URL}/rhcos-4.5-s390x/bacon/s390x"],
                         ]
                         )
def test_rhcos_build_url(params, expected):
    """Build URLs append the build id, plus an arch path component from 4.3 on."""
    actual = brew_list._rhcos_build_url(*params)
    assert actual == expected
@pytest.mark.parametrize("param, expected",
                         [
                             ("3.11", ["rhaos-3.11-rhel-7-candidate"]),
                             ("4.1", ["rhaos-4.1-rhel-7-candidate", "rhaos-4.1-rhel-8-candidate"]),
                             ("spam", ["rhaos-spam-rhel-7-candidate", "rhaos-spam-rhel-8-candidate"]),
                         ]
                         )
def test_tags_for_version(param, expected):
    """3.11 is rhel-7-only; every other version gets both rhel-7 and rhel-8 tags."""
    actual = brew_list._tags_for_version(param)
    assert actual == expected
def _urlopen_cm(mock_urlopen, content, rc=200):
cm = MagicMock()
cm.getcode.return_value = rc
cm.read.return_value = content
cm.__enter__.return_value = cm
mock_urlopen.return_value = cm
@patch('urllib.request.urlopen')
@pytest.mark.parametrize("content, expected",
                         [
                             [b'{ "builds": [] }', None],
                             [b'{ "builds": ["spam", "eggs", "bacon"] }', "spam"],
                             [b'{ "builds": [ {"id": "cleese"} ] }', "cleese"],
                         ]
                         )
def test_find_latest_rhcos_build_id(mock_urlopen, content, expected):
    """The first entry of the builds list wins; dict entries yield their "id"."""
    # mock_urlopen is injected by @patch; content/expected come from parametrize
    so = MagicMock()
    _urlopen_cm(mock_urlopen, content)
    assert expected == brew_list._find_latest_rhcos_build_id(so, "dummy")
@patch('urllib.request.urlopen')
@pytest.mark.parametrize("content, expected",
                         [
                             [b'{ }', set()],
                             [
                                 b'''{
                                 "rpmostree.rpmdb.pkglist" : [
                                     [
                                         "NetworkManager",
                                         "1",
                                         "1.20.0",
                                         "5.el8_1",
                                         "x86_64"
                                     ],
                                     [
                                         "NetworkManager-libnm",
                                         "1",
                                         "1.20.0",
                                         "5.el8_1",
                                         "x86_64"
                                     ]
                                 ]
                             }''',
                                 set(["NetworkManager-1.20.0-5.el8_1", "NetworkManager-libnm-1.20.0-5.el8_1"]),
                             ],
                         ]
                         )
def test_find_latest_rhcos_build_rpms(mock_urlopen, content, expected):
    """NVR strings are assembled from the build's pkglist metadata (epoch field dropped)."""
    so = MagicMock()
    # stub the build-id lookup so the test never fetches real release metadata
    flexmock(brew_list, _find_latest_rhcos_build_id="dummy")
    _urlopen_cm(mock_urlopen, content)
    assert expected == brew_list._find_rhcos_build_rpms(so, "m_m")
@pytest.mark.parametrize("pkg_name, tag1_builds, tag2_builds, tag1_rpms, tag2_rpms, expected",
                         [
                             (
                                 "spam",
                                 [dict(build_id="id1")],
                                 [dict(build_id="id2")],
                                 [dict(name="spam"), dict(name="spam-devel")],
                                 [dict(name="python3-spam")],
                                 dict(spam=set(["spam", "spam-devel", "python3-spam"])),
                             ),
                             (
                                 "spam", [], [],
                                 [dict(name="spam"), dict(name="spam-devel")], [],
                                 dict(spam=set(["spam", "spam-devel"])),
                             ),
                         ]
                         )
def test_find_rpms_in_packages(pkg_name, tag1_builds, tag2_builds, tag1_rpms, tag2_rpms, expected):
    """rpm names from builds found under each candidate tag are merged per package."""
    koji_api = flexmock()
    # chained and_return: first call yields the tag1 value, second call the tag2 value
    koji_api.should_receive("getLatestBuilds").and_return(tag1_builds).and_return(tag2_builds)
    koji_api.should_receive("listBuildRPMs").and_return(tag1_rpms).and_return(tag2_rpms)
    # in the case where no builds are tagged, these will be hit to find a build
    koji_api.should_receive("getPackage").and_return(dict(id="dummy"))
    koji_api.should_receive("listBuilds").and_return([dict(build_id="dummy")])
    assert expected == brew_list._find_rpms_in_packages(koji_api, [pkg_name], "4.3")
@pytest.mark.parametrize("rpm_nvrs, rpms_search, expected_rpms4img, expected_rpms",
                         [
                             (
                                 ["spam-1.0-1.el8", "bacon-eggs-2.3-4.el7"],  # rpms from rhcos build
                                 set(["spam", "bacon"]),  # rpms we're looking for
                                 dict(RHCOS=set(["spam-1.0-1.el8"])),  # filtered first by second
                                 set(["spam"]),  # rpms we saw
                             ),
                         ]
                         )
def test_index_rpms_in_rhcos(rpm_nvrs, rpms_search, expected_rpms4img, expected_rpms):
    """Matching NVRs are indexed under the RHCOS pseudo-image and their names recorded."""
    indexed = {}
    seen = set()
    brew_list._index_rpms_in_rhcos(rpm_nvrs, rpms_search, indexed, seen)
    assert indexed == expected_rpms4img
    assert seen == expected_rpms
@pytest.mark.parametrize("rpms_for_image_nvr, rpms_search, expected_rpms4img, expected_rpms",
                         [
                             (
                                 dict(  # images and the rpms that are in them
                                     image1=["SpAm-1.0-1.el8.noarch", "bacon-eggs-2.3-4.el7.noarch"],
                                     image2=["SpAm-2.0-1.el8.noarch", "eggs-3.4-5.el7.noarch"],
                                     image3=["john-2.0-1.el8.noarch", "cleese-3.4-5.el7.noarch"],
                                 ),
                                 set(["spam", "bacon", "eggs"]),  # rpms we're looking for, lowercase
                                 dict(  # filtered by search set, arch removed
                                     image1=set(["SpAm-1.0-1.el8"]),
                                     image2=set(["SpAm-2.0-1.el8", "eggs-3.4-5.el7"]),
                                 ),
                                 set(["spam", "eggs"]),  # rpms we saw
                             ),
                         ]
                         )
def test_index_rpms_in_images(rpms_for_image_nvr, rpms_search, expected_rpms4img, expected_rpms):
    """rpms in each image are matched case-insensitively and indexed with arch stripped."""
    rpms_for_image = {}
    rpms_seen = set()
    image_nvrs = rpms_for_image_nvr.keys()
    # one_by_one makes each successive call to brew_list_components yield the
    # rpm list for the next image nvr from the generator
    (
        flexmock(brew_list).should_receive("brew_list_components")
        .and_return(rpms_for_image_nvr[nvr] for nvr in image_nvrs).one_by_one()
    )
    brew_list._index_rpms_in_images(image_nvrs, rpms_search, rpms_for_image, rpms_seen)
    assert expected_rpms4img == rpms_for_image
    assert expected_rpms == rpms_seen
class MockSlackOutput:
    """Test double for the bot's Slack output; records everything it is told to say."""
    def __init__(self):
        # accumulated text for the main channel and the monitoring channel
        self.said = ""
        self.said_monitoring = ""
    def say(self, msg):
        """Record a message destined for the main channel."""
        self.said = self.said + msg
    def monitoring_say(self, msg):
        """Record a message destined for the monitoring channel."""
        self.said_monitoring = self.said_monitoring + msg
    def snippet(self, payload, intro, filename):
        """Record a snippet upload as its intro line followed by the payload."""
        self.said = self.said + "{}\n{}".format(intro, payload)
@pytest.fixture
def so():
    """Provide a fresh MockSlackOutput for each test so recorded text is isolated."""
    output = MockSlackOutput()
    return output
def test_list_uses_of_rpms_invalid_name(so):
    """A name made only of separators is rejected before any brew lookup."""
    bogus_name = ",,,,"
    brew_list.list_uses_of_rpms(so, bogus_name, "4", "0", search_type="RPM")
    assert "Invalid RPM name" in so.said
def test_list_uses_of_rpms_brew_failure(so):
    """A brew connection error is surfaced to the user and the monitoring channel."""
    error = Exception("bork")
    flexmock(brew_list.util).should_receive("koji_client_session").and_raise(error)
    brew_list.list_uses_of_rpms(so, "spam", "4", "0")
    assert "bork" in so.said_monitoring
    assert "Failed to connect to brew" in so.said
def test_list_uses_of_rpms_unknown_packages(so):
    """A package search that resolves to no packages is reported to the user."""
    no_rpms = {}
    flexmock(brew_list.util).should_receive("koji_client_session").and_return(object())
    flexmock(brew_list).should_receive("_find_rpms_in_packages").and_return(no_rpms)
    brew_list.list_uses_of_rpms(so, "spam", "4", "0", "package")
    assert "Could not find any package" in so.said
@pytest.mark.parametrize("names, rpms_for_package, rpms_for_image, rhcos_rpms, expect_to_say, expect_not_to_say",
                         [
                             (  # basic search by package
                                 "spam,eggs",  # names the user is searching for
                                 dict(spam=["spam-eggs", "spam-sausage"], eggs=["eggs"]),  # rpms built for pkgs (for a package search)
                                 dict(imgspam=["sausage-4.0-1.el8.noarch"]),  # images containing rpms
                                 ["sausage-4.0-1.el8.noarch"],  # rpms in rhcos
                                 ["nothing in 4.0 uses that"],  # should see this (none of the search rpms were present)
                                 ["sausage"],  # should not see
                             ),
                             (  # package search where some but not all are missing
                                 "spam,eggs,bacon",  # names the user is searching for
                                 dict(spam=["spam-eggs", "spam-sausage"], bacon=["bacon"]),  # rpms built for pkgs (for a package search)
                                 dict(imgspam=["spam-eggs-4.0-1.el8.noarch"]),  # images containing rpms
                                 ["sausage-4.0-1.el8.noarch"],  # rpms in rhcos
                                 [  # should see
                                     "Could not find package(s) ['eggs'] in brew",
                                     "package spam includes rpm(s): {'spam-eggs'}",
                                     "imgspam uses {'spam-eggs",
                                 ],
                                 ["spam-sausage"],  # should not see
                             ),
                             (  # basic search by rpm
                                 "spam,eggs",  # names the user is searching for
                                 None,  # not a pkg search, names are rpm names
                                 dict(imgspam=["spam-4.0-1.el8.noarch"]),  # images containing rpms
                                 ["eggs-4.0-1.el8", "baked-beans-4-1.el8"],  # rpms in rhcos
                                 ["imgspam uses {'spam-4.0-1.el8'}", "RHCOS uses {'eggs-4.0-1.el8'}"],  # should see these
                                 ["baked-beans"],  # should not see
                             ),
                         ]
                         )
def test_list_uses_of_pkgs(so, names, rpms_for_package, rpms_for_image, rhcos_rpms, expect_to_say, expect_not_to_say):
    """End-to-end list_uses_of_rpms with brew, image, and rhcos lookups all stubbed."""
    major, minor = "4", "0"
    search_type = "package" if rpms_for_package else "rpm"
    flexmock(brew_list.util).should_receive("koji_client_session").and_return(object())
    flexmock(brew_list).should_receive("_find_rpms_in_packages").and_return(rpms_for_package)
    flexmock(brew_list).should_receive("latest_images_for_version").and_return(rpms_for_image.keys())
    # each image nvr resolves to its canned component list
    flexmock(brew_list, brew_list_components=lambda nvr: rpms_for_image[nvr])
    flexmock(brew_list).should_receive("_find_rhcos_build_rpms").and_return(rhcos_rpms)
    brew_list.list_uses_of_rpms(so, names, major, minor, search_type)
    for phrase in expect_to_say:
        assert phrase in so.said
    for phrase in expect_not_to_say:
        assert phrase not in so.said
|
en
| 0.929812
|
{ "rpmostree.rpmdb.pkglist" : [ [ "NetworkManager", "1", "1.20.0", "5.el8_1", "x86_64" ], [ "NetworkManager-libnm", "1", "1.20.0", "5.el8_1", "x86_64" ] ] } # in the case where no builds are tagged, these will be hit to find a build # rpms from rhcos build # rpms we're looking for # filtered first by second # rpms we saw # images and the rpms that are in them # rpms we're looking for, lowercase # filtered by search set, arch removed # rpms we saw # basic search by package # names the user is searching for # rpms built for pkgs (for a package search) # images containing rpms # rpms in rhcos # should see this (none of the search rpms were present) # should not see # package search where some but not all are missing # names the user is searching for # rpms built for pkgs (for a package search) # images containing rpms # rpms in rhcos # should see # should not see # basic search by rpm # names the user is searching for # not a pkg search, names are rpm names # images containing rpms # rpms in rhcos # should see these # should not see
| 2.14808
| 2
|
pymatgen/analysis/elasticity/tensors.py
|
mailhexu/pymatgen
| 0
|
6627746
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
from scipy.linalg import polar
import numpy as np
import itertools
import warnings
import collections
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.core.operations import SymmOp
from pymatgen.core.lattice import Lattice
"""
This module provides a base class for tensor-like objects and methods for
basic tensor manipulation. It also provides a class, SquareTensor,
that provides basic methods for creating and manipulating rank 2 tensors
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = ("<NAME>, <NAME>, <NAME>, "
"<NAME>, <NAME>, <NAME>")
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
__date__ = "March 22, 2012"
# Mapping from voigt index (0..5) to the corresponding symmetric tensor index pair
voigt_map = [(0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1)]
# Inverse mapping: full tensor index pair (i, j) -> voigt index
reverse_voigt_map = np.array([[0, 5, 4],
                              [5, 1, 3],
                              [4, 3, 2]])
class Tensor(np.ndarray):
    """
    Base class for doing useful general operations on Nth order tensors,
    without restrictions on the type (stress, elastic, strain, piezo, etc.)
    """
    def __new__(cls, input_array, vscale=None, check_rank=None):
        """
        Create a Tensor object. Note that the constructor uses __new__
        rather than __init__ according to the standard method of
        subclassing numpy ndarrays.
        Args:
            input_array: (array-like with shape 3^N): array-like representing
                a tensor quantity in standard (i. e. non-voigt) notation
            vscale: (N x M array-like): a matrix corresponding
                to the coefficients of the voigt-notation tensor
            check_rank: (int): optional rank to enforce; raises ValueError
                if the input's rank differs
        """
        obj = np.asarray(input_array).view(cls)
        obj.rank = len(obj.shape)
        if check_rank and check_rank != obj.rank:
            raise ValueError("{} input must be rank {}".format(
                obj.__class__.__name__, check_rank))
        # voigt shape: one 3-axis for an odd leftover index, one 6-axis per index pair
        vshape = tuple([3] * (obj.rank % 2) + [6] * (obj.rank // 2))
        obj._vscale = np.ones(vshape)
        if vscale is not None:
            obj._vscale = vscale
        if obj._vscale.shape != vshape:
            raise ValueError("Voigt scaling matrix must be the shape of the "
                             "voigt notation matrix or vector.")
        if not all([i == 3 for i in obj.shape]):
            raise ValueError("Pymatgen only supports 3-dimensional tensors, "
                             "and default tensor constructor uses standard "
                             "notation. To construct from voigt notation, use"
                             " {}.from_voigt".format(obj.__class__.__name__))
        return obj
    def __array_finalize__(self, obj):
        # Called on views/slices/copies; propagate tensor metadata when present.
        if obj is None:
            return
        self.rank = getattr(obj, 'rank', None)
        self._vscale = getattr(obj, '_vscale', None)
        self._vdict = getattr(obj, '_vdict', None)
    def __array_wrap__(self, obj):
        """
        Overrides __array_wrap__ methods in ndarray superclass to avoid errors
        associated with functions that return scalar values
        """
        if len(obj.shape) == 0:
            return obj[()]
        else:
            return np.ndarray.__array_wrap__(self, obj)
    def __hash__(self):
        """
        define a hash function, since numpy arrays
        have their own __eq__ method
        """
        # NOTE(review): ndarray.tostring() is deprecated in newer numpy in
        # favor of tobytes() -- confirm against the targeted numpy version
        return hash(self.tostring())
    def __repr__(self):
        return "{}({})".format(self.__class__.__name__,
                               self.__str__())
    def zeroed(self, tol=1e-3):
        """
        returns the matrix with all entries below a certain threshold
        (i.e. tol) set to zero
        """
        new_tensor = self.copy()
        new_tensor[abs(new_tensor) < tol] = 0
        return new_tensor
    def transform(self, symm_op):
        """
        Applies a transformation (via a symmetry operation) to a tensor.
        Args:
            symm_op (SymmOp): a symmetry operation to apply to the tensor
        """
        return self.__class__(symm_op.transform_tensor(self))
    def rotate(self, matrix, tol=1e-3):
        """
        Applies a rotation directly, and tests input matrix to ensure a valid
        rotation.
        Args:
            matrix (3x3 array-like): rotation matrix to be applied to tensor
            tol (float): tolerance for testing rotation matrix validity
        """
        matrix = SquareTensor(matrix)
        if not matrix.is_rotation(tol):
            raise ValueError("Rotation matrix is not valid.")
        sop = SymmOp.from_rotation_and_translation(matrix,
                                                   [0., 0., 0.])
        return self.transform(sop)
    @property
    def symmetrized(self):
        """
        Returns a generally symmetrized tensor, calculated by taking
        the sum of the tensor and its transpose with respect to all
        possible permutations of indices
        """
        perms = list(itertools.permutations(range(self.rank)))
        return sum([np.transpose(self, ind) for ind in perms]) / len(perms)
    @property
    def voigt_symmetrized(self):
        """
        Returns a "voigt"-symmetrized tensor, i. e. a voigt-notation
        tensor such that it is invariant wrt permutation of indices
        """
        if not (self.rank % 2 == 0 and self.rank > 2):
            raise ValueError("V-symmetrization requires rank even and > 2")
        v = self.voigt
        perms = list(itertools.permutations(range(len(v.shape))))
        new_v = sum([np.transpose(v, ind) for ind in perms]) / len(perms)
        return self.__class__.from_voigt(new_v)
    def is_symmetric(self, tol=1e-5):
        """
        Tests whether a tensor is symmetric or not based on the residual
        with its symmetric part, from self.symmetrized
        Args:
            tol (float): tolerance to test for symmetry
        """
        # NOTE(review): comparison is one-sided (residual is not abs()'d),
        # so large negative residuals never fail -- confirm whether
        # np.abs(self - self.symmetrized) was intended
        return (self - self.symmetrized < tol).all()
    def fit_to_structure(self, structure, symprec=0.1):
        """
        Returns a tensor that is invariant with respect to symmetry
        operations corresponding to a structure
        Args:
            structure (Structure): structure from which to generate
                symmetry operations
            symprec (float): symmetry tolerance for the Spacegroup Analyzer
                used to generate the symmetry operations
        """
        sga = SpacegroupAnalyzer(structure, symprec)
        symm_ops = sga.get_symmetry_operations(cartesian=True)
        return sum([self.transform(symm_op)
                    for symm_op in symm_ops]) / len(symm_ops)
    def is_fit_to_structure(self, structure, tol=1e-2):
        """
        Tests whether a tensor is invariant with respect to the
        symmetry operations of a particular structure by testing
        whether the residual of the symmetric portion is below a
        tolerance
        Args:
            structure (Structure): structure to be fit to
            tol (float): tolerance for symmetry testing
        """
        # NOTE(review): one-sided comparison as in is_symmetric -- confirm
        # whether the residual should be abs()'d
        return (self - self.fit_to_structure(structure) < tol).all()
    @property
    def voigt(self):
        """
        Returns the tensor in Voigt notation
        """
        v_matrix = np.zeros(self._vscale.shape, dtype=self.dtype)
        this_voigt_map = self.get_voigt_dict(self.rank)
        for ind in this_voigt_map:
            v_matrix[this_voigt_map[ind]] = self[ind]
        if not self.is_voigt_symmetric():
            warnings.warn("Tensor is not symmetric, information may "
                          "be lost in voigt conversion.")
        return v_matrix * self._vscale
    def is_voigt_symmetric(self, tol=1e-6):
        """
        Tests symmetry of tensor to that necessary for voigt-conversion
        by grouping indices into pairs and constructing a sequence of
        possible permutations to be used in a tensor transpose
        """
        # build index groups: an optional single leading index plus pairs
        transpose_pieces = [[[0 for i in range(self.rank % 2)]]]
        transpose_pieces += [[range(j, j + 2)] for j in
                             range(self.rank % 2, self.rank, 2)]
        # each pair may appear in either order
        for n in range(self.rank % 2, len(transpose_pieces)):
            if len(transpose_pieces[n][0]) == 2:
                transpose_pieces[n] += [transpose_pieces[n][0][::-1]]
        for trans_seq in itertools.product(*transpose_pieces):
            trans_seq = list(itertools.chain(*trans_seq))
            # NOTE(review): one-sided comparison (no abs of the residual)
            if (self - self.transpose(trans_seq) > tol).any():
                return False
        return True
    @staticmethod
    def get_voigt_dict(rank):
        """
        Returns a dictionary that maps indices in the tensor to those
        in a voigt representation based on input rank
        Args:
            rank (int): Tensor rank to generate the voigt map
        """
        vdict = {}
        for ind in itertools.product(*[range(3)] * rank):
            v_ind = ind[:rank % 2]
            for j in range(rank // 2):
                pos = rank % 2 + 2 * j
                v_ind += (reverse_voigt_map[ind[pos:pos + 2]],)
            vdict[ind] = v_ind
        return vdict
    @classmethod
    def from_voigt(cls, voigt_input):
        """
        Constructor based on the voigt notation vector or matrix.
        Args:
            voigt_input (array-like): voigt input for a given tensor
        """
        voigt_input = np.array(voigt_input)
        rank = sum(voigt_input.shape) // 3
        t = cls(np.zeros([3] * rank))
        if voigt_input.shape != t._vscale.shape:
            raise ValueError("Invalid shape for voigt matrix")
        # undo the voigt scaling before scattering back to full notation
        voigt_input = voigt_input / t._vscale
        this_voigt_map = t.get_voigt_dict(rank)
        for ind in this_voigt_map:
            t[ind] = voigt_input[this_voigt_map[ind]]
        return cls(t)
    def convert_to_ieee(self, structure):
        """
        Given a structure associated with a tensor, attempts a
        calculation of the tensor in IEEE format according to
        the 1987 IEEE standards.
        Args:
            structure (Structure): a structure associated with the
                tensor to be converted to the IEEE standard
        """
        def get_uvec(v):
            """ Gets a unit vector parallel to input vector"""
            l = np.linalg.norm(v)
            if l < 1e-8:
                return v
            return v / l
        # Check conventional setting:
        sga = SpacegroupAnalyzer(structure)
        dataset = sga.get_symmetry_dataset()
        trans_mat = dataset['transformation_matrix']
        conv_latt = Lattice(np.transpose(np.dot(np.transpose(
            structure.lattice.matrix), np.linalg.inv(trans_mat))))
        xtal_sys = sga.get_crystal_system()
        vecs = conv_latt.matrix
        lengths = np.array(conv_latt.abc)
        angles = np.array(conv_latt.angles)
        rotation = np.zeros((3, 3))
        # IEEE rules: a,b,c || x1,x2,x3
        if xtal_sys == "cubic":
            rotation = [vecs[i] / lengths[i] for i in range(3)]
        # IEEE rules: a=b in length; c,a || x3, x1
        elif xtal_sys == "tetragonal":
            rotation = np.array([vec / mag for (mag, vec) in
                                 sorted(zip(lengths, vecs),
                                        key=lambda x: x[0])])
            if abs(lengths[2] - lengths[1]) < abs(lengths[1] - lengths[0]):
                rotation[0], rotation[2] = rotation[2], rotation[0].copy()
            rotation[1] = get_uvec(np.cross(rotation[2], rotation[0]))
        # IEEE rules: c<a<b; c,a || x3,x1
        elif xtal_sys == "orthorhombic":
            rotation = [vec / mag for (mag, vec) in sorted(zip(lengths, vecs))]
            rotation = np.roll(rotation, 2, axis=0)
        # IEEE rules: c,a || x3,x1, c is threefold axis
        # Note this also includes rhombohedral crystal systems
        elif xtal_sys in ("trigonal", "hexagonal"):
            # find threefold axis:
            tf_index = np.argmin(abs(angles - 120.))
            non_tf_mask = np.logical_not(angles == angles[tf_index])
            rotation[2] = get_uvec(vecs[tf_index])
            rotation[0] = get_uvec(vecs[non_tf_mask][0])
            rotation[1] = get_uvec(np.cross(rotation[2], rotation[0]))
        # IEEE rules: b,c || x2,x3; alpha=beta=90, c<a
        elif xtal_sys == "monoclinic":
            # Find unique axis
            u_index = np.argmax(abs(angles - 90.))
            n_umask = np.logical_not(angles == angles[u_index])
            rotation[1] = get_uvec(vecs[u_index])
            # Shorter of remaining lattice vectors for c axis
            c = [vec / mag for (mag, vec) in
                 sorted(zip(lengths[n_umask], vecs[n_umask]))][0]
            rotation[2] = np.array(c)
            rotation[0] = np.cross(rotation[1], rotation[2])
        # IEEE rules: c || x3
        elif xtal_sys == "triclinic":
            rotation = [vec / mag for (mag, vec) in sorted(zip(lengths, vecs))]
            rotation = np.roll(rotation, 2, axis=0)
            rotation[1] = get_uvec(np.cross(rotation[2], rotation[1]))
            rotation[0] = np.cross(rotation[1], rotation[2])
        return self.rotate(rotation, tol=1e-2)
class TensorCollection(collections.Sequence):
    """
    A sequence of tensors that can be used for fitting data
    or for having a tensor expansion
    """
    def __init__(self, tensor_list, base_class=Tensor):
        # coerce every entry to base_class, leaving existing instances alone
        self.tensors = [t if isinstance(t, base_class) else base_class(t)
                        for t in tensor_list]
    def __len__(self):
        return len(self.tensors)
    def __getitem__(self, ind):
        return self.tensors[ind]
    def __iter__(self):
        return iter(self.tensors)
    def zeroed(self, tol=1e-3):
        """Collection with each member's sub-tol entries zeroed."""
        return self.__class__(t.zeroed(tol) for t in self)
    def transform(self, symm_op):
        """Apply the symmetry operation to each member."""
        return self.__class__(t.transform(symm_op) for t in self)
    def rotate(self, matrix, tol=1e-3):
        """Rotate each member by the given rotation matrix."""
        return self.__class__(t.rotate(matrix, tol) for t in self)
    @property
    def symmetrized(self):
        """Collection of the symmetrized members."""
        return self.__class__(t.symmetrized for t in self)
    def is_symmetric(self, tol=1e-5):
        """True if every member is symmetric within tol."""
        return all(t.is_symmetric(tol) for t in self)
    def fit_to_structure(self, structure, symprec=0.1):
        """Fit each member to the structure's symmetry operations."""
        return self.__class__(t.fit_to_structure(structure, symprec)
                              for t in self)
    @property
    def voigt(self):
        """List of the voigt representations of the members."""
        return [t.voigt for t in self]
    def is_voigt_symmetric(self, tol=1e-6):
        """True if every member is voigt-symmetric within tol."""
        return all(t.is_voigt_symmetric(tol) for t in self)
    @classmethod
    def from_voigt(cls, voigt_input_list, base_class=Tensor):
        """Build a collection from a list of voigt-notation inputs."""
        return cls([base_class.from_voigt(v) for v in voigt_input_list])
    def convert_to_ieee(self, structure):
        """Convert each member to the IEEE frame for the structure."""
        return self.__class__(t.convert_to_ieee(structure) for t in self)
class SquareTensor(Tensor):
    """
    Base class for doing useful general operations on second rank tensors
    (stress, strain etc.).
    """
    def __new__(cls, input_array, vscale=None):
        """
        Create a SquareTensor object. Note that the constructor uses __new__
        rather than __init__ according to the standard method of
        subclassing numpy ndarrays. Error is thrown when the class is
        initialized with non-square matrix.
        Args:
            input_array (3x3 array-like): the 3x3 array-like
                representing the content of the tensor
            vscale (6x1 array-like): 6x1 array-like scaling the
                voigt-notation vector with the tensor entries
        """
        obj = super(SquareTensor, cls).__new__(cls, input_array, vscale,
                                               check_rank=2)
        return obj.view(cls)
    @property
    def trans(self):
        """
        shorthand for transpose on SquareTensor
        """
        return SquareTensor(np.transpose(self))
    @property
    def inv(self):
        """
        shorthand for matrix inverse on SquareTensor
        Raises:
            ValueError: if the tensor is singular (determinant is zero)
        """
        if self.det == 0:
            raise ValueError("SquareTensor is non-invertible")
        return SquareTensor(np.linalg.inv(self))
    @property
    def det(self):
        """
        shorthand for the determinant of the SquareTensor
        """
        return np.linalg.det(self)
    def is_rotation(self, tol=1e-3, include_improper=True):
        """
        Test to see if tensor is a valid rotation matrix, performs a
        test to check whether the inverse is equal to the transpose
        and if the determinant is equal to one within the specified
        tolerance
        Args:
            tol (float): tolerance to both tests of whether the
                the determinant is one and the inverse is equal
                to the transpose
            include_improper (bool): whether to include improper
                rotations in the determination of validity
        """
        det = np.linalg.det(self)
        if include_improper:
            # improper rotations have determinant -1; accept them by
            # comparing |det| to 1 rather than det itself
            det = np.abs(det)
        # BUG FIX: det was previously abs()'d unconditionally before the
        # include_improper branch, so include_improper=False could never
        # reject improper (det = -1) rotations
        return (np.abs(self.inv - self.trans) < tol).all() \
            and (np.abs(det - 1.) < tol)
    def get_scaled(self, scale_factor):
        """
        Scales the tensor by a certain multiplicative scale factor
        Args:
            scale_factor (float): scalar multiplier to be applied to the
                SquareTensor object
        """
        return SquareTensor(self * scale_factor)
    @property
    def principal_invariants(self):
        """
        Returns a list of principal invariants for the tensor,
        which are the values of the coefficients of the characteristic
        polynomial for the matrix
        """
        # np.poly yields [1, -I1, I2, -I3]; drop the leading 1 and fix signs
        return np.poly(self)[1:] * np.array([-1, 1, -1])
    def polar_decomposition(self, side='right'):
        """
        calculates matrices for polar decomposition
        Args:
            side (str): 'right' for A = UP, 'left' for A = PU
                (passed through to scipy.linalg.polar)
        """
        return polar(self, side=side)
def symmetry_reduce(tensors, structure, tol=1e-8, **kwargs):
    """
    Function that converts a list of tensors corresponding to a structure
    and returns a dictionary consisting of unique tensor keys with symmop
    values corresponding to transformations that will result in derivative
    tensors from the original list
    Args:
        tensors (list of tensors): list of Tensor objects to test for
            symmetrically-equivalent duplicates
        structure (Structure): structure from which to get symmetry
        tol (float): tolerance for tensor equivalence
        kwargs: keyword arguments for the SpacegroupAnalyzer
    returns:
        dictionary consisting of unique tensors with symmetry operations
        corresponding to those which will reconstruct the remaining
        tensors as values
    """
    sga = SpacegroupAnalyzer(structure, **kwargs)
    symmops = sga.get_symmetry_operations(cartesian=True)
    unique_tdict = {}
    for candidate in tensors:
        # look for an already-unique tensor whose symmetric image matches
        for known, symmop in itertools.product(unique_tdict, symmops):
            if (np.abs(known.transform(symmop) - candidate) < tol).all():
                unique_tdict[known].append(symmop)
                break
        else:
            # no symmetric equivalent found: candidate is itself unique
            unique_tdict[candidate] = []
    return unique_tdict
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
from scipy.linalg import polar
import numpy as np
import itertools
import warnings
import collections
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.core.operations import SymmOp
from pymatgen.core.lattice import Lattice
"""
This module provides a base class for tensor-like objects and methods for
basic tensor manipulation. It also provides a class, SquareTensor,
that provides basic methods for creating and manipulating rank 2 tensors
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = ("<NAME>, <NAME>, <NAME>, "
"<NAME>, <NAME>, <NAME>")
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
__date__ = "March 22, 2012"
voigt_map = [(0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1)]
reverse_voigt_map = np.array([[0, 5, 4],
[5, 1, 3],
[4, 3, 2]])
class Tensor(np.ndarray):
    """
    Base class for doing useful general operations on Nth order tensors,
    without restrictions on the type (stress, elastic, strain, piezo, etc.)
    """

    def __new__(cls, input_array, vscale=None, check_rank=None):
        """
        Create a Tensor object.  Note that the constructor uses __new__
        rather than __init__ according to the standard method of
        subclassing numpy ndarrays.

        Args:
            input_array: (array-like with shape 3^N): array-like representing
                a tensor quantity in standard (i. e. non-voigt) notation
            vscale: (N x M array-like): a matrix corresponding
                to the coefficients of the voigt-notation tensor
            check_rank: (int): if supplied, raise ValueError unless the
                input has exactly this rank
        """
        obj = np.asarray(input_array).view(cls)
        obj.rank = len(obj.shape)
        if check_rank and check_rank != obj.rank:
            raise ValueError("{} input must be rank {}".format(
                obj.__class__.__name__, check_rank))
        # Voigt shape: one 3-axis for an odd leftover index, one 6-axis
        # per index pair.
        vshape = tuple([3] * (obj.rank % 2) + [6] * (obj.rank // 2))
        obj._vscale = np.ones(vshape)
        if vscale is not None:
            obj._vscale = vscale
        if obj._vscale.shape != vshape:
            raise ValueError("Voigt scaling matrix must be the shape of the "
                             "voigt notation matrix or vector.")
        if not all([i == 3 for i in obj.shape]):
            raise ValueError("Pymatgen only supports 3-dimensional tensors, "
                             "and default tensor constructor uses standard "
                             "notation. To construct from voigt notation, use"
                             " {}.from_voigt".format(obj.__class__.__name__))
        return obj

    def __array_finalize__(self, obj):
        # Propagate tensor metadata through views/slices created by numpy.
        if obj is None:
            return
        self.rank = getattr(obj, 'rank', None)
        self._vscale = getattr(obj, '_vscale', None)
        self._vdict = getattr(obj, '_vdict', None)

    def __array_wrap__(self, obj):
        """
        Overrides __array_wrap__ methods in ndarray superclass to avoid errors
        associated with functions that return scalar values
        """
        if len(obj.shape) == 0:
            return obj[()]
        else:
            return np.ndarray.__array_wrap__(self, obj)

    def __hash__(self):
        """
        define a hash function, since numpy arrays
        have their own __eq__ method
        """
        # tobytes() replaces the deprecated tostring() (removed in numpy 2.x);
        # the raw bytes returned are identical, so hashes are unchanged.
        return hash(self.tobytes())

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__,
                               self.__str__())

    def zeroed(self, tol=1e-3):
        """
        returns the matrix with all entries below a certain threshold
        (i.e. tol) set to zero
        """
        new_tensor = self.copy()
        new_tensor[abs(new_tensor) < tol] = 0
        return new_tensor

    def transform(self, symm_op):
        """
        Applies a transformation (via a symmetry operation) to a tensor.

        Args:
            symm_op (SymmOp): a symmetry operation to apply to the tensor
        """
        return self.__class__(symm_op.transform_tensor(self))

    def rotate(self, matrix, tol=1e-3):
        """
        Applies a rotation directly, and tests input matrix to ensure a valid
        rotation.

        Args:
            matrix (3x3 array-like): rotation matrix to be applied to tensor
            tol (float): tolerance for testing rotation matrix validity
        """
        matrix = SquareTensor(matrix)
        if not matrix.is_rotation(tol):
            raise ValueError("Rotation matrix is not valid.")
        sop = SymmOp.from_rotation_and_translation(matrix,
                                                   [0., 0., 0.])
        return self.transform(sop)

    @property
    def symmetrized(self):
        """
        Returns a generally symmetrized tensor, calculated by taking
        the sum of the tensor and its transpose with respect to all
        possible permutations of indices
        """
        perms = list(itertools.permutations(range(self.rank)))
        return sum([np.transpose(self, ind) for ind in perms]) / len(perms)

    @property
    def voigt_symmetrized(self):
        """
        Returns a "voigt"-symmetrized tensor, i. e. a voigt-notation
        tensor such that it is invariant wrt permutation of indices
        """
        if not (self.rank % 2 == 0 and self.rank > 2):
            raise ValueError("V-symmetrization requires rank even and > 2")
        v = self.voigt
        perms = list(itertools.permutations(range(len(v.shape))))
        new_v = sum([np.transpose(v, ind) for ind in perms]) / len(perms)
        return self.__class__.from_voigt(new_v)

    def is_symmetric(self, tol=1e-5):
        """
        Tests whether a tensor is symmetric or not based on the residual
        with its symmetric part, from self.symmetrized

        Args:
            tol (float): tolerance to test for symmetry
        """
        return (self - self.symmetrized < tol).all()

    def fit_to_structure(self, structure, symprec=0.1):
        """
        Returns a tensor that is invariant with respect to symmetry
        operations corresponding to a structure

        Args:
            structure (Structure): structure from which to generate
                symmetry operations
            symprec (float): symmetry tolerance for the Spacegroup Analyzer
                used to generate the symmetry operations
        """
        sga = SpacegroupAnalyzer(structure, symprec)
        symm_ops = sga.get_symmetry_operations(cartesian=True)
        return sum([self.transform(symm_op)
                    for symm_op in symm_ops]) / len(symm_ops)

    def is_fit_to_structure(self, structure, tol=1e-2):
        """
        Tests whether a tensor is invariant with respect to the
        symmetry operations of a particular structure by testing
        whether the residual of the symmetric portion is below a
        tolerance

        Args:
            structure (Structure): structure to be fit to
            tol (float): tolerance for symmetry testing
        """
        return (self - self.fit_to_structure(structure) < tol).all()

    @property
    def voigt(self):
        """
        Returns the tensor in Voigt notation
        """
        v_matrix = np.zeros(self._vscale.shape, dtype=self.dtype)
        this_voigt_map = self.get_voigt_dict(self.rank)
        for ind in this_voigt_map:
            v_matrix[this_voigt_map[ind]] = self[ind]
        if not self.is_voigt_symmetric():
            warnings.warn("Tensor is not symmetric, information may "
                          "be lost in voigt conversion.")
        return v_matrix * self._vscale

    def is_voigt_symmetric(self, tol=1e-6):
        """
        Tests symmetry of tensor to that necessary for voigt-conversion
        by grouping indices into pairs and constructing a sequence of
        possible permutations to be used in a tensor transpose
        """
        transpose_pieces = [[[0 for i in range(self.rank % 2)]]]
        transpose_pieces += [[range(j, j + 2)] for j in
                             range(self.rank % 2, self.rank, 2)]
        for n in range(self.rank % 2, len(transpose_pieces)):
            if len(transpose_pieces[n][0]) == 2:
                transpose_pieces[n] += [transpose_pieces[n][0][::-1]]
        for trans_seq in itertools.product(*transpose_pieces):
            trans_seq = list(itertools.chain(*trans_seq))
            if (self - self.transpose(trans_seq) > tol).any():
                return False
        return True

    @staticmethod
    def get_voigt_dict(rank):
        """
        Returns a dictionary that maps indices in the tensor to those
        in a voigt representation based on input rank

        Args:
            rank (int): Tensor rank to generate the voigt map
        """
        vdict = {}
        for ind in itertools.product(*[range(3)] * rank):
            v_ind = ind[:rank % 2]
            for j in range(rank // 2):
                pos = rank % 2 + 2 * j
                v_ind += (reverse_voigt_map[ind[pos:pos + 2]],)
            vdict[ind] = v_ind
        return vdict

    @classmethod
    def from_voigt(cls, voigt_input):
        """
        Constructor based on the voigt notation vector or matrix.

        Args:
            voigt_input (array-like): voigt input for a given tensor
        """
        voigt_input = np.array(voigt_input)
        rank = sum(voigt_input.shape) // 3
        t = cls(np.zeros([3] * rank))
        if voigt_input.shape != t._vscale.shape:
            raise ValueError("Invalid shape for voigt matrix")
        voigt_input = voigt_input / t._vscale
        this_voigt_map = t.get_voigt_dict(rank)
        for ind in this_voigt_map:
            t[ind] = voigt_input[this_voigt_map[ind]]
        return cls(t)

    def convert_to_ieee(self, structure):
        """
        Given a structure associated with a tensor, attempts a
        calculation of the tensor in IEEE format according to
        the 1987 IEEE standards.

        Args:
            structure (Structure): a structure associated with the
                tensor to be converted to the IEEE standard
        """
        def get_uvec(v):
            """ Gets a unit vector parallel to input vector"""
            # Renamed local from "l" (shadows/reads like the digit 1).
            vnorm = np.linalg.norm(v)
            if vnorm < 1e-8:
                return v
            return v / vnorm
        # Check conventional setting:
        sga = SpacegroupAnalyzer(structure)
        dataset = sga.get_symmetry_dataset()
        trans_mat = dataset['transformation_matrix']
        conv_latt = Lattice(np.transpose(np.dot(np.transpose(
            structure.lattice.matrix), np.linalg.inv(trans_mat))))
        xtal_sys = sga.get_crystal_system()
        vecs = conv_latt.matrix
        lengths = np.array(conv_latt.abc)
        angles = np.array(conv_latt.angles)
        rotation = np.zeros((3, 3))
        # IEEE rules: a,b,c || x1,x2,x3
        if xtal_sys == "cubic":
            rotation = [vecs[i] / lengths[i] for i in range(3)]
        # IEEE rules: a=b in length; c,a || x3, x1
        elif xtal_sys == "tetragonal":
            rotation = np.array([vec / mag for (mag, vec) in
                                 sorted(zip(lengths, vecs),
                                        key=lambda x: x[0])])
            if abs(lengths[2] - lengths[1]) < abs(lengths[1] - lengths[0]):
                rotation[0], rotation[2] = rotation[2], rotation[0].copy()
            rotation[1] = get_uvec(np.cross(rotation[2], rotation[0]))
        # IEEE rules: c<a<b; c,a || x3,x1
        elif xtal_sys == "orthorhombic":
            rotation = [vec / mag for (mag, vec) in sorted(zip(lengths, vecs))]
            rotation = np.roll(rotation, 2, axis=0)
        # IEEE rules: c,a || x3,x1, c is threefold axis
        # Note this also includes rhombohedral crystal systems
        elif xtal_sys in ("trigonal", "hexagonal"):
            # find threefold axis:
            tf_index = np.argmin(abs(angles - 120.))
            non_tf_mask = np.logical_not(angles == angles[tf_index])
            rotation[2] = get_uvec(vecs[tf_index])
            rotation[0] = get_uvec(vecs[non_tf_mask][0])
            rotation[1] = get_uvec(np.cross(rotation[2], rotation[0]))
        # IEEE rules: b,c || x2,x3; alpha=beta=90, c<a
        elif xtal_sys == "monoclinic":
            # Find unique axis
            u_index = np.argmax(abs(angles - 90.))
            n_umask = np.logical_not(angles == angles[u_index])
            rotation[1] = get_uvec(vecs[u_index])
            # Shorter of remaining lattice vectors for c axis
            c = [vec / mag for (mag, vec) in
                 sorted(zip(lengths[n_umask], vecs[n_umask]))][0]
            rotation[2] = np.array(c)
            rotation[0] = np.cross(rotation[1], rotation[2])
        # IEEE rules: c || x3
        elif xtal_sys == "triclinic":
            rotation = [vec / mag for (mag, vec) in sorted(zip(lengths, vecs))]
            rotation = np.roll(rotation, 2, axis=0)
            rotation[1] = get_uvec(np.cross(rotation[2], rotation[1]))
            rotation[0] = np.cross(rotation[1], rotation[2])
        return self.rotate(rotation, tol=1e-2)
class TensorCollection(collections.abc.Sequence):
    """
    A sequence of tensors that can be used for fitting data
    or for having a tensor expansion.

    Most methods broadcast the corresponding Tensor method over every
    member and return a new collection.

    Note: subclasses collections.abc.Sequence; the old collections.Sequence
    alias was deprecated since Python 3.3 and removed in Python 3.10.
    """

    def __init__(self, tensor_list, base_class=Tensor):
        """
        Args:
            tensor_list (list): tensor-like objects; entries that are not
                already instances of base_class are coerced into it
            base_class: class used to wrap raw entries (default: Tensor)
        """
        self.tensors = [base_class(t) if not isinstance(t, base_class)
                        else t for t in tensor_list]

    def __len__(self):
        return len(self.tensors)

    def __getitem__(self, ind):
        return self.tensors[ind]

    def __iter__(self):
        return self.tensors.__iter__()

    def zeroed(self, tol=1e-3):
        """Collection with entries below tol zeroed in each member."""
        return self.__class__([t.zeroed(tol) for t in self])

    def transform(self, symm_op):
        """Applies a symmetry operation to every member tensor."""
        return self.__class__([t.transform(symm_op) for t in self])

    def rotate(self, matrix, tol=1e-3):
        """Rotates every member tensor by the given rotation matrix."""
        return self.__class__([t.rotate(matrix, tol) for t in self])

    @property
    def symmetrized(self):
        """Collection of index-permutation-symmetrized members."""
        return self.__class__([t.symmetrized for t in self])

    def is_symmetric(self, tol=1e-5):
        """True only if every member tensor is symmetric within tol."""
        return all([t.is_symmetric(tol) for t in self])

    def fit_to_structure(self, structure, symprec=0.1):
        """Fits every member to the structure's symmetry operations."""
        return self.__class__([t.fit_to_structure(structure, symprec)
                               for t in self])

    @property
    def voigt(self):
        """List of members converted to voigt notation."""
        return [t.voigt for t in self]

    def is_voigt_symmetric(self, tol=1e-6):
        """True only if every member is voigt-symmetric within tol."""
        return all([t.is_voigt_symmetric(tol) for t in self])

    @classmethod
    def from_voigt(cls, voigt_input_list, base_class=Tensor):
        """Builds a collection from a list of voigt-notation arrays."""
        return cls([base_class.from_voigt(v) for v in voigt_input_list])

    def convert_to_ieee(self, structure):
        """Converts every member to the IEEE frame for the structure."""
        return self.__class__([t.convert_to_ieee(structure) for t in self])
class SquareTensor(Tensor):
    """
    Base class for doing useful general operations on second rank tensors
    (stress, strain etc.).
    """

    def __new__(cls, input_array, vscale=None):
        """
        Create a SquareTensor object.  Note that the constructor uses __new__
        rather than __init__ according to the standard method of
        subclassing numpy ndarrays.  Error is thrown when the class is
        initialized with non-square matrix.

        Args:
            input_array (3x3 array-like): the 3x3 array-like
                representing the content of the tensor
            vscale (6x1 array-like): 6x1 array-like scaling the
                voigt-notation vector with the tensor entries
        """
        obj = super(SquareTensor, cls).__new__(cls, input_array, vscale,
                                               check_rank=2)
        return obj.view(cls)

    @property
    def trans(self):
        """
        shorthand for transpose on SquareTensor
        """
        return SquareTensor(np.transpose(self))

    @property
    def inv(self):
        """
        shorthand for matrix inverse on SquareTensor

        Raises:
            ValueError: if the tensor is singular
        """
        if self.det == 0:
            raise ValueError("SquareTensor is non-invertible")
        return SquareTensor(np.linalg.inv(self))

    @property
    def det(self):
        """
        shorthand for the determinant of the SquareTensor
        """
        return np.linalg.det(self)

    def is_rotation(self, tol=1e-3, include_improper=True):
        """
        Test to see if tensor is a valid rotation matrix, performs a
        test to check whether the inverse is equal to the transpose
        and if the determinant is equal to one within the specified
        tolerance

        Args:
            tol (float): tolerance to both tests of whether the
                the determinant is one and the inverse is equal
                to the transpose
            include_improper (bool): whether to include improper
                rotations in the determination of validity
        """
        # Bug fix: the determinant must only be passed through abs() when
        # improper rotations (det == -1) are allowed.  Previously abs() was
        # applied unconditionally, so improper rotations passed even with
        # include_improper=False.
        det = np.linalg.det(self)
        if include_improper:
            det = np.abs(det)
        return (np.abs(self.inv - self.trans) < tol).all() \
            and (np.abs(det - 1.) < tol)

    def get_scaled(self, scale_factor):
        """
        Scales the tensor by a certain multiplicative scale factor

        Args:
            scale_factor (float): scalar multiplier to be applied to the
                SquareTensor object
        """
        return SquareTensor(self * scale_factor)

    @property
    def principal_invariants(self):
        """
        Returns a list of principal invariants for the tensor,
        which are the values of the coefficients of the characteristic
        polynomial for the matrix
        """
        # np.poly gives characteristic polynomial coefficients; alternating
        # signs convert them to the invariants I1, I2, I3.
        return np.poly(self)[1:] * np.array([-1, 1, -1])

    def polar_decomposition(self, side='right'):
        """
        calculates matrices for polar decomposition

        Args:
            side (str): which decomposition to perform -- presumably
                forwarded to scipy.linalg.polar ('right' or 'left');
                confirm the `polar` import at the top of the file.
        """
        return polar(self, side=side)
def symmetry_reduce(tensors, structure, tol=1e-8, **kwargs):
    """
    Function that converts a list of tensors corresponding to a structure
    and returns a dictionary consisting of unique tensor keys with symmop
    values corresponding to transformations that will result in derivative
    tensors from the original list

    Args:
        tensors (list of tensors): list of Tensor objects to test for
            symmetrically-equivalent duplicates
        structure (Structure): structure from which to get symmetry
        tol (float): tolerance for tensor equivalence
        kwargs: keyword arguments for the SpacegroupAnalyzer

    returns:
        dictionary consisting of unique tensors with symmetry operations
        corresponding to those which will reconstruct the remaining
        tensors as values
    """
    analyzer = SpacegroupAnalyzer(structure, **kwargs)
    operations = analyzer.get_symmetry_operations(cartesian=True)
    reduced = {}
    for candidate in tensors:
        duplicate_of = None
        # Compare against every unique tensor under every symmetry op;
        # the first match marks this candidate as a derivative.
        for existing, op in itertools.product(reduced, operations):
            if (np.abs(existing.transform(op) - candidate) < tol).all():
                duplicate_of, generating_op = existing, op
                break
        if duplicate_of is None:
            reduced[candidate] = []
        else:
            reduced[duplicate_of].append(generating_op)
    return reduced
|
en
| 0.79582
|
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. This module provides a base class for tensor-like objects and methods for basic tensor manipulation. It also provides a class, SquareTensor, that provides basic methods for creating and manipulating rank 2 tensors Base class for doing useful general operations on Nth order tensors, without restrictions on the type (stress, elastic, strain, piezo, etc.) Create a Tensor object. Note that the constructor uses __new__ rather than __init__ according to the standard method of subclassing numpy ndarrays. Args: input_array: (array-like with shape 3^N): array-like representing a tensor quantity in standard (i. e. non-voigt) notation vscale: (N x M array-like): a matrix corresponding to the coefficients of the voigt-notation tensor Overrides __array_wrap__ methods in ndarray superclass to avoid errors associated with functions that return scalar values define a hash function, since numpy arrays have their own __eq__ method returns the matrix with all entries below a certain threshold (i.e. tol) set to zero Applies a transformation (via a symmetry operation) to a tensor. Args: symm_op (SymmOp): a symmetry operation to apply to the tensor Applies a rotation directly, and tests input matrix to ensure a valid rotation. Args: matrix (3x3 array-like): rotation matrix to be applied to tensor tol (float): tolerance for testing rotation matrix validity Returns a generally symmetrized tensor, calculated by taking the sum of the tensor and its transpose with respect to all possible permutations of indices Returns a "voigt"-symmetrized tensor, i. e. 
a voigt-notation tensor such that it is invariant wrt permutation of indices Tests whether a tensor is symmetric or not based on the residual with its symmetric part, from self.symmetrized Args: tol (float): tolerance to test for symmetry Returns a tensor that is invariant with respect to symmetry operations corresponding to a structure Args: structure (Structure): structure from which to generate symmetry operations symprec (float): symmetry tolerance for the Spacegroup Analyzer used to generate the symmetry operations Tests whether a tensor is invariant with respect to the symmetry operations of a particular structure by testing whether the residual of the symmetric portion is below a tolerance Args: structure (Structure): structure to be fit to tol (float): tolerance for symmetry testing Returns the tensor in Voigt notation Tests symmetry of tensor to that necessary for voigt-conversion by grouping indices into pairs and constructing a sequence of possible permutations to be used in a tensor transpose Returns a dictionary that maps indices in the tensor to those in a voigt representation based on input rank Args: rank (int): Tensor rank to generate the voigt map Constructor based on the voigt notation vector or matrix. Args: voigt_input (array-like): voigt input for a given tensor Given a structure associated with a tensor, attempts a calculation of the tensor in IEEE format according to the 1987 IEEE standards. 
Args: structure (Structure): a structure associated with the tensor to be converted to the IEEE standard Gets a unit vector parallel to input vector # Check conventional setting: # IEEE rules: a,b,c || x1,x2,x3 # IEEE rules: a=b in length; c,a || x3, x1 # IEEE rules: c<a<b; c,a || x3,x1 # IEEE rules: c,a || x3,x1, c is threefold axis # Note this also includes rhombohedral crystal systems # find threefold axis: # IEEE rules: b,c || x2,x3; alpha=beta=90, c<a # Find unique axis # Shorter of remaining lattice vectors for c axis # IEEE rules: c || x3 A sequence of tensors that can be used for fitting data or for having a tensor expansion Base class for doing useful general operations on second rank tensors (stress, strain etc.). Create a SquareTensor object. Note that the constructor uses __new__ rather than __init__ according to the standard method of subclassing numpy ndarrays. Error is thrown when the class is initialized with non-square matrix. Args: input_array (3x3 array-like): the 3x3 array-like representing the content of the tensor vscale (6x1 array-like): 6x1 array-like scaling the voigt-notation vector with the tensor entries shorthand for transpose on SquareTensor shorthand for matrix inverse on SquareTensor shorthand for the determinant of the SquareTensor Test to see if tensor is a valid rotation matrix, performs a test to check whether the inverse is equal to the transpose and if the determinant is equal to one within the specified tolerance Args: tol (float): tolerance to both tests of whether the the determinant is one and the inverse is equal to the transpose include_improper (bool): whether to include improper rotations in the determination of validity Scales the tensor by a certain multiplicative scale factor Args: scale_factor (float): scalar multiplier to be applied to the SquareTensor object Returns a list of principal invariants for the tensor, which are the values of the coefficients of the characteristic polynomial for the matrix calculates 
matrices for polar decomposition Function that converts a list of tensors corresponding to a structure and returns a dictionary consisting of unique tensor keys with symmop values corresponding to transformations that will result in derivative tensors from the original list Args: tensors (list of tensors): list of Tensor objects to test for symmetrically-equivalent duplicates structure (Structure): structure from which to get symmetry tol (float): tolerance for tensor equivalence kwargs: keyword arguments for the SpacegroupAnalyzer returns: dictionary consisting of unique tensors with symmetry operations corresponding to those which will reconstruct the remaining tensors as values
| 2.315659
| 2
|
identity/views.py
|
Natan7/vault
| 1
|
6627747
|
# -*- coding: utf-8 -*-
import logging
from django.conf import settings
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import redirect
from django.core.urlresolvers import reverse_lazy
from django.utils.decorators import method_decorator
from django.views.generic.base import View, TemplateView
from django.views.generic.edit import FormView
from django.utils.translation import ugettext as _
from django.views.decorators.debug import sensitive_post_parameters
from keystoneclient.openstack.common.apiclient import exceptions
from swiftbrowser.utils import delete_swift_account
from actionlogger import ActionLogger
from identity.keystone import Keystone
from identity.forms import (UserForm, CreateUserForm, UpdateUserForm,
ProjectForm, DeleteProjectConfirm)
from vault import utils
from vault.models import GroupProjects, AreaProjects
from vault.views import SuperUserMixin, JSONResponseMixin, LoginRequiredMixin
# Module-level logger for this views module.
log = logging.getLogger(__name__)
# Shared audit-trail helper used by the views below.
actionlog = ActionLogger()
class ListUserView(SuperUserMixin, TemplateView):
    """Superuser page listing all keystone users, sorted and paginated."""
    template_name = "identity/users.html"

    def get_context_data(self, **kwargs):
        context = super(ListUserView, self).get_context_data(**kwargs)
        current_page = self.request.GET.get('page', 1)
        user_list = []
        try:
            user_list = self.keystone.user_list()
        except Exception as e:
            log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), e))
            messages.add_message(self.request, messages.ERROR,
                                 _('Unable to list users'))
        by_name = sorted(user_list, key=lambda u: u.name.lower())
        context['users'] = utils.generic_pagination(by_name, current_page)
        return context
class BaseUserView(SuperUserMixin, FormView):
    """Shared plumbing for the user create/update/delete views."""
    form_class = UserForm
    success_url = reverse_lazy('admin_list_users')

    def _fill_project_choices(self, form):
        # Offer only enabled projects, with a blank placeholder first.
        if self.keystone and 'project' in form.fields:
            choices = [('', '---')]
            choices += [(p.id, p.name)
                        for p in self.keystone.project_list()
                        if getattr(p, 'enabled', None)]
            form.fields['project'].choices = choices

    def _fill_role_choices(self, form):
        if self.keystone and 'role' in form.fields:
            choices = [('', '---')]
            choices += [(r.id, r.name) for r in self.keystone.role_list()]
            form.fields['role'].choices = choices

    @method_decorator(sensitive_post_parameters('password', 'password_confirm'))
    def dispatch(self, *args, **kwargs):
        # Keeps password fields out of Django error reports.
        return super(BaseUserView, self).dispatch(*args, **kwargs)

    def get(self, request, *args, **kwargs):
        form = self.get_form(self.form_class)
        self._fill_project_choices(form)
        self._fill_role_choices(form)
        context = self.get_context_data(form=form, **kwargs)
        return self.render_to_response(context)

    def get_context_data(self, **kwargs):
        context = super(BaseUserView, self).get_context_data(**kwargs)
        form = kwargs.get('form')
        # Prefer the URL kwarg; fall back to the id posted with the form.
        user_id = kwargs.get('user_id') or form.data.get('id')
        if user_id:
            # Editing an existing user: prefill the form from keystone.
            user = self.keystone.user_get(user_id)
            form.initial = user.to_dict()
            form.fields['project'].initial = user.project_id
        context['user_id'] = user_id
        return context
class CreateUserView(BaseUserView):
    """Creates a keystone user from the admin form."""
    form_class = CreateUserForm
    template_name = "identity/user_create.html"

    def post(self, request, *args, **kwargs):
        form = self.get_form(self.form_class)
        self._fill_project_choices(form)
        self._fill_role_choices(form)
        if not form.is_valid():
            return self.form_invalid(form)
        post = request.POST
        # Checkbox arrives as a string; only 'False'/'0' mean disabled.
        enabled = post.get('enabled') not in ('False', '0')
        try:
            user = self.keystone.user_create(name=post.get('name'),
                email=post.get('email'), password=post.get('password'),
                project_id=post.get('project'), enabled=enabled,
                domain=post.get('domain'), role_id=post.get('role'))
            messages.add_message(request, messages.SUCCESS,
                                 _('Successfully created user'))
            actionlog.log(request.user.username, 'create', user)
        except Exception as e:
            log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), e))
            messages.add_message(request, messages.ERROR,
                                 _('Error when create user'))
        return self.form_valid(form)
class UpdateUserView(BaseUserView):
    """Updates an existing keystone user from the admin form."""
    form_class = UpdateUserForm
    template_name = "identity/user_edit.html"

    def post(self, request, *args, **kwargs):
        form = self.get_form(self.form_class)
        self._fill_project_choices(form)
        if not form.is_valid():
            return self.form_invalid(form)
        post = request.POST
        enabled = post.get('enabled') not in ('False', '0')
        try:
            user = self.keystone.user_get(post.get('id'))
            # The primary project cannot be modified; re-send the current one.
            project = self.keystone.project_get(user.project_id)
            self.keystone.user_update(user, name=post.get('name'),
                email=post.get('email'), password=post.get('password'),
                project=project, enabled=enabled, domain=post.get('domain'))
            messages.add_message(request, messages.SUCCESS,
                                 _('Successfully updated user'))
            actionlog.log(request.user.username, 'update', user)
        except Exception as e:
            log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), e))
            messages.add_message(request, messages.ERROR,
                                 _('Error when update user'))
        return self.form_valid(form)
class DeleteUserView(BaseUserView):
    """Deletes a keystone user identified by the URL kwarg.

    NOTE(review): deletion is triggered by a plain GET, which carries no
    CSRF protection -- consider moving this action to POST.
    """

    def get(self, request, *args, **kwargs):
        user_id = kwargs.get('user_id')
        try:
            self.keystone.user_delete(user_id)
            messages.add_message(request, messages.SUCCESS,
                                 _('Successfully deleted user'))
            actionlog.log(request.user.username, 'delete',
                          'user_id: %s' % user_id)
        except Exception as e:
            log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), e))
            messages.add_message(request, messages.ERROR,
                                 _('Error when delete user'))
        return HttpResponseRedirect(self.success_url)
class BaseProjectView(LoginRequiredMixin, FormView):
    """Shared base for the project views: builds the ProjectForm and
    assembles the template context, including optional role-management
    data for superusers on the admin pages."""
    success_url = reverse_lazy('dashboard')

    def get(self, request, *args, **kwargs):
        # The edit URL reuses this base view; flag the form action so the
        # template renders an update instead of a create.
        if request.resolver_match is not None and request.resolver_match.url_name == 'edit_project':
            form = ProjectForm(initial={'user': request.user, 'action': 'update'})
        else:
            form = ProjectForm(initial={'user': request.user})
        context = self.get_context_data(form=form, request=request, **kwargs)
        return self.render_to_response(context)

    def get_context_data(self, **kwargs):
        request = kwargs.get('request')
        context = super(BaseProjectView, self).get_context_data(**kwargs)
        project_id = kwargs.get('project_id')
        form = kwargs.get('form')
        # Show role management only when a superuser is browsing the
        # /admin/project/ pages.
        context['show_roles'] = request.user.is_superuser and \
            request.path[0:15] == '/admin/project/'
        if not project_id:
            project_id = form.data.get('id')
        if project_id:
            # Editing an existing project: prefill the form from keystone
            # plus the locally stored group/area associations.
            # NOTE(review): .get() assumes exactly one GroupProjects and
            # one AreaProjects row per project -- it raises otherwise.
            project = self.keystone.project_get(project_id)
            form.initial = project.to_dict()
            group_project = GroupProjects.objects.get(project_id=project_id)
            area_project = AreaProjects.objects.get(project_id=project_id)
            form.initial['groups'] = group_project.group_id
            form.initial['areas'] = area_project.area_id
            context['idendity_project_id'] = project_id
            context['has_id'] = True
            user = self.keystone.return_find_u_user(kwargs.get('project_id'))
            if user:
                context['user_project'] = user.username
        if context['show_roles']:
            try:
                users = self.keystone.user_list()
                context['users'] = sorted(users, key=lambda l: l.name.lower())
                roles = self.keystone.role_list()
                context['roles'] = sorted(roles, key=lambda l: l.name.lower())
            except Exception as e:
                log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), e))
        return context
class ListProjectView(SuperUserMixin, TemplateView):
    """Paginated listing of keystone projects, sorted by name."""
    template_name = "identity/projects.html"

    def get(self, request, *args, **kwargs):
        return self.render_to_response(self.get_context_data(**kwargs))

    def get_context_data(self, **kwargs):
        context = super(ListProjectView, self).get_context_data(**kwargs)
        current_page = self.request.GET.get('page', 1)
        # The same view serves the admin URL; the template needs to know.
        context['is_admin'] = self.request.path[0:16] == '/admin/projects/'
        try:
            by_name = sorted(self.keystone.project_list(),
                             key=lambda p: p.name.lower())
            context['projects'] = utils.generic_pagination(by_name, current_page)
        except Exception as e:
            log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), e))
            messages.add_message(self.request, messages.ERROR,
                                 _('Unable to list projects'))
        return context
class CreateProjectSuccessView(LoginRequiredMixin, TemplateView):
    """Displays the credentials and endpoints of a newly created project."""
    template_name = 'identity/project_create_success.html'

    def get(self, request, *args, **kwargs):
        return self.render_to_response(
            self.get_context_data(request=request, **kwargs))

    def get_context_data(self, **kwargs):
        context = super(CreateProjectSuccessView, self).get_context_data(**kwargs)
        request = kwargs.get('request')
        # project_info was stashed in the session by the create view.
        info = request.session.get('project_info')
        context['project_info'] = info
        info['auth_url'] = settings.KEYSTONE_URL
        # Re-authenticate as the new user to fetch its service endpoints.
        keystone = Keystone(request,
                            username=info['user_name'],
                            password=info['user_password'],
                            tenant_name=info['project_name'])
        info['endpoints'] = keystone.get_endpoints()
        return context
class CreateProjectView(BaseProjectView):
    """Creates a keystone project (plus its user) through the vault helper."""
    template_name = "identity/project_create.html"
    form_class = ProjectForm
    success_url = reverse_lazy('projects')

    def post(self, request, *args, **kwargs):
        form = ProjectForm(initial={'user': request.user}, data=request.POST)
        if not form.is_valid():
            return self.render_to_response(
                self.get_context_data(form=form, request=request))
        post = request.POST
        # Normalize an empty description to None.
        description = post.get('description') or None
        response = self.keystone.vault_create_project(post.get('name'),
                                                      post.get('groups'),
                                                      post.get('areas'),
                                                      description=description)
        if not response.get('status'):
            # Creation failed somewhere in the keystone/vault stack.
            log.exception('{}{}'.format(_('Exception: ').encode('UTF-8'), response.get('status')))
            messages.add_message(request, messages.ERROR,
                                 response.get('reason'))
            return self.render_to_response(
                self.get_context_data(form=form, request=request))
        project = response.get('project')
        user = response.get('user')
        actionlog.log(request.user.username, 'create', project)
        actionlog.log(request.user.username, 'create', user)
        # Stash the generated credentials so the success page can show them.
        request.session['project_info'] = {
            'user_name': user.name,
            'project_name': project.name,
            'user_password': response.get('password')
        }
        return redirect('create_project_success')
class UpdateProjectView(BaseProjectView):
    """Updates a project's groups, areas, description and enabled flag."""
    template_name = "identity/project_edit.html"

    def post(self, request, *args, **kwargs):
        form = ProjectForm(initial={'user': request.user}, data=request.POST)
        post = request.POST
        if not form.is_valid():
            context = self.get_context_data(form=form, request=request)
            return self.render_to_response(context)
        enabled = post.get('enabled') not in ('False', '0')
        # Normalize an empty description to None.
        description = post.get('description') or None
        try:
            project = self.keystone.project_get(post.get('id'))
            self.keystone.vault_update_project(project.id, project.name,
                                               post.get('groups'),
                                               post.get('areas'),
                                               description=description,
                                               enabled=enabled)
            messages.add_message(request, messages.SUCCESS,
                                 _('Successfully updated project'))
            actionlog.log(request.user.username, 'update', project)
        except Exception as e:
            log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), e))
            messages.add_message(request, messages.ERROR,
                                 _('Error when update project'))
        return self.form_valid(form)
class DeleteProjectView(BaseProjectView):
    """Deletes a project after re-authenticating with credentials supplied
    in a confirmation form: the swift account is removed first, then the
    keystone project."""
    template_name = "identity/project_delete_confirm.html"

    def get(self, request, *args, **kwargs):
        # Render the confirmation form asking for credentials.
        form = DeleteProjectConfirm()
        return self.render_to_response({'form': form})

    def post(self, request, *args, **kwargs):
        form = DeleteProjectConfirm(data=request.POST)
        if not form.is_valid():
            return self.render_to_response(
                self.get_context_data(form=form, request=request)
            )
        user = form.data.get('user')
        password = form.data.get('password')
        project_id = self.kwargs.get('project_id')
        project_name = self.keystone.project_get(project_id).name
        try:
            # Re-authenticate against the target project with the
            # credentials typed into the confirmation form.
            keystone_app = Keystone(request,
                                    username=user,
                                    password=password,
                                    tenant_name=project_name)
        except exceptions.Unauthorized:
            # Authentication with the user-supplied credentials failed.
            messages.add_message(request, messages.ERROR, _('Invalid credentials.'))
            return self.render_to_response(
                context=self.get_context_data(form=form, request=request)
            )
        endpoints = keystone_app.get_endpoints()
        storage_url = endpoints['adminURL']
        auth_token = self.keystone.conn.auth_token
        # Delete the swift account first; abort before touching keystone
        # if that fails, so the project is not left orphaned.
        swift_del_result = delete_swift_account(storage_url, auth_token)
        if not swift_del_result:
            messages.add_message(request, messages.ERROR,
                                 _('Error when delete project'))
            return HttpResponseRedirect(reverse('edit_project', kwargs={'project_id': project_id}))
        try:
            self.keystone.vault_delete_project(project_id)
            messages.add_message(request, messages.SUCCESS,
                                 _('Successfully deleted project.'))
        except Exception as e:
            log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), e))
            messages.add_message(request, messages.ERROR,
                                 _('Error when delete project'))
        return HttpResponseRedirect(self.success_url)
class ListUserRoleView(SuperUserMixin, View, JSONResponseMixin):
    """JSON endpoint: users of a project together with their roles."""

    def post(self, request, *args, **kwargs):
        project_id = request.POST.get('project')
        context = {}
        try:
            project_users = self.keystone.user_list(project_id=project_id)
            context['users'] = []
            seen = set()
            for user in project_users:
                # keystone may repeat a user; report each username once.
                if user.username in seen:
                    continue
                seen.add(user.username)
                context['users'].append({
                    'id': user.id,
                    'username': user.username,
                    'roles': self.get_user_roles(user, project_id)
                })
            return self.render_to_response(context)
        except Exception as e:
            context['msg'] = 'Error listing users'
            log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), e))
            return self.render_to_response(context, status=500)

    def get_user_roles(self, user, project_id):
        """Returns the user's roles on the project as a list of dicts."""
        # TODO: in v3 client users won't list roles (verify role_assignments)
        return [{'id': role.id, 'name': role.name}
                for role in user.list_roles(project_id)]
class AddUserRoleView(SuperUserMixin, View, JSONResponseMixin):
    """Grant a Keystone role to a user on a project (JSON endpoint)."""

    def post(self, request, *args, **kwargs):
        post = request.POST
        project_id = post.get('project')
        role_id = post.get('role')
        user_id = post.get('user')
        payload = {'msg': 'ok'}
        try:
            self.keystone.add_user_role(project=project_id, role=role_id,
                                        user=user_id)
            actionlog.log(
                request.user.username, 'create',
                'project: %s, role: %s, user: %s' % (project_id, role_id, user_id))
            return self.render_to_response(payload)
        except exceptions.Conflict as e:
            # The user already holds this role on the project.
            payload['msg'] = _('User already registered with this role')
            log.exception('{}{}'.format(_('Conflict:'), e))
            return self.render_to_response(payload, status=500)
        except Exception as e:
            payload['msg'] = _('Error adding user')
            log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), e))
            return self.render_to_response(payload, status=500)
class DeleteUserRoleView(SuperUserMixin, View, JSONResponseMixin):
    """Revoke a Keystone role from a user on a project (JSON endpoint)."""

    def post(self, request, *args, **kwargs):
        post = request.POST
        project_id = post.get('project')
        role_id = post.get('role')
        user_id = post.get('user')
        payload = {'msg': 'ok'}
        try:
            self.keystone.remove_user_role(project=project_id, role=role_id,
                                           user=user_id)
            actionlog.log(
                request.user.username, 'delete',
                'project: %s, role: %s, user: %s' % (project_id, role_id, user_id))
            return self.render_to_response(payload)
        except Exception as e:
            payload['msg'] = _('Error removing user')
            log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), e))
            return self.render_to_response(payload, status=500)
class UpdateProjectUserPasswordView(LoginRequiredMixin, View, JSONResponseMixin):
    """Reset the password of a project's service user and return it as JSON."""

    def get(self, request, *args, **kwargs):
        context = {}
        try:
            user = self.keystone.return_find_u_user(kwargs.get('project_id'))
            # NOTE(review): '<PASSWORD>' is a placeholder left by a credential
            # scrubber; the original code generated a new password here and
            # passed it to user_update and the response. Restore before use.
            new_password = <PASSWORD>()
            self.keystone.user_update(user, password=<PASSWORD>)
            context = {'new_password': <PASSWORD>}
            actionlog.log(request.user.username, 'update', user)
        except Exception as e:
            # Best effort: failures are logged and an empty body is returned.
            log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), e))
        return self.render_to_response(context, status=200)
|
# -*- coding: utf-8 -*-
import logging
from django.conf import settings
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import redirect
from django.core.urlresolvers import reverse_lazy
from django.utils.decorators import method_decorator
from django.views.generic.base import View, TemplateView
from django.views.generic.edit import FormView
from django.utils.translation import ugettext as _
from django.views.decorators.debug import sensitive_post_parameters
from keystoneclient.openstack.common.apiclient import exceptions
from swiftbrowser.utils import delete_swift_account
from actionlogger import ActionLogger
from identity.keystone import Keystone
from identity.forms import (UserForm, CreateUserForm, UpdateUserForm,
ProjectForm, DeleteProjectConfirm)
from vault import utils
from vault.models import GroupProjects, AreaProjects
from vault.views import SuperUserMixin, JSONResponseMixin, LoginRequiredMixin
log = logging.getLogger(__name__)
actionlog = ActionLogger()
class ListUserView(SuperUserMixin, TemplateView):
    """Paginated, name-sorted listing of every Keystone user."""

    template_name = "identity/users.html"

    def get_context_data(self, **kwargs):
        context = super(ListUserView, self).get_context_data(**kwargs)
        page = self.request.GET.get('page', 1)
        try:
            users = self.keystone.user_list()
        except Exception as e:
            # Fall back to an empty listing but surface the failure in the UI.
            log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), e))
            messages.add_message(self.request, messages.ERROR,
                                 _('Unable to list users'))
            users = []
        ordered = sorted(users, key=lambda u: u.name.lower())
        context['users'] = utils.generic_pagination(ordered, page)
        return context
class BaseUserView(SuperUserMixin, FormView):
    """Shared plumbing for the user create/update/delete form views."""

    form_class = UserForm
    success_url = reverse_lazy('admin_list_users')

    def _fill_project_choices(self, form):
        # Offer only enabled projects, plus an empty "---" placeholder.
        if self.keystone and 'project' in form.fields:
            items = [('', '---')]
            for i in self.keystone.project_list():
                if getattr(i, 'enabled', None):
                    items.append((i.id, i.name))
            form.fields['project'].choices = items

    def _fill_role_choices(self, form):
        # All Keystone roles, plus an empty "---" placeholder.
        if self.keystone and 'role' in form.fields:
            items = [('', '---')]
            for i in self.keystone.role_list():
                items.append((i.id, i.name))
            form.fields['role'].choices = items

    @method_decorator(sensitive_post_parameters('password', 'password_confirm'))
    def dispatch(self, *args, **kwargs):
        # Keep passwords out of error reports / debug pages.
        return super(BaseUserView, self).dispatch(*args, **kwargs)

    def get(self, request, *args, **kwargs):
        form = self.get_form(self.form_class)
        self._fill_project_choices(form)
        self._fill_role_choices(form)
        return self.render_to_response(
            self.get_context_data(form=form, **kwargs)
        )

    def get_context_data(self, **kwargs):
        """Pre-fill the form from Keystone when a user id is known."""
        context = super(BaseUserView, self).get_context_data(**kwargs)
        user_id = kwargs.get('user_id')
        form = kwargs.get('form')
        if not user_id:
            # POST round-trips carry the id in the form data instead.
            user_id = form.data.get('id')
        if user_id:
            user = self.keystone.user_get(user_id)
            form.initial = user.to_dict()
            form.fields['project'].initial = user.project_id
            context['user_id'] = user_id
        return context
class CreateUserView(BaseUserView):
    """Create a Keystone user from the admin form."""

    form_class = CreateUserForm
    template_name = "identity/user_create.html"

    def post(self, request, *args, **kwargs):
        form = self.get_form(self.form_class)
        self._fill_project_choices(form)
        self._fill_role_choices(form)
        if form.is_valid():
            post = request.POST
            # The form posts 'False'/'0' for disabled; anything else enables.
            enabled = False if post.get('enabled') in ('False', '0') else True
            try:
                user = self.keystone.user_create(name=post.get('name'),
                                email=post.get('email'), password=post.get('password'),
                                project_id=post.get('project'), enabled=enabled,
                                domain=post.get('domain'), role_id=post.get('role'))
                messages.add_message(request, messages.SUCCESS,
                                     _('Successfully created user'))
                actionlog.log(request.user.username, 'create', user)
            except Exception as e:
                log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), e))
                messages.add_message(request, messages.ERROR,
                                     _('Error when create user'))
            # Redirect to success_url either way; the flash message above
            # tells the operator what happened.
            return self.form_valid(form)
        else:
            return self.form_invalid(form)
class UpdateUserView(BaseUserView):
    """Edit an existing Keystone user from the admin form."""

    form_class = UpdateUserForm
    template_name = "identity/user_edit.html"

    def post(self, request, *args, **kwargs):
        form = self.get_form(self.form_class)
        self._fill_project_choices(form)
        if form.is_valid():
            post = request.POST
            # The form posts 'False'/'0' for disabled; anything else enables.
            enabled = False if post.get('enabled') in ('False', '0') else True
            try:
                user = self.keystone.user_get(post.get('id'))
                # can't modify primary project
                project = self.keystone.project_get(user.project_id)
                self.keystone.user_update(user, name=post.get('name'),
                                email=post.get('email'), password=post.get('password'),
                                project=project, enabled=enabled, domain=post.get('domain'))
                messages.add_message(request, messages.SUCCESS,
                                     _('Successfully updated user'))
                actionlog.log(request.user.username, 'update', user)
            except Exception as e:
                log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), e))
                messages.add_message(request, messages.ERROR,
                                     _('Error when update user'))
            return self.form_valid(form)
        else:
            return self.form_invalid(form)
class DeleteUserView(BaseUserView):
    """Delete the Keystone user identified by the 'user_id' URL kwarg."""

    def get(self, request, *args, **kwargs):
        user_id = kwargs.get('user_id')
        try:
            self.keystone.user_delete(user_id)
            messages.add_message(request, messages.SUCCESS,
                                 _('Successfully deleted user'))
            actionlog.log(request.user.username, 'delete',
                          'user_id: %s' % user_id)
        except Exception as e:
            log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), e))
            messages.add_message(request, messages.ERROR,
                                 _('Error when delete user'))
        return HttpResponseRedirect(self.success_url)
class BaseProjectView(LoginRequiredMixin, FormView):
    """Shared plumbing for the project create/update/delete form views."""

    success_url = reverse_lazy('dashboard')

    def get(self, request, *args, **kwargs):
        # The edit screen needs the form in 'update' mode.
        if request.resolver_match is not None and request.resolver_match.url_name == 'edit_project':
            form = ProjectForm(initial={'user': request.user, 'action': 'update'})
        else:
            form = ProjectForm(initial={'user': request.user})
        context = self.get_context_data(form=form, request=request, **kwargs)
        return self.render_to_response(context)

    def get_context_data(self, **kwargs):
        request = kwargs.get('request')
        context = super(BaseProjectView, self).get_context_data(**kwargs)
        project_id = kwargs.get('project_id')
        form = kwargs.get('form')
        # Show role management only to superusers browsing /admin/project/.
        context['show_roles'] = request.user.is_superuser and \
            request.path[0:15] == '/admin/project/'
        if not project_id:
            # POST round-trips carry the id in the form data instead.
            project_id = form.data.get('id')
        if project_id:
            # Pre-fill the form from Keystone plus the local group/area links.
            project = self.keystone.project_get(project_id)
            form.initial = project.to_dict()
            group_project = GroupProjects.objects.get(project_id=project_id)
            area_project = AreaProjects.objects.get(project_id=project_id)
            form.initial['groups'] = group_project.group_id
            form.initial['areas'] = area_project.area_id
            context['idendity_project_id'] = project_id
            context['has_id'] = True
            user = self.keystone.return_find_u_user(kwargs.get('project_id'))
            if user:
                context['user_project'] = user.username
        if context['show_roles']:
            try:
                users = self.keystone.user_list()
                context['users'] = sorted(users, key=lambda l: l.name.lower())
                roles = self.keystone.role_list()
                context['roles'] = sorted(roles, key=lambda l: l.name.lower())
            except Exception as e:
                log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), e))
        return context
class ListProjectView(SuperUserMixin, TemplateView):
    """Paginated, name-sorted listing of every Keystone project."""

    template_name = "identity/projects.html"

    def get(self, request, *args, **kwargs):
        context = self.get_context_data(**kwargs)
        return self.render_to_response(context)

    def get_context_data(self, **kwargs):
        context = super(ListProjectView, self).get_context_data(**kwargs)
        page = self.request.GET.get('page', 1)
        # The same template serves /projects/ and /admin/projects/.
        context['is_admin'] = self.request.path[0:16] == '/admin/projects/'
        try:
            projects = sorted(self.keystone.project_list(),
                              key=lambda l: l.name.lower())
            context['projects'] = utils.generic_pagination(projects, page)
        except Exception as e:
            log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), e))
            messages.add_message(self.request, messages.ERROR,
                                 _('Unable to list projects'))
        return context
class CreateProjectSuccessView(LoginRequiredMixin, TemplateView):
    """Show credentials/endpoints for a project created in the previous step.

    Reads the 'project_info' dict stashed in the session by CreateProjectView.
    """

    template_name = 'identity/project_create_success.html'

    def get(self, request, *args, **kwargs):
        context = self.get_context_data(request=request, **kwargs)
        return self.render_to_response(context)

    def get_context_data(self, **kwargs):
        context = super(CreateProjectSuccessView, self).get_context_data(**kwargs)
        request = kwargs.get('request')
        context['project_info'] = request.session.get('project_info')
        context['project_info']['auth_url'] = settings.KEYSTONE_URL
        project_name = context['project_info']['project_name']
        user_name = context['project_info']['user_name']
        password = context['project_info']['user_password']
        # Authenticate as the new service user to discover its endpoints.
        keystone = Keystone(request, username=user_name, password=password,
                            tenant_name=project_name)
        context['project_info']['endpoints'] = keystone.get_endpoints()
        return context
class CreateProjectView(BaseProjectView):
    """Create a project (plus its service user) through the Vault wrapper."""

    template_name = "identity/project_create.html"
    form_class = ProjectForm
    success_url = reverse_lazy('projects')

    def post(self, request, *args, **kwargs):
        form = ProjectForm(initial={'user': request.user}, data=request.POST)
        if form.is_valid():
            post = request.POST
            description = post.get('description')
            if description == '':
                description = None
            response = self.keystone.vault_create_project(post.get('name'),
                                                          post.get('groups'),
                                                          post.get('areas'),
                                                          description=description)
            # Creation failed
            if not response.get('status'):
                log.exception('{}{}'.format(_('Exception: ').encode('UTF-8'), response.get('status')))
                messages.add_message(request, messages.ERROR,
                                     response.get('reason'))
                return self.render_to_response(self.get_context_data(form=form, request=request))
            project = response.get('project')
            user = response.get('user')
            actionlog.log(request.user.username, 'create', project)
            actionlog.log(request.user.username, 'create', user)
            # Stash the one-time credentials for the success page.
            request.session['project_info'] = {
                'user_name': user.name,
                'project_name': project.name,
                'user_password': response.get('password')
            }
            return redirect('create_project_success')
        else:
            return self.render_to_response(self.get_context_data(form=form, request=request))
class UpdateProjectView(BaseProjectView):
    """Edit a project's group, area, description and enabled flag."""

    template_name = "identity/project_edit.html"

    def post(self, request, *args, **kwargs):
        form = ProjectForm(initial={'user': request.user}, data=request.POST)
        post = request.POST
        if form.is_valid():
            # The form posts 'False'/'0' for disabled; anything else enables.
            enabled = False if post.get('enabled') in ('False', '0') else True
            group_id = post.get('groups')
            area_id = post.get('areas')
            description = post.get('description')
            if description == '':
                description = None
            try:
                project = self.keystone.project_get(post.get('id'))
                # The existing name is re-sent: it cannot be changed here.
                self.keystone.vault_update_project(project.id, project.name,
                                                   group_id, area_id,
                                                   description=description,
                                                   enabled=enabled)
                messages.add_message(request, messages.SUCCESS,
                                     _('Successfully updated project'))
                actionlog.log(request.user.username, 'update', project)
            except Exception as e:
                log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), e))
                messages.add_message(request, messages.ERROR,
                                     _('Error when update project'))
            return self.form_valid(form)
        else:
            context = self.get_context_data(form=form, request=request)
            return self.render_to_response(context)
class DeleteProjectView(BaseProjectView):
    """Delete a project after re-authenticating with user-supplied credentials.

    The project's Swift account is deleted first; only when that succeeds is
    the Keystone project itself removed.
    """

    template_name = "identity/project_delete_confirm.html"

    def get(self, request, *args, **kwargs):
        form = DeleteProjectConfirm()
        return self.render_to_response({'form': form})

    def post(self, request, *args, **kwargs):
        form = DeleteProjectConfirm(data=request.POST)
        if not form.is_valid():
            return self.render_to_response(
                self.get_context_data(form=form, request=request)
            )
        user = form.data.get('user')
        password = form.data.get('password')
        project_id = self.kwargs.get('project_id')
        project_name = self.keystone.project_get(project_id).name
        try:
            # Prove the operator knows this project's credentials.
            keystone_app = Keystone(request,
                                    username=user,
                                    password=password,
                                    tenant_name=project_name)
        except exceptions.Unauthorized:
            # Authentication with the user-supplied credentials failed
            messages.add_message(request, messages.ERROR, _('Invalid credentials.'))
            return self.render_to_response(
                context=self.get_context_data(form=form, request=request)
            )
        endpoints = keystone_app.get_endpoints()
        storage_url = endpoints['adminURL']
        auth_token = self.keystone.conn.auth_token
        # Remove the Swift account backing this project before dropping it.
        swift_del_result = delete_swift_account(storage_url, auth_token)
        if not swift_del_result:
            messages.add_message(request, messages.ERROR,
                                 _('Error when delete project'))
            return HttpResponseRedirect(reverse('edit_project', kwargs={'project_id': project_id}))
        try:
            self.keystone.vault_delete_project(project_id)
            messages.add_message(request, messages.SUCCESS,
                                 _('Successfully deleted project.'))
        except Exception as e:
            log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), e))
            messages.add_message(request, messages.ERROR,
                                 _('Error when delete project'))
        return HttpResponseRedirect(self.success_url)
class ListUserRoleView(SuperUserMixin, View, JSONResponseMixin):
    """Return, as JSON, the users of a Keystone project and their roles.

    Superuser-only; responds to POST with a 'project' id in the body.
    """

    def post(self, request, *args, **kwargs):
        project_id = request.POST.get('project')
        context = {}
        try:
            project_users = self.keystone.user_list(project_id=project_id)
            context['users'] = []
            # Dedupe by username — presumably user_list can return the same
            # user more than once (TODO confirm against the Keystone client).
            unique_users = set()
            for user in project_users:
                if user.username not in unique_users:
                    unique_users.add(user.username)
                    context['users'].append({
                        'id': user.id,
                        'username': user.username,
                        'roles': self.get_user_roles(user, project_id)
                    })
            return self.render_to_response(context)
        except Exception as e:
            # Fix: mark the message for translation, consistent with every
            # other user-facing error message in this module.
            context['msg'] = _('Error listing users')
            log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), e))
            return self.render_to_response(context, status=500)

    def get_user_roles(self, user, project_id):
        """Return [{'id', 'name'}] for each role *user* holds on *project_id*."""
        # TODO: in v3 client users won't list roles (verify role_assignments)
        return [{'id': r.id, 'name': r.name}
                for r in user.list_roles(project_id)]
class AddUserRoleView(SuperUserMixin, View, JSONResponseMixin):
    """Grant a Keystone role to a user on a project (JSON endpoint)."""

    def post(self, request, *args, **kwargs):
        project = request.POST.get('project')
        role = request.POST.get('role')
        user = request.POST.get('user')
        context = {'msg': 'ok'}
        try:
            self.keystone.add_user_role(project=project, role=role, user=user)
            item = 'project: %s, role: %s, user: %s' % (project, role, user)
            actionlog.log(request.user.username, 'create', item)
            return self.render_to_response(context)
        except exceptions.Conflict as e:
            # The user already holds this role on the project.
            context['msg'] = _('User already registered with this role')
            log.exception('{}{}'.format(_('Conflict:'), e))
            return self.render_to_response(context, status=500)
        except Exception as e:
            context['msg'] = _('Error adding user')
            log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), e))
            return self.render_to_response(context, status=500)
class DeleteUserRoleView(SuperUserMixin, View, JSONResponseMixin):
    """Revoke a Keystone role from a user on a project (JSON endpoint)."""

    def post(self, request, *args, **kwargs):
        project = request.POST.get('project')
        role = request.POST.get('role')
        user = request.POST.get('user')
        context = {'msg': 'ok'}
        try:
            self.keystone.remove_user_role(project=project, role=role, user=user)
            item = 'project: %s, role: %s, user: %s' % (project, role, user)
            actionlog.log(request.user.username, 'delete', item)
            return self.render_to_response(context)
        except Exception as e:
            context['msg'] = _('Error removing user')
            log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), e))
            return self.render_to_response(context, status=500)
class UpdateProjectUserPasswordView(LoginRequiredMixin, View, JSONResponseMixin):
    """Reset the password of a project's service user and return it as JSON."""

    def get(self, request, *args, **kwargs):
        context = {}
        try:
            user = self.keystone.return_find_u_user(kwargs.get('project_id'))
            # NOTE(review): '<PASSWORD>' is a placeholder left by a credential
            # scrubber; the original code generated a new password here and
            # passed it to user_update and the response. Restore before use.
            new_password = <PASSWORD>()
            self.keystone.user_update(user, password=<PASSWORD>)
            context = {'new_password': <PASSWORD>}
            actionlog.log(request.user.username, 'update', user)
        except Exception as e:
            # Best effort: failures are logged and an empty body is returned.
            log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), e))
        return self.render_to_response(context, status=200)
|
en
| 0.447556
|
# -*- coding: utf-8 -*- # can't modify primary project # Mostra a gerencia de roles qd for superuser acessando admin # Houve falha no cadastro # Falhou ao auntenticar com as credenciais enviadas pelo usuario # TODO: in v3 client users won't list roles (verify role_assignments)
| 1.777113
| 2
|
accounts/views.py
|
drkEvo/DjangoBankSystem
| 0
|
6627748
|
from django.contrib import messages
from django.contrib.auth import get_user_model, login, logout
from django.contrib.auth.views import LoginView
from django.shortcuts import HttpResponseRedirect
from django.urls import reverse_lazy
from django.views.generic import TemplateView, RedirectView
from .forms import UserRegistrationForm, UserAddressForm
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import CreateView, ListView
from django.http import HttpResponse
from django.shortcuts import render
from django.template import Context, Template
User = get_user_model()
class UserRegistrationView(TemplateView):
    """Sign-up page handling two bound forms at once: credentials and address."""

    model = User
    form_class = UserRegistrationForm
    template_name = 'accounts/user_registration.html'

    def dispatch(self, request, *args, **kwargs):
        # Logged-in users are bounced straight to their transaction report.
        if self.request.user.is_authenticated:
            return HttpResponseRedirect(
                reverse_lazy('transactions:transaction_report')
            )
        return super().dispatch(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        registration_form = UserRegistrationForm(self.request.POST)
        address_form = UserAddressForm(self.request.POST)
        if registration_form.is_valid() and address_form.is_valid():
            user = registration_form.save()
            # Attach the address to the freshly created user before saving.
            address = address_form.save(commit=False)
            address.user = user
            address.save()
            login(self.request, user)
            messages.success(
                self.request,
                (
                    f'Thank You For Creating A Bank Account. '
                    f'Your Account Number is {user.account.account_no}. '
                )
            )
            return HttpResponseRedirect(
                reverse_lazy('transactions:deposit_money')
            )
        # Re-render with both (possibly invalid) bound forms.
        return self.render_to_response(
            self.get_context_data(
                registration_form=registration_form,
                address_form=address_form
            )
        )

    def get_context_data(self, **kwargs):
        # Provide unbound forms on plain GETs.
        if 'registration_form' not in kwargs:
            kwargs['registration_form'] = UserRegistrationForm()
        if 'address_form' not in kwargs:
            kwargs['address_form'] = UserAddressForm()
        return super().get_context_data(**kwargs)
class UserLoginView(LoginView):
    """Standard Django login view; authenticated users are redirected away."""

    template_name='accounts/user_login.html'
    redirect_authenticated_user = True
class LogoutView(RedirectView):
    """Log the user out (when authenticated) and redirect to 'home'."""

    pattern_name = 'home'

    def get_redirect_url(self, *args, **kwargs):
        if self.request.user.is_authenticated:
            logout(self.request)
        return super().get_redirect_url(*args, **kwargs)
class UserDetails(LoginRequiredMixin, ListView):
    """Render the logged-in user's profile / account summary page."""

    template_name = 'accounts/user_details.html'
    form_data = {}

    def get(self, request, *args, **kwargs):
        # import pdb
        # pdb.set_trace()
        c_user = self.request.user
        # form = TransactionDateRangeForm(request.GET or None)
        # if form.is_valid():
        #     self.form_data = form.cleaned_data
        args = {}
        args['name'] = c_user.first_name + ' ' + c_user.last_name
        # NOTE(review): this stores the user object, not an email string —
        # presumably the custom user model renders as its email; confirm.
        args['email'] = c_user
        # NOTE(review): phone and card below are hard-coded placeholders.
        args['phone'] = "20111435465"
        args['bdate'] = c_user.account.birth_date
        args['acc_no'] = c_user.account.account_no
        args['card'] = "22344532212"
        args['balance'] = c_user.account.balance
        return render(request, 'accounts/user_details.html', args)
    # def get_queryset(self):
    #     queryset = super().get_queryset().filter(
    #         account=self.request.user.account
    #     )
    #     daterange = self.form_data.get("daterange")
    #     if daterange:
    #         queryset = queryset.filter(timestamp__date__range=daterange)
    #     return queryset.distinct()
    # def get_context_data(self, **kwargs):
    #     context = super().get_context_data(**kwargs)
    #     context.update({
    #         'account': self.request.user.account,
    #         'form': TransactionDateRangeForm(self.request.GET or None)
    #     })
|
from django.contrib import messages
from django.contrib.auth import get_user_model, login, logout
from django.contrib.auth.views import LoginView
from django.shortcuts import HttpResponseRedirect
from django.urls import reverse_lazy
from django.views.generic import TemplateView, RedirectView
from .forms import UserRegistrationForm, UserAddressForm
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import CreateView, ListView
from django.http import HttpResponse
from django.shortcuts import render
from django.template import Context, Template
User = get_user_model()
class UserRegistrationView(TemplateView):
    """Sign-up page handling two bound forms at once: credentials and address."""

    model = User
    form_class = UserRegistrationForm
    template_name = 'accounts/user_registration.html'

    def dispatch(self, request, *args, **kwargs):
        # Logged-in users are bounced straight to their transaction report.
        if self.request.user.is_authenticated:
            return HttpResponseRedirect(
                reverse_lazy('transactions:transaction_report')
            )
        return super().dispatch(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        registration_form = UserRegistrationForm(self.request.POST)
        address_form = UserAddressForm(self.request.POST)
        if registration_form.is_valid() and address_form.is_valid():
            user = registration_form.save()
            # Attach the address to the freshly created user before saving.
            address = address_form.save(commit=False)
            address.user = user
            address.save()
            login(self.request, user)
            messages.success(
                self.request,
                (
                    f'Thank You For Creating A Bank Account. '
                    f'Your Account Number is {user.account.account_no}. '
                )
            )
            return HttpResponseRedirect(
                reverse_lazy('transactions:deposit_money')
            )
        # Re-render with both (possibly invalid) bound forms.
        return self.render_to_response(
            self.get_context_data(
                registration_form=registration_form,
                address_form=address_form
            )
        )

    def get_context_data(self, **kwargs):
        # Provide unbound forms on plain GETs.
        if 'registration_form' not in kwargs:
            kwargs['registration_form'] = UserRegistrationForm()
        if 'address_form' not in kwargs:
            kwargs['address_form'] = UserAddressForm()
        return super().get_context_data(**kwargs)
class UserLoginView(LoginView):
    """Standard Django login view; authenticated users are redirected away."""

    template_name='accounts/user_login.html'
    redirect_authenticated_user = True
class LogoutView(RedirectView):
    """End the current session (if any) and redirect to the 'home' URL."""

    pattern_name = 'home'

    def get_redirect_url(self, *args, **kwargs):
        current_user = self.request.user
        if current_user.is_authenticated:
            logout(self.request)
        return super().get_redirect_url(*args, **kwargs)
class UserDetails(LoginRequiredMixin, ListView):
    """Render the logged-in user's profile / account summary page."""

    template_name = 'accounts/user_details.html'
    form_data = {}

    def get(self, request, *args, **kwargs):
        # import pdb
        # pdb.set_trace()
        c_user = self.request.user
        # form = TransactionDateRangeForm(request.GET or None)
        # if form.is_valid():
        #     self.form_data = form.cleaned_data
        args = {}
        args['name'] = c_user.first_name + ' ' + c_user.last_name
        # NOTE(review): this stores the user object, not an email string —
        # presumably the custom user model renders as its email; confirm.
        args['email'] = c_user
        # NOTE(review): phone and card below are hard-coded placeholders.
        args['phone'] = "20111435465"
        args['bdate'] = c_user.account.birth_date
        args['acc_no'] = c_user.account.account_no
        args['card'] = "22344532212"
        args['balance'] = c_user.account.balance
        return render(request, 'accounts/user_details.html', args)
    # def get_queryset(self):
    #     queryset = super().get_queryset().filter(
    #         account=self.request.user.account
    #     )
    #     daterange = self.form_data.get("daterange")
    #     if daterange:
    #         queryset = queryset.filter(timestamp__date__range=daterange)
    #     return queryset.distinct()
    # def get_context_data(self, **kwargs):
    #     context = super().get_context_data(**kwargs)
    #     context.update({
    #         'account': self.request.user.account,
    #         'form': TransactionDateRangeForm(self.request.GET or None)
    #     })
|
en
| 0.173424
|
# import pdb # pdb.set_trace() # form = TransactionDateRangeForm(request.GET or None) # if form.is_valid(): # self.form_data = form.cleaned_data # def get_queryset(self): # queryset = super().get_queryset().filter( # account=self.request.user.account # ) # daterange = self.form_data.get("daterange") # if daterange: # queryset = queryset.filter(timestamp__date__range=daterange) # return queryset.distinct() # def get_context_data(self, **kwargs): # context = super().get_context_data(**kwargs) # context.update({ # 'account': self.request.user.account, # 'form': TransactionDateRangeForm(self.request.GET or None) # })
| 2.260025
| 2
|
batch_generator.py
|
danathughes/DeepEmbeddedClustering
| 22
|
6627749
|
## batch_generator.py
##
##
##
## History:
## 1.0 29-Jun-2016 Initial version
## 1.1 12-Aug-2016 Changed input / output to key / value pairs
## Changed class from Dataset to Batch
## 1.2 30-Sep-2016 Changed class from Batch to BatchGenerator
## Added option to split batch generator into multiple
## batch generators
import random
import numpy as np
class BatchGenerator:
    """Serve fixed-size batches of numpy arrays built from stored samples.

    Each sample is a dict mapping every key of shape_dict to an array whose
    shape matches the one declared for that key.
    """

    def __init__(self, shape_dict):
        """Create an empty generator; *shape_dict* maps key -> sample shape."""
        self._shape_dict = shape_dict
        self._data = []
        self._data_keys = shape_dict.keys()
        self._shapes = {key: shape for key, shape in shape_dict.items()}
        self._current_index = 0

    def add_sample(self, sample_dict):
        """Append one sample (dict of key -> array) to the dataset."""
        self._data.append(sample_dict)

    def shuffle(self):
        """Randomly reorder the stored samples in place."""
        random.shuffle(self._data)

    def reset(self):
        """Rewind the batch cursor to the first sample."""
        self._current_index = 0

    def split(self, distribution):
        """Randomly partition the samples into one generator per entry of
        *distribution* (fractions assumed to sum to 1.0)."""
        parts = [BatchGenerator(self._shape_dict) for _ in distribution]
        for sample in self._data:
            draw = random.random()
            target = 0
            # Walk the cumulative distribution to pick the target generator.
            while draw > distribution[target]:
                draw -= distribution[target]
                target += 1
                # Guard against rounding: fall back to the last generator.
                if target == len(distribution):
                    target = len(distribution) - 1
                    break
            parts[target].add_sample(sample)
        return parts

    def get_current_index(self):
        """Current position of the batch cursor."""
        return self._current_index

    def set_index(self, index):
        """Move the batch cursor to *index*."""
        self._current_index = index

    def get_batch(self, batch_size):
        """Return a dict of key -> (size, *shape) arrays plus 'batch_size'.

        The batch may be shorter than *batch_size* near the end of the data.
        """
        start = self._current_index
        size = min(len(self._data) - start, batch_size)
        batch = {}
        for key in self._data_keys:
            stacked = np.zeros((size,) + self._shapes[key])
            for row in range(size):
                stacked[row, :] = self._data[start + row][key][:]
            batch[key] = stacked
        self._current_index = start + size
        batch['batch_size'] = size
        return batch

    def num_samples(self):
        """Total number of stored samples."""
        return len(self._data)
|
## batch_generator.py
##
##
##
## History:
## 1.0 29-Jun-2016 Initial version
## 1.1 12-Aug-2016 Changed input / output to key / value pairs
## Changed class from Dataset to Batch
## 1.2 30-Sep-2016 Changed class from Batch to BatchGenerator
## Added option to split batch generator into multiple
## batch generators
import random
import numpy as np
class BatchGenerator:
    """
    Object which produces batches from a provided dataset.

    Samples are dicts mapping each key of shape_dict to an array of the
    corresponding shape; get_batch() stacks them into numpy arrays.
    """

    def __init__(self, shape_dict):
        """
        Setup a new generator for producing batches.

        shape_dict -- maps sample key -> tuple shape of one sample's array.
        """
        self._shape_dict = shape_dict
        self._data = []
        # NOTE(review): dict.keys() is a live view in Python 3, so later
        # mutation of shape_dict would change it — presumably meant static.
        self._data_keys = shape_dict.keys()
        self._shapes = {}
        for k in self._data_keys:
            self._shapes[k] = shape_dict[k]
        self._current_index = 0

    def add_sample(self, sample_dict):
        """
        Add a sample to the Dataset
        """
        self._data.append(sample_dict)

    def shuffle(self):
        """
        Shuffle the data
        """
        random.shuffle(self._data)

    def reset(self):
        """
        Wrap back around to the start of the list
        """
        self._current_index = 0

    def split(self, distribution):
        """
        Split the dataset in the batch generator into multiple generators

        distribution - Percentage of dataset for each batch generator.
                       This is assumed to sum to 1.0
        """
        # Create new batch generators
        batch_generators = [BatchGenerator(self._shape_dict) for _ in distribution]
        # Add each sample in the dataset to a random generator, as appropriate
        for sample in self._data:
            rnd = random.random()
            idx = 0
            # Determine which batch to add this to
            while rnd > distribution[idx]:
                rnd = rnd - distribution[idx]
                idx += 1
                # Just in case, assign to the last generator if needed
                if idx == len(distribution):
                    idx = len(distribution) - 1
                    break
            batch_generators[idx].add_sample(sample)
        return batch_generators

    def get_current_index(self):
        """
        Get the current position in the batch
        """
        return self._current_index

    def set_index(self, index):
        """
        Move the batch cursor to the given sample index.
        """
        self._current_index = index

    def get_batch(self, batch_size):
        """
        Return a batch of input / output pairs.

        Maps each key to a (size, *shape) array; 'batch_size' holds the
        actual size, which may be short near the end of the data.
        """
        size = min(len(self._data) - self._current_index, batch_size)
        data = {}
        for k in self._data_keys:
            data[k] = np.zeros((size,) + self._shapes[k])
            for i in range(size):
                data[k][i,:] = self._data[i + self._current_index][k][:]
        self._current_index = self._current_index + size
        data['batch_size'] = size
        return data

    def num_samples(self):
        """
        The total number of samples in the batch
        """
        return len(self._data)
|
en
| 0.732123
|
## batch_generator.py ## ## ## ## History: ## 1.0 29-Jun-2016 Initial version ## 1.1 12-Aug-2016 Changed input / output to key / value pairs ## Changed class from Dataset to Batch ## 1.2 30-Sep-2016 Changed class from Batch to BatchGenerator ## Added option to split batch generator into multiple ## batch generators Object which produces batches from a provided dataset. Setup a new generator for producing batches Add a sample to the Dataset Shuffle the data Wrap back around to the start of the list Split the dataset in the batch generator into multiple generators distribution - Percentage of dataset for each batch generator. This is assumed to sum to 1.0 # Create new batch generators # Add each sample in the dataset to a random generator, as appropriate # Determine which batch to add this to # Just in case, assign to the last generator if needed Get the current position in the batch Return an batch of input / output pairs The total number of samples in the batch
| 3.068677
| 3
|
tests/generator/common/shell.py
|
multiplemonomials/MIRP
| 24
|
6627750
|
<filename>tests/generator/common/shell.py
#!/usr/bin/env python3
def ncart(am):
    """Number of Cartesian components of a shell with angular momentum *am*."""
    return (am + 1) * (am + 2) // 2
def iterate_gaussian(lmn):
    """Return the (l, m, n) triple following *lmn* in canonical Cartesian
    ordering, or None when *lmn* is the last one (all momentum on n)."""
    l, m, n = lmn
    am = l + m + n
    if n >= am:
        # (0, 0, am) is the final component.
        return None
    if n < am - l:
        # Shift one unit of momentum from m to n.
        return (l, m - 1, n + 1)
    # m and n are exhausted for this l; decrement l and restart with m.
    return (l - 1, am - l + 1, 0)
def all_cartesian_components(am):
    """List every Cartesian (l, m, n) component of angular momentum *am*,
    in canonical order starting from (am, 0, 0)."""
    components = []
    current = (am, 0, 0)
    while current:
        components.append(current)
        current = iterate_gaussian(current)
    return components
|
<filename>tests/generator/common/shell.py
#!/usr/bin/env python3
def ncart(am):
    """Number of Cartesian components for angular momentum *am*: (am+1)(am+2)/2."""
    return ((am+1)*(am+2))//2
def iterate_gaussian(lmn):
    """Return the (l, m, n) triple that follows *lmn* in canonical Cartesian
    ordering, or None when all angular momentum sits on the last index."""
    am = lmn[0] + lmn[1] + lmn[2]
    if lmn[2] >= am:
        # (0, 0, am) is the final component.
        return None
    if lmn[2] < (am - lmn[0]):
        # Shift one unit of momentum from m to n.
        return (lmn[0], lmn[1]-1, lmn[2]+1)
    else:
        # m and n exhausted for this l; decrement l and restart with m.
        return (lmn[0]-1, am-lmn[0]+1, 0)
def all_cartesian_components(am):
    """List every Cartesian (l, m, n) component of angular momentum *am*,
    in canonical order starting from (am, 0, 0)."""
    all_cart = []
    lmn = (am, 0, 0)
    while lmn:
        all_cart.append(lmn)
        lmn = iterate_gaussian(lmn)
    return all_cart
|
fr
| 0.221828
|
#!/usr/bin/env python3
| 2.84609
| 3
|