from __future__ import unicode_literals
import os
import re
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geos import HAS_GEOS
from django.test import TestCase, skipUnlessDBFeature
from django.utils._os import upath
if HAS_GEOS:
from django.contrib.gis.db.models import Union, Extent3D
from django.contrib.gis.geos import GEOSGeometry, LineString, Point, Polygon
from .models import (City3D, Interstate2D, Interstate3D, InterstateProj2D,
InterstateProj3D, Point2D, Point3D, MultiPoint3D, Polygon2D, Polygon3D)
if HAS_GDAL:
from django.contrib.gis.utils import LayerMapping, LayerMapError
data_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), '..', 'data'))
city_file = os.path.join(data_path, 'cities', 'cities.shp')
vrt_file = os.path.join(data_path, 'test_vrt', 'test_vrt.vrt')
# The coordinates of each city, with Z values corresponding to their
# altitude in meters.
city_data = (
('Houston', (-95.363151, 29.763374, 18)),
('Dallas', (-96.801611, 32.782057, 147)),
('Oklahoma City', (-97.521157, 34.464642, 380)),
('Wellington', (174.783117, -41.315268, 14)),
('Pueblo', (-104.609252, 38.255001, 1433)),
('Lawrence', (-95.235060, 38.971823, 251)),
('Chicago', (-87.650175, 41.850385, 181)),
('Victoria', (-123.305196, 48.462611, 15)),
)
# Reference mapping of city name to its coordinates (including Z value).
city_dict = dict((name, coords) for name, coords in city_data)
# 3D freeway data derived from the National Elevation Dataset:
# http://seamless.usgs.gov/products/9arc.php
interstate_data = (
('I-45',
'LINESTRING(-95.3708481 29.7765870 11.339,-95.3694580 29.7787980 4.536,'
'-95.3690305 29.7797359 9.762,-95.3691886 29.7812450 12.448,'
'-95.3696447 29.7850144 10.457,-95.3702511 29.7868518 9.418,'
'-95.3706724 29.7881286 14.858,-95.3711632 29.7896157 15.386,'
'-95.3714525 29.7936267 13.168,-95.3717848 29.7955007 15.104,'
'-95.3717719 29.7969804 16.516,-95.3717305 29.7982117 13.923,'
'-95.3717254 29.8000778 14.385,-95.3719875 29.8013539 15.160,'
'-95.3720575 29.8026785 15.544,-95.3721321 29.8040912 14.975,'
'-95.3722074 29.8050998 15.688,-95.3722779 29.8060430 16.099,'
'-95.3733818 29.8076750 15.197,-95.3741563 29.8103686 17.268,'
'-95.3749458 29.8129927 19.857,-95.3763564 29.8144557 15.435)',
(11.339, 4.536, 9.762, 12.448, 10.457, 9.418, 14.858,
15.386, 13.168, 15.104, 16.516, 13.923, 14.385, 15.16,
15.544, 14.975, 15.688, 16.099, 15.197, 17.268, 19.857,
15.435),
),
)
# Bounding box polygon for inner-loop of Houston (in projected coordinate
# system 32140), with elevation values from the National Elevation Dataset
# (see above).
bbox_data = (
'POLYGON((941527.97 4225693.20,962596.48 4226349.75,963152.57 4209023.95,'
'942051.75 4208366.38,941527.97 4225693.20))',
(21.71, 13.21, 9.12, 16.40, 21.71)
)
@skipUnless(HAS_GDAL, "GDAL is required for Geo3DTest.")
@skipUnlessDBFeature("gis_enabled", "supports_3d_functions")
class Geo3DTest(TestCase):
"""
Only a subset of the PostGIS routines are 3D-enabled, and this TestCase
tries to test the features that can handle 3D and that are also
available within GeoDjango. For more information, see the PostGIS docs
on the routines that support 3D:
http://postgis.net/docs/PostGIS_Special_Functions_Index.html#PostGIS_3D_Functions
"""
def _load_interstate_data(self):
# Interstate (2D / 3D and Geographic/Projected variants)
for name, line, exp_z in interstate_data:
line_3d = GEOSGeometry(line, srid=4269)
line_2d = LineString([l[:2] for l in line_3d.coords], srid=4269)
# Creating a geographic and projected version of the
# interstate in both 2D and 3D.
Interstate3D.objects.create(name=name, line=line_3d)
InterstateProj3D.objects.create(name=name, line=line_3d)
Interstate2D.objects.create(name=name, line=line_2d)
InterstateProj2D.objects.create(name=name, line=line_2d)
def _load_city_data(self):
for name, pnt_data in city_data:
City3D.objects.create(name=name, point=Point(*pnt_data, srid=4326))
def _load_polygon_data(self):
bbox_wkt, bbox_z = bbox_data
bbox_2d = GEOSGeometry(bbox_wkt, srid=32140)
bbox_3d = Polygon(tuple((x, y, z) for (x, y), z in zip(bbox_2d[0].coords, bbox_z)), srid=32140)
Polygon2D.objects.create(name='2D BBox', poly=bbox_2d)
Polygon3D.objects.create(name='3D BBox', poly=bbox_3d)
def test_3d_hasz(self):
"""
Make sure data is 3D and has expected Z values -- shouldn't change
because of coordinate system.
"""
self._load_interstate_data()
for name, line, exp_z in interstate_data:
interstate = Interstate3D.objects.get(name=name)
interstate_proj = InterstateProj3D.objects.get(name=name)
for i in [interstate, interstate_proj]:
self.assertTrue(i.line.hasz)
self.assertEqual(exp_z, tuple(i.line.z))
self._load_city_data()
for name, pnt_data in city_data:
city = City3D.objects.get(name=name)
z = pnt_data[2]
self.assertTrue(city.point.hasz)
self.assertEqual(z, city.point.z)
def test_3d_polygons(self):
"""
Test the creation of polygon 3D models.
"""
self._load_polygon_data()
p3d = Polygon3D.objects.get(name='3D BBox')
self.assertTrue(p3d.poly.hasz)
self.assertIsInstance(p3d.poly, Polygon)
self.assertEqual(p3d.poly.srid, 32140)
def test_3d_layermapping(self):
"""
Testing LayerMapping on 3D models.
"""
point_mapping = {'point': 'POINT'}
mpoint_mapping = {'mpoint': 'MULTIPOINT'}
# The VRT is 3D, but should still be able to map sans the Z.
lm = LayerMapping(Point2D, vrt_file, point_mapping, transform=False)
lm.save()
self.assertEqual(3, Point2D.objects.count())
# The city shapefile is 2D, and won't be able to fill the coordinates
# in the 3D model -- thus, a LayerMapError is raised.
self.assertRaises(LayerMapError, LayerMapping,
Point3D, city_file, point_mapping, transform=False)
# 3D model should take 3D data just fine.
lm = LayerMapping(Point3D, vrt_file, point_mapping, transform=False)
lm.save()
self.assertEqual(3, Point3D.objects.count())
# Making sure LayerMapping.make_multi works right, by converting
# a Point25D into a MultiPoint25D.
lm = LayerMapping(MultiPoint3D, vrt_file, mpoint_mapping, transform=False)
lm.save()
self.assertEqual(3, MultiPoint3D.objects.count())
def test_kml(self):
"""
Test GeoQuerySet.kml() with Z values.
"""
self._load_city_data()
h = City3D.objects.kml(precision=6).get(name='Houston')
# KML should be 3D.
# `SELECT ST_AsKML(point, 6) FROM geo3d_city3d WHERE name = 'Houston';`
ref_kml_regex = re.compile(r'^<Point><coordinates>-95.363\d+,29.763\d+,18</coordinates></Point>$')
self.assertTrue(ref_kml_regex.match(h.kml))
def test_geojson(self):
"""
Test GeoQuerySet.geojson() with Z values.
"""
self._load_city_data()
h = City3D.objects.geojson(precision=6).get(name='Houston')
# GeoJSON should be 3D
# `SELECT ST_AsGeoJSON(point, 6) FROM geo3d_city3d WHERE name='Houston';`
ref_json_regex = re.compile(r'^{"type":"Point","coordinates":\[-95.363151,29.763374,18(\.0+)?\]}$')
self.assertTrue(ref_json_regex.match(h.geojson))
def test_union(self):
"""
Testing the Union aggregate of 3D models.
"""
# PostGIS query that returned the reference EWKT for this test:
# `SELECT ST_AsText(ST_Union(point)) FROM geo3d_city3d;`
self._load_city_data()
ref_ewkt = (
'SRID=4326;MULTIPOINT(-123.305196 48.462611 15,-104.609252 38.255001 1433,'
'-97.521157 34.464642 380,-96.801611 32.782057 147,-95.363151 29.763374 18,'
'-95.23506 38.971823 251,-87.650175 41.850385 181,174.783117 -41.315268 14)'
)
ref_union = GEOSGeometry(ref_ewkt)
union = City3D.objects.aggregate(Union('point'))['point__union']
self.assertTrue(union.hasz)
# Ordering of points in the resulting geometry may vary between implementations
self.assertSetEqual({p.ewkt for p in ref_union}, {p.ewkt for p in union})
def test_extent(self):
"""
Testing the Extent3D aggregate for 3D models.
"""
self._load_city_data()
# `SELECT ST_Extent3D(point) FROM geo3d_city3d;`
ref_extent3d = (-123.305196, -41.315268, 14, 174.783117, 48.462611, 1433)
extent1 = City3D.objects.aggregate(Extent3D('point'))['point__extent3d']
extent2 = City3D.objects.extent3d()
def check_extent3d(extent3d, tol=6):
for ref_val, ext_val in zip(ref_extent3d, extent3d):
self.assertAlmostEqual(ref_val, ext_val, tol)
for e3d in [extent1, extent2]:
check_extent3d(e3d)
def test_perimeter(self):
"""
Testing GeoQuerySet.perimeter() on 3D fields.
"""
self._load_polygon_data()
# Reference query for values below:
# `SELECT ST_Perimeter3D(poly), ST_Perimeter2D(poly) FROM geo3d_polygon3d;`
ref_perim_3d = 76859.2620451
ref_perim_2d = 76859.2577803
tol = 6
self.assertAlmostEqual(ref_perim_2d,
Polygon2D.objects.perimeter().get(name='2D BBox').perimeter.m,
tol)
self.assertAlmostEqual(ref_perim_3d,
Polygon3D.objects.perimeter().get(name='3D BBox').perimeter.m,
tol)
def test_length(self):
"""
Testing GeoQuerySet.length() on 3D fields.
"""
        # ST_Length_Spheroid is Z-aware, and thus does not need to use
        # a separate function internally.
# `SELECT ST_Length_Spheroid(line, 'SPHEROID["GRS 1980",6378137,298.257222101]')
# FROM geo3d_interstate[2d|3d];`
self._load_interstate_data()
tol = 3
ref_length_2d = 4368.1721949481
ref_length_3d = 4368.62547052088
self.assertAlmostEqual(ref_length_2d,
Interstate2D.objects.length().get(name='I-45').length.m,
tol)
self.assertAlmostEqual(ref_length_3d,
Interstate3D.objects.length().get(name='I-45').length.m,
tol)
        # Making sure `ST_Length3D` is used for a projected
        # 3D model rather than `ST_Length`.
# `SELECT ST_Length(line) FROM geo3d_interstateproj2d;`
ref_length_2d = 4367.71564892392
# `SELECT ST_Length3D(line) FROM geo3d_interstateproj3d;`
ref_length_3d = 4368.16897234101
self.assertAlmostEqual(ref_length_2d,
InterstateProj2D.objects.length().get(name='I-45').length.m,
tol)
self.assertAlmostEqual(ref_length_3d,
InterstateProj3D.objects.length().get(name='I-45').length.m,
tol)
def test_scale(self):
"""
Testing GeoQuerySet.scale() on Z values.
"""
self._load_city_data()
        # Z scale factors to apply to each city's Z value.
zscales = (-3, 4, 23)
for zscale in zscales:
for city in City3D.objects.scale(1.0, 1.0, zscale):
self.assertEqual(city_dict[city.name][2] * zscale, city.scale.z)
def test_translate(self):
"""
Testing GeoQuerySet.translate() on Z values.
"""
self._load_city_data()
ztranslations = (5.23, 23, -17)
for ztrans in ztranslations:
for city in City3D.objects.translate(0, 0, ztrans):
self.assertEqual(city_dict[city.name][2] + ztrans, city.translate.z)
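
# A minimal illustration (assuming GEOS is available) of the 3D behavior the
# tests above assert: a Point constructed with three coordinates reports
# hasz=True and exposes its Z value directly.
#
#     >>> from django.contrib.gis.geos import Point
#     >>> pnt = Point(-95.363151, 29.763374, 18, srid=4326)
#     >>> pnt.hasz
#     True
#     >>> pnt.z
#     18.0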
import os
from nose.tools import *
class TempDir(object):
'temporary directory that is automatically deleted when object is released'
def __init__(self):
import tempfile
self.path = tempfile.mkdtemp()
def __str__(self):
return self.path
def __del__(self):
'recursively delete the temp dir and its subdirs'
if self.path is not None:
from shutil import rmtree
rmtree(self.path)
self.path = None
def subfile(self,name):
'return full path by appending name to temp dir path'
return os.path.join(self.path,name)
def copyFile(self,path):
'copy file into the temp dir and return its new path'
filename = self.subfile(os.path.basename(path))
from shutil import copyfile
copyfile(path,filename)
return filename
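
# Typical usage sketch: the directory exists only as long as the object does.
#
#     >>> tmp = TempDir()
#     >>> path = tmp.subfile('results.txt')  # full path inside the temp dir
#     >>> del tmp                            # temp dir is removed recursively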
def get_pygr_data_path(newpath=''):
'force pygr.Data to use newpath, without side-effects on environment'
import pygr.Data
pygr.Data.pygrDataPath = newpath
reload(pygr.Data)
del pygr.Data.pygrDataPath
return pygr.Data
class TempPygrData(TempDir):
'restrict pygr.Data to an initially empty temp directory'
def __init__(self):
TempDir.__init__(self)
self.force_reload(str(self))
def force_reload(self,newpath=None):
if newpath is None:
newpath = self.pygrdatapath
else:
self.pygrdatapath = newpath
return get_pygr_data_path(newpath)
def __del__(self):
get_pygr_data_path(None)
TempDir.__del__(self)
class TempPygrDataMySQL(TempPygrData):
'restrict pygr.Data to an initially empty MySQL resource database'
def __init__(self,dbname='test',args=''):
TempDir.__init__(self) # GENERATE A TEMPORARY TABLENAME
import random
l = [c for c in 'TeMpBiGdAcDy']
random.shuffle(l)
tablename = dbname+'.'+''.join(l)
import pygr.Data
db = pygr.Data.ResourceDBMySQL(tablename+args,createLayer='temp') # CREATE TABLE
self.cursor = db.cursor
self.tablename = tablename
self.force_reload('mysql:'+tablename+args) # RELOAD PYGR.DATA USING NEW TABLE
def __del__(self):
'drop the temporary resource database table'
TempDir.__del__(self)
try:
t = self.tablename
except AttributeError: # APPARENTLY NO TABLE CREATED, SO NOTHING TO DO.
pass
else:
import pygr.Data
self.cursor.execute('drop table if exists %s' % self.tablename)
self.cursor.execute('drop table if exists %s_schema' % self.tablename)
try:
del pygr.Data.getResource.layer['temp'] # REMOVE FROM LAYER INDEX
except KeyError:
pass
get_pygr_data_path(None)
def skiptest():
'cause nose to skip the current test case'
import nose
raise nose.SkipTest
def skip_errors(*skipErrors):
'decorator will force skipping of tests on specified error types'
def decorate(f):
def new_f(*args,**kwargs):
try:
return f(*args,**kwargs)
except skipErrors:
skiptest()
return new_f
return decorate
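
# Example use of the decorator above: any of the listed error types raised
# inside the test body is converted into a nose SkipTest instead of a failure.
# The test body below is hypothetical.
#
#     @skip_errors(IOError, KeyError)
#     def test_optional_resource():
#         open('/no/such/file')  # IOError here causes the test to be skipped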
class PygrDataTextFile(object):
'''dict interface to a text file storage that is pygr.Data-smart,
i.e. it uses pygr.Data.getResource.loads(), so data will be saved
and loaded in terms of pygr.Data resource IDs, which will be loaded
from pygr.Data in the usual way. Intended for storing test results
in a platform-independent text format.'''
def __init__(self,path,mode='r'):
'open in mode r, a or w'
self.path = path
self.mode = mode
if mode=='r' or mode=='a':
ifile = file(path)
import pickle
self.d = pickle.load(ifile)
ifile.close()
elif mode=='w':
self.d = {}
else:
raise ValueError('unknown file mode %s. Use r, w, or a.' % mode)
def __getitem__(self,k):
s = self.d[k]
import pygr.Data
return pygr.Data.getResource.loads(s)
def __setitem__(self,k,obj):
if self.mode=='r':
raise ValueError('this PygrDataTextFile was opened read-only! Use append mode')
import pygr.Data
s = pygr.Data.getResource.dumps(obj)
self.d[k] = s
self.save()
def __delitem__(self,k):
if self.mode=='r':
raise ValueError('this PygrDataTextFile was opened read-only! Use append mode')
del self.d[k]
self.save()
def __iter__(self): return iter(self.d)
def save(self):
'save our dictionary to text file by pickling'
if self.mode=='r':
raise ValueError('this PygrDataTextFile was opened read-only! Use append mode')
ifile = file(self.path,'w')
import pickle
pickle.dump(self.d,ifile)
ifile.close()
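
# Usage sketch for PygrDataTextFile: values are stored via
# pygr.Data.getResource.dumps(), so lookups return the loaded resource.
# 'expected.pickle' and some_resource are hypothetical.
#
#     store = PygrDataTextFile('expected.pickle', mode='w')
#     store['hg17'] = some_resource       # pickled as a pygr.Data resource ID
#     obj = PygrDataTextFile('expected.pickle')['hg17']  # resource reloaded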
def find_unused_port(port=5123):
    'look for an unused port beginning at the specified port number.'
import xmlrpclib,socket
while port<9999:
s = xmlrpclib.ServerProxy('http://localhost:%d' %port)
try:
s.listMethods()
port += 1
except socket.error:
return port
raise OSError('unable to find any open port')
class TestXMLRPCServer(object):
"""runs XMLRPC server in the background with a list of pygr.Data resources
Makes server exit when this object is released.
Because we want this to work even on Windows (gag! choke!),
we can't use fork, backgrounding or any other quasi-sensible method for
running the server process in the background. So we just use a separate
thread to keep our caller from blocking...
Optional arguments:
PYGRDATAPATH: passed to the server process command line as its PYGRDATAPATH
checkResources: if True, first check that all pygrDataNames are loadable."""
def __init__(self,*pygrDataNames,**kwargs):
'starts server, returns without blocking'
self.port = find_unused_port()
import pygr.Data
try:
if kwargs['checkResources']:
for name in pygrDataNames: # ENSURE ALL RES FOR THE TEST ARE AVAILABLE
obj = pygr.Data.getResource(name)
except KeyError:
pass
self.pygrDataNames = pygrDataNames
try:
self.pygrDataPath = kwargs['PYGRDATAPATH'] # USER-SPECIFIED PATH
except KeyError:
self.pygrDataPath = 'PYGRDATAPATH' # DEFAULT: JUST USE ENV AS USUAL
try:
self.downloadDB = 'downloadDB='+kwargs['downloadDB']
except KeyError:
self.downloadDB = ''
from threading import Thread
t = Thread(target=self.run_server)
t.start()
import time
time.sleep(1) # WAIT TO MAKE SURE THE CHILD IS STARTED
def run_server(self):
'this method blocks, so run it in a separate thread'
print 'starting server on port',self.port
import sys
os.system('%s pygrdata_server.py %d %s %s %s'
%(sys.executable,self.port,self.pygrDataPath,
self.downloadDB,' '.join(self.pygrDataNames)))
print 'server exited.'
def access_server(self):
'force pygr.Data to only use the XMLRPC server'
return get_pygr_data_path('http://localhost:%d' % self.port)
def close(self):
import xmlrpclib
s = xmlrpclib.ServerProxy('http://localhost:%d' % self.port)
s.exit_now() # TELL THE SERVER TO EXIT
get_pygr_data_path(None) # FORCE IT TO RESTORE STANDARD PYGRDATAPATH
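
# Usage sketch for TestXMLRPCServer; the resource name is hypothetical.
#
#     server = TestXMLRPCServer('Bio.Seq.Genome.HUMAN.hg17')
#     pygrData = server.access_server()  # pygr.Data now reads only from the server
#     # ... run tests against the served resources ...
#     server.close()                     # tell the server process to exit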
def approximate_cmp(x,y,delta):
'''expects two lists of tuples. Performs comparison as usual,
except that numeric types are considered equal if they differ by
less than delta'''
diff = cmp(len(x),len(y))
if diff != 0:
return diff
x.sort() # SORT TO ENSURE IN SAME ORDER...
y.sort()
for i in range(len(x)):
s = x[i]
t = y[i]
diff = cmp(len(s),len(t))
if diff != 0:
return diff
for j in range(len(s)):
u = s[j]
v = t[j]
if isinstance(u,int) or isinstance(u,float):
diff = u - v
if diff < -delta:
return -1
elif diff >delta:
return 1
else:
diff = cmp(u,v)
if diff != 0:
return diff
return 0
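
# Example: numeric fields that differ by no more than delta compare equal, so
# this returns 0 even though the floats are not identical.
#
#     >>> approximate_cmp([(1.0, 'a')], [(1.0005, 'a')], 0.001)
#     0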
class TestBase(object):
'''base class for tests that can skip on setup errors.
You can subclass the following attributes:
Class attribute _skipSetupErrors gives tuple of
setup error types that will cause the test to be skipped.
Class attribute _testLevel, if provided, should be an
integer indicating the intensity level of the test,
starting from 0 (lowest). This will be compared against
the environment variable PYGR_TEST_LEVEL, and if greater,
will force skipping of this test class.'''
_skipSetupErrors = (KeyError,AttributeError,IOError)
def setup(self):
if not self.is_approved():
skiptest()
try:
m = self.try_setup
except AttributeError:
return
try:
m()
except self._skipSetupErrors:
skiptest()
def is_approved(self):
'True if this test class is approved'
try:
level = self._testLevel
except AttributeError:
return True # NO LEVEL, SO NO APPROVAL REQUIRED
import os
try:
approved = int(os.environ['PYGR_TEST_LEVEL'])
except (KeyError,ValueError):
approved = 0
if level>approved:
return False
else:
return True
## def teardown(self):
## try:
## self.tear_me_down()
## except KeyError:
## skiptest()
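
# Sketch of a TestBase subclass: errors of the types in _skipSetupErrors raised
# by try_setup, or a _testLevel above PYGR_TEST_LEVEL, skip the test instead of
# failing it. open_test_database() is a hypothetical helper.
#
#     class SlowDatabaseTest(TestBase):
#         _testLevel = 2  # skipped unless PYGR_TEST_LEVEL >= 2
#         def try_setup(self):
#             self.db = open_test_database()  # IOError etc. -> skip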
#!/usr/bin/env python2
# Copyright (c) 2007-2014 Heikki Hokkanen <hoxu@users.sf.net> & others (see doc/AUTHOR)
# GPLv2 / GPLv3
import datetime
import getopt
import glob
import os
import pickle
import platform
import re
import shutil
import subprocess
import sys
import time
import zlib
if sys.version_info < (2, 6):
print >> sys.stderr, "Python 2.6 or higher is required for gitstats"
sys.exit(1)
from multiprocessing import Pool
os.environ['LC_ALL'] = 'C'
GNUPLOT_COMMON = 'set terminal png transparent size 640,240\nset size 1.0,1.0\n'
ON_LINUX = (platform.system() == 'Linux')
WEEKDAYS = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
exectime_internal = 0.0
exectime_external = 0.0
time_start = time.time()
# By default, gnuplot is searched from path, but can be overridden with the
# environment variable "GNUPLOT"
gnuplot_cmd = 'gnuplot'
if 'GNUPLOT' in os.environ:
gnuplot_cmd = os.environ['GNUPLOT']
conf = {
'max_domains': 10,
'max_ext_length': 10,
'style': 'gitstats.css',
'max_authors': 20,
'authors_top': 5,
'commit_begin': '',
'commit_end': 'HEAD',
'linear_linestats': 1,
'project_name': '',
'processes': 8,
'start_date': ''
}
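# The defaults above can be adjusted in place before collection runs, e.g.:
#
#     conf['max_authors'] = 30
#     conf['commit_begin'] = 'v1.0'  # limit statistics to v1.0..HEAD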
def getpipeoutput(cmds, quiet=False):
global exectime_external
start = time.time()
if not quiet and ON_LINUX and os.isatty(1):
        print '>> ' + ' | '.join(cmds),
sys.stdout.flush()
p = subprocess.Popen(cmds[0], stdout=subprocess.PIPE, shell=True)
processes = [p]
for x in cmds[1:]:
p = subprocess.Popen(x, stdin=p.stdout, stdout=subprocess.PIPE, shell=True)
processes.append(p)
output = p.communicate()[0]
for p in processes:
p.wait()
end = time.time()
if not quiet:
if ON_LINUX and os.isatty(1):
            print '\r',
        print '[%.5f] >> %s' % (end - start, ' | '.join(cmds))
exectime_external += (end - start)
return output.rstrip('\n')
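
# Example: each element of cmds becomes one stage of a shell pipeline, so the
# call below is equivalent to `git rev-list HEAD | wc -l` and returns the
# pipeline's stripped stdout.
#
#     count = int(getpipeoutput(['git rev-list HEAD', 'wc -l']))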
def getlogrange(defaultrange='HEAD', end_only=True):
commit_range = getcommitrange(defaultrange, end_only)
if len(conf['start_date']) > 0:
return '--since="%s" "%s"' % (conf['start_date'], commit_range)
return commit_range
def getcommitrange(defaultrange='HEAD', end_only=False):
if len(conf['commit_end']) > 0:
if end_only or len(conf['commit_begin']) == 0:
return conf['commit_end']
return '%s..%s' % (conf['commit_begin'], conf['commit_end'])
return defaultrange
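
# Examples of the range logic above:
#
#     conf = {'commit_begin': '', 'commit_end': 'HEAD'}     -> 'HEAD'
#     conf = {'commit_begin': 'v1.0', 'commit_end': 'HEAD'} -> 'v1.0..HEAD'
#     same, but with end_only=True                          -> 'HEAD'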
def getkeyssortedbyvalues(dict):
return map(lambda el: el[1], sorted(map(lambda el: (el[1], el[0]), dict.items())))
# dict['author'] = { 'commits': 512 } - ...key(dict, 'commits')
def getkeyssortedbyvaluekey(d, key):
return map(lambda el: el[1], sorted(map(lambda el: (d[el][key], el), d.keys())))
def getstatsummarycounts(line):
    numbers = re.findall(r'\d+', line)
    if len(numbers) == 1:
        # neither insertions nor deletions: probably only happens for "0 files changed"
        numbers.append(0)
        numbers.append(0)
    elif len(numbers) == 2 and line.find('(+)') != -1:
        numbers.append(0)  # only insertions were printed on this line
    elif len(numbers) == 2 and line.find('(-)') != -1:
        numbers.insert(1, 0)  # only deletions were printed on this line
return numbers
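
# Examples of the `git log --shortstat` summary lines this normalizes; the
# result is always [files, insertions, deletions]:
#
#     '3 files changed, 9 insertions(+), 2 deletions(-)'  -> ['3', '9', '2']
#     '1 file changed, 4 insertions(+)'                   -> ['1', '4', 0]
#     '1 file changed, 2 deletions(-)'                    -> ['1', 0, '2']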
VERSION = 0
def getversion():
global VERSION
if VERSION == 0:
gitstats_repo = os.path.dirname(os.path.abspath(__file__))
VERSION = getpipeoutput(["git --git-dir=%s/.git --work-tree=%s rev-parse --short %s" %
(gitstats_repo, gitstats_repo, getcommitrange('HEAD').split('\n')[0])])
return VERSION
def getgitversion():
return getpipeoutput(['git --version']).split('\n')[0]
def getgnuplotversion():
return getpipeoutput(['%s --version' % gnuplot_cmd]).split('\n')[0]
def getnumoffilesfromrev(time_rev):
"""
Get number of files changed in commit
"""
time, rev = time_rev
return (int(time), rev, int(getpipeoutput(['git ls-tree -r --name-only "%s"' % rev, 'wc -l']).split('\n')[0]))
def getnumoflinesinblob(ext_blob):
"""
Get number of lines in blob
"""
ext, blob_id = ext_blob
return (ext, blob_id, int(getpipeoutput(['git cat-file blob %s' % blob_id, 'wc -l']).split()[0]))
class DataCollector:
"""Manages data collection from a revision control repository."""
def __init__(self):
self.stamp_created = time.time()
self.cache = {}
self.total_authors = 0
self.activity_by_hour_of_day = {} # hour -> commits
self.activity_by_day_of_week = {} # day -> commits
self.activity_by_month_of_year = {} # month [1-12] -> commits
self.activity_by_hour_of_week = {} # weekday -> hour -> commits
self.activity_by_hour_of_day_busiest = 0
self.activity_by_hour_of_week_busiest = 0
self.activity_by_year_week = {} # yy_wNN -> commits
self.activity_by_year_week_peak = 0
self.authors = {} # name -> {commits, first_commit_stamp, last_commit_stamp, last_active_day, active_days, lines_added, lines_removed}
self.total_commits = 0
self.total_files = 0
self.authors_by_commits = 0
# domains
self.domains = {} # domain -> commits
# author of the month
self.author_of_month = {} # month -> author -> commits
self.author_of_year = {} # year -> author -> commits
self.commits_by_month = {} # month -> commits
self.commits_by_year = {} # year -> commits
self.lines_added_by_month = {} # month -> lines added
self.lines_added_by_year = {} # year -> lines added
self.lines_removed_by_month = {} # month -> lines removed
self.lines_removed_by_year = {} # year -> lines removed
self.first_commit_stamp = 0
self.last_commit_stamp = 0
self.last_active_day = None
self.active_days = set()
# lines
self.total_lines = 0
self.total_lines_added = 0
self.total_lines_removed = 0
# size
self.total_size = 0
# timezone
self.commits_by_timezone = {} # timezone -> commits
# tags
self.tags = {}
self.files_by_stamp = {} # stamp -> files
# extensions
self.extensions = {} # extension -> files, lines
# line statistics
self.changes_by_date = {} # stamp -> { files, ins, del }
##
# This should be the main function to extract data from the repository.
def collect(self, dir):
self.dir = dir
if len(conf['project_name']) == 0:
self.projectname = os.path.basename(os.path.abspath(dir))
else:
self.projectname = conf['project_name']
##
# Load cacheable data
def loadCache(self, cachefile):
if not os.path.exists(cachefile):
return
        print 'Loading cache...'
f = open(cachefile, 'rb')
try:
self.cache = pickle.loads(zlib.decompress(f.read()))
except:
# temporary hack to upgrade non-compressed caches
f.seek(0)
self.cache = pickle.load(f)
f.close()
##
# Produce any additional statistics from the extracted data.
def refine(self):
pass
##
    # Get a dictionary of author information
def getAuthorInfo(self, author):
return None
def getActivityByDayOfWeek(self):
return {}
def getActivityByHourOfDay(self):
return {}
    # Get a dictionary of domain information
def getDomainInfo(self, domain):
return None
##
# Get a list of authors
def getAuthors(self):
return []
def getFirstCommitDate(self):
return datetime.datetime.now()
def getLastCommitDate(self):
return datetime.datetime.now()
def getStampCreated(self):
return self.stamp_created
def getTags(self):
return []
def getTotalAuthors(self):
return -1
def getTotalCommits(self):
return -1
def getTotalFiles(self):
return -1
def getTotalLOC(self):
return -1
##
# Save cacheable data
def saveCache(self, cachefile):
        print 'Saving cache...'
tempfile = cachefile + '.tmp'
f = open(tempfile, 'wb')
# pickle.dump(self.cache, f)
data = zlib.compress(pickle.dumps(self.cache))
f.write(data)
f.close()
try:
os.remove(cachefile)
except OSError:
pass
os.rename(tempfile, cachefile)
class GitDataCollector(DataCollector):
def collect(self, dir):
DataCollector.collect(self, dir)
self.total_authors += int(getpipeoutput(['git shortlog -s %s' % getlogrange(), 'wc -l']))
# self.total_lines = int(getoutput('git-ls-files -z |xargs -0 cat |wc -l'))
# tags
lines = getpipeoutput(['git show-ref --tags']).split('\n')
for line in lines:
if len(line) == 0:
continue
(hash, tag) = line.split(' ')
tag = tag.replace('refs/tags/', '')
output = getpipeoutput(['git log "%s" --pretty=format:"%%at %%aN" -n 1' % hash])
if len(output) > 0:
parts = output.split(' ')
stamp = 0
try:
stamp = int(parts[0])
except ValueError:
stamp = 0
self.tags[tag] = {'stamp': stamp, 'hash': hash,
'date': datetime.datetime.fromtimestamp(stamp).strftime('%Y-%m-%d'), 'commits': 0,
'authors': {}}
# collect info on tags, starting from latest
tags_sorted_by_date_desc = map(lambda el: el[1],
reversed(sorted(map(lambda el: (el[1]['date'], el[0]), self.tags.items()))))
prev = None
for tag in reversed(tags_sorted_by_date_desc):
cmd = 'git shortlog -s "%s"' % tag
if prev != None:
cmd += ' "^%s"' % prev
output = getpipeoutput([cmd])
if len(output) == 0:
continue
prev = tag
for line in output.split('\n'):
                parts = re.split(r'\s+', line, 2)
commits = int(parts[1])
author = parts[2]
self.tags[tag]['commits'] += commits
self.tags[tag]['authors'][author] = commits
# Collect revision statistics
# Outputs "<stamp> <date> <time> <timezone> <author> '<' <mail> '>'"
lines = getpipeoutput(
['git rev-list --pretty=format:"%%at %%ai %%aN <%%aE>" %s' % getlogrange('HEAD'), 'grep -v ^commit']).split(
'\n')
for line in lines:
parts = line.split(' ', 4)
author = ''
try:
stamp = int(parts[0])
except ValueError:
stamp = 0
timezone = parts[3]
author, mail = parts[4].split('<', 1)
author = author.rstrip()
mail = mail.rstrip('>')
domain = '?'
if mail.find('@') != -1:
domain = mail.rsplit('@', 1)[1]
date = datetime.datetime.fromtimestamp(float(stamp))
# First and last commit stamp (may be in any order because of cherry-picking and patches)
if stamp > self.last_commit_stamp:
self.last_commit_stamp = stamp
if self.first_commit_stamp == 0 or stamp < self.first_commit_stamp:
self.first_commit_stamp = stamp
# activity
# hour
hour = date.hour
self.activity_by_hour_of_day[hour] = self.activity_by_hour_of_day.get(hour, 0) + 1
# most active hour?
if self.activity_by_hour_of_day[hour] > self.activity_by_hour_of_day_busiest:
self.activity_by_hour_of_day_busiest = self.activity_by_hour_of_day[hour]
# day of week
day = date.weekday()
self.activity_by_day_of_week[day] = self.activity_by_day_of_week.get(day, 0) + 1
# domain stats
if domain not in self.domains:
self.domains[domain] = {}
# commits
self.domains[domain]['commits'] = self.domains[domain].get('commits', 0) + 1
# hour of week
if day not in self.activity_by_hour_of_week:
self.activity_by_hour_of_week[day] = {}
self.activity_by_hour_of_week[day][hour] = self.activity_by_hour_of_week[day].get(hour, 0) + 1
# most active hour?
if self.activity_by_hour_of_week[day][hour] > self.activity_by_hour_of_week_busiest:
self.activity_by_hour_of_week_busiest = self.activity_by_hour_of_week[day][hour]
# month of year
month = date.month
self.activity_by_month_of_year[month] = self.activity_by_month_of_year.get(month, 0) + 1
# yearly/weekly activity
yyw = date.strftime('%Y-%W')
self.activity_by_year_week[yyw] = self.activity_by_year_week.get(yyw, 0) + 1
if self.activity_by_year_week_peak < self.activity_by_year_week[yyw]:
self.activity_by_year_week_peak = self.activity_by_year_week[yyw]
# author stats
if author not in self.authors:
self.authors[author] = {}
# commits, note again that commits may be in any date order because of cherry-picking and patches
if 'last_commit_stamp' not in self.authors[author]:
self.authors[author]['last_commit_stamp'] = stamp
if stamp > self.authors[author]['last_commit_stamp']:
self.authors[author]['last_commit_stamp'] = stamp
if 'first_commit_stamp' not in self.authors[author]:
self.authors[author]['first_commit_stamp'] = stamp
if stamp < self.authors[author]['first_commit_stamp']:
self.authors[author]['first_commit_stamp'] = stamp
# author of the month/year
yymm = date.strftime('%Y-%m')
if yymm in self.author_of_month:
self.author_of_month[yymm][author] = self.author_of_month[yymm].get(author, 0) + 1
else:
self.author_of_month[yymm] = {}
self.author_of_month[yymm][author] = 1
self.commits_by_month[yymm] = self.commits_by_month.get(yymm, 0) + 1
yy = date.year
if yy in self.author_of_year:
self.author_of_year[yy][author] = self.author_of_year[yy].get(author, 0) + 1
else:
self.author_of_year[yy] = {}
self.author_of_year[yy][author] = 1
self.commits_by_year[yy] = self.commits_by_year.get(yy, 0) + 1
# authors: active days
yymmdd = date.strftime('%Y-%m-%d')
if 'last_active_day' not in self.authors[author]:
self.authors[author]['last_active_day'] = yymmdd
self.authors[author]['active_days'] = set([yymmdd])
elif yymmdd != self.authors[author]['last_active_day']:
self.authors[author]['last_active_day'] = yymmdd
self.authors[author]['active_days'].add(yymmdd)
# project: active days
if yymmdd != self.last_active_day:
self.last_active_day = yymmdd
self.active_days.add(yymmdd)
# timezone
self.commits_by_timezone[timezone] = self.commits_by_timezone.get(timezone, 0) + 1
# outputs "<stamp> <files>" for each revision
revlines = getpipeoutput(
['git rev-list --pretty=format:"%%at %%T" %s' % getlogrange('HEAD'), 'grep -v ^commit']).strip().split('\n')
lines = []
revs_to_read = []
time_rev_count = []
# Look up rev in cache and take info from cache if found
# If not append rev to list of rev to read from repo
for revline in revlines:
time, rev = revline.split(' ')
            # if the cache is empty, add time and rev to the list of new revs;
            # otherwise try to read the needed info from the cache
if 'files_in_tree' not in self.cache.keys():
revs_to_read.append((time, rev))
continue
if rev in self.cache['files_in_tree'].keys():
lines.append('%d %d' % (int(time), self.cache['files_in_tree'][rev]))
else:
revs_to_read.append((time, rev))
# Read revisions from repo
pool = Pool(processes=conf['processes'])
time_rev_count = pool.map(getnumoffilesfromrev, revs_to_read)
pool.terminate()
pool.join()
        # Update cache with new revisions and append them to the general list
for (time, rev, count) in time_rev_count:
if 'files_in_tree' not in self.cache:
self.cache['files_in_tree'] = {}
self.cache['files_in_tree'][rev] = count
lines.append('%d %d' % (int(time), count))
self.total_commits += len(lines)
for line in lines:
parts = line.split(' ')
if len(parts) != 2:
continue
(stamp, files) = parts[0:2]
try:
self.files_by_stamp[int(stamp)] = int(files)
except ValueError:
                print 'Warning: failed to parse line "%s"' % line
# extensions and size of files
lines = getpipeoutput(['git ls-tree -r -l -z %s' % getcommitrange('HEAD', end_only=True)]).split('\000')
blobs_to_read = []
for line in lines:
if len(line) == 0:
continue
            parts = re.split(r'\s+', line, 4)
if parts[0] == '160000' and parts[3] == '-':
# skip submodules
continue
blob_id = parts[2]
size = int(parts[3])
fullpath = parts[4]
self.total_size += size
self.total_files += 1
filename = fullpath.split('/')[-1] # strip directories
if filename.find('.') == -1 or filename.rfind('.') == 0:
ext = ''
else:
ext = filename[(filename.rfind('.') + 1):]
if len(ext) > conf['max_ext_length']:
ext = ''
if ext not in self.extensions:
self.extensions[ext] = {'files': 0, 'lines': 0}
self.extensions[ext]['files'] += 1
            # if the cache is empty, add ext and blob id to the list of new blobs;
            # otherwise try to read the needed info from the cache
if 'lines_in_blob' not in self.cache.keys():
blobs_to_read.append((ext, blob_id))
continue
if blob_id in self.cache['lines_in_blob'].keys():
self.extensions[ext]['lines'] += self.cache['lines_in_blob'][blob_id]
else:
blobs_to_read.append((ext, blob_id))
        # Get line counts for new blobs that weren't found in the cache
pool = Pool(processes=conf['processes'])
ext_blob_linecount = pool.map(getnumoflinesinblob, blobs_to_read)
pool.terminate()
pool.join()
        # Update cache and write down the number of lines per blob
for (ext, blob_id, linecount) in ext_blob_linecount:
if 'lines_in_blob' not in self.cache:
self.cache['lines_in_blob'] = {}
self.cache['lines_in_blob'][blob_id] = linecount
self.extensions[ext]['lines'] += self.cache['lines_in_blob'][blob_id]
# line statistics
# outputs:
# N files changed, N insertions (+), N deletions(-)
# <stamp> <author>
self.changes_by_date = {} # stamp -> { files, ins, del }
# computation of lines of code by date is better done
# on a linear history.
extra = ''
if conf['linear_linestats']:
extra = '--first-parent -m'
lines = getpipeoutput(
['git log --shortstat %s --pretty=format:"%%at %%aN" %s' % (extra, getlogrange('HEAD'))]).split('\n')
lines.reverse()
        files = 0
        inserted = 0
        deleted = 0
total_lines = 0
author = None
for line in lines:
if len(line) == 0:
continue
# <stamp> <author>
if re.search('files? changed', line) == None:
pos = line.find(' ')
if pos != -1:
try:
(stamp, author) = (int(line[:pos]), line[pos + 1:])
self.changes_by_date[stamp] = {'files': files, 'ins': inserted, 'del': deleted,
'lines': total_lines}
date = datetime.datetime.fromtimestamp(stamp)
yymm = date.strftime('%Y-%m')
self.lines_added_by_month[yymm] = self.lines_added_by_month.get(yymm, 0) + inserted
self.lines_removed_by_month[yymm] = self.lines_removed_by_month.get(yymm, 0) + deleted
yy = date.year
self.lines_added_by_year[yy] = self.lines_added_by_year.get(yy, 0) + inserted
self.lines_removed_by_year[yy] = self.lines_removed_by_year.get(yy, 0) + deleted
files, inserted, deleted = 0, 0, 0
except ValueError:
                        print 'Warning: unexpected line "%s"' % line
else:
                    print 'Warning: unexpected line "%s"' % line
else:
numbers = getstatsummarycounts(line)
if len(numbers) == 3:
(files, inserted, deleted) = map(lambda el: int(el), numbers)
total_lines += inserted
total_lines -= deleted
self.total_lines_added += inserted
self.total_lines_removed += deleted
else:
                    print 'Warning: failed to handle line "%s"' % line
(files, inserted, deleted) = (0, 0, 0)
# self.changes_by_date[stamp] = { 'files': files, 'ins': inserted, 'del': deleted }
self.total_lines += total_lines
# Per-author statistics
        # defined for (stamp, author) only if the author committed at this timestamp.
self.changes_by_date_by_author = {} # stamp -> author -> lines_added
# Similar to the above, but never use --first-parent
# (we need to walk through every commit to know who
# committed what, not just through mainline)
lines = getpipeoutput(
['git log --shortstat --date-order --pretty=format:"%%at %%aN" %s' % (getlogrange('HEAD'))]).split('\n')
lines.reverse()
        files = 0
        inserted = 0
        deleted = 0
author = None
stamp = 0
for line in lines:
if len(line) == 0:
continue
# <stamp> <author>
if re.search('files? changed', line) == None:
pos = line.find(' ')
if pos != -1:
try:
oldstamp = stamp
(stamp, author) = (int(line[:pos]), line[pos + 1:])
if oldstamp > stamp:
# clock skew, keep old timestamp to avoid having ugly graph
stamp = oldstamp
if author not in self.authors:
self.authors[author] = {'lines_added': 0, 'lines_removed': 0, 'commits': 0}
self.authors[author]['commits'] = self.authors[author].get('commits', 0) + 1
self.authors[author]['lines_added'] = self.authors[author].get('lines_added', 0) + inserted
self.authors[author]['lines_removed'] = self.authors[author].get('lines_removed', 0) + deleted
if stamp not in self.changes_by_date_by_author:
self.changes_by_date_by_author[stamp] = {}
if author not in self.changes_by_date_by_author[stamp]:
self.changes_by_date_by_author[stamp][author] = {}
self.changes_by_date_by_author[stamp][author]['lines_added'] = self.authors[author][
'lines_added']
self.changes_by_date_by_author[stamp][author]['commits'] = self.authors[author]['commits']
files, inserted, deleted = 0, 0, 0
except ValueError:
                        print 'Warning: unexpected line "%s"' % line
else:
                    print 'Warning: unexpected line "%s"' % line
else:
                numbers = getstatsummarycounts(line)
if len(numbers) == 3:
(files, inserted, deleted) = map(lambda el: int(el), numbers)
else:
                    print 'Warning: failed to handle line "%s"' % line
(files, inserted, deleted) = (0, 0, 0)
def refine(self):
# authors
# name -> {place_by_commits, commits_frac, date_first, date_last, timedelta}
self.authors_by_commits = getkeyssortedbyvaluekey(self.authors, 'commits')
self.authors_by_commits.reverse() # most first
for i, name in enumerate(self.authors_by_commits):
self.authors[name]['place_by_commits'] = i + 1
for name in self.authors.keys():
a = self.authors[name]
a['commits_frac'] = (100 * float(a['commits'])) / self.getTotalCommits()
date_first = datetime.datetime.fromtimestamp(a['first_commit_stamp'])
date_last = datetime.datetime.fromtimestamp(a['last_commit_stamp'])
delta = date_last - date_first
a['date_first'] = date_first.strftime('%Y-%m-%d')
a['date_last'] = date_last.strftime('%Y-%m-%d')
a['timedelta'] = delta
if 'lines_added' not in a: a['lines_added'] = 0
if 'lines_removed' not in a: a['lines_removed'] = 0
def getActiveDays(self):
return self.active_days
def getActivityByDayOfWeek(self):
return self.activity_by_day_of_week
def getActivityByHourOfDay(self):
return self.activity_by_hour_of_day
def getAuthorInfo(self, author):
return self.authors[author]
def getAuthors(self, limit=None):
res = getkeyssortedbyvaluekey(self.authors, 'commits')
res.reverse()
return res[:limit]
def getCommitDeltaDays(self):
return (self.last_commit_stamp / 86400 - self.first_commit_stamp / 86400) + 1
def getDomainInfo(self, domain):
return self.domains[domain]
def getDomains(self):
return self.domains.keys()
def getFirstCommitDate(self):
return datetime.datetime.fromtimestamp(self.first_commit_stamp)
def getLastCommitDate(self):
return datetime.datetime.fromtimestamp(self.last_commit_stamp)
def getTags(self):
lines = getpipeoutput(['git show-ref --tags', 'cut -d/ -f3'])
return lines.split('\n')
def getTagDate(self, tag):
return self.revToDate('tags/' + tag)
def getTotalAuthors(self):
return self.total_authors
def getTotalCommits(self):
return self.total_commits
def getTotalFiles(self):
return self.total_files
def getTotalLOC(self):
return self.total_lines
def getTotalSize(self):
return self.total_size
def revToDate(self, rev):
stamp = int(getpipeoutput(['git log --pretty=format:%%at "%s" -n 1' % rev]))
return datetime.datetime.fromtimestamp(stamp).strftime('%Y-%m-%d')
class ReportCreator:
"""Creates the actual report based on given data."""
def __init__(self):
pass
def create(self, data, path):
self.data = data
self.path = path
def html_linkify(text):
return text.lower().replace(' ', '_')
def html_header(level, text):
name = html_linkify(text)
return '\n<h%d id="%s"><a href="#%s">%s</a></h%d>\n\n' % (level, name, name, text, level)
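
# Example: html_header(2, 'Weekly activity') produces an anchored heading:
#
#     '\n<h2 id="weekly_activity"><a href="#weekly_activity">Weekly activity</a></h2>\n\n'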
class HTMLReportCreator(ReportCreator):
def create(self, data, path):
ReportCreator.create(self, data, path)
self.title = data.projectname
# copy static files. Looks in the binary directory, ../share/gitstats and /usr/share/gitstats
binarypath = os.path.dirname(os.path.abspath(__file__))
secondarypath = os.path.join(binarypath, '..', 'share', 'gitstats')
basedirs = [binarypath, secondarypath, '/usr/share/gitstats']
for file in (conf['style'], 'sortable.js', 'arrow-up.gif', 'arrow-down.gif', 'arrow-none.gif'):
for base in basedirs:
src = base + '/' + file
if os.path.exists(src):
shutil.copyfile(src, path + '/' + file)
break
else:
                print 'Warning: "%s" not found, so not copied (searched: %s)' % (file, basedirs)
f = open(path + "/index.html", 'w')
format = '%Y-%m-%d %H:%M:%S'
self.printHeader(f)
f.write('<h1>GitStats - %s</h1>' % data.projectname)
self.printNav(f)
f.write('<dl>')
f.write('<dt>Project name</dt><dd>%s</dd>' % (data.projectname))
f.write('<dt>Generated</dt><dd>%s (in %d seconds)</dd>' % (
datetime.datetime.now().strftime(format), time.time() - data.getStampCreated()))
f.write(
'<dt>Generator</dt><dd><a href="http://gitstats.sourceforge.net/">GitStats</a> (version %s), %s, %s</dd>' % (
getversion(), getgitversion(), getgnuplotversion()))
f.write('<dt>Report Period</dt><dd>%s to %s</dd>' % (
data.getFirstCommitDate().strftime(format), data.getLastCommitDate().strftime(format)))
f.write('<dt>Age</dt><dd>%d days, %d active days (%3.2f%%)</dd>' % (
data.getCommitDeltaDays(), len(data.getActiveDays()),
(100.0 * len(data.getActiveDays()) / data.getCommitDeltaDays())))
f.write('<dt>Total Files</dt><dd>%s</dd>' % data.getTotalFiles())
f.write('<dt>Total Lines of Code</dt><dd>%s (%d added, %d removed)</dd>' % (
data.getTotalLOC(), data.total_lines_added, data.total_lines_removed))
f.write('<dt>Total Commits</dt><dd>%s (average %.1f commits per active day, %.1f per all days)</dd>' % (
data.getTotalCommits(), float(data.getTotalCommits()) / len(data.getActiveDays()),
float(data.getTotalCommits()) / data.getCommitDeltaDays()))
f.write('<dt>Authors</dt><dd>%s (average %.1f commits per author)</dd>' % (
data.getTotalAuthors(), (1.0 * data.getTotalCommits()) / data.getTotalAuthors()))
f.write('</dl>')
f.write('</body>\n</html>')
f.close()
###
# Activity
f = open(path + '/activity.html', 'w')
self.printHeader(f)
f.write('<h1>Activity</h1>')
self.printNav(f)
# f.write('<h2>Last 30 days</h2>')
# f.write('<h2>Last 12 months</h2>')
# Weekly activity
WEEKS = 32
f.write(html_header(2, 'Weekly activity'))
f.write('<p>Last %d weeks</p>' % WEEKS)
# generate weeks to show (previous N weeks from now)
now = datetime.datetime.now()
deltaweek = datetime.timedelta(7)
weeks = []
stampcur = now
for i in range(0, WEEKS):
weeks.insert(0, stampcur.strftime('%Y-%W'))
stampcur -= deltaweek
# top row: commits & bar
f.write('<table class="noborders"><tr>')
for i in range(0, WEEKS):
commits = 0
if weeks[i] in data.activity_by_year_week:
commits = data.activity_by_year_week[weeks[i]]
percentage = 0
if weeks[i] in data.activity_by_year_week:
percentage = float(data.activity_by_year_week[weeks[i]]) / data.activity_by_year_week_peak
height = max(1, int(200 * percentage))
f.write(
'<td style="text-align: center; vertical-align: bottom">%d<div style="display: block; background-color: red; width: 20px; height: %dpx"></div></td>' % (
commits, height))
# bottom row: year/week
f.write('</tr><tr>')
for i in range(0, WEEKS):
f.write('<td>%s</td>' % (WEEKS - i))
f.write('</tr></table>')
# Hour of Day
f.write(html_header(2, 'Hour of Day'))
hour_of_day = data.getActivityByHourOfDay()
f.write('<table><tr><th>Hour</th>')
for i in range(0, 24):
f.write('<th>%d</th>' % i)
f.write('</tr>\n<tr><th>Commits</th>')
fp = open(path + '/hour_of_day.dat', 'w')
for i in range(0, 24):
if i in hour_of_day:
r = 127 + int((float(hour_of_day[i]) / data.activity_by_hour_of_day_busiest) * 128)
f.write('<td style="background-color: rgb(%d, 0, 0)">%d</td>' % (r, hour_of_day[i]))
fp.write('%d %d\n' % (i, hour_of_day[i]))
else:
f.write('<td>0</td>')
fp.write('%d 0\n' % i)
fp.close()
f.write('</tr>\n<tr><th>%</th>')
totalcommits = data.getTotalCommits()
for i in range(0, 24):
if i in hour_of_day:
r = 127 + int((float(hour_of_day[i]) / data.activity_by_hour_of_day_busiest) * 128)
f.write('<td style="background-color: rgb(%d, 0, 0)">%.2f</td>' % (
r, (100.0 * hour_of_day[i]) / totalcommits))
else:
f.write('<td>0.00</td>')
f.write('</tr></table>')
f.write('<img src="hour_of_day.png" alt="Hour of Day">')
fg = open(path + '/hour_of_day.dat', 'w')
for i in range(0, 24):
if i in hour_of_day:
fg.write('%d %d\n' % (i + 1, hour_of_day[i]))
else:
fg.write('%d 0\n' % (i + 1))
fg.close()
# Day of Week
f.write(html_header(2, 'Day of Week'))
day_of_week = data.getActivityByDayOfWeek()
f.write('<div class="vtable"><table>')
f.write('<tr><th>Day</th><th>Total (%)</th></tr>')
fp = open(path + '/day_of_week.dat', 'w')
for d in range(0, 7):
commits = 0
if d in day_of_week:
commits = day_of_week[d]
fp.write('%d %s %d\n' % (d + 1, WEEKDAYS[d], commits))
f.write('<tr>')
f.write('<th>%s</th>' % (WEEKDAYS[d]))
if d in day_of_week:
f.write('<td>%d (%.2f%%)</td>' % (day_of_week[d], (100.0 * day_of_week[d]) / totalcommits))
else:
f.write('<td>0</td>')
f.write('</tr>')
f.write('</table></div>')
f.write('<img src="day_of_week.png" alt="Day of Week">')
fp.close()
# Hour of Week
f.write(html_header(2, 'Hour of Week'))
f.write('<table>')
f.write('<tr><th>Weekday</th>')
for hour in range(0, 24):
f.write('<th>%d</th>' % (hour))
f.write('</tr>')
for weekday in range(0, 7):
f.write('<tr><th>%s</th>' % (WEEKDAYS[weekday]))
for hour in range(0, 24):
try:
commits = data.activity_by_hour_of_week[weekday][hour]
except KeyError:
commits = 0
if commits != 0:
f.write('<td')
r = 127 + int((float(commits) / data.activity_by_hour_of_week_busiest) * 128)
f.write(' style="background-color: rgb(%d, 0, 0)"' % r)
f.write('>%d</td>' % commits)
else:
f.write('<td></td>')
f.write('</tr>')
f.write('</table>')
# Month of Year
f.write(html_header(2, 'Month of Year'))
f.write('<div class="vtable"><table>')
f.write('<tr><th>Month</th><th>Commits (%)</th></tr>')
fp = open(path + '/month_of_year.dat', 'w')
for mm in range(1, 13):
commits = 0
if mm in data.activity_by_month_of_year:
commits = data.activity_by_month_of_year[mm]
f.write(
'<tr><td>%d</td><td>%d (%.2f %%)</td></tr>' % (mm, commits, (100.0 * commits) / data.getTotalCommits()))
fp.write('%d %d\n' % (mm, commits))
fp.close()
f.write('</table></div>')
f.write('<img src="month_of_year.png" alt="Month of Year">')
# Commits by year/month
f.write(html_header(2, 'Commits by year/month'))
f.write(
'<div class="vtable"><table><tr><th>Month</th><th>Commits</th><th>Lines added</th><th>Lines removed</th></tr>')
for yymm in reversed(sorted(data.commits_by_month.keys())):
f.write('<tr><td>%s</td><td>%d</td><td>%d</td><td>%d</td></tr>' % (
yymm, data.commits_by_month.get(yymm, 0), data.lines_added_by_month.get(yymm, 0),
data.lines_removed_by_month.get(yymm, 0)))
f.write('</table></div>')
f.write('<img src="commits_by_year_month.png" alt="Commits by year/month">')
fg = open(path + '/commits_by_year_month.dat', 'w')
for yymm in sorted(data.commits_by_month.keys()):
fg.write('%s %s\n' % (yymm, data.commits_by_month[yymm]))
fg.close()
# Commits by year
f.write(html_header(2, 'Commits by Year'))
f.write(
'<div class="vtable"><table><tr><th>Year</th><th>Commits (% of all)</th><th>Lines added</th><th>Lines removed</th></tr>')
for yy in reversed(sorted(data.commits_by_year.keys())):
f.write('<tr><td>%s</td><td>%d (%.2f%%)</td><td>%d</td><td>%d</td></tr>' % (
yy, data.commits_by_year.get(yy, 0), (100.0 * data.commits_by_year.get(yy, 0)) / data.getTotalCommits(),
data.lines_added_by_year.get(yy, 0), data.lines_removed_by_year.get(yy, 0)))
f.write('</table></div>')
f.write('<img src="commits_by_year.png" alt="Commits by Year">')
fg = open(path + '/commits_by_year.dat', 'w')
for yy in sorted(data.commits_by_year.keys()):
fg.write('%d %d\n' % (yy, data.commits_by_year[yy]))
fg.close()
# Commits by timezone
f.write(html_header(2, 'Commits by Timezone'))
f.write('<table><tr>')
f.write('<th>Timezone</th><th>Commits</th>')
f.write('</tr>')
max_commits_on_tz = max(data.commits_by_timezone.values())
for i in sorted(data.commits_by_timezone.keys(), key=lambda n: int(n)):
commits = data.commits_by_timezone[i]
r = 127 + int((float(commits) / max_commits_on_tz) * 128)
f.write('<tr><th>%s</th><td style="background-color: rgb(%d, 0, 0)">%d</td></tr>' % (i, r, commits))
f.write('</table>')
f.write('</body></html>')
f.close()
###
# Authors
f = open(path + '/authors.html', 'w')
self.printHeader(f)
f.write('<h1>Authors</h1>')
self.printNav(f)
# Authors :: List of authors
f.write(html_header(2, 'List of Authors'))
f.write('<table class="authors sortable" id="authors">')
f.write(
'<tr><th>Author</th><th>Commits (%)</th><th>+ lines</th><th>- lines</th><th>First commit</th><th>Last commit</th><th class="unsortable">Age</th><th>Active days</th><th># by commits</th></tr>')
for author in data.getAuthors(conf['max_authors']):
info = data.getAuthorInfo(author)
f.write(
'<tr><td>%s</td><td>%d (%.2f%%)</td><td>%d</td><td>%d</td><td>%s</td><td>%s</td><td>%s</td><td>%d</td><td>%d</td></tr>' % (
author, info['commits'], info['commits_frac'], info['lines_added'], info['lines_removed'],
info['date_first'], info['date_last'], info['timedelta'], len(info['active_days']),
info['place_by_commits']))
f.write('</table>')
allauthors = data.getAuthors()
if len(allauthors) > conf['max_authors']:
rest = allauthors[conf['max_authors']:]
f.write('<p class="moreauthors">These didn\'t make it to the top: %s</p>' % ', '.join(rest))
f.write(html_header(2, 'Cumulated Added Lines of Code per Author'))
f.write('<img src="lines_of_code_by_author.png" alt="Lines of code per Author">')
if len(allauthors) > conf['max_authors']:
f.write('<p class="moreauthors">Only top %d authors shown</p>' % conf['max_authors'])
f.write(html_header(2, 'Commits per Author'))
f.write('<img src="commits_by_author.png" alt="Commits per Author">')
if len(allauthors) > conf['max_authors']:
f.write('<p class="moreauthors">Only top %d authors shown</p>' % conf['max_authors'])
fgl = open(path + '/lines_of_code_by_author.dat', 'w')
fgc = open(path + '/commits_by_author.dat', 'w')
        # Cumulated added lines by author. To save memory,
        # changes_by_date_by_author[stamp][author] is defined only at points
        # where the author commits; lines_by_authors lets us generate all the
        # points in the .dat file.
        lines_by_authors = {}
        # Don't rely on getAuthors to give the same order each time. Be robust
        # and keep the list in a variable.
        commits_by_authors = {}  # cumulated commits by author
self.authors_to_plot = data.getAuthors(conf['max_authors'])
for author in self.authors_to_plot:
lines_by_authors[author] = 0
commits_by_authors[author] = 0
for stamp in sorted(data.changes_by_date_by_author.keys()):
fgl.write('%d' % stamp)
fgc.write('%d' % stamp)
for author in self.authors_to_plot:
if author in data.changes_by_date_by_author[stamp].keys():
lines_by_authors[author] = data.changes_by_date_by_author[stamp][author]['lines_added']
commits_by_authors[author] = data.changes_by_date_by_author[stamp][author]['commits']
fgl.write(' %d' % lines_by_authors[author])
fgc.write(' %d' % commits_by_authors[author])
fgl.write('\n')
fgc.write('\n')
fgl.close()
fgc.close()
# Authors :: Author of Month
f.write(html_header(2, 'Author of Month'))
f.write('<table class="sortable" id="aom">')
f.write(
'<tr><th>Month</th><th>Author</th><th>Commits (%%)</th><th class="unsortable">Next top %d</th><th>Number of authors</th></tr>' %
conf['authors_top'])
for yymm in reversed(sorted(data.author_of_month.keys())):
authordict = data.author_of_month[yymm]
authors = getkeyssortedbyvalues(authordict)
authors.reverse()
commits = data.author_of_month[yymm][authors[0]]
next = ', '.join(authors[1:conf['authors_top'] + 1])
f.write('<tr><td>%s</td><td>%s</td><td>%d (%.2f%% of %d)</td><td>%s</td><td>%d</td></tr>' % (
yymm, authors[0], commits, (100.0 * commits) / data.commits_by_month[yymm], data.commits_by_month[yymm],
next, len(authors)))
f.write('</table>')
f.write(html_header(2, 'Author of Year'))
f.write(
'<table class="sortable" id="aoy"><tr><th>Year</th><th>Author</th><th>Commits (%%)</th><th class="unsortable">Next top %d</th><th>Number of authors</th></tr>' %
conf['authors_top'])
for yy in reversed(sorted(data.author_of_year.keys())):
authordict = data.author_of_year[yy]
authors = getkeyssortedbyvalues(authordict)
authors.reverse()
commits = data.author_of_year[yy][authors[0]]
next = ', '.join(authors[1:conf['authors_top'] + 1])
f.write('<tr><td>%s</td><td>%s</td><td>%d (%.2f%% of %d)</td><td>%s</td><td>%d</td></tr>' % (
yy, authors[0], commits, (100.0 * commits) / data.commits_by_year[yy], data.commits_by_year[yy], next,
len(authors)))
f.write('</table>')
# Domains
f.write(html_header(2, 'Commits by Domains'))
domains_by_commits = getkeyssortedbyvaluekey(data.domains, 'commits')
domains_by_commits.reverse() # most first
f.write('<div class="vtable"><table>')
f.write('<tr><th>Domains</th><th>Total (%)</th></tr>')
fp = open(path + '/domains.dat', 'w')
n = 0
for domain in domains_by_commits:
if n == conf['max_domains']:
break
commits = 0
n += 1
info = data.getDomainInfo(domain)
fp.write('%s %d %d\n' % (domain, n, info['commits']))
f.write('<tr><th>%s</th><td>%d (%.2f%%)</td></tr>' % (
domain, info['commits'], (100.0 * info['commits'] / totalcommits)))
f.write('</table></div>')
f.write('<img src="domains.png" alt="Commits by Domains">')
fp.close()
f.write('</body></html>')
f.close()
###
# Files
f = open(path + '/files.html', 'w')
self.printHeader(f)
f.write('<h1>Files</h1>')
self.printNav(f)
f.write('<dl>\n')
f.write('<dt>Total files</dt><dd>%d</dd>' % data.getTotalFiles())
f.write('<dt>Total lines</dt><dd>%d</dd>' % data.getTotalLOC())
try:
f.write(
'<dt>Average file size</dt><dd>%.2f bytes</dd>' % (float(data.getTotalSize()) / data.getTotalFiles()))
except ZeroDivisionError:
pass
f.write('</dl>\n')
# Files :: File count by date
f.write(html_header(2, 'File count by date'))
# use set to get rid of duplicate/unnecessary entries
files_by_date = set()
for stamp in sorted(data.files_by_stamp.keys()):
files_by_date.add(
'%s %d' % (datetime.datetime.fromtimestamp(stamp).strftime('%Y-%m-%d'), data.files_by_stamp[stamp]))
fg = open(path + '/files_by_date.dat', 'w')
for line in sorted(list(files_by_date)):
fg.write('%s\n' % line)
# for stamp in sorted(data.files_by_stamp.keys()):
# fg.write('%s %d\n' % (datetime.datetime.fromtimestamp(stamp).strftime('%Y-%m-%d'), data.files_by_stamp[stamp]))
fg.close()
f.write('<img src="files_by_date.png" alt="Files by Date">')
# f.write('<h2>Average file size by date</h2>')
# Files :: Extensions
f.write(html_header(2, 'Extensions'))
f.write(
'<table class="sortable" id="ext"><tr><th>Extension</th><th>Files (%)</th><th>Lines (%)</th><th>Lines/file</th></tr>')
for ext in sorted(data.extensions.keys()):
files = data.extensions[ext]['files']
lines = data.extensions[ext]['lines']
try:
loc_percentage = (100.0 * lines) / data.getTotalLOC()
except ZeroDivisionError:
loc_percentage = 0
f.write('<tr><td>%s</td><td>%d (%.2f%%)</td><td>%d (%.2f%%)</td><td>%d</td></tr>' % (
ext, files, (100.0 * files) / data.getTotalFiles(), lines, loc_percentage, lines / files))
f.write('</table>')
f.write('</body></html>')
f.close()
###
# Lines
f = open(path + '/lines.html', 'w')
self.printHeader(f)
f.write('<h1>Lines</h1>')
self.printNav(f)
f.write('<dl>\n')
f.write('<dt>Total lines</dt><dd>%d</dd>' % data.getTotalLOC())
f.write('</dl>\n')
f.write(html_header(2, 'Lines of Code'))
f.write('<img src="lines_of_code.png" alt="Lines of Code">')
fg = open(path + '/lines_of_code.dat', 'w')
for stamp in sorted(data.changes_by_date.keys()):
fg.write('%d %d\n' % (stamp, data.changes_by_date[stamp]['lines']))
fg.close()
f.write('</body></html>')
f.close()
###
# tags.html
f = open(path + '/tags.html', 'w')
self.printHeader(f)
f.write('<h1>Tags</h1>')
self.printNav(f)
f.write('<dl>')
f.write('<dt>Total tags</dt><dd>%d</dd>' % len(data.tags))
if len(data.tags) > 0:
f.write('<dt>Average commits per tag</dt><dd>%.2f</dd>' % (1.0 * data.getTotalCommits() / len(data.tags)))
f.write('</dl>')
f.write('<table class="tags">')
f.write('<tr><th>Name</th><th>Date</th><th>Commits</th><th>Authors</th></tr>')
# sort the tags by date desc
tags_sorted_by_date_desc = map(lambda el: el[1],
reversed(sorted(map(lambda el: (el[1]['date'], el[0]), data.tags.items()))))
for tag in tags_sorted_by_date_desc:
authorinfo = []
self.authors_by_commits = getkeyssortedbyvalues(data.tags[tag]['authors'])
for i in reversed(self.authors_by_commits):
authorinfo.append('%s (%d)' % (i, data.tags[tag]['authors'][i]))
f.write('<tr><td>%s</td><td>%s</td><td>%d</td><td>%s</td></tr>' % (
tag, data.tags[tag]['date'], data.tags[tag]['commits'], ', '.join(authorinfo)))
f.write('</table>')
f.write('</body></html>')
f.close()
self.createGraphs(path)
def createGraphs(self, path):
print('Generating graphs...')
# hour of day
f = open(path + '/hour_of_day.plot', 'w')
f.write(GNUPLOT_COMMON)
f.write(
"""
set output 'hour_of_day.png'
unset key
set xrange [0.5:24.5]
set yrange [0:]
set xtics 4
set grid y
set ylabel "Commits"
plot 'hour_of_day.dat' using 1:2:(0.5) w boxes fs solid
""")
f.close()
# day of week
f = open(path + '/day_of_week.plot', 'w')
f.write(GNUPLOT_COMMON)
f.write(
"""
set output 'day_of_week.png'
unset key
set xrange [0.5:7.5]
set yrange [0:]
set xtics 1
set grid y
set ylabel "Commits"
plot 'day_of_week.dat' using 1:3:(0.5):xtic(2) w boxes fs solid
""")
f.close()
# Domains
f = open(path + '/domains.plot', 'w')
f.write(GNUPLOT_COMMON)
f.write(
"""
set output 'domains.png'
unset key
unset xtics
set yrange [0:]
set grid y
set ylabel "Commits"
plot 'domains.dat' using 2:3:(0.5) with boxes fs solid, '' using 2:3:1 with labels rotate by 45 offset 0,1
""")
f.close()
# Month of Year
f = open(path + '/month_of_year.plot', 'w')
f.write(GNUPLOT_COMMON)
f.write(
"""
set output 'month_of_year.png'
unset key
set xrange [0.5:12.5]
set yrange [0:]
set xtics 1
set grid y
set ylabel "Commits"
plot 'month_of_year.dat' using 1:2:(0.5) w boxes fs solid
""")
f.close()
# commits_by_year_month
f = open(path + '/commits_by_year_month.plot', 'w')
f.write(GNUPLOT_COMMON)
f.write(
"""
set output 'commits_by_year_month.png'
unset key
set yrange [0:]
set xdata time
set timefmt "%Y-%m"
set format x "%Y-%m"
set xtics rotate
set bmargin 5
set grid y
set ylabel "Commits"
plot 'commits_by_year_month.dat' using 1:2:(0.5) w boxes fs solid
""")
f.close()
# commits_by_year
f = open(path + '/commits_by_year.plot', 'w')
f.write(GNUPLOT_COMMON)
f.write(
"""
set output 'commits_by_year.png'
unset key
set yrange [0:]
set xtics 1 rotate
set grid y
set ylabel "Commits"
set yrange [0:]
plot 'commits_by_year.dat' using 1:2:(0.5) w boxes fs solid
""")
f.close()
# Files by date
f = open(path + '/files_by_date.plot', 'w')
f.write(GNUPLOT_COMMON)
f.write(
"""
set output 'files_by_date.png'
unset key
set yrange [0:]
set xdata time
set timefmt "%Y-%m-%d"
set format x "%Y-%m-%d"
set grid y
set ylabel "Files"
set xtics rotate
set ytics autofreq
set bmargin 6
plot 'files_by_date.dat' using 1:2 w steps
""")
f.close()
# Lines of Code
f = open(path + '/lines_of_code.plot', 'w')
f.write(GNUPLOT_COMMON)
f.write(
"""
set output 'lines_of_code.png'
unset key
set yrange [0:]
set xdata time
set timefmt "%s"
set format x "%Y-%m-%d"
set grid y
set ylabel "Lines"
set xtics rotate
set bmargin 6
plot 'lines_of_code.dat' using 1:2 w lines
""")
f.close()
# Lines of Code Added per author
f = open(path + '/lines_of_code_by_author.plot', 'w')
f.write(GNUPLOT_COMMON)
f.write(
"""
set terminal png transparent size 640,480
set output 'lines_of_code_by_author.png'
set key left top
set yrange [0:]
set xdata time
set timefmt "%s"
set format x "%Y-%m-%d"
set grid y
set ylabel "Lines"
set xtics rotate
set bmargin 6
plot """
)
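# Column 1 of lines_of_code_by_author.dat is parsed as a unix timestamp (timefmt "%s");
# the author series start at column 2 because i is incremented before each append below.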
i = 1
plots = []
for a in self.authors_to_plot:
i = i + 1
author = a.replace("\"", "\\\"").replace("`", "")
plots.append("""'lines_of_code_by_author.dat' using 1:%d title "%s" w lines""" % (i, author))
f.write(", ".join(plots))
f.write('\n')
f.close()
# Commits per author
f = open(path + '/commits_by_author.plot', 'w')
f.write(GNUPLOT_COMMON)
f.write(
"""
set terminal png transparent size 640,480
set output 'commits_by_author.png'
set key left top
set yrange [0:]
set xdata time
set timefmt "%s"
set format x "%Y-%m-%d"
set grid y
set ylabel "Commits"
set xtics rotate
set bmargin 6
plot """
)
i = 1
plots = []
for a in self.authors_to_plot:
i = i + 1
author = a.replace("\"", "\\\"").replace("`", "")
plots.append("""'commits_by_author.dat' using 1:%d title "%s" w lines""" % (i, author))
f.write(", ".join(plots))
f.write('\n')
f.close()
os.chdir(path)
files = glob.glob(path + '/*.plot')
for f in files:
out = getpipeoutput([gnuplot_cmd + ' "%s"' % f])
if len(out) > 0:
print(out)
def printHeader(self, f, title=''):
f.write(
"""<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>GitStats - %s</title>
<link rel="stylesheet" href="%s" type="text/css">
<meta name="generator" content="GitStats %s">
<script type="text/javascript" src="sortable.js"></script>
</head>
<body>
""" % (self.title, conf['style'], getversion()))
def printNav(self, f):
f.write("""
<div class="nav">
<ul>
<li><a href="index.html">General</a></li>
<li><a href="activity.html">Activity</a></li>
<li><a href="authors.html">Authors</a></li>
<li><a href="files.html">Files</a></li>
<li><a href="lines.html">Lines</a></li>
<li><a href="tags.html">Tags</a></li>
</ul>
</div>
""")
def usage():
print("""
Usage: gitstats [options] <gitpath..> <outputpath>
Options:
-c key=value Override configuration value
Default config values:
%s
Please see the manual page for more details.
""" % conf)
class GitStats:
def run(self, args_orig):
optlist, args = getopt.getopt(args_orig, 'hc:', ["help"])
for o, v in optlist:
if o == '-c':
key, value = v.split('=', 1)
if key not in conf:
raise KeyError('no such key "%s" in config' % key)
if isinstance(conf[key], int):
conf[key] = int(value)
else:
conf[key] = value
elif o in ('-h', '--help'):
usage()
sys.exit()
if len(args) < 2:
usage()
sys.exit(0)
outputpath = os.path.abspath(args[-1])
rundir = os.getcwd()
try:
os.makedirs(outputpath)
except OSError:
pass
if not os.path.isdir(outputpath):
print('FATAL: Output path is not a directory or does not exist')
sys.exit(1)
if not getgnuplotversion():
print('gnuplot not found')
sys.exit(1)
print('Output path: %s' % outputpath)
cachefile = os.path.join(outputpath, 'gitstats.cache')
data = GitDataCollector()
data.loadCache(cachefile)
for gitpath in args[0:-1]:
print('Git path: %s' % gitpath)
prevdir = os.getcwd()
os.chdir(gitpath)
print('Collecting data...')
data.collect(gitpath)
os.chdir(prevdir)
print('Refining data...')
data.saveCache(cachefile)
data.refine()
os.chdir(rundir)
print('Generating report...')
report = HTMLReportCreator()
report.create(data, outputpath)
time_end = time.time()
exectime_internal = time_end - time_start
print('Execution time %.5f secs, %.5f secs (%.2f %%) in external commands' % (
exectime_internal, exectime_external, (100.0 * exectime_external) / exectime_internal))
if sys.stdin.isatty():
print('You may now run:')
print()
print(' sensible-browser \'%s\'' % os.path.join(outputpath, 'index.html').replace("'", "'\\''"))
print()
if __name__ == '__main__':
g = GitStats()
g.run(sys.argv[1:])
|
|
from __future__ import unicode_literals
from io import BytesIO
from django.test import TestCase
from mock import patch
from dbbackup.db.exceptions import DumpError
from dbbackup.db.postgresql import (
PgDumpBinaryConnector,
PgDumpConnector,
PgDumpGisConnector,
)
@patch('dbbackup.db.postgresql.PgDumpConnector.run_command',
return_value=(BytesIO(b'foo'), BytesIO()))
class PgDumpConnectorTest(TestCase):
def setUp(self):
self.connector = PgDumpConnector()
self.connector.settings['ENGINE'] = 'django.db.backends.postgresql'
self.connector.settings['NAME'] = 'dbname'
self.connector.settings['HOST'] = 'hostname'
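# run_command is patched at class level (see the decorator above), so each test
# receives the mock as mock_dump_cmd; call_args[0][0] is the command string the
# connector built, which the assertions below inspect.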
def test_user_password_uses_special_characters(self, mock_dump_cmd):
self.connector.settings['PASSWORD'] = '@!'
self.connector.settings['USER'] = '@'
self.connector.create_dump()
self.assertIn('postgresql://%40:%40%21@hostname/dbname', mock_dump_cmd.call_args[0][0])
def test_create_dump(self, mock_dump_cmd):
dump = self.connector.create_dump()
# Test dump
dump_content = dump.read()
self.assertTrue(dump_content)
self.assertEqual(dump_content, b'foo')
# Test cmd
self.assertTrue(mock_dump_cmd.called)
def test_create_dump_without_host_raises_error(self, mock_dump_cmd):
self.connector.settings.pop('HOST', None)
with self.assertRaises(DumpError):
self.connector.create_dump()
def test_password_but_no_user(self, mock_dump_cmd):
self.connector.settings.pop('USER', None)
self.connector.settings['PASSWORD'] = 'hello'
self.connector.create_dump()
self.assertIn('postgresql://hostname/dbname', mock_dump_cmd.call_args[0][0])
def test_create_dump_host(self, mock_dump_cmd):
# With
self.connector.settings['HOST'] = 'foo'
self.connector.create_dump()
self.assertIn('postgresql://foo/dbname', mock_dump_cmd.call_args[0][0])
def test_create_dump_port(self, mock_dump_cmd):
# Without
self.connector.settings.pop('PORT', None)
self.connector.create_dump()
self.assertIn('postgresql://hostname/dbname', mock_dump_cmd.call_args[0][0])
# With
self.connector.settings['PORT'] = 42
self.connector.create_dump()
self.assertIn('postgresql://hostname:42/dbname', mock_dump_cmd.call_args[0][0])
def test_create_dump_user(self, mock_dump_cmd):
# Without
self.connector.settings.pop('USER', None)
self.connector.create_dump()
self.assertIn('postgresql://hostname/dbname', mock_dump_cmd.call_args[0][0])
# With
self.connector.settings['USER'] = 'foo'
self.connector.create_dump()
self.assertIn('postgresql://foo@hostname/dbname', mock_dump_cmd.call_args[0][0])
def test_create_dump_exclude(self, mock_dump_cmd):
# Without
self.connector.create_dump()
self.assertNotIn(' --exclude-table-data=', mock_dump_cmd.call_args[0][0])
# With
self.connector.exclude = ('foo',)
self.connector.create_dump()
self.assertIn(' --exclude-table-data=foo', mock_dump_cmd.call_args[0][0])
# With several
self.connector.exclude = ('foo', 'bar')
self.connector.create_dump()
self.assertIn(' --exclude-table-data=foo', mock_dump_cmd.call_args[0][0])
self.assertIn(' --exclude-table-data=bar', mock_dump_cmd.call_args[0][0])
def test_create_dump_drop(self, mock_dump_cmd):
# Without
self.connector.drop = False
self.connector.create_dump()
self.assertNotIn(' --clean', mock_dump_cmd.call_args[0][0])
# With
self.connector.drop = True
self.connector.create_dump()
self.assertIn(' --clean', mock_dump_cmd.call_args[0][0])
@patch('dbbackup.db.postgresql.PgDumpConnector.run_command',
return_value=(BytesIO(), BytesIO()))
def test_restore_dump(self, mock_dump_cmd, mock_restore_cmd):
dump = self.connector.create_dump()
self.connector.restore_dump(dump)
# Test cmd
self.assertTrue(mock_restore_cmd.called)
@patch('dbbackup.db.postgresql.PgDumpConnector.run_command',
return_value=(BytesIO(), BytesIO()))
def test_restore_dump_user(self, mock_dump_cmd, mock_restore_cmd):
dump = self.connector.create_dump()
# Without
self.connector.settings.pop('USER', None)
self.connector.restore_dump(dump)
self.assertIn(
'postgresql://hostname/dbname',
mock_restore_cmd.call_args[0][0]
)
self.assertNotIn(' --username=', mock_restore_cmd.call_args[0][0])
# With
self.connector.settings['USER'] = 'foo'
self.connector.restore_dump(dump)
self.assertIn(
'postgresql://foo@hostname/dbname',
mock_restore_cmd.call_args[0][0]
)
@patch('dbbackup.db.postgresql.PgDumpBinaryConnector.run_command',
return_value=(BytesIO(b'foo'), BytesIO()))
class PgDumpBinaryConnectorTest(TestCase):
def setUp(self):
self.connector = PgDumpBinaryConnector()
self.connector.settings['HOST'] = 'hostname'
self.connector.settings['ENGINE'] = 'django.db.backends.postgresql'
self.connector.settings['NAME'] = 'dbname'
def test_create_dump(self, mock_dump_cmd):
dump = self.connector.create_dump()
# Test dump
dump_content = dump.read()
self.assertTrue(dump_content)
self.assertEqual(dump_content, b'foo')
# Test cmd
self.assertTrue(mock_dump_cmd.called)
self.assertIn('--format=custom', mock_dump_cmd.call_args[0][0])
def test_create_dump_exclude(self, mock_dump_cmd):
# Without
self.connector.create_dump()
self.assertNotIn(' --exclude-table-data=', mock_dump_cmd.call_args[0][0])
# With
self.connector.exclude = ('foo',)
self.connector.create_dump()
self.assertIn(' --exclude-table-data=foo', mock_dump_cmd.call_args[0][0])
# With several
self.connector.exclude = ('foo', 'bar')
self.connector.create_dump()
self.assertIn(' --exclude-table-data=foo', mock_dump_cmd.call_args[0][0])
self.assertIn(' --exclude-table-data=bar', mock_dump_cmd.call_args[0][0])
def test_create_dump_drop(self, mock_dump_cmd):
# Without
self.connector.drop = False
self.connector.create_dump()
self.assertNotIn(' --clean', mock_dump_cmd.call_args[0][0])
# Binary drop at restore level
self.connector.drop = True
self.connector.create_dump()
self.assertNotIn(' --clean', mock_dump_cmd.call_args[0][0])
@patch('dbbackup.db.postgresql.PgDumpBinaryConnector.run_command',
return_value=(BytesIO(), BytesIO()))
def test_restore_dump(self, mock_dump_cmd, mock_restore_cmd):
dump = self.connector.create_dump()
self.connector.restore_dump(dump)
# Test cmd
self.assertTrue(mock_restore_cmd.called)
@patch('dbbackup.db.postgresql.PgDumpGisConnector.run_command',
return_value=(BytesIO(b'foo'), BytesIO()))
class PgDumpGisConnectorTest(TestCase):
def setUp(self):
self.connector = PgDumpGisConnector()
self.connector.settings['HOST'] = 'hostname'
@patch('dbbackup.db.postgresql.PgDumpGisConnector.run_command',
return_value=(BytesIO(b'foo'), BytesIO()))
def test_restore_dump(self, mock_dump_cmd, mock_restore_cmd):
dump = self.connector.create_dump()
# Without ADMIN_USER
self.connector.settings.pop('ADMIN_USER', None)
self.connector.restore_dump(dump)
self.assertTrue(mock_restore_cmd.called)
# With
self.connector.settings['ADMIN_USER'] = 'foo'
self.connector.restore_dump(dump)
self.assertTrue(mock_restore_cmd.called)
def test_enable_postgis(self, mock_dump_cmd):
self.connector.settings['ADMIN_USER'] = 'foo'
self.connector._enable_postgis()
self.assertIn('"CREATE EXTENSION IF NOT EXISTS postgis;"', mock_dump_cmd.call_args[0][0])
self.assertIn('--username=foo', mock_dump_cmd.call_args[0][0])
def test_enable_postgis_host(self, mock_dump_cmd):
self.connector.settings['ADMIN_USER'] = 'foo'
# Without
self.connector.settings.pop('HOST', None)
self.connector._enable_postgis()
self.assertNotIn(' --host=', mock_dump_cmd.call_args[0][0])
# With
self.connector.settings['HOST'] = 'foo'
self.connector._enable_postgis()
self.assertIn(' --host=foo', mock_dump_cmd.call_args[0][0])
def test_enable_postgis_port(self, mock_dump_cmd):
self.connector.settings['ADMIN_USER'] = 'foo'
# Without
self.connector.settings.pop('PORT', None)
self.connector._enable_postgis()
self.assertNotIn(' --port=', mock_dump_cmd.call_args[0][0])
# With
self.connector.settings['PORT'] = 42
self.connector._enable_postgis()
self.assertIn(' --port=42', mock_dump_cmd.call_args[0][0])
@patch('dbbackup.db.base.Popen', **{
'return_value.wait.return_value': True,
'return_value.poll.return_value': False,
})
class PgDumpConnectorRunCommandTest(TestCase):
def test_run_command(self, mock_popen):
connector = PgDumpConnector()
connector.settings['HOST'] = 'hostname'
connector.create_dump()
self.assertEqual(mock_popen.call_args[0][0][0], 'pg_dump')
def test_run_command_with_password(self, mock_popen):
connector = PgDumpConnector()
connector.settings['HOST'] = 'hostname'
connector.settings['PASSWORD'] = 'foo'
connector.create_dump()
self.assertEqual(mock_popen.call_args[0][0][0], 'pg_dump')
def test_run_command_with_password_and_other(self, mock_popen):
connector = PgDumpConnector(env={'foo': 'bar'})
connector.settings['HOST'] = 'hostname'
connector.settings['PASSWORD'] = 'foo'
connector.create_dump()
self.assertEqual(mock_popen.call_args[0][0][0], 'pg_dump')
self.assertIn('foo', mock_popen.call_args[1]['env'])
self.assertEqual('bar', mock_popen.call_args[1]['env']['foo'])
|
|
#!/usr/bin/env python
"""
Unittests for wtforms.ext.appengine
To run the tests, use NoseGAE:
easy_install nose
easy_install nose-gae
nosetests --with-gae --without-sandbox
"""
import sys, os
WTFORMS_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.insert(0, WTFORMS_DIR)
from unittest import TestCase
from google.appengine.ext import db
from wtforms import Form, fields as f, validators
from wtforms.ext.appengine.db import model_form
from wtforms.ext.appengine.fields import GeoPtPropertyField
class DummyPostData(dict):
def getlist(self, key):
v = self[key]
if not isinstance(v, (list, tuple)):
v = [v]
return v
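# DummyPostData mimics the multidict-style formdata object WTForms expects:
# getlist() must always return a list of raw values for a field name.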
class Author(db.Model):
name = db.StringProperty(required=True)
city = db.StringProperty()
age = db.IntegerProperty(required=True)
is_admin = db.BooleanProperty(default=False)
class Book(db.Model):
author = db.ReferenceProperty(Author)
class AllPropertiesModel(db.Model):
"""Property names are ugly, yes."""
prop_string = db.StringProperty()
prop_byte_string = db.ByteStringProperty()
prop_boolean = db.BooleanProperty()
prop_integer = db.IntegerProperty()
prop_float = db.FloatProperty()
prop_date_time = db.DateTimeProperty()
prop_date = db.DateProperty()
prop_time = db.TimeProperty()
prop_list = db.ListProperty(int)
prop_string_list = db.StringListProperty()
prop_reference = db.ReferenceProperty()
prop_self_refeference = db.SelfReferenceProperty()
prop_user = db.UserProperty()
prop_blob = db.BlobProperty()
prop_text = db.TextProperty()
prop_category = db.CategoryProperty()
prop_link = db.LinkProperty()
prop_email = db.EmailProperty()
prop_geo_pt = db.GeoPtProperty()
prop_im = db.IMProperty()
prop_phone_number = db.PhoneNumberProperty()
prop_postal_address = db.PostalAddressProperty()
prop_rating = db.RatingProperty()
class DateTimeModel(db.Model):
prop_date_time_1 = db.DateTimeProperty()
prop_date_time_2 = db.DateTimeProperty(auto_now=True)
prop_date_time_3 = db.DateTimeProperty(auto_now_add=True)
prop_date_1 = db.DateProperty()
prop_date_2 = db.DateProperty(auto_now=True)
prop_date_3 = db.DateProperty(auto_now_add=True)
prop_time_1 = db.TimeProperty()
prop_time_2 = db.TimeProperty(auto_now=True)
prop_time_3 = db.TimeProperty(auto_now_add=True)
class TestModelForm(TestCase):
def tearDown(self):
for entity in Author.all():
db.delete(entity)
for entity in Book.all():
db.delete(entity)
def test_model_form_basic(self):
form_class = model_form(Author)
self.assertEqual(hasattr(form_class, 'name'), True)
self.assertEqual(hasattr(form_class, 'age'), True)
self.assertEqual(hasattr(form_class, 'city'), True)
self.assertEqual(hasattr(form_class, 'is_admin'), True)
form = form_class()
self.assertEqual(isinstance(form.name, f.TextField), True)
self.assertEqual(isinstance(form.city, f.TextField), True)
self.assertEqual(isinstance(form.age, f.IntegerField), True)
self.assertEqual(isinstance(form.is_admin, f.BooleanField), True)
def test_required_field(self):
form_class = model_form(Author)
form = form_class()
self.assertEqual(form.name.flags.required, True)
self.assertEqual(form.city.flags.required, False)
self.assertEqual(form.age.flags.required, True)
self.assertEqual(form.is_admin.flags.required, False)
def test_default_value(self):
form_class = model_form(Author)
form = form_class()
self.assertEqual(form.name.default, None)
self.assertEqual(form.city.default, None)
self.assertEqual(form.age.default, None)
self.assertEqual(form.is_admin.default, False)
def test_model_form_only(self):
form_class = model_form(Author, only=['name', 'age'])
self.assertEqual(hasattr(form_class, 'name'), True)
self.assertEqual(hasattr(form_class, 'city'), False)
self.assertEqual(hasattr(form_class, 'age'), True)
self.assertEqual(hasattr(form_class, 'is_admin'), False)
form = form_class()
self.assertEqual(isinstance(form.name, f.TextField), True)
self.assertEqual(isinstance(form.age, f.IntegerField), True)
def test_model_form_exclude(self):
form_class = model_form(Author, exclude=['is_admin'])
self.assertEqual(hasattr(form_class, 'name'), True)
self.assertEqual(hasattr(form_class, 'city'), True)
self.assertEqual(hasattr(form_class, 'age'), True)
self.assertEqual(hasattr(form_class, 'is_admin'), False)
form = form_class()
self.assertEqual(isinstance(form.name, f.TextField), True)
self.assertEqual(isinstance(form.city, f.TextField), True)
self.assertEqual(isinstance(form.age, f.IntegerField), True)
def test_datetime_model(self):
"""Fields marked as auto_add / auto_add_now should not be included."""
form_class = model_form(DateTimeModel)
self.assertEqual(hasattr(form_class, 'prop_date_time_1'), True)
self.assertEqual(hasattr(form_class, 'prop_date_time_2'), False)
self.assertEqual(hasattr(form_class, 'prop_date_time_3'), False)
self.assertEqual(hasattr(form_class, 'prop_date_1'), True)
self.assertEqual(hasattr(form_class, 'prop_date_2'), False)
self.assertEqual(hasattr(form_class, 'prop_date_3'), False)
self.assertEqual(hasattr(form_class, 'prop_time_1'), True)
self.assertEqual(hasattr(form_class, 'prop_time_2'), False)
self.assertEqual(hasattr(form_class, 'prop_time_3'), False)
def test_not_implemented_properties(self):
# This should not raise NotImplementedError.
form_class = model_form(AllPropertiesModel)
# These should be set.
self.assertEqual(hasattr(form_class, 'prop_string'), True)
self.assertEqual(hasattr(form_class, 'prop_byte_string'), True)
self.assertEqual(hasattr(form_class, 'prop_boolean'), True)
self.assertEqual(hasattr(form_class, 'prop_integer'), True)
self.assertEqual(hasattr(form_class, 'prop_float'), True)
self.assertEqual(hasattr(form_class, 'prop_date_time'), True)
self.assertEqual(hasattr(form_class, 'prop_date'), True)
self.assertEqual(hasattr(form_class, 'prop_time'), True)
self.assertEqual(hasattr(form_class, 'prop_string_list'), True)
self.assertEqual(hasattr(form_class, 'prop_reference'), True)
self.assertEqual(hasattr(form_class, 'prop_self_refeference'), True)
self.assertEqual(hasattr(form_class, 'prop_blob'), True)
self.assertEqual(hasattr(form_class, 'prop_text'), True)
self.assertEqual(hasattr(form_class, 'prop_category'), True)
self.assertEqual(hasattr(form_class, 'prop_link'), True)
self.assertEqual(hasattr(form_class, 'prop_email'), True)
self.assertEqual(hasattr(form_class, 'prop_geo_pt'), True)
self.assertEqual(hasattr(form_class, 'prop_phone_number'), True)
self.assertEqual(hasattr(form_class, 'prop_postal_address'), True)
self.assertEqual(hasattr(form_class, 'prop_rating'), True)
# These should NOT be set.
self.assertEqual(hasattr(form_class, 'prop_list'), False)
self.assertEqual(hasattr(form_class, 'prop_user'), False)
self.assertEqual(hasattr(form_class, 'prop_im'), False)
def test_populate_form(self):
entity = Author(key_name='test', name='John', city='Yukon', age=25, is_admin=True)
entity.put()
obj = Author.get_by_key_name('test')
form_class = model_form(Author)
form = form_class(obj=obj)
self.assertEqual(form.name.data, 'John')
self.assertEqual(form.city.data, 'Yukon')
self.assertEqual(form.age.data, 25)
self.assertEqual(form.is_admin.data, True)
def test_field_attributes(self):
form_class = model_form(Author, field_args={
'name': {
'label': 'Full name',
'description': 'Your name',
},
'age': {
'label': 'Age',
'validators': [validators.NumberRange(min=14, max=99)],
},
'city': {
'label': 'City',
'description': 'The city in which you live, not the one in which you were born.',
},
'is_admin': {
'label': 'Administrative rights',
},
})
form = form_class()
self.assertEqual(form.name.label.text, 'Full name')
self.assertEqual(form.name.description, 'Your name')
self.assertEqual(form.age.label.text, 'Age')
self.assertEqual(form.city.label.text, 'City')
self.assertEqual(form.city.description, 'The city in which you live, not the one in which you were born.')
self.assertEqual(form.is_admin.label.text, 'Administrative rights')
def test_reference_property(self):
keys = []
for name in ['foo', 'bar', 'baz']:
author = Author(name=name, age=26)
author.put()
keys.append(str(author.key()))
form_class = model_form(Book)
form = form_class()
choices = []
i = 0
for key, name, value in form.author.iter_choices():
self.assertEqual(key, keys[i])
i += 1
class TestFields(TestCase):
class GeoTestForm(Form):
geo = GeoPtPropertyField()
def test_geopt_property(self):
form = self.GeoTestForm(DummyPostData(geo='5.0, -7.0'))
self.assert_(form.validate())
self.assertEquals(form.geo.data, u'5.0,-7.0')
form = self.GeoTestForm(DummyPostData(geo='5.0,-f'))
self.assert_(not form.validate())
|
|
import re
from dataclasses import dataclass
from enum import Enum
from textwrap import dedent
from typing import Any, Dict, List, Optional, Type
from pytest import mark, param, raises
import tests
from omegaconf import (
DictConfig,
FloatNode,
IntegerNode,
ListConfig,
OmegaConf,
ReadonlyConfigError,
UnsupportedValueType,
ValidationError,
)
from omegaconf._utils import format_and_raise, type_str
from omegaconf.errors import (
ConfigAttributeError,
ConfigKeyError,
ConfigTypeError,
ConfigValueError,
GrammarParseError,
InterpolationKeyError,
InterpolationResolutionError,
InterpolationToMissingValueError,
InterpolationValidationError,
KeyValidationError,
MissingMandatoryValue,
OmegaConfBaseException,
)
from tests import (
A,
Color,
ConcretePlugin,
IllegalType,
Module,
NestedInterpolationToMissing,
Package,
Plugin,
Str2Int,
StructuredInterpolationKeyError,
StructuredInterpolationValidationError,
StructuredWithBadDict,
StructuredWithBadList,
StructuredWithMissing,
SubscriptedDict,
UnionError,
User,
warns_dict_subclass_deprecated,
)
# test classes
@dataclass
class NotOptionalInt:
foo: int = None # type:ignore
@dataclass
class NotOptionalA:
x: A = None # type: ignore
@dataclass
class Expected:
exception_type: Type[Exception]
msg: str
# "Low level exceptions" are thrown from internal APIs are are not populating all the fields
low_level: bool = False
key: Any = None
# "AUTO" : determine automatically based on OmegaConf.get_type(cfg)
object_type: Any = "AUTO"
ref_type: Any = None
# "AUTO: full_key is key
full_key: Any = "AUTO"
create: Any = lambda: None
op: Any = lambda _cfg: None
child_node: Any = lambda cfg: None
parent_node: Any = lambda cfg: cfg
object_type_str: Optional[str] = "AUTO"
ref_type_str: Optional[str] = "AUTO"
num_lines: int = 2
def finalize(self, cfg: Any) -> None:
if self.object_type == "AUTO":
self.object_type = OmegaConf.get_type(cfg)
if self.object_type_str == "AUTO":
self.object_type_str = type_str(self.object_type)
if self.ref_type_str == "AUTO" and self.ref_type is not None:
self.ref_type_str = type_str(self.ref_type)
self.num_lines = self.num_lines + 1
if self.full_key == "AUTO":
if self.key is None:
self.full_key = ""
else:
if isinstance(self.key, (str, int, Enum, float, bool, slice)):
self.full_key = self.key
else:
self.full_key = ""
params = [
##############
# DictConfig #
##############
# update
param(
Expected(
create=lambda: OmegaConf.structured(StructuredWithMissing),
op=lambda cfg: OmegaConf.update(cfg, "num", "hello"),
exception_type=ValidationError,
msg="Value 'hello' of type 'str' could not be converted to Integer",
parent_node=lambda cfg: cfg,
child_node=lambda cfg: cfg._get_node("num"),
object_type=StructuredWithMissing,
key="num",
),
id="structured:update_with_invalid_value",
),
param(
Expected(
create=lambda: OmegaConf.structured(StructuredWithMissing),
op=lambda cfg: OmegaConf.update(cfg, "num", None),
exception_type=ValidationError,
msg="field 'num' is not Optional",
parent_node=lambda cfg: cfg,
child_node=lambda cfg: cfg._get_node("num"),
object_type=StructuredWithMissing,
key="num",
),
id="structured:update:none_to_non_optional",
),
param(
Expected(
create=lambda: OmegaConf.create({}),
op=lambda cfg: OmegaConf.update(cfg, "a", IllegalType()),
key="a",
exception_type=UnsupportedValueType,
msg="Value 'IllegalType' is not a supported primitive type",
),
id="dict:update:object_of_illegal_type",
),
# pop
param(
Expected(
create=lambda: create_readonly({"foo": "bar"}),
op=lambda cfg: cfg.pop("foo"),
key="foo",
child_node=lambda cfg: cfg._get_node("foo"),
exception_type=ReadonlyConfigError,
msg="Cannot pop from read-only node",
),
id="dict,readonly:pop",
),
param(
Expected(
create=lambda: OmegaConf.create({"foo": "bar"}),
op=lambda cfg: cfg.pop("not_found"),
key="not_found",
exception_type=ConfigKeyError,
msg="Key not found: 'not_found'",
),
id="dict:pop_invalid",
),
param(
Expected(
create=lambda: OmegaConf.create({"foo": {}}),
op=lambda cfg: cfg.foo.pop("not_found"),
key="not_found",
full_key="foo.not_found",
parent_node=lambda cfg: cfg.foo,
exception_type=ConfigKeyError,
msg="Key not found: 'not_found' (path: 'foo.not_found')",
),
id="dict:pop_invalid_nested",
),
param(
Expected(
create=lambda: OmegaConf.create({"foo": "bar"}),
op=lambda cfg: cfg.__delitem__("not_found"),
key="not_found",
exception_type=ConfigKeyError,
msg="Key not found: 'not_found'",
),
id="dict:del_invalid",
),
param(
Expected(
create=lambda: OmegaConf.create({"foo": {}}),
op=lambda cfg: cfg.foo.__delitem__("not_found"),
key="not_found",
full_key="foo.not_found",
parent_node=lambda cfg: cfg.foo,
exception_type=ConfigKeyError,
msg="Key not found: 'not_found'",
),
id="dict:del_invalid_nested",
),
param(
Expected(
create=lambda: OmegaConf.structured(ConcretePlugin),
op=lambda cfg: getattr(cfg, "fail"),
exception_type=ConfigAttributeError,
msg="Key 'fail' not in 'ConcretePlugin'",
key="fail",
object_type=ConcretePlugin,
),
id="structured:access_invalid_attribute",
),
# getattr
param(
Expected(
create=lambda: create_struct({"foo": "bar"}),
op=lambda cfg: getattr(cfg, "fail"),
exception_type=ConfigAttributeError,
msg="Key 'fail' is not in struct",
key="fail",
),
id="dict,struct:access_invalid_attribute",
),
param(
Expected(
create=lambda: OmegaConf.create({"foo": "${missing}"}),
op=lambda cfg: getattr(cfg, "foo"),
exception_type=InterpolationKeyError,
msg="Interpolation key 'missing' not found",
key="foo",
child_node=lambda cfg: cfg._get_node("foo"),
),
id="dict,accessing_missing_interpolation",
),
param(
Expected(
create=lambda: OmegaConf.create({"foo": "${missing[a].b[c]}"}),
op=lambda cfg: getattr(cfg, "foo"),
exception_type=InterpolationKeyError,
msg="Interpolation key 'missing[a].b[c]' not found",
key="foo",
child_node=lambda cfg: cfg._get_node("foo"),
),
id="dict,accessing_missing_interpolation_with_full_path",
),
param(
Expected(
create=lambda: OmegaConf.create({"foo": "foo_${missing}"}),
op=lambda cfg: getattr(cfg, "foo"),
exception_type=InterpolationKeyError,
msg="Interpolation key 'missing' not found",
key="foo",
child_node=lambda cfg: cfg._get_node("foo"),
),
id="dict,accessing_missing_str_interpolation",
),
param(
Expected(
create=lambda: OmegaConf.create({"foo": {"bar": "${.missing}"}}),
op=lambda cfg: getattr(cfg.foo, "bar"),
exception_type=InterpolationKeyError,
msg="Interpolation key 'missing' not found",
key="bar",
full_key="foo.bar",
child_node=lambda cfg: cfg.foo._get_node("bar"),
parent_node=lambda cfg: cfg.foo,
),
id="dict,accessing_missing_relative_interpolation",
),
param(
Expected(
create=lambda: OmegaConf.create({"foo": "${..missing}"}),
op=lambda cfg: getattr(cfg, "foo"),
exception_type=InterpolationKeyError,
msg="ConfigKeyError while resolving interpolation: Error resolving key '..missing'",
key="foo",
child_node=lambda cfg: cfg._get_node("foo"),
),
id="dict,accessing_invalid_double_relative_interpolation",
),
param(
Expected(
create=lambda: OmegaConf.create({"foo": "${int.missing}", "int": 0}),
op=lambda cfg: getattr(cfg, "foo"),
exception_type=InterpolationResolutionError,
msg=(
"ConfigTypeError raised while resolving interpolation: Error trying to access int.missing: "
"node `int` is not a container and thus cannot contain `missing`"
),
key="foo",
child_node=lambda cfg: cfg._get_node("foo"),
),
id="dict,accessing_non_container_interpolation",
),
param(
Expected(
create=lambda: OmegaConf.create(
{"foo": "${${missing_val}}", "missing_val": "???"}
),
op=lambda cfg: getattr(cfg, "foo"),
exception_type=InterpolationToMissingValueError,
msg=(
"MissingMandatoryValue while resolving interpolation: "
"Missing mandatory value: missing_val"
),
key="foo",
child_node=lambda cfg: cfg._get_node("foo"),
),
id="dict,accessing_missing_nested_interpolation",
),
param(
Expected(
create=lambda: OmegaConf.structured(StructuredInterpolationValidationError),
op=lambda cfg: getattr(cfg, "y"),
exception_type=InterpolationValidationError,
object_type=StructuredInterpolationValidationError,
msg=(
"While dereferencing interpolation '${.x}': "
"Incompatible value 'None' for field of type 'int'"
),
key="y",
child_node=lambda cfg: cfg._get_node("y"),
),
id="dict,non_optional_field_with_interpolation_to_none",
),
# setattr
param(
Expected(
create=lambda: create_struct({"foo": "bar"}),
op=lambda cfg: setattr(cfg, "zlonk", "zlank"),
exception_type=ConfigAttributeError,
msg="Key 'zlonk' is not in struct",
key="zlonk",
),
id="dict,struct:set_invalid_attribute",
),
param(
Expected(
create=lambda: OmegaConf.structured(ConcretePlugin),
op=lambda cfg: setattr(cfg, "params", 20),
exception_type=ValidationError,
msg="Invalid type assigned: int is not a subclass of FoobarParams. value: 20",
key="params",
object_type=ConcretePlugin,
child_node=lambda cfg: cfg.params,
),
id="structured:setattr,invalid_type_assigned_to_structured",
),
param(
Expected(
create=lambda: create_readonly({"foo": "bar"}),
op=lambda cfg: setattr(cfg, "foo", 20),
exception_type=ReadonlyConfigError,
msg="Cannot change read-only config container",
key="foo",
child_node=lambda cfg: cfg.foo,
),
id="dict,readonly:set_attribute",
),
param(
Expected(
create=lambda: OmegaConf.create(
{"foo": DictConfig(is_optional=False, content={})}
),
op=lambda cfg: setattr(cfg, "foo", None),
exception_type=ValidationError,
msg="field 'foo' is not Optional",
key="foo",
full_key="foo",
child_node=lambda cfg: cfg.foo,
),
id="dict:setattr:not_optional:set_none",
),
param(
Expected(
create=lambda: OmegaConf.structured(ConcretePlugin),
op=lambda cfg: cfg.params.__setattr__("foo", "bar"),
exception_type=ValidationError,
msg="Value 'bar' of type 'str' could not be converted to Integer",
key="foo",
full_key="params.foo",
object_type=ConcretePlugin.FoobarParams,
ref_type=ConcretePlugin.FoobarParams,
child_node=lambda cfg: cfg.params.foo,
parent_node=lambda cfg: cfg.params,
),
id="structured:setattr,invalid_type_assigned_to_field",
),
# setitem
param(
Expected(
create=lambda: create_struct({"foo": "bar"}),
op=lambda cfg: cfg.__setitem__("zoo", "zonk"),
exception_type=KeyError,
msg="Key 'zoo' is not in struct",
key="zoo",
),
id="dict,struct:setitem_on_none_existing_key",
),
param(
Expected(
create=lambda: DictConfig(key_type=Color, element_type=str, content={}),
op=lambda cfg: cfg.__setitem__("foo", "bar"),
exception_type=KeyValidationError,
msg="Key 'foo' is incompatible with the enum type 'Color', valid: [RED, GREEN, BLUE]",
full_key="foo",
key="foo",
),
id="DictConfig[Color,str]:setitem_bad_key",
),
param(
Expected(
create=lambda: DictConfig(key_type=Color, element_type=str, content={}),
op=lambda cfg: cfg.__setitem__(None, "bar"),
exception_type=KeyValidationError,
msg="Key 'None' is incompatible with the enum type 'Color', valid: [RED, GREEN, BLUE]",
key=None,
),
id="DictConfig[Color,str]:setitem_bad_key",
),
param(
Expected(
create=lambda: DictConfig(key_type=str, element_type=Color, content={}),
op=lambda cfg: cfg.__setitem__("foo", "bar"),
exception_type=ValidationError,
msg="Invalid value 'bar', expected one of [RED, GREEN, BLUE]",
full_key="foo",
key="foo",
),
id="DictConfig[str,Color]:setitem_bad_value",
),
param(
Expected(
create=lambda: OmegaConf.structured(User),
op=lambda cfg: cfg.__setitem__("name", [1, 2]),
exception_type=ValidationError,
msg="Cannot convert 'list' to string: '[1, 2]'",
full_key="name",
key="name",
low_level=True,
),
id="DictConfig[Any,Any]:setitem_stringnode_bad_value",
),
# getitem
param(
Expected(
create=lambda: create_struct({"foo": "bar"}),
op=lambda cfg: cfg.__getitem__("zoo"),
exception_type=KeyError,
msg="Key 'zoo' is not in struct",
key="zoo",
),
id="dict,struct:getitem_key_not_in_struct",
),
param(
Expected(
create=lambda: DictConfig(key_type=Color, element_type=str, content={}),
op=lambda cfg: cfg.__getitem__("foo"),
exception_type=KeyValidationError,
msg="Key 'foo' is incompatible with the enum type 'Color', valid: [RED, GREEN, BLUE]",
key="foo",
),
id="DictConfig[Color,str]:getitem_str_key",
),
param(
Expected(
create=lambda: DictConfig(key_type=Color, element_type=str, content={}),
op=lambda cfg: cfg.__getitem__(None),
exception_type=KeyValidationError,
msg="Key 'None' is incompatible with the enum type 'Color', valid: [RED, GREEN, BLUE]",
key=None,
),
id="DictConfig[Color,str]:getitem_str_key_None",
),
param(
Expected(
create=lambda: DictConfig(key_type=str, element_type=str, content={}),
op=lambda cfg: cfg.__getitem__(Color.RED),
exception_type=KeyValidationError,
msg="Key Color.RED (Color) is incompatible with (str)",
full_key="RED",
key=Color.RED,
),
id="DictConfig[str,str]:getitem_color_key",
),
param(
Expected(
create=lambda: create_readonly({"foo1": "bar"}),
op=lambda cfg: cfg.merge_with({"foo2": "bar"}),
exception_type=ReadonlyConfigError,
key="foo2",
msg="Cannot change read-only config container",
),
id="dict,readonly:merge_with",
),
param(
Expected(
create=lambda: OmegaConf.structured(ConcretePlugin),
op=lambda cfg: OmegaConf.merge(cfg, {"params": {"foo": "bar"}}),
exception_type=ValidationError,
msg="Value 'bar' of type 'str' could not be converted to Integer",
key="foo",
full_key="params.foo",
object_type=ConcretePlugin.FoobarParams,
ref_type=tests.ConcretePlugin.FoobarParams,
child_node=lambda cfg: cfg.params.foo,
parent_node=lambda cfg: cfg.params,
),
id="structured:merge,invalid_field_type",
),
param(
Expected(
create=lambda: OmegaConf.structured(ConcretePlugin),
op=lambda cfg: OmegaConf.merge(cfg, {"params": {"zlonk": 10}}),
exception_type=ConfigKeyError,
msg="Key 'zlonk' not in 'FoobarParams'",
key="zlonk",
full_key="params.zlonk",
object_type=ConcretePlugin.FoobarParams,
ref_type=ConcretePlugin.FoobarParams,
parent_node=lambda cfg: cfg.params,
),
id="structured:merge,adding_an_invalid_key",
),
param(
Expected(
create=lambda: OmegaConf.structured(Package),
op=lambda cfg: OmegaConf.merge(cfg, {"modules": [{"foo": "var"}]}),
exception_type=ConfigKeyError,
msg="Key 'foo' not in 'Module'",
key="foo",
full_key="modules[0].foo",
object_type=Module,
low_level=True,
),
id="structured:merge,bad_key_merge",
),
# merge_with
param(
Expected(
create=lambda: OmegaConf.structured(ConcretePlugin),
op=lambda cfg: cfg.merge_with(Plugin),
exception_type=ValidationError,
msg="Plugin is not a subclass of ConcretePlugin. value: {'name': '???', 'params': '???'}",
object_type=ConcretePlugin,
),
id="structured:merge_invalid_dataclass",
),
# get
param(
Expected(
create=lambda: OmegaConf.create(),
op=lambda cfg: cfg.get(IllegalType),
exception_type=KeyValidationError,
msg="Incompatible key type 'type'",
key=IllegalType,
full_key="",
),
id="dict:get_illegal_type",
),
param(
Expected(
create=lambda: OmegaConf.create(),
op=lambda cfg: cfg.get(IllegalType()),
exception_type=KeyValidationError,
msg="Incompatible key type 'IllegalType'",
key=IllegalType(),
),
id="dict:get_object_of_illegal_type",
),
param(
Expected(
create=lambda: DictConfig({}, key_type=int),
op=lambda cfg: cfg.get("foo"),
exception_type=KeyValidationError,
msg="Key foo (str) is incompatible with (int)",
key="foo",
full_key="foo",
),
id="dict[int,Any]:mistyped_key",
),
param(
Expected(
create=lambda: DictConfig({}, key_type=float),
op=lambda cfg: cfg.get("foo"),
exception_type=KeyValidationError,
msg="Key foo (str) is incompatible with (float)",
key="foo",
full_key="foo",
),
id="dict[float,Any]:mistyped_key",
),
param(
Expected(
create=lambda: DictConfig({}, key_type=bool),
op=lambda cfg: cfg.get("foo"),
exception_type=KeyValidationError,
msg="Key foo (str) is incompatible with (bool)",
key="foo",
full_key="foo",
),
id="dict[bool,Any]:mistyped_key",
),
# dict:create
param(
Expected(
create=lambda: None,
op=lambda _: OmegaConf.structured(NotOptionalInt),
exception_type=ValidationError,
msg="Incompatible value 'None' for field of type 'int'",
key="foo",
full_key="foo",
parent_node=lambda _: {},
object_type=NotOptionalInt,
),
id="dict:create_non_optional_with_none",
),
param(
Expected(
create=lambda: None,
op=lambda _: OmegaConf.structured(NotOptionalInt),
exception_type=ValidationError,
msg="Incompatible value 'None' for field of type 'int'",
key="foo",
full_key="foo",
parent_node=lambda _: {},
object_type=NotOptionalInt,
),
id="dict:create:not_optional_int_field_with_none",
),
param(
Expected(
create=lambda: None,
op=lambda cfg: OmegaConf.structured(NotOptionalA),
exception_type=ValidationError,
object_type=None,
key=None,
full_key="x",
msg="field 'x' is not Optional",
object_type_str="NotOptionalInt",
ref_type=A,
),
id="dict:create:not_optional_A_field_with_none",
),
param(
Expected(
create=lambda: DictConfig({}, element_type=str),
op=lambda cfg: OmegaConf.merge(cfg, {"foo": None}),
exception_type=ValidationError,
key="foo",
msg="field 'foo' is not Optional",
),
id="dict:merge_none_into_not_optional_element_type",
),
param(
Expected(
create=lambda: None,
op=lambda cfg: OmegaConf.structured(IllegalType),
exception_type=ValidationError,
msg="Input class 'IllegalType' is not a structured config. did you forget to decorate it as a dataclass?",
object_type_str=None,
ref_type_str=None,
),
id="dict_create_from_illegal_type",
),
param(
Expected(
create=lambda: None,
op=lambda _: OmegaConf.structured(
ConcretePlugin(params=ConcretePlugin.FoobarParams(foo="x")) # type: ignore
),
exception_type=ValidationError,
msg="Value 'x' of type 'str' could not be converted to Integer",
key="foo",
full_key="foo",
parent_node=lambda _: {},
object_type=ConcretePlugin.FoobarParams,
),
id="structured:create_with_invalid_value,int",
),
param(
Expected(
create=lambda: DictConfig({"bar": FloatNode(123.456)}),
op=lambda cfg: cfg.__setattr__("bar", "x"),
exception_type=ValidationError,
msg="Value 'x' of type 'str' could not be converted to Float",
key="bar",
full_key="bar",
child_node=lambda cfg: cfg._get_node("bar"),
),
id="typed_DictConfig:assign_with_invalid_value,float",
),
param(
Expected(
create=lambda: DictConfig({"bar": FloatNode(123.456)}),
op=lambda cfg: cfg.__setattr__("bar", Color.BLUE),
exception_type=ValidationError,
msg="Value 'Color.BLUE' of type 'tests.Color' could not be converted to Float",
key="bar",
full_key="bar",
child_node=lambda cfg: cfg._get_node("bar"),
),
id="typed_DictConfig:assign_with_invalid_value,full_module_in_error",
),
param(
Expected(
create=lambda: None,
op=lambda cfg: OmegaConf.structured(IllegalType()),
exception_type=ValidationError,
msg="Object of unsupported type: 'IllegalType'",
object_type_str=None,
ref_type_str=None,
),
id="structured:create_from_unsupported_object",
),
param(
Expected(
create=lambda: None,
op=lambda _: DictConfig({}, element_type=IllegalType),
exception_type=ValidationError,
msg="Unsupported value type: 'tests.IllegalType'",
),
id="structured:create_with_unsupported_element_type",
),
param(
Expected(
create=lambda: None,
op=lambda cfg: OmegaConf.structured(UnionError),
exception_type=ValueError,
msg="Union types are not supported:\nx: Union[int, str]",
num_lines=3,
),
id="structured:create_with_union_error",
),
# assign
param(
Expected(
create=lambda: DictConfig(ref_type=ConcretePlugin, content="???"),
op=lambda cfg: cfg._set_value(1),
exception_type=ValidationError,
msg="Invalid type assigned: int is not a subclass of ConcretePlugin. value: 1",
low_level=True,
ref_type=Optional[ConcretePlugin],
),
id="dict:set_value:reftype_mismatch",
),
param(
Expected(
create=lambda: DictConfig(
key_type=str, element_type=int, content={"foo": 10, "bar": 20}
),
op=lambda cfg: cfg.__setitem__("baz", "fail"),
exception_type=ValidationError,
msg="Value 'fail' of type 'str' could not be converted to Integer",
key="baz",
),
id="DictConfig[str,int]:assigned_str_value",
),
param(
Expected(
create=lambda: OmegaConf.structured(SubscriptedDict),
op=lambda cfg: cfg.__setitem__("dict_str", 1),
exception_type=ValidationError,
msg="Cannot assign int to Dict[str, int]",
key="dict_str",
ref_type=Optional[Dict[str, int]],
low_level=True,
),
id="DictConfig[str,int]:assigned_primitive_type",
),
param(
Expected(
create=lambda: OmegaConf.structured(SubscriptedDict),
op=lambda cfg: cfg.__setitem__("dict_str", User(age=2, name="bar")),
exception_type=ValidationError,
msg="Cannot assign User to Dict[str, int]",
key="dict_str",
ref_type=Optional[Dict[str, int]],
low_level=True,
),
id="DictConfig[str,int]:assigned_structured_config",
),
param(
Expected(
create=lambda: OmegaConf.structured(SubscriptedDict),
op=lambda cfg: cfg.__setitem__("dict_int", "fail"),
exception_type=ValidationError,
msg="Cannot assign str to Dict[int, int]",
key="dict_int",
ref_type=Optional[Dict[int, int]],
low_level=True,
),
id="DictConfig[int,int]:assigned_primitive_type",
),
param(
Expected(
create=lambda: OmegaConf.structured(SubscriptedDict),
op=lambda cfg: cfg.__setitem__("dict_int", User(age=2, name="bar")),
exception_type=ValidationError,
msg="Cannot assign User to Dict[int, int]",
key="dict_int",
ref_type=Optional[Dict[int, int]],
low_level=True,
),
id="DictConfig[int,int]:assigned_structured_config",
),
# delete
param(
Expected(
create=lambda: create_readonly({"foo": "bar"}),
op=lambda cfg: cfg.__delitem__("foo"),
exception_type=ReadonlyConfigError,
msg="DictConfig in read-only mode does not support deletion",
key="foo",
child_node=lambda cfg: cfg.foo,
),
id="dict,readonly:del",
),
param(
Expected(
create=lambda: create_struct({"foo": "bar"}),
op=lambda cfg: cfg.__delitem__("foo"),
exception_type=ConfigTypeError,
msg="DictConfig in struct mode does not support deletion",
key="foo",
child_node=lambda cfg: cfg.foo,
),
id="dict,struct:del",
),
param(
Expected(
create=lambda: OmegaConf.structured(User(name="bond")),
op=lambda cfg: cfg.__delitem__("name"),
exception_type=ConfigTypeError,
msg="User (DictConfig) does not support deletion",
object_type=User,
key="name",
child_node=lambda cfg: cfg.name,
),
id="dict,structured:del",
),
param(
Expected(
create=lambda: create_readonly({"foo": "bar"}),
op=lambda cfg: cfg.__delattr__("foo"),
exception_type=ReadonlyConfigError,
msg="DictConfig in read-only mode does not support deletion",
key="foo",
child_node=lambda cfg: cfg.foo,
),
id="dict,readonly:delattr",
),
# creating structured config
param(
Expected(
create=lambda: None,
op=lambda _: OmegaConf.structured(StructuredWithBadDict),
exception_type=ValidationError,
msg="Cannot assign int to Dict[str, str]",
key="foo",
),
id="structured,bad_default_value_for_dict",
),
param(
Expected(
create=lambda: None,
op=lambda _: OmegaConf.structured(StructuredWithBadList),
exception_type=ValidationError,
msg="Invalid value assigned: int is not a ListConfig, list or tuple.",
key="foo",
),
id="structured,bad_default_value_for_list",
),
##############
# ListConfig #
##############
# setattr
param(
Expected(
create=lambda: OmegaConf.create([1, 2, 3]),
op=lambda cfg: setattr(cfg, "foo", 10),
exception_type=ConfigAttributeError,
key="foo",
full_key="[foo]",
msg="ListConfig does not support attribute access",
),
id="list:setattr",
),
# getattr
param(
Expected(
create=lambda: OmegaConf.create([1, 2, 3]),
op=lambda cfg: getattr(cfg, "foo"),
exception_type=ConfigAttributeError,
key="foo",
full_key="[foo]",
msg="ListConfig does not support attribute access",
),
id="list:setattr",
),
# get node
param(
Expected(
create=lambda: OmegaConf.create([1, 2, 3]),
op=lambda cfg: cfg._get_node("foo"),
exception_type=KeyValidationError,
key="foo",
full_key="[foo]",
msg="ListConfig indices must be integers or slices, not str",
),
id="list:get_nox_ex:invalid_index_type",
),
param(
Expected(
create=lambda: OmegaConf.create([1, 2, 3]),
op=lambda cfg: cfg._get_node(20),
exception_type=IndexError,
msg="list index out of range",
key=20,
full_key="[20]",
),
id="list:get_node_ex:index_out_of_range",
),
param(
Expected(
create=lambda: ListConfig(content=None),
op=lambda cfg: cfg._get_node(20),
exception_type=TypeError,
msg="Cannot get_node from a ListConfig object representing None",
key=20,
full_key="[20]",
),
id="list:get_node_none",
),
param(
Expected(
create=lambda: ListConfig(content="???"),
op=lambda cfg: cfg._get_node(20),
exception_type=MissingMandatoryValue,
msg="Cannot get_node from a missing ListConfig",
key=20,
full_key="[20]",
),
id="list:get_node_missing",
),
# list:create
param(
Expected(
create=lambda: None,
op=lambda cfg: ListConfig(is_optional=False, content=None),
exception_type=ValidationError,
object_type=None,
msg="Non optional ListConfig cannot be constructed from None",
object_type_str=None,
ref_type_str=None,
),
id="list:create:not_optional_with_none",
),
# append
param(
Expected(
create=lambda: OmegaConf.create([]),
op=lambda cfg: cfg.append(IllegalType()),
exception_type=UnsupportedValueType,
msg="Value 'IllegalType' is not a supported primitive type",
key=0,
full_key="[0]",
),
id="list:append_value_of_illegal_type",
),
# pop
param(
Expected(
create=lambda: create_readonly([1, 2, 3]),
op=lambda cfg: cfg.pop(0),
exception_type=ReadonlyConfigError,
msg="Cannot pop from read-only ListConfig",
key=0,
full_key="[0]",
child_node=lambda cfg: cfg[0],
),
id="list:readonly:pop",
),
param(
Expected(
create=lambda: OmegaConf.create([1, 2, 3]),
op=lambda cfg: cfg.pop("Invalid_key_type"),
exception_type=ConfigTypeError,
msg="ListConfig indices must be integers or slices, not str",
key="Invalid_key_type",
full_key="[Invalid_key_type]",
),
id="list:pop_invalid_key",
),
param(
Expected(
create=lambda: create_struct({"foo": "bar"}),
op=lambda cfg: cfg.pop("foo"),
exception_type=ConfigTypeError,
msg="DictConfig in struct mode does not support pop",
key="foo",
child_node=lambda cfg: cfg.foo,
),
id="dict,struct:pop",
),
param(
Expected(
create=lambda: OmegaConf.structured(ConcretePlugin),
op=lambda cfg: cfg.pop("name"),
exception_type=ConfigTypeError,
msg="ConcretePlugin (DictConfig) does not support pop",
key="name",
child_node=lambda cfg: cfg.name,
),
id="dict,structured:pop",
),
param(
Expected(
create=lambda: ListConfig(content=None),
op=lambda cfg: cfg.pop(0),
exception_type=TypeError,
msg="Cannot pop from a ListConfig object representing None",
key=0,
full_key="[0]",
),
id="list:pop_from_none",
),
param(
Expected(
create=lambda: ListConfig(content="???"),
op=lambda cfg: cfg.pop(0),
exception_type=MissingMandatoryValue,
msg="Cannot pop from a missing ListConfig",
key=0,
full_key="[0]",
),
id="list:pop_from_missing",
),
# getitem
param(
Expected(
create=lambda: OmegaConf.create(["???"]),
op=lambda cfg: cfg.__getitem__(slice(0, 1)),
exception_type=MissingMandatoryValue,
msg="Missing mandatory value: [0:1]",
key=slice(0, 1),
full_key="[0:1]",
child_node=lambda cfg: cfg._get_node(slice(0, 1)),
),
id="list:subscript_slice_with_missing",
),
param(
Expected(
create=lambda: OmegaConf.create([10, "???"]),
op=lambda cfg: cfg.__getitem__(1),
exception_type=MissingMandatoryValue,
msg="Missing mandatory value: [1]",
key=1,
full_key="[1]",
child_node=lambda cfg: cfg._get_node(1),
),
id="list:subscript_index_with_missing",
),
param(
Expected(
create=lambda: OmegaConf.create([1, 2, 3]),
op=lambda cfg: cfg.__getitem__(20),
exception_type=IndexError,
msg="list index out of range",
key=20,
full_key="[20]",
),
id="list:subscript:index_out_of_range",
),
param(
Expected(
create=lambda: OmegaConf.create([1, 2, 3]),
op=lambda cfg: cfg.__getitem__("foo"),
exception_type=KeyValidationError,
msg="ListConfig indices must be integers or slices, not str",
key="foo",
full_key="[foo]",
),
id="list:getitem,illegal_key_type",
),
param(
Expected(
create=lambda: ListConfig(content=None),
op=lambda cfg: cfg.__getitem__(0),
exception_type=TypeError,
msg="ListConfig object representing None is not subscriptable",
key=0,
full_key="[0]",
),
id="list:getitem,illegal_key_type",
),
# setitem
param(
Expected(
create=lambda: OmegaConf.create([None]),
op=lambda cfg: cfg.__setitem__(0, IllegalType()),
exception_type=UnsupportedValueType,
msg="Value 'IllegalType' is not a supported primitive type",
key=0,
full_key="[0]",
),
id="list:setitem,illegal_value_type",
),
param(
Expected(
create=lambda: OmegaConf.create([1, 2, 3]),
op=lambda cfg: cfg.__setitem__("foo", 4),
exception_type=KeyValidationError,
msg="ListConfig indices must be integers or slices, not str",
key="foo",
full_key="[foo]",
),
id="list:setitem,illegal_key_type",
),
param(
Expected(
create=lambda: create_readonly([1, 2, 3]),
op=lambda cfg: cfg.__setitem__(0, 4),
exception_type=ReadonlyConfigError,
msg="ListConfig is read-only",
key=0,
full_key="[0]",
child_node=lambda cfg: cfg[0],
),
id="list,readonly:setitem",
),
# _set_value
param(
Expected(
create=lambda: ListConfig(is_optional=False, element_type=int, content=[]),
op=lambda cfg: cfg._set_value(None),
exception_type=ValidationError,
object_type=None,
msg="Non optional ListConfig cannot be constructed from None",
low_level=True,
),
id="list:create_not_optional:_set_value(None)",
),
param(
Expected(
create=lambda: ListConfig(content=[1, 2]),
op=lambda cfg: cfg._set_value(True),
exception_type=ValidationError,
object_type=None,
msg="Invalid value assigned: bool is not a ListConfig, list or tuple.",
ref_type=List[int],
low_level=True,
),
id="list:create_not_optional:_set_value(True)",
),
# assign
param(
Expected(
create=lambda: ListConfig(element_type=int, content=[1, 2, 3]),
op=lambda cfg: cfg.__setitem__(0, "foo"),
exception_type=ValidationError,
msg="Value 'foo' of type 'str' could not be converted to Integer",
key=0,
full_key="[0]",
child_node=lambda cfg: cfg[0],
),
id="list,int_elements:assigned_str_element",
),
param(
Expected(
# make sure OmegaConf.create is not losing critical metadata.
create=lambda: OmegaConf.create(
ListConfig(element_type=int, content=[1, 2, 3])
),
op=lambda cfg: cfg.__setitem__(0, "foo"),
exception_type=ValidationError,
msg="Value 'foo' of type 'str' could not be converted to Integer",
key=0,
full_key="[0]",
child_node=lambda cfg: cfg[0],
),
id="list,int_elements:assigned_str_element",
),
param(
Expected(
create=lambda: OmegaConf.create(
[IntegerNode(is_optional=False, value=0), 2, 3]
),
op=lambda cfg: cfg.__setitem__(0, None),
exception_type=ValidationError,
msg="[0] is not optional and cannot be assigned None",
key=0,
full_key="[0]",
child_node=lambda cfg: cfg[0],
),
id="list,not_optional:null_assignment",
),
# index
param(
Expected(
create=lambda: create_readonly([1, 2, 3]),
op=lambda cfg: cfg.index(99),
exception_type=ConfigValueError,
msg="Item not found in ListConfig",
),
id="list,readonly:index_not_found",
),
# insert
param(
Expected(
create=lambda: create_readonly([1, 2, 3]),
op=lambda cfg: cfg.insert(1, 99),
exception_type=ReadonlyConfigError,
msg="Cannot insert into a read-only ListConfig",
key=1,
full_key="[1]",
child_node=lambda cfg: cfg[1],
),
id="list,readonly:insert",
),
param(
Expected(
create=lambda: ListConfig(content=None),
op=lambda cfg: cfg.insert(1, 99),
exception_type=ConfigTypeError,
msg="Cannot insert into ListConfig object representing None",
key=1,
full_key="[1]",
),
id="list:insert_into_none",
),
param(
Expected(
create=lambda: ListConfig(content="???"),
op=lambda cfg: cfg.insert(1, 99),
exception_type=MissingMandatoryValue,
msg="Cannot insert into missing ListConfig",
key=1,
full_key="[1]",
child_node=lambda _cfg: None,
),
id="list:insert_into_missing",
),
# get
param(
Expected(
create=lambda: ListConfig(content=None),
op=lambda cfg: cfg.get(0),
exception_type=TypeError,
msg="Cannot get from a ListConfig object representing None",
key=0,
full_key="[0]",
),
id="list:get_from_none",
),
param(
Expected(
create=lambda: ListConfig(content="???"),
op=lambda cfg: cfg.get(0),
exception_type=MissingMandatoryValue,
msg="Cannot get from a missing ListConfig",
key=0,
full_key="[0]",
),
id="list:get_from_missing",
),
# sort
param(
Expected(
create=lambda: create_readonly([1, 2, 3]),
op=lambda cfg: cfg.sort(),
exception_type=ReadonlyConfigError,
msg="Cannot sort a read-only ListConfig",
),
id="list:readonly:sort",
),
param(
Expected(
create=lambda: ListConfig(content=None),
op=lambda cfg: cfg.sort(),
exception_type=TypeError,
msg="Cannot sort a ListConfig object representing None",
),
id="list:sort_from_none",
),
param(
Expected(
create=lambda: ListConfig(content="???"),
op=lambda cfg: cfg.sort(),
exception_type=MissingMandatoryValue,
msg="Cannot sort a missing ListConfig",
),
id="list:sort_from_missing",
),
# iter
param(
Expected(
create=lambda: create_readonly([1, 2, 3]),
op=lambda cfg: cfg.sort(),
exception_type=ReadonlyConfigError,
msg="Cannot sort a read-only ListConfig",
),
id="list:readonly:sort",
),
param(
Expected(
create=lambda: ListConfig(content=None),
op=lambda cfg: iter(cfg),
exception_type=TypeError,
msg="Cannot iterate a ListConfig object representing None",
),
id="list:iter_none",
),
param(
Expected(
create=lambda: ListConfig(content="???"),
op=lambda cfg: iter(cfg),
exception_type=MissingMandatoryValue,
msg="Cannot iterate a missing ListConfig",
),
id="list:iter_missing",
),
# delete
param(
Expected(
create=lambda: create_readonly([1, 2, 3]),
op=lambda cfg: cfg.__delitem__(0),
exception_type=ReadonlyConfigError,
msg="Cannot delete item from read-only ListConfig",
key=0,
full_key="[0]",
child_node=lambda cfg: cfg._get_node(0),
),
id="list,readonly:del",
),
# to_object
param(
Expected(
create=lambda: OmegaConf.structured(User),
op=lambda cfg: OmegaConf.to_object(cfg),
exception_type=MissingMandatoryValue,
msg="Structured config of type `User` has missing mandatory value: name",
key="name",
child_node=lambda cfg: cfg._get_node("name"),
),
id="to_object:structured-missing-field",
),
param(
Expected(
create=lambda: OmegaConf.structured(NestedInterpolationToMissing),
op=lambda cfg: OmegaConf.to_object(cfg),
exception_type=InterpolationToMissingValueError,
msg=(
"MissingMandatoryValue while resolving interpolation: "
"Missing mandatory value: name"
),
key="baz",
full_key="subcfg.baz",
object_type=NestedInterpolationToMissing.BazParams,
parent_node=lambda cfg: cfg.subcfg,
child_node=lambda cfg: cfg.subcfg._get_node("baz"),
num_lines=3,
),
id="to_object:structured,throw_on_missing_interpolation",
),
param(
Expected(
create=lambda: OmegaConf.structured(StructuredInterpolationKeyError),
op=lambda cfg: OmegaConf.to_object(cfg),
exception_type=InterpolationKeyError,
key="name",
msg=("Interpolation key 'bar' not found"),
child_node=lambda cfg: cfg._get_node("name"),
),
id="to_object:structured,throw_on_interpolation_key_error",
),
# to_container throw_on_missing
param(
Expected(
create=lambda: OmegaConf.create(
{"subcfg": {"x": "${missing_val}"}, "missing_val": "???"}
),
op=lambda cfg: OmegaConf.to_container(
cfg, resolve=True, throw_on_missing=True
),
exception_type=InterpolationToMissingValueError,
msg=(
"MissingMandatoryValue while resolving interpolation: "
"Missing mandatory value: missing_val"
),
key="x",
full_key="subcfg.x",
parent_node=lambda cfg: cfg.subcfg,
child_node=lambda cfg: cfg.subcfg._get_node("x"),
),
id="to_container:throw_on_missing_interpolation",
),
param(
Expected(
create=lambda: DictConfig("???"),
op=lambda cfg: OmegaConf.to_container(cfg, throw_on_missing=True),
exception_type=MissingMandatoryValue,
msg="Missing mandatory value",
),
id="to_container:throw_on_missing,dict",
),
param(
Expected(
create=lambda: ListConfig("???"),
op=lambda cfg: OmegaConf.to_container(cfg, throw_on_missing=True),
exception_type=MissingMandatoryValue,
msg="Missing mandatory value",
),
id="to_container:throw_on_missing,list",
),
param(
Expected(
create=lambda: DictConfig({"a": "???"}),
op=lambda cfg: OmegaConf.to_container(cfg, throw_on_missing=True),
exception_type=MissingMandatoryValue,
msg="Missing mandatory value: a",
key="a",
child_node=lambda cfg: cfg._get_node("a"),
),
id="to_container:throw_on_missing,dict_value",
),
param(
Expected(
create=lambda: ListConfig(["???"]),
op=lambda cfg: OmegaConf.to_container(cfg, throw_on_missing=True),
exception_type=MissingMandatoryValue,
msg="Missing mandatory value: 0",
key=0,
full_key="[0]",
child_node=lambda cfg: cfg._get_node(0),
),
id="to_container:throw_on_missing,list_item",
),
]
def create_struct(cfg: Any) -> Any:
cfg = OmegaConf.create(cfg)
OmegaConf.set_struct(cfg, True)
return cfg
def create_readonly(cfg: Any) -> Any:
cfg = OmegaConf.create(cfg)
OmegaConf.set_readonly(cfg, True)
return cfg
@mark.parametrize("expected", params)
def test_errors(expected: Expected, monkeypatch: Any) -> None:
monkeypatch.setenv("OC_CAUSE", "0")
cfg = expected.create()
expected.finalize(cfg)
msg = expected.msg
with raises(expected.exception_type, match=re.escape(msg)) as einfo:
try:
expected.op(cfg)
except Exception as e:
# helps in debugging
raise e
ex = einfo.value
assert isinstance(ex, OmegaConfBaseException)
assert ex.object_type == expected.object_type
assert ex.key == expected.key
if not expected.low_level:
assert ex.parent_node == expected.parent_node(cfg)
assert ex.child_node == expected.child_node(cfg)
assert ex.full_key == expected.full_key
assert isinstance(expected.full_key, str)
if expected.ref_type is not None:
assert ex.ref_type == expected.ref_type
if expected.ref_type is not None:
assert ex.ref_type_str == expected.ref_type_str
if expected.object_type is not None:
assert ex.object_type_str == expected.object_type_str
if isinstance(ex, OmegaConfBaseException):
assert str(ex).count("\n") == expected.num_lines
with monkeypatch.context() as m:
m.setenv("OC_CAUSE", "1")
try:
expected.op(cfg)
except Exception as e:
assert e.__cause__ is not None
with monkeypatch.context() as m:
m.setenv("OC_CAUSE", "0")
try:
expected.op(cfg)
except Exception as e:
assert e.__cause__ is None
def test_assertion_error() -> None:
"""Test the case where an `AssertionError` is processed in `format_and_raise()`"""
try:
assert False
except AssertionError as exc:
try:
format_and_raise(node=None, key=None, value=None, msg=str(exc), cause=exc)
except AssertionError as exc2:
assert exc2 is exc # we expect the original exception to be raised
else:
assert False
@mark.parametrize(
"register_func",
[OmegaConf.legacy_register_resolver, OmegaConf.register_new_resolver],
)
def test_resolver_error(restore_resolvers: Any, register_func: Any) -> None:
def div(x: Any, y: Any) -> float:
return float(x) / float(y)
register_func("div", div)
c = OmegaConf.create({"div_by_zero": "${div:1,0}"})
expected_msg = dedent(
"""\
ZeroDivisionError raised while resolving interpolation: float division( by zero)?
full_key: div_by_zero
object_type=dict"""
)
with raises(InterpolationResolutionError, match=expected_msg):
c.div_by_zero
@mark.parametrize(
["create_func", "arg"],
[
(OmegaConf.create, {"a": "${b"}),
(DictConfig, "${b"),
(ListConfig, "${b"),
],
)
def test_parse_error_on_creation(create_func: Any, arg: Any) -> None:
with raises(
GrammarParseError, match=re.escape("no viable alternative at input '${b'")
):
create_func(arg)
@mark.parametrize(
["create_func", "obj"],
[
param(DictConfig, {"zz": 10}, id="dict"),
param(DictConfig, {}, id="dict_empty"),
param(DictConfig, User, id="structured"),
param(ListConfig, ["zz"], id="list"),
param(ListConfig, [], id="list_empty"),
param(OmegaConf.create, {"zz": 10}, id="create"),
],
)
def test_parent_type_error_on_creation(create_func: Any, obj: Any) -> None:
with raises(
ConfigTypeError, match=re.escape("Parent type is not omegaconf.Container")
):
create_func(obj, parent={"a"}) # bad parent
def test_cycle_when_iterating_over_parents() -> None:
c = OmegaConf.create({"x": {}})
x_node = c._get_node("x")
assert isinstance(x_node, DictConfig)
c._set_parent(x_node)
with raises(
OmegaConfBaseException,
match=re.escape("Cycle when iterating over parents of key `x`"),
):
c._get_full_key("x")
def test_get_full_key_failure_in_format_and_raise() -> None:
c = OmegaConf.create({"x": {}})
x_node = c._get_node("x")
assert isinstance(x_node, DictConfig)
# We create a cycle in the parent relationship that will trigger a RecursionError
# when trying to access `c.x`. This test verifies that this RecursionError is properly
# raised even if another exception occurs in `format_and_raise()` when trying to
# obtain the full key.
c._set_parent(x_node)
# The exception message may vary depending on the Python version and seemingly
# irrelevant code changes. As a result, we only test the "full_key" part of the
# message (which we have control on).
match = re.escape(
"full_key: <unresolvable due to ConfigCycleDetectedException: "
"Cycle when iterating over parents of key `x`>"
)
with raises(RecursionError, match=match):
c.x
def test_dict_subclass_error() -> None:
"""
Test calling OmegaConf.structured(malformed_dict_subclass).
We expect a ValidationError to be raised and a UserWarning (deprecation) to be emitted at the same time.
We use a separate function instead of adding
warning support to the giant `test_errors` function above.
"""
src = Str2Int()
src["bar"] = "qux" # type: ignore
with raises(
ValidationError,
match=re.escape("Value 'qux' of type 'str' could not be converted to Integer"),
) as einfo:
with warns_dict_subclass_deprecated(Str2Int):
OmegaConf.structured(src)
ex = einfo.value
assert isinstance(ex, OmegaConfBaseException)
assert ex.key == "bar"
assert ex.full_key == "bar"
assert ex.ref_type is None
assert ex.object_type is None
assert ex.parent_node is None
assert ex.child_node is None
|
|
from collections import OrderedDict
from enum import Enum
from types import MappingProxyType
from .common import *
acoach_value = 10000
apothecary_value = 50000
cheerleader_value = 10000
deck = Enum('deck', (
('Miscellaneous Mayhem', 'Miscellaneous Mayhem'),
('MM', 'Miscellaneous Mayhem'),
('Special Team Plays', 'Special Team Plays'),
('STP', 'Special Team Plays'),
('Magic Items', 'Magic Items'),
('MI', 'Magic Items'),
('Dirty Tricks', 'Dirty Tricks'),
('DT', 'Dirty Tricks'),
('Good Karma', 'Good Karma'),
('GK', 'Good Karma'),
('Random Events', 'Random Events'),
('RE', 'Random Events'),
('Desperate Measures', 'Desperate Measures'),
('DM', 'Desperate Measures'),
))
deck_price = MappingProxyType(OrderedDict((
(deck['Miscellaneous Mayhem'], 50000),
(deck['Special Team Plays'], 50000),
(deck['Magic Items'], 50000),
(deck['Dirty Tricks'], 50000),
(deck['Good Karma'], 100000),
(deck['Random Events'], 200000),
(deck['Desperate Measures'], 400000),
)))
card = Enum('card', (
('Badyear Git', 'Badyear Git'),
('Sprinkler Malfunction', 'Sprinkler Malfunction'),
('Eclipse', 'Eclipse'),
('Fanatic Invasion', 'Fanatic Invasion'),
('Friendly Fans', 'Friendly Fans'),
('Rowdy Fans', 'Rowdy Fans'),
('Heckler', 'Heckler'),
('Hometown Fans', 'Hometown Fans'),
('Incoming!', 'Incoming!'),
('Rogue Wizard', 'Rogue Wizard'),
('Ball Clone', 'Ball Clone'),
('Johnny Waterboy', 'Johnny Waterboy'),
('That Babe\'s Got Talent!', 'That Babe\'s Got Talent!'),
('Come To Papa!', 'Come To Papa!'),
('Dogged Defense', 'Dogged Defense'),
('Flea Flicker', 'Flea Flicker'),
('Fumblerooski', 'Fumblerooski'),
('Going the Extra Mile', 'Going the Extra Mile'),
('Heroic Leap', 'Heroic Leap'),
('New Blocking Scheme', 'New Blocking Scheme'),
('Perfect Kick', 'Perfect Kick'),
('Option Play', 'Option Play'),
('Punt', 'Punt'),
('Spectacular Catch', 'Spectacular Catch'),
('Suicide Blitz', 'Suicide Blitz'),
('Wake Up Call', 'Wake Up Call'),
('Beguiling Bracers', 'Beguiling Bracers'),
('Belt of Invunerability', 'Belt of Invunerability'),
('Fawndough\'s Headband', 'Fawndough\'s Headband'),
('Force Shield', 'Force Shield'),
('Gikta\'s Strength of Da Bear',
'Gikta\'s Strength of Da Bear'),
('Gloves of Holding', 'Gloves of Holding'),
('Inertia Dampner', 'Inertia Dampner'),
('Lucky Charm', 'Lucky Charm'),
('Magic Gloves of Jark Longarm',
'Magic Gloves of Jark Longarm'),
('Good Old Magic Codpiece', 'Good Old Magic Codpiece'),
('Rabbit\'s Foot', 'Rabbit\'s Foot'),
('Ring of Teleportation', 'Ring of Teleportation'),
('Wand of Smashing', 'Wand of Smashing'),
('Blatant Foul', 'Blatant Foul'),
('Chop Block', 'Chop Block'),
('Custard Pie', 'Custard Pie'),
('Distract', 'Distract'),
('Greased Shoes', 'Greased Shoes'),
('Gromskull\'s Exploding Runes',
'Gromskull\'s Exploding Runes'),
('Illegal Substitution', 'Illegal Substitution'),
('Kicking Boots', 'Kicking Boots'),
('Pit Trap', 'Pit Trap'),
('Spiked Ball', 'Spiked Ball'),
('Stolen Playbook', 'Stolen Playbook'),
('Trampoline Trap', 'Trampoline Trap'),
('Witch\'s Brew', 'Witch\'s Brew'),
('All Out Blitz', 'All Out Blitz'),
('Banana Skin', 'Banana Skin'),
('Butterfingers', 'Butterfingers'),
('Chainsaw', 'Chainsaw'),
('Dazed and Confused', 'Dazed and Confused'),
('Doc Bonesaw', 'Doc Bonesaw'),
('Extra Training', 'Extra Training'),
('Fan Uproar', 'Fan Uproar'),
('Hurry Up Offense', 'Hurry Up Offense'),
('Intensive Training', 'Intensive Training'),
('Unsportsmanlike Conduct', 'Unsportsmanlike Conduct'),
('Knutt\'s Spell of Awesome Strength',
'Knutt\'s Spell of Awesome Strength'),
('Lewd Maneuvers', 'Lewd Maneuvers'),
('Lurve Potion', 'Lurve Potion'),
('Magic Helmet', 'Magic Helmet'),
('Miracle Worker', 'Miracle Worker'),
('One with the Kicker', 'One with the Kicker'),
('Razzle Dazzle', 'Razzle Dazzle'),
('Suitable Pitch', 'Suitable Pitch'),
('Rune of Fear', 'Rune of Fear'),
('Scutt\'s Scroll of Weather Magic',
'Scutt\'s Scroll of Weather Magic'),
('Stiletto', 'Stiletto'),
('Team Anthem', 'Team Anthem'),
('The Fan', 'The Fan'),
('The Wall', 'The Wall'),
('Woof Woof!', 'Woof Woof!'),
('Bad Habits', 'Bad Habits'),
('Ballista', 'Ballista'),
('Blackmail', 'Blackmail'),
('Buzzing', 'Buzzing'),
('Duh, Where Am I?', 'Duh, Where Am I?'),
('Ego Trip', 'Ego Trip'),
('Zap!', 'Zap!'),
('Gimme That!', 'Gimme That!'),
('Iron Man', 'Iron Man'),
('Kid Gloves', 'Kid Gloves'),
('Knuckledusters', 'Knuckledusters'),
('Magic Sponge', 'Magic Sponge'),
('Mine', 'Mine'),
('Not-So-Secret Weapon', 'Not-So-Secret Weapon'),
('Orcidas Sponsorship', 'Orcidas Sponsorship'),
('Rakarth\'s Curse of Petty Spite',
'Rakarth\'s Curse of Petty Spite'),
('Tackling Machine', 'Tackling Machine'),
('Get \'Em Lads!', 'Get \'Em Lads!'),
('Assassin', 'Assassin'),
('Doom and Gloom', 'Doom and Gloom'),
('Da Freight Train', 'Da Freight Train'),
('Morley\'s Revenge', 'Morley\'s Revenge'),
('I am the Greatest', 'I am the Greatest'),
('Mindblow', 'Mindblow'),
('Come On Boys!', 'Come On Boys!'),
('Mysterious Old Medicine Man',
'Mysterious Old Medicine Man'),
))
cards_of_deck = MappingProxyType(OrderedDict((
(deck['Miscellaneous Mayhem'], (
card['Badyear Git'],
card['Sprinkler Malfunction'],
card['Eclipse'],
card['Fanatic Invasion'],
card['Friendly Fans'],
card['Rowdy Fans'],
card['Heckler'],
card['Hometown Fans'],
card['Incoming!'],
card['Rogue Wizard'],
card['Ball Clone'],
card['Johnny Waterboy'],
card['That Babe\'s Got Talent!'],
)),
(deck['Special Team Plays'], (
card['Come To Papa!'],
card['Dogged Defense'],
card['Flea Flicker'],
card['Fumblerooski'],
card['Going the Extra Mile'],
card['Heroic Leap'],
card['New Blocking Scheme'],
card['Perfect Kick'],
card['Option Play'],
card['Punt'],
card['Spectacular Catch'],
card['Suicide Blitz'],
card['Wake Up Call'],
)),
(deck['Magic Items'], (
card['Beguiling Bracers'],
card['Belt of Invunerability'],
card['Fawndough\'s Headband'],
card['Force Shield'],
card['Gikta\'s Strength of Da Bear'],
card['Gloves of Holding'],
card['Inertia Dampner'],
card['Lucky Charm'],
card['Magic Gloves of Jark Longarm'],
card['Good Old Magic Codpiece'],
card['Rabbit\'s Foot'],
card['Ring of Teleportation'],
card['Wand of Smashing'],
)),
(deck['Dirty Tricks'], (
card['Blatant Foul'],
card['Chop Block'],
card['Custard Pie'],
card['Distract'],
card['Greased Shoes'],
card['Gromskull\'s Exploding Runes'],
card['Illegal Substitution'],
card['Kicking Boots'],
card['Pit Trap'],
card['Spiked Ball'],
card['Stolen Playbook'],
card['Trampoline Trap'],
card['Witch\'s Brew'],
)),
(deck['Good Karma'], (
card['All Out Blitz'],
card['Banana Skin'],
card['Butterfingers'],
card['Chainsaw'],
card['Dazed and Confused'],
card['Doc Bonesaw'],
card['Extra Training'],
card['Fan Uproar'],
card['Hurry Up Offense'],
card['Intensive Training'],
card['Unsportsmanlike Conduct'],
card['Knutt\'s Spell of Awesome Strength'],
card['Lewd Maneuvers'],
card['Lurve Potion'],
card['Magic Helmet'],
card['Miracle Worker'],
card['One with the Kicker'],
card['Razzle Dazzle'],
card['Suitable Pitch'],
card['Rune of Fear'],
card['Scutt\'s Scroll of Weather Magic'],
card['Stiletto'],
card['Team Anthem'],
card['The Fan'],
card['The Wall'],
card['Woof Woof!'],
)),
(deck['Random Events'], (
card['Bad Habits'],
card['Ballista'],
card['Blackmail'],
card['Buzzing'],
card['Duh, Where Am I?'],
card['Ego Trip'],
card['Zap!'],
card['Gimme That!'],
card['Iron Man'],
card['Kid Gloves'],
card['Knuckledusters'],
card['Magic Sponge'],
card['Mine'],
card['Not-So-Secret Weapon'],
card['Orcidas Sponsorship'],
card['Rakarth\'s Curse of Petty Spite'],
card['Tackling Machine'],
card['Get \'Em Lads!'],
)),
(deck['Desperate Measures'], (
card['Assassin'],
card['Doom and Gloom'],
card['Da Freight Train'],
card['Morley\'s Revenge'],
card['I am the Greatest'],
card['Mindblow'],
card['Come On Boys!'],
card['Mysterious Old Medicine Man'],
)),
)))
casualty = Enum('casualty', (
('Badly Hurt', 'Badly Hurt'),
('Broken Ribs', 'Broken Ribs'),
('Groin Strain', 'Groin Strain'),
('Gouged Eye', 'Gouged Eye'),
('Broken Jaw', 'Broken Jaw'),
('Fractured Arm', 'Fractured Arm'),
('Fractured Leg', 'Fractured Leg'),
('Smashed Hand', 'Smashed Hand'),
('Pinched Nerve', 'Pinched Nerve'),
('Damaged Back', 'Damaged Back'),
('Smashed Knee', 'Smashed Knee'),
('Smashed Hip', 'Smashed Hip'),
('Smashed Ankle', 'Smashed Ankle'),
('Serious Concussion', 'Serious Concussion'),
('Fractured Skull', 'Fractured Skull'),
('Broken Neck', 'Broken Neck'),
('Smashed Collar Bone', 'Smashed Collar Bone'),
('DEAD', 'DEAD'),
('Dead!', 'DEAD'),
))
casualty_table = MappingProxyType(OrderedDict((
(11, casualty['Badly Hurt']),
(12, casualty['Badly Hurt']),
(13, casualty['Badly Hurt']),
(14, casualty['Badly Hurt']),
(15, casualty['Badly Hurt']),
(16, casualty['Badly Hurt']),
(17, casualty['Badly Hurt']),
(18, casualty['Badly Hurt']),
(21, casualty['Badly Hurt']),
(22, casualty['Badly Hurt']),
(23, casualty['Badly Hurt']),
(24, casualty['Badly Hurt']),
(25, casualty['Badly Hurt']),
(26, casualty['Badly Hurt']),
(27, casualty['Badly Hurt']),
(28, casualty['Badly Hurt']),
(31, casualty['Badly Hurt']),
(32, casualty['Badly Hurt']),
(33, casualty['Badly Hurt']),
(34, casualty['Badly Hurt']),
(35, casualty['Badly Hurt']),
(36, casualty['Badly Hurt']),
(37, casualty['Badly Hurt']),
(38, casualty['Badly Hurt']),
(41, casualty['Broken Ribs']),
(42, casualty['Groin Strain']),
(43, casualty['Gouged Eye']),
(44, casualty['Broken Jaw']),
(45, casualty['Fractured Arm']),
(46, casualty['Fractured Leg']),
(47, casualty['Smashed Hand']),
(48, casualty['Pinched Nerve']),
(51, casualty['Damaged Back']),
(52, casualty['Smashed Knee']),
(53, casualty['Smashed Hip']),
(54, casualty['Smashed Ankle']),
(55, casualty['Serious Concussion']),
(56, casualty['Fractured Skull']),
(57, casualty['Broken Neck']),
(58, casualty['Smashed Collar Bone']),
(61, casualty['DEAD']),
(62, casualty['DEAD']),
(63, casualty['DEAD']),
(64, casualty['DEAD']),
(65, casualty['DEAD']),
(66, casualty['DEAD']),
(67, casualty['DEAD']),
(68, casualty['DEAD']),
)))
casualty_effect = Enum('casualty_effect', (
('Miss next game', 'M'),
('M', 'M'),
('MNG', 'M'),
('Niggling Injury', 'N'),
('N', 'N'),
('-1 MA', '-MA'),
('-MA', '-MA'),
('-1 AV', '-AV'),
('-AV', '-AV'),
('-1 AG', '-AG'),
('-AG', '-AG'),
('-1 ST', '-ST'),
('-ST', '-ST'),
('Dead!', 'D'),
('D', 'D'),
('DEAD', 'D'),
))
effect_of_casualty = MappingProxyType(OrderedDict((
(casualty['Badly Hurt'], ()),
(casualty['Broken Ribs'],
(casualty_effect['Miss next game'],)),
(casualty['Groin Strain'],
(casualty_effect['Miss next game'],)),
(casualty['Gouged Eye'],
(casualty_effect['Miss next game'],)),
(casualty['Broken Jaw'],
(casualty_effect['Miss next game'],)),
(casualty['Fractured Arm'],
(casualty_effect['Miss next game'],)),
(casualty['Fractured Leg'],
(casualty_effect['Miss next game'],)),
(casualty['Smashed Hand'],
(casualty_effect['Miss next game'],)),
(casualty['Pinched Nerve'],
(casualty_effect['Miss next game'],)),
(casualty['Damaged Back'], (
casualty_effect['Niggling Injury'],
casualty_effect['Miss next game'])),
(casualty['Smashed Knee'], (
casualty_effect['Niggling Injury'],
casualty_effect['Miss next game'])),
(casualty['Smashed Hip'], (
casualty_effect['-1 MA'],
casualty_effect['Miss next game'])),
(casualty['Smashed Ankle'], (
casualty_effect['-1 MA'],
casualty_effect['Miss next game'])),
(casualty['Serious Concussion'], (
casualty_effect['-1 AV'],
casualty_effect['Miss next game'])),
(casualty['Fractured Skull'], (
casualty_effect['-1 AV'],
casualty_effect['Miss next game'])),
(casualty['Broken Neck'], (
casualty_effect['-1 AG'],
casualty_effect['Miss next game'])),
(casualty['Smashed Collar Bone'], (
casualty_effect['-1 ST'],
casualty_effect['Miss next game'])),
(casualty['DEAD'],
(casualty_effect['Dead!'],)),
)))
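# Illustrative sketch (not part of the original data module): resolving a
# casualty roll against the tables above. This assumes the integer keys of
# `casualty_table` encode the two casualty dice as a tens digit (1-6) and a
# units digit (1-8); the helper name is hypothetical.
def _example_casualty_result(d6, d8):
    result = casualty_table[d6 * 10 + d8]
    return result, effect_of_casualty[result]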
double_skill_value = 30000
fanfactor_value = 10000
inducement_price = {
'Bloodweiser Babe': {'default': 50000},
'Bribe': {'default': 100000, 'Goblin': 50000},
'Extra Team Training': {'default': 100000},
'Halfling Master Chef': {'default': 300000,
'Halfling': 100000},
'Igor': {'default': 100000},
'Wandering Apothecary': {'default': 100000},
'Wizard': {'default': 150000},
}
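# Illustrative sketch (not part of the original data module): race-specific
# inducement pricing falls back to the 'default' entry when a race has no
# special price. The helper name is hypothetical.
def _example_inducement_cost(name, race):
    prices = inducement_price[name]
    return prices.get(race, prices['default'])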
merc_extra_cost = 30000
merc_normal_skill_value = 50000
normal_skill_value = 20000
progression_title = Enum('progression_title', (
('Rookie', 'Rookie'),
('Experienced', 'Experienced'),
('Veteran', 'Veteran'),
('Emerging Star', 'Emerging Star'),
('Star Player', 'Star'),
('Star', 'Star'),
('Super Star', 'Super Star'),
('Super-Star', 'Super Star'),
('Legend', 'Legend'),
))
progression = MappingProxyType(OrderedDict((
(0, progression_title['Rookie']),
(6, progression_title['Experienced']),
(16, progression_title['Veteran']),
(31, progression_title['Emerging Star']),
(51, progression_title['Star Player']),
(76, progression_title['Super Star']),
(176, progression_title['Legend']),
)))
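# Illustrative sketch (not part of the original data module): deriving a
# player's progression title from star player points using the ascending
# thresholds in `progression`. The helper name and the SPP parameter are
# hypothetical.
def _example_progression_title(spp):
    title = progression_title['Rookie']
    for threshold, name in progression.items():
        if spp >= threshold:
            title = name
    return title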
skillcat = Enum('skillcat', (
('General', 'G'),
('G', 'G'),
('GENERAL', 'G'),
('Agility', 'A'),
('A', 'A'),
('AGILITY', 'A'),
('Strength', 'S'),
('S', 'S'),
('STRENGTH', 'S'),
('Passing', 'P'),
('P', 'P'),
('PASSING', 'P'),
('Mutation', 'M'),
('M', 'M'),
('MUTATION', 'M'),
('Extraordinary', 'Ex'),
('Ex', 'Ex'),
('EXTRAORDINARY', 'Ex'),
))
skill = Enum('skill', (
('Block', 'Block'),
('Dauntless', 'Dauntless'),
('Dirty Player', 'Dirty Player'),
('Fend', 'Fend'),
('Frenzy', 'Frenzy'),
('Kick', 'Kick'),
('Kick-Off Return', 'Kick-Off Return'),
('Pass Block', 'Pass Block'),
('Pro', 'Pro'),
('Shadowing', 'Shadowing'),
('Strip Ball', 'Strip Ball'),
('Sure Hands', 'Sure Hands'),
('Tackle', 'Tackle'),
('Wrestle', 'Wrestle'),
('Catch', 'Catch'),
('Diving Catch', 'Diving Catch'),
('Diving Tackle', 'Diving Tackle'),
('Dodge', 'Dodge'),
('Jump Up', 'Jump Up'),
('Leap', 'Leap'),
('Side Step', 'Side Step'),
('Sneaky Git', 'Sneaky Git'),
('Sprint', 'Sprint'),
('Sure Feet', 'Sure Feet'),
('Break Tackle', 'Break Tackle'),
('Grab', 'Grab'),
('Guard', 'Guard'),
('Juggernaut', 'Juggernaut'),
('Mighty Blow', 'Mighty Blow'),
('Multiple Block', 'Multiple Block'),
('Piling On', 'Piling On'),
('Stand Firm', 'Stand Firm'),
('Strong Arm', 'Strong Arm'),
('Thick Skull', 'Thick Skull'),
('Accurate', 'Accurate'),
('Dump-Off', 'Dump-Off'),
('Hail Mary Pass', 'Hail Mary Pass'),
('Hail Mary', 'Hail Mary Pass'),
('Leader', 'Leader'),
('Nerves of Steel', 'Nerves of Steel'),
('Pass', 'Pass'),
('Safe Throw', 'Safe Throw'),
('Big Hand', 'Big Hand'),
('Claw / Claws', 'Claw'),
('Claw(s)', 'Claw'),
('Claw', 'Claw'),
('Claws', 'Claw'),
('Disturbing Presence', 'Disturbing Presence'),
('Extra Arms', 'Extra Arms'),
('Foul Appearance', 'Foul Appearance'),
('Horns', 'Horns'),
('Prehensile Tail', 'Prehensile Tail'),
('Tentacles', 'Tentacles'),
('Two Heads', 'Two Heads'),
('Very Long Legs', 'Very Long Legs'),
('Always Hungry', 'Always Hungry'),
('Animosity', 'Animosity'),
('Ball & Chain', 'Ball & Chain'),
('Blood Lust', 'Blood Lust'),
('Bombardier', 'Bombardier'),
('Bone-head', 'Bone-head'),
('Bone Head', 'Bone-head'),
('Bone-Head', 'Bone-head'),
('Chainsaw', 'Chainsaw'),
('Decay', 'Decay'),
('Fan Favourite', 'Fan Favourite'),
('Hypnotic Gaze', 'Hypnotic Gaze'),
('Loner', 'Loner'),
('No Hands', 'No Hands'),
('Nurgle\'s Rot', 'Nurgle\'s Rot'),
('Really Stupid', 'Really Stupid'),
('Regeneration', 'Regeneration'),
('Right Stuff', 'Right Stuff'),
('Secret Weapon', 'Secret Weapon'),
('Stab', 'Stab'),
('Stakes', 'Stakes'),
('Stunty', 'Stunty'),
('Take Root', 'Take Root'),
('Throw Team-Mate', 'Throw Team-Mate'),
('Titchy', 'Titchy'),
('Wild Animal', 'Wild Animal'),
))
skill_by_skillcat = MappingProxyType(OrderedDict((
(skillcat['GENERAL'], frozenset((
skill['Block'],
skill['Dauntless'],
skill['Dirty Player'],
skill['Fend'],
skill['Frenzy'],
skill['Kick'],
skill['Kick-Off Return'],
skill['Pass Block'],
skill['Pro'],
skill['Shadowing'],
skill['Strip Ball'],
skill['Sure Hands'],
skill['Tackle'],
skill['Wrestle'],
))),
(skillcat['AGILITY'], frozenset((
skill['Catch'],
skill['Diving Catch'],
skill['Diving Tackle'],
skill['Dodge'],
skill['Jump Up'],
skill['Leap'],
skill['Side Step'],
skill['Sneaky Git'],
skill['Sprint'],
skill['Sure Feet'],
))),
(skillcat['STRENGTH'], frozenset((
skill['Break Tackle'],
skill['Grab'],
skill['Guard'],
skill['Juggernaut'],
skill['Mighty Blow'],
skill['Multiple Block'],
skill['Piling On'],
skill['Stand Firm'],
skill['Strong Arm'],
skill['Thick Skull'],
))),
(skillcat['PASSING'], frozenset((
skill['Accurate'],
skill['Dump-Off'],
skill['Hail Mary Pass'],
skill['Leader'],
skill['Nerves of Steel'],
skill['Pass'],
skill['Safe Throw'],
))),
(skillcat['MUTATION'], frozenset((
skill['Big Hand'],
skill['Claw / Claws'],
skill['Disturbing Presence'],
skill['Extra Arms'],
skill['Foul Appearance'],
skill['Horns'],
skill['Prehensile Tail'],
skill['Tentacles'],
skill['Two Heads'],
skill['Very Long Legs'],
))),
(skillcat['EXTRAORDINARY'], frozenset((
skill['Always Hungry'],
skill['Animosity'],
skill['Ball & Chain'],
skill['Blood Lust'],
skill['Bombardier'],
skill['Bone-head'],
skill['Chainsaw'],
skill['Decay'],
skill['Fan Favourite'],
skill['Hypnotic Gaze'],
skill['Loner'],
skill['No Hands'],
skill['Nurgle\'s Rot'],
skill['Really Stupid'],
skill['Regeneration'],
skill['Right Stuff'],
skill['Secret Weapon'],
skill['Stab'],
skill['Stakes'],
skill['Stunty'],
skill['Take Root'],
skill['Throw Team-Mate'],
skill['Titchy'],
skill['Wild Animal'],
))),
)))
stat_value = {
'+MA': 30000,
'+AG': 40000,
'+ST': 50000,
'+AV': 30000,
}
## REDUNDANT BUT USEFUL ########################################
deck_of_card = MappingProxyType(
{card: deck
for deck, cards in cards_of_deck.items()
for card in cards}
)
card_price = MappingProxyType(
{card: deck_price[deck]
for card, deck in deck_of_card.items()}
)
skillcat_by_skill = MappingProxyType(
{skill: skillcat
for skillcat, skills in skill_by_skillcat.items()
for skill in skills}
)
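# Example lookups over the derived mappings above (illustrative, not executed):
#   deck_of_card[card['Punt']]        -> deck['Special Team Plays']
#   card_price[card['Punt']]          -> 50000
#   skillcat_by_skill[skill['Block']] -> skillcat['General']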
|
|
#!/usr/bin/python3
# Creates DNS zone files for all of the domains of all of the mail users
# and mail aliases and restarts nsd.
########################################################################
import os, os.path, urllib.parse, datetime, re, hashlib, base64
import ipaddress
import rtyaml
import dns.resolver
from mailconfig import get_mail_domains
from utils import shell, load_env_vars_from_file, safe_domain_name, sort_domains
def get_dns_domains(env):
# Add all domain names in use by email users and mail aliases and ensure
# PRIMARY_HOSTNAME is in the list.
domains = set()
domains |= get_mail_domains(env)
domains.add(env['PRIMARY_HOSTNAME'])
return domains
def get_dns_zones(env):
# What domains should we create DNS zones for? Never create a zone for
# a domain & a subdomain of that domain.
domains = get_dns_domains(env)
# Exclude domains that are subdomains of other domains we know. Proceed
# by looking at shorter domains first.
zone_domains = set()
for domain in sorted(domains, key=lambda d : len(d)):
for d in zone_domains:
if domain.endswith("." + d):
# We found a parent domain already in the list.
break
else:
# 'break' did not occur: there is no parent domain.
zone_domains.add(domain)
# Make a nice and safe filename for each domain.
zonefiles = []
for domain in zone_domains:
zonefiles.append([domain, safe_domain_name(domain) + ".txt"])
# Sort the list so that the order is nice and so that nsd.conf has a
# stable order so we don't rewrite the file & restart the service
# meaninglessly.
zone_order = sort_domains([ zone[0] for zone in zonefiles ], env)
zonefiles.sort(key = lambda zone : zone_order.index(zone[0]) )
return zonefiles
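# Illustration of the zone-selection rule above (hypothetical domain names):
# given the domains {"example.com", "mail.example.com", "other.org"}, only
# "example.com" and "other.org" become zones, since "mail.example.com" is
# served from its parent's zone file.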
def get_custom_dns_config(env):
try:
return rtyaml.load(open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml')))
except:
return { }
def write_custom_dns_config(config, env):
config_yaml = rtyaml.dump(config)
with open(os.path.join(env['STORAGE_ROOT'], 'dns/custom.yaml'), "w") as f:
f.write(config_yaml)
def do_dns_update(env, force=False):
# What domains (and their zone filenames) should we build?
domains = get_dns_domains(env)
zonefiles = get_dns_zones(env)
# Custom records to add to zones.
additional_records = get_custom_dns_config(env)
# Write zone files.
os.makedirs('/etc/nsd/zones', exist_ok=True)
updated_domains = []
for i, (domain, zonefile) in enumerate(zonefiles):
# Build the records to put in the zone.
records = build_zone(domain, domains, additional_records, env)
# See if the zone has changed, and if so update the serial number
# and write the zone file.
if not write_nsd_zone(domain, "/etc/nsd/zones/" + zonefile, records, env, force):
# Zone was not updated. There were no changes.
continue
# If this is a .justtesting.email domain, then post the update.
try:
justtestingdotemail(domain, records)
except:
# Hmm. Might be a network issue. If we stop now, will we end
# up in an inconsistent state? Let's just continue.
pass
# Mark that we just updated this domain.
updated_domains.append(domain)
# Sign the zone.
#
# Every time we sign the zone we get a new result, which means
# we can't sign a zone without bumping the zone's serial number.
# Thus we only sign a zone if write_nsd_zone returned True
# indicating the zone changed, and thus it got a new serial number.
# write_nsd_zone is smart enough to check if a zone's signature
# is nearing expiration and if so it'll bump the serial number
# and return True so we get a chance to re-sign it.
sign_zone(domain, zonefile, env)
# Now that all zones are signed (some might not have changed and so didn't
# just get signed now, but were before) update the zone filename so nsd.conf
# uses the signed file.
for i in range(len(zonefiles)):
zonefiles[i][1] += ".signed"
# Write the main nsd.conf file.
if write_nsd_conf(zonefiles, additional_records, env):
# Make sure updated_domains contains *something* if we wrote an updated
# nsd.conf so that we know to restart nsd.
if len(updated_domains) == 0:
updated_domains.append("DNS configuration")
# Kick nsd if anything changed.
if len(updated_domains) > 0:
shell('check_call', ["/usr/sbin/service", "nsd", "restart"])
# Write the OpenDKIM configuration tables.
if write_opendkim_tables(domains, env):
# Settings changed. Kick opendkim.
shell('check_call', ["/usr/sbin/service", "opendkim", "restart"])
if len(updated_domains) == 0:
# OpenDKIM was the only thing that changed; record it so the caller reports an update.
updated_domains.append("OpenDKIM configuration")
if len(updated_domains) == 0:
# if nothing was updated (except maybe OpenDKIM's files), don't show any output
return ""
else:
return "updated DNS: " + ",".join(updated_domains) + "\n"
########################################################################
def build_zone(domain, all_domains, additional_records, env, is_zone=True):
records = []
# For top-level zones, define the authoritative name servers.
#
# Normally we are our own nameservers. Some TLDs require two distinct IP addresses,
# so we allow the user to override the second nameserver definition so that
# secondary DNS can be set up elsewhere.
#
# 'False' in the tuple indicates these records would not be used if the zone
# is managed outside of the box.
if is_zone:
# Obligatory definition of ns1.PRIMARY_HOSTNAME.
records.append((None, "NS", "ns1.%s." % env["PRIMARY_HOSTNAME"], False))
# Define ns2.PRIMARY_HOSTNAME or whatever the user overrides.
secondary_ns = additional_records.get("_secondary_nameserver", "ns2." + env["PRIMARY_HOSTNAME"])
records.append((None, "NS", secondary_ns+'.', False))
# In PRIMARY_HOSTNAME...
if domain == env["PRIMARY_HOSTNAME"]:
# Define ns1 and ns2.
# 'False' in the tuple indicates these records would not be used if the zone
# is managed outside of the box.
records.append(("ns1", "A", env["PUBLIC_IP"], False))
records.append(("ns2", "A", env["PUBLIC_IP"], False))
if env.get('PUBLIC_IPV6'):
records.append(("ns1", "AAAA", env["PUBLIC_IPV6"], False))
records.append(("ns2", "AAAA", env["PUBLIC_IPV6"], False))
# Set the A/AAAA records. Do this early for the PRIMARY_HOSTNAME so that the user cannot override them
# and we can provide different explanatory text.
records.append((None, "A", env["PUBLIC_IP"], "Required. Sets the IP address of the box."))
if env.get("PUBLIC_IPV6"): records.append((None, "AAAA", env["PUBLIC_IPV6"], "Required. Sets the IPv6 address of the box."))
# Add a DANE TLSA record for SMTP.
records.append(("_25._tcp", "TLSA", build_tlsa_record(env), "Recommended when DNSSEC is enabled. Advertises to mail servers connecting to the box that mandatory encryption should be used."))
# Add a SSHFP records to help SSH key validation. One per available SSH key on this system.
for value in build_sshfp_records():
records.append((None, "SSHFP", value, "Optional. Provides an out-of-band method for verifying an SSH key before connecting. Use 'VerifyHostKeyDNS yes' (or 'VerifyHostKeyDNS ask') when connecting with ssh."))
# The MX record says where email for the domain should be delivered: Here!
records.append((None, "MX", "10 %s." % env["PRIMARY_HOSTNAME"], "Required. Specifies the hostname (and priority) of the machine that handles @%s mail." % domain))
# SPF record: Permit the box ('mx', see above) to send mail on behalf of
# the domain, and no one else.
records.append((None, "TXT", 'v=spf1 mx -all', "Recommended. Specifies that only the box is permitted to send @%s mail." % domain))
# Add DNS records for any subdomains of this domain. We should not have a zone for
# both a domain and one of its subdomains.
subdomains = [d for d in all_domains if d.endswith("." + domain)]
for subdomain in subdomains:
subdomain_qname = subdomain[0:-len("." + domain)]
subzone = build_zone(subdomain, [], additional_records, env, is_zone=False)
for child_qname, child_rtype, child_value, child_explanation in subzone:
if child_qname == None:
child_qname = subdomain_qname
else:
child_qname += "." + subdomain_qname
records.append((child_qname, child_rtype, child_value, child_explanation))
def has_rec(qname, rtype, prefix=None):
for rec in records:
if rec[0] == qname and rec[1] == rtype and (prefix is None or rec[2].startswith(prefix)):
return True
return False
# The user may set other records that don't conflict with our settings.
for qname, rtype, value in get_custom_records(domain, additional_records, env):
if has_rec(qname, rtype): continue
records.append((qname, rtype, value, "(Set by user.)"))
# Add defaults if not overridden by the user's custom settings (and not otherwise configured).
# Any "CNAME" record on the qname overrides A and AAAA.
defaults = [
(None, "A", env["PUBLIC_IP"], "Required. May have a different value. Sets the IP address that %s resolves to for web hosting and other services besides mail. The A record must be present but its value does not affect mail delivery." % domain),
("www", "A", env["PUBLIC_IP"], "Optional. Sets the IP address that www.%s resolves to, e.g. for web hosting." % domain),
(None, "AAAA", env.get('PUBLIC_IPV6'), "Optional. Sets the IPv6 address that %s resolves to, e.g. for web hosting. (It is not necessary for receiving mail on this domain.)" % domain),
("www", "AAAA", env.get('PUBLIC_IPV6'), "Optional. Sets the IPv6 address that www.%s resolves to, e.g. for web hosting." % domain),
]
for qname, rtype, value, explanation in defaults:
if value is None or value.strip() == "": continue # skip IPV6 if not set
if not is_zone and qname == "www": continue # don't create any default 'www' subdomains on what are themselves subdomains
# Set the default record, but only if:
# (1) there is no user-set record of the same type already,
# (2) there is no CNAME record already, since you can't set both and who knows what takes precedence, and
# (3) there is no A record already (if this is an A record this is a dup of (1); if this is an AAAA record, don't set a default AAAA record when the user sets a custom A record, since the default wouldn't make sense and it should not resolve if the user doesn't provide a new AAAA record).
if not has_rec(qname, rtype) and not has_rec(qname, "CNAME") and not has_rec(qname, "A"):
records.append((qname, rtype, value, explanation))
# Append the DKIM TXT record to the zone as generated by OpenDKIM.
opendkim_record_file = os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.txt')
with open(opendkim_record_file) as orf:
m = re.match(r'(\S+)\s+IN\s+TXT\s+\( "([^"]+)"\s+"([^"]+)"\s*\)', orf.read(), re.S)
val = m.group(2) + m.group(3)
records.append((m.group(1), "TXT", val, "Recommended. Provides a way for recipients to verify that this machine sent @%s mail." % domain))
# Append a DMARC record.
records.append(("_dmarc", "TXT", 'v=DMARC1; p=quarantine', "Optional. Specifies that mail that does not originate from the box but claims to be from @%s is suspect and should be quarantined by the recipient's mail system." % domain))
# For any subdomain with an A record but no SPF or DMARC record, add strict policy records.
all_resolvable_qnames = set(r[0] for r in records if r[1] in ("A", "AAAA"))
for qname in all_resolvable_qnames:
if not has_rec(qname, "TXT", prefix="v=spf1 "):
records.append((qname, "TXT", 'v=spf1 a mx -all', "Prevents unauthorized use of this domain name for outbound mail by requiring outbound mail to originate from the indicated host(s)."))
dmarc_qname = "_dmarc" + ("" if qname is None else "." + qname)
if not has_rec(dmarc_qname, "TXT", prefix="v=DMARC1; "):
records.append((dmarc_qname, "TXT", 'v=DMARC1; p=reject', "Prevents unauthorized use of this domain name for outbound mail by requiring a valid DKIM signature."))
# Sort the records. The None records *must* go first in the nsd zone file. Otherwise it doesn't matter.
records.sort(key = lambda rec : list(reversed(rec[0].split(".")) if rec[0] is not None else ""))
return records
########################################################################
def get_custom_records(domain, additional_records, env):
for qname, value in additional_records.items():
# We don't count the secondary nameserver config (if present) as a record - that would just be
# confusing to users. Instead it is accessed/manipulated directly via (get/set)_custom_dns_config.
if qname == "_secondary_nameserver": continue
# Is this record for the domain or one of its subdomains?
# If `domain` is None, return records for all domains.
if domain is not None and qname != domain and not qname.endswith("." + domain): continue
# Turn the fully qualified domain name in the YAML file into
# our short form (None => domain, or a relative QNAME) if
# domain is not None.
if domain is not None:
if qname == domain:
qname = None
else:
qname = qname[0:len(qname)-len("." + domain)]
# Short form. Mapping a domain name to a string is short-hand
# for creating A records.
if isinstance(value, str):
values = [("A", value)]
if value == "local" and env.get("PUBLIC_IPV6"):
values.append( ("AAAA", value) )
# A mapping creates multiple records.
elif isinstance(value, dict):
values = value.items()
# No other type of data is allowed.
else:
raise ValueError()
for rtype, value2 in values:
# The "local" keyword on A/AAAA records are short-hand for our own IP.
# This also flags for web configuration that the user wants a website here.
if rtype == "A" and value2 == "local":
value2 = env["PUBLIC_IP"]
if rtype == "AAAA" and value2 == "local":
if "PUBLIC_IPV6" not in env: continue # no IPv6 address is available so don't set anything
value2 = env["PUBLIC_IPV6"]
yield (qname, rtype, value2)
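# Example of the shapes accepted in dns/custom.yaml (hypothetical values):
#
#   blog.example.com: 192.0.2.1      # short form: creates an A record
#   app.example.com:                 # mapping form: one entry per record type
#     A: local                       # 'local' expands to this box's own IP
#     TXT: "some value"
#   _secondary_nameserver: ns2.example.net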
########################################################################
def build_tlsa_record(env):
# A DANE TLSA record in DNS specifies that connections on a port
# must use TLS and the certificate must match a particular certificate.
#
# Thanks to http://blog.huque.com/2012/10/dnssec-and-certificates.html
# for explaining all of this!
# Get the hex SHA256 of the DER-encoded server certificate:
certder = shell("check_output", [
"/usr/bin/openssl",
"x509",
"-in", os.path.join(env["STORAGE_ROOT"], "ssl", "ssl_certificate.pem"),
"-outform", "DER"
],
return_bytes=True)
certhash = hashlib.sha256(certder).hexdigest()
# Specify the TLSA parameters:
# 3: This is the certificate that the client should trust. No CA is needed.
# 0: The whole certificate is matched.
# 1: The certificate is SHA256'd here.
return "3 0 1 " + certhash
def build_sshfp_records():
# The SSHFP record is a way for us to embed this server's SSH public
# key fingerprint into the DNS so that remote hosts have an out-of-band
# method to confirm the fingerprint. See RFC 4255 and RFC 6594. This
# depends on DNSSEC.
#
# On the client side, set SSH's VerifyHostKeyDNS option to 'ask' to
# include this info in the key verification prompt or 'yes' to trust
# the SSHFP record.
#
# See https://github.com/xelerance/sshfp for inspiration.
algorithm_number = {
"ssh-rsa": 1,
"ssh-dss": 2,
"ecdsa-sha2-nistp256": 3,
}
# Get our local fingerprints by running ssh-keyscan. The output looks
# like the known_hosts file: hostname, keytype, fingerprint. The order
# of the output is arbitrary, so sort it to prevent spurious updates
# to the zone file (that trigger bumping the serial number).
keys = shell("check_output", ["ssh-keyscan", "localhost"])
for key in sorted(keys.split("\n")):
if key.strip() == "" or key[0] == "#": continue
try:
host, keytype, pubkey = key.split(" ")
yield "%d %d ( %s )" % (
algorithm_number[keytype],
2, # specifies we are using SHA-256 on next line
hashlib.sha256(base64.b64decode(pubkey)).hexdigest().upper(),
)
except:
# Lots of things can go wrong. Don't let it disturb the DNS
# zone.
pass
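# Each yielded value becomes an SSHFP record of the form (illustrative):
#   example.com. IN SSHFP 1 2 ( <uppercase hex SHA-256 of the host key> )
# where the first field is the key algorithm number (1 = ssh-rsa here) and
# the second is the fingerprint type (2 = SHA-256).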
########################################################################
def write_nsd_zone(domain, zonefile, records, env, force):
# On the $ORIGIN line, there's typically a ';' comment at the end explaining
# what the $ORIGIN line does. Any further data after the domain confuses
# ldns-signzone, however. It used to say '; default zone domain'.
# The SOA contact address for all of the domains on this system is hostmaster
# @ the PRIMARY_HOSTNAME. Hopefully that's legit.
# For the refresh through TTL fields, a good reference is:
# http://www.peerwisdom.org/2013/05/15/dns-understanding-the-soa-record/
zone = """
$ORIGIN {domain}.
$TTL 1800 ; default time to live
@ IN SOA ns1.{primary_domain}. hostmaster.{primary_domain}. (
__SERIAL__ ; serial number
7200 ; Refresh (secondary nameserver update interval)
1800 ; Retry (when refresh fails, how often to try again)
1209600 ; Expire (when refresh fails, how long secondary nameserver will keep records around anyway)
1800 ; Negative TTL (how long negative responses are cached)
)
"""
# Replace replacement strings.
zone = zone.format(domain=domain.encode("idna").decode("ascii"), primary_domain=env["PRIMARY_HOSTNAME"].encode("idna").decode("ascii"))
# Add records.
for subdomain, querytype, value, explanation in records:
if subdomain:
zone += subdomain.encode("idna").decode("ascii")
zone += "\tIN\t" + querytype + "\t"
if querytype == "TXT":
# Quote and escape.
value = value.replace('\\', '\\\\') # escape backslashes
value = value.replace('"', '\\"') # escape quotes
value = '"' + value + '"' # wrap in quotes
elif querytype in ("NS", "CNAME"):
# These records must be IDNA-encoded.
value = value.encode("idna").decode("ascii")
elif querytype == "MX":
# Also IDNA-encoded, but must parse first.
priority, host = value.split(" ", 1)
host = host.encode("idna").decode("ascii")
value = priority + " " + host
zone += value + "\n"
# DNSSEC requires re-signing a zone periodically. That requires
# bumping the serial number even if no other records have changed.
# We don't see the DNSSEC records yet, so we have to figure out
# if a re-signing is necessary so we can prematurely bump the
# serial number.
force_bump = False
if not os.path.exists(zonefile + ".signed"):
# No signed file yet. Shouldn't normally happen unless a box
# is going from not using DNSSEC to using DNSSEC.
force_bump = True
else:
# We've signed the domain. Check if we are close to the expiration
# time of the signature. If so, we'll force a bump of the serial
# number so we can re-sign it.
with open(zonefile + ".signed") as f:
signed_zone = f.read()
expiration_times = re.findall(r"\sRRSIG\s+SOA\s+\d+\s+\d+\s\d+\s+(\d{14})", signed_zone)
if len(expiration_times) == 0:
# weird
force_bump = True
else:
# All of the times should be the same, but if not choose the soonest.
expiration_time = min(expiration_times)
expiration_time = datetime.datetime.strptime(expiration_time, "%Y%m%d%H%M%S")
if expiration_time - datetime.datetime.now() < datetime.timedelta(days=3):
# We're within three days of the expiration, so bump serial & resign.
force_bump = True
# Set the serial number.
serial = datetime.datetime.now().strftime("%Y%m%d00")
if os.path.exists(zonefile):
# If the zone file already exists, compare its contents (ignoring the serial
# number) to see if an update is needed, and make sure any new serial number
# is greater than the existing one.
with open(zonefile) as f:
existing_zone = f.read()
m = re.search(r"(\d+)\s*;\s*serial number", existing_zone)
if m:
# Clear out the serial number in the existing zone file for the
# purposes of seeing if anything *else* in the zone has changed.
existing_serial = m.group(1)
existing_zone = existing_zone.replace(m.group(0), "__SERIAL__ ; serial number")
# If the existing zone is the same as the new zone (modulo the serial number),
# there is no need to update the file. Unless we're forcing a bump.
if zone == existing_zone and not force_bump and not force:
return False
# If the existing serial is not less than a serial number
# based on the current date plus 00, increment it. Otherwise,
# the serial number is less than our desired new serial number
# so we'll use the desired new number.
if existing_serial >= serial:
serial = str(int(existing_serial) + 1)
zone = zone.replace("__SERIAL__", serial)
# Write the zone file.
with open(zonefile, "w") as f:
f.write(zone)
return True # file is updated
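# Serial-number example (illustrative): the first write on 2015-01-02 gets
# serial 2015010200; later rewrites on the same day (or forced re-signings)
# produce 2015010201, 2015010202, ..., so secondary nameservers always see
# the serial increase.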
########################################################################
def write_nsd_conf(zonefiles, additional_records, env):
# Basic header.
nsdconf = """
server:
hide-version: yes
# identify the server (CH TXT ID.SERVER entry).
identity: ""
# The directory for zonefile: files.
zonesdir: "/etc/nsd/zones"
"""
# Since we have bind9 listening on localhost for locally-generated
# DNS queries that require a recursive nameserver, and the system
# might have other network interfaces for e.g. tunnelling, we have
# to be specific about the network interfaces that nsd binds to.
for ipaddr in (env.get("PRIVATE_IP", "") + " " + env.get("PRIVATE_IPV6", "")).split(" "):
if ipaddr == "": continue
nsdconf += " ip-address: %s\n" % ipaddr
# Append the zones.
for domain, zonefile in zonefiles:
nsdconf += """
zone:
name: %s
zonefile: %s
""" % (domain.encode("idna").decode("ascii"), zonefile)
# If a custom secondary nameserver has been set, allow zone transfers
# and notifies to that nameserver.
if additional_records.get("_secondary_nameserver"):
# Get the IP address of the nameserver by resolving it.
hostname = additional_records.get("_secondary_nameserver")
resolver = dns.resolver.get_default_resolver()
response = dns.resolver.query(hostname+'.', "A")
ipaddr = str(response[0])
nsdconf += """\tnotify: %s NOKEY
provide-xfr: %s NOKEY
""" % (ipaddr, ipaddr)
# Check if the nsd.conf is changing. If it isn't changing,
# return False to flag that no change was made.
with open("/etc/nsd/nsd.conf") as f:
if f.read() == nsdconf:
return False
with open("/etc/nsd/nsd.conf", "w") as f:
f.write(nsdconf)
return True
########################################################################
def dnssec_choose_algo(domain, env):
if '.' in domain and domain.rsplit('.')[-1] in \
("email", "guide", "fund"):
# At GoDaddy, RSASHA256 is the only algorithm supported
# for .email and .guide.
# A variety of algorithms are supported for .fund. This
# is preferred.
return "RSASHA256"
# For any domain we were able to sign before, don't change the algorithm
# on existing users. We'll probably want to migrate to SHA256 later.
return "RSASHA1-NSEC3-SHA1"
def sign_zone(domain, zonefile, env):
algo = dnssec_choose_algo(domain, env)
dnssec_keys = load_env_vars_from_file(os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/%s.conf' % algo))
# From here, use the IDNA encoding of the domain name.
domain = domain.encode("idna").decode("ascii")
# In order to use the same keys for all domains, we have to generate
# a new .key file with a DNSSEC record for the specific domain. We
# can reuse the same key, but it won't validate without a DNSSEC
# record specifically for the domain.
#
# Copy the .key and .private files to /tmp to patch them up.
#
# Use os.umask and open().write() to securely create a copy that only
# we (root) can read.
files_to_kill = []
for key in ("KSK", "ZSK"):
if dnssec_keys.get(key, "").strip() == "": raise Exception("DNSSEC is not properly set up.")
oldkeyfn = os.path.join(env['STORAGE_ROOT'], 'dns/dnssec/' + dnssec_keys[key])
newkeyfn = '/tmp/' + dnssec_keys[key].replace("_domain_", domain)
dnssec_keys[key] = newkeyfn
for ext in (".private", ".key"):
if not os.path.exists(oldkeyfn + ext): raise Exception("DNSSEC is not properly set up.")
with open(oldkeyfn + ext, "r") as fr:
keydata = fr.read()
keydata = keydata.replace("_domain_", domain) # trick ldns-signkey into letting our generic key be used by this zone
fn = newkeyfn + ext
prev_umask = os.umask(0o77) # ensure written file is not world-readable
try:
with open(fn, "w") as fw:
fw.write(keydata)
finally:
os.umask(prev_umask) # other files we write should be world-readable
files_to_kill.append(fn)
# Do the signing.
expiry_date = (datetime.datetime.now() + datetime.timedelta(days=30)).strftime("%Y%m%d")
shell('check_call', ["/usr/bin/ldns-signzone",
# expire the zone after 30 days
"-e", expiry_date,
# use NSEC3
"-n",
# zonefile to sign
"/etc/nsd/zones/" + zonefile,
# keys to sign with (order doesn't matter -- it'll figure it out)
dnssec_keys["KSK"],
dnssec_keys["ZSK"],
])
# Create a DS record based on the patched-up key files. The DS record is specific to the
# zone being signed, so we can't use the .ds files generated when we created the keys.
# The DS record points to the KSK only. Write this next to the zone file so we can
# get it later to give to the user with instructions on what to do with it.
#
# We want to be able to validate DS records too, but multiple forms may be valid depending
# on the digest type. So we'll write all (both) valid records. Only one DS record should
# actually be deployed. Preferably the first.
with open("/etc/nsd/zones/" + zonefile + ".ds", "w") as f:
for digest_type in ('2', '1'):
rr_ds = shell('check_output', ["/usr/bin/ldns-key2ds",
"-n", # output to stdout
"-" + digest_type, # 1=SHA1, 2=SHA256
dnssec_keys["KSK"] + ".key"
])
f.write(rr_ds)
# Remove our temporary file.
for fn in files_to_kill:
os.unlink(fn)
########################################################################
def write_opendkim_tables(domains, env):
# Append a record to OpenDKIM's KeyTable and SigningTable for each domain
# that we send mail from (zones and all subdomains).
opendkim_key_file = os.path.join(env['STORAGE_ROOT'], 'mail/dkim/mail.private')
if not os.path.exists(opendkim_key_file):
# Looks like OpenDKIM is not installed.
return False
config = {
# The SigningTable maps email addresses to a key in the KeyTable that
# specifies signing information for matching email addresses. Here we
# map each domain to a same-named key.
#
# Elsewhere we set the DMARC policy for each domain such that mail claiming
# to be From: the domain must be signed with a DKIM key on the same domain.
# So we must have a separate KeyTable entry for each domain.
"SigningTable":
"".join(
"*@{domain} {domain}\n".format(domain=domain)
for domain in domains
),
# The KeyTable specifies the signing domain, the DKIM selector, and the
# path to the private key to use for signing some mail. Per DMARC, the
# signing domain must match the sender's From: domain.
"KeyTable":
"".join(
"{domain} {domain}:mail:{key_file}\n".format(domain=domain, key_file=opendkim_key_file)
for domain in domains
),
}
did_update = False
for filename, content in config.items():
# Don't write the file if it doesn't need an update.
if os.path.exists("/etc/opendkim/" + filename):
with open("/etc/opendkim/" + filename) as f:
if f.read() == content:
continue
# The contents needs to change.
with open("/etc/opendkim/" + filename, "w") as f:
f.write(content)
did_update = True
# Return whether the files changed. If they didn't change, there's
# no need to kick the opendkim process.
return did_update
########################################################################
def set_custom_dns_record(qname, rtype, value, env):
# validate qname
for zone, fn in get_dns_zones(env):
# It must match a zone apex or be a subdomain of a zone
# that we are otherwise hosting.
if qname == zone or qname.endswith("."+zone):
break
else:
# No match.
raise ValueError("%s is not a domain name or a subdomain of a domain name managed by this box." % qname)
# validate rtype
rtype = rtype.upper()
if value is not None:
if rtype in ("A", "AAAA"):
v = ipaddress.ip_address(value)
if rtype == "A" and not isinstance(v, ipaddress.IPv4Address): raise ValueError("That's an IPv6 address.")
if rtype == "AAAA" and not isinstance(v, ipaddress.IPv6Address): raise ValueError("That's an IPv4 address.")
elif rtype in ("CNAME", "TXT", "SRV", "MX"):
# anything goes
pass
else:
raise ValueError("Unknown record type '%s'." % rtype)
# load existing config
config = get_custom_dns_config(env)
# update
if qname not in config:
if value is None:
# Is asking to delete a record that does not exist.
return False
elif rtype == "A":
# Add this record using the short form 'qname: value'.
config[qname] = value
else:
# Add this record. This is the qname's first record.
config[qname] = { rtype: value }
else:
if isinstance(config[qname], str):
# This is a short-form 'qname: value' implicit-A record.
if value is None and rtype != "A":
# Is asking to delete a record that doesn't exist.
return False
elif value is None and rtype == "A":
# Delete record.
del config[qname]
elif rtype == "A":
# Update, keeping short form.
if config[qname] == value:
# No change.
return False
config[qname] = value
else:
# Expand short form so we can add a new record type.
config[qname] = { "A": config[qname], rtype: value }
else:
# This is the qname: { ... } (dict) format.
if value is None:
if rtype not in config[qname]:
# Is asking to delete a record that doesn't exist.
return False
else:
# Delete the record. If it's the last record, delete the domain.
del config[qname][rtype]
if len(config[qname]) == 0:
del config[qname]
else:
# Update the record.
if config[qname].get(rtype) == value:
# No change.
return False
config[qname][rtype] = value
# serialize & save
write_custom_dns_config(config, env)
return True
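# Illustrative effect on dns/custom.yaml (hypothetical domain and values):
#   set_custom_dns_record("blog.example.com", "A", "192.0.2.1", env)
#     -> blog.example.com: 192.0.2.1                        (short form)
#   set_custom_dns_record("blog.example.com", "TXT", "v=spf1 -all", env)
#     -> blog.example.com: {A: 192.0.2.1, TXT: v=spf1 -all} (expanded form)
#   set_custom_dns_record("blog.example.com", "A", None, env)
#     -> removes the A entry; the domain key is dropped once it is empty.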
########################################################################
def set_secondary_dns(hostname, env):
config = get_custom_dns_config(env)
if hostname in (None, ""):
# Clear.
if "_secondary_nameserver" in config:
del config["_secondary_nameserver"]
else:
# Validate.
hostname = hostname.strip().lower()
resolver = dns.resolver.get_default_resolver()
try:
response = dns.resolver.query(hostname, "A")
except (dns.resolver.NoNameservers, dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
raise ValueError("Could not resolve the IP address of %s." % hostname)
# Set.
config["_secondary_nameserver"] = hostname
# Save and apply.
write_custom_dns_config(config, env)
return do_dns_update(env)
########################################################################
def justtestingdotemail(domain, records):
# If the domain is a subdomain of justtesting.email, which we own,
# automatically populate the zone where it is set up on dns4e.com.
# Ideally if dns4e.com supported NS records we would just have it
# delegate DNS to us, but instead we will populate the whole zone.
import subprocess, json, urllib.parse
if not domain.endswith(".justtesting.email"):
return
for subdomain, querytype, value, explanation in records:
if querytype in ("NS",): continue
if subdomain in ("www", "ns1", "ns2"): continue # don't do unnecessary things
if subdomain == None:
subdomain = domain
else:
subdomain = subdomain + "." + domain
if querytype == "TXT":
# nsd requires parentheses around txt records with multiple parts,
# but DNS4E requires there be no parentheses; also it goes into
# nsd with a newline and a tab, which we replace with a space here
value = re.sub(r"^\s*\(\s*([\w\W]*)\)", r"\1", value)
value = re.sub(r"\s+", " ", value)
else:
continue
print("Updating DNS for %s/%s..." % (subdomain, querytype))
resp = json.loads(subprocess.check_output([
"curl",
"-s",
"https://api.dns4e.com/v7/%s/%s" % (urllib.parse.quote(subdomain), querytype.lower()),
"--user", "2ddbd8e88ed1495fa0ec:A97TDJV26CVUJS6hqAs0CKnhj4HvjTM7MwAAg8xb",
"--data", "record=%s" % urllib.parse.quote(value),
]).decode("utf8"))
print("\t...", resp.get("message", "?"))
########################################################################
def build_recommended_dns(env):
ret = []
domains = get_dns_domains(env)
zonefiles = get_dns_zones(env)
additional_records = get_custom_dns_config(env)
for domain, zonefile in zonefiles:
records = build_zone(domain, domains, additional_records, env)
# remove records that we don't display
records = [r for r in records if r[3] is not False]
# put Required at the top, then Recommended, then everything else
records.sort(key = lambda r : 0 if r[3].startswith("Required.") else (1 if r[3].startswith("Recommended.") else 2))
# expand qnames
for i in range(len(records)):
if records[i][0] is None:
qname = domain
else:
qname = records[i][0] + "." + domain
records[i] = {
"qname": qname,
"rtype": records[i][1],
"value": records[i][2],
"explanation": records[i][3],
}
# return
ret.append((domain, records))
return ret
if __name__ == "__main__":
from utils import load_environment
env = load_environment()
for zone, records in build_recommended_dns(env):
for record in records:
print("; " + record['explanation'])
print(record['qname'], record['rtype'], record['value'], sep="\t")
print()
|
|
from __future__ import annotations
import itertools
import random
import struct
from abc import ABCMeta
from math import cos, sin
from pathlib import Path
from typing import List, Optional, Union
from PIL import Image
import elma.packing
from elma.constants import VERSION_ELMA
from elma.render import LevelRenderer
from elma.utils import null_padded, BoundingBox, check_writable_file
__all__ = [
"Point",
"Obj",
"Picture",
"Polygon",
"Top10Time",
"Top10",
"Level",
"Frame",
"Event",
"ObjectTouchEvent",
"TurnEvent",
"LeftVoltEvent",
"RightVoltEvent",
"GroundTouchEvent",
"AppleTouchEvent",
"Replay",
]
class Point(object):
"""
Represents a single 2D point.
Attributes:
x (float): The x-coordinate of the point.
y (float): The y-coordinate of the point.
"""
def __init__(self, x: float, y: float) -> None:
self.x = x
self.y = y
def __repr__(self) -> str:
return 'Point(x: %s, y: %s)' % (self.x, self.y)
def __eq__(self, other_point: object) -> bool:
if not isinstance(other_point, Point):
return NotImplemented
return self.x == other_point.x and self.y == other_point.y
class Obj(object):
"""
Represents an Elasto Mania level object, which can be one of: flower, food,
killer, start.
Attributes:
point (Point): The 2D Point that represents the position of the object.
type (int): The type of the object, which should be one of:
Obj.FLOWER, Obj.FOOD, Obj.KILLER, Obj.START.
gravity (int): The gravity of the object, if the object is a food
object. It should be one of: Obj.GRAVITY_NORMAL, Obj.GRAVITY_UP,
Obj.GRAVITY_DOWN, Obj.GRAVITY_LEFT, Obj.GRAVITY_RIGHT.
animation_number (int): The animation number of the object.
"""
FLOWER = 1
FOOD = 2
KILLER = 3
START = 4
GRAVITY_NORMAL = 0
GRAVITY_UP = 1
GRAVITY_DOWN = 2
GRAVITY_LEFT = 3
GRAVITY_RIGHT = 4
def __init__(self,
point: Point,
type: int,
gravity: int = GRAVITY_NORMAL,
animation_number: int = 1) -> None:
self.point = point
self.type = type
self.gravity = gravity
self.animation_number = animation_number
def __repr__(self) -> str:
return (
'Obj(point: %s, type: %s, gravity: %s, animation_number: %s)' %
(self.point, self.type, self.gravity, self.animation_number))
def __eq__(self, other_obj: object) -> bool:
if not isinstance(other_obj, Obj):
return NotImplemented
return (self.point == other_obj.point and
self.type == other_obj.type and
self.gravity == other_obj.gravity and
self.animation_number == other_obj.animation_number)
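# A small sketch (illustrative helper and coordinates) of constructing level
# objects with the constants above: a start position, a flower (exit) and an
# apple (food) with upwards gravity.
def _example_objects():
    start = Obj(Point(0.0, 0.0), Obj.START)
    flower = Obj(Point(10.0, 0.0), Obj.FLOWER)
    apple = Obj(Point(5.0, -2.0), Obj.FOOD, gravity=Obj.GRAVITY_UP)
    return [start, flower, apple]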
class Picture(object):
"""
Represents an Elasto Mania level picture.
Attributes:
point (Point): The 2D Point that represents the position of the object.
picture_name (string): The name of the picture resource to use, without
.PCX, e.g. 'BARREL'.
texture_name (string): The name of the texture resource to use, without
.PCX, e.g. 'STONE1'.
mask_name (string): The name of the mask resource to use, without
.PCX, e.g. 'MASKHOR'.
distance (int): The z-ordering distance of the picture. Should be in
the range 1-999.
clipping (int): The clipping of the picture. Should be one of:
Picture.CLIPPING_U, Picture.CLIPPING_G, Picture.CLIPPING_S.
"""
CLIPPING_U = 0
CLIPPING_G = 1
CLIPPING_S = 2
def __init__(self,
point: Point,
picture_name: str = '',
texture_name: str = '',
mask_name: str = '',
distance: int = 500,
clipping: int = CLIPPING_U) -> None:
self.point = point
self.picture_name = picture_name
self.texture_name = texture_name
self.mask_name = mask_name
self.distance = distance
self.clipping = clipping
def __repr__(self) -> str:
return (
('Picture(point: %s, picture_name: %s, texture_name: %s, ' +
'mask_name: %s, distance: %s, clipping: %s)') %
(self.point, self.picture_name, self.texture_name,
self.mask_name, self.distance, self.clipping))
def __eq__(self, other_picture: object) -> bool:
if not isinstance(other_picture, Picture):
return NotImplemented
return (self.point == other_picture.point and
self.picture_name == other_picture.picture_name and
self.texture_name == other_picture.texture_name and
self.mask_name == other_picture.mask_name and
self.distance == other_picture.distance and
self.clipping == other_picture.clipping)
class Polygon(object):
"""
Represents an Elasto Mania level polygon.
Attributes:
points (list): A list of Points defining the polygon contour.
grass (boolean): A boolean deciding whether or not the polygon is a
grass polygon.
"""
def __init__(self, points: List[Point], grass: bool = False) -> None:
self.points = points
self.grass = grass
def __repr__(self) -> str:
return 'Polygon(points: %s, grass: %s)' % (self.points, self.grass)
def __eq__(self, other_polygon: object) -> bool:
if not isinstance(other_polygon, Polygon):
return NotImplemented
return (self.points == other_polygon.points and
self.grass == other_polygon.grass)
def move_by(self, x: float = 0, y: float = 0) -> None:
self.points = [Point(p.x + x, p.y + y) for p in self.points]
def mirror(self) -> None:
mirror_axis = (self.rightmost_point().x +
self.leftmost_point().x) / 2.0
for p in self.points:
p.x = 2 * mirror_axis - p.x
def flip(self) -> None:
flip_axis = (self.highest_point().y + self.lowest_point().y) / 2.0
for p in self.points:
p.y = 2 * flip_axis - p.y
def rotate(self, angle: float, fixed_point: Optional[Point] = None) -> None:
if fixed_point is None:
fixed_point = self.center_point()
for p in self.points:
norm_x = p.x - fixed_point.x
norm_y = p.y - fixed_point.y
p.x = norm_x * cos(angle) - norm_y * sin(angle) + fixed_point.x
p.y = norm_x * sin(angle) + norm_y * cos(angle) + fixed_point.y
def scale(self, scaler: float) -> None:
fixed_point = Point(self.leftmost_point().x, self.lowest_point().y)
for p in self.points:
p.x = scaler * (p.x - fixed_point.x) + fixed_point.x
p.y = scaler * (p.y - fixed_point.y) + fixed_point.y
def center_point(self) -> Point:
center = Point(0.0, 0.0)
for p in self.points:
center.x += p.x
center.y += p.y
center.x /= len(self.points)
center.y /= len(self.points)
return center
def rightmost_point(self) -> Point:
return max(self.points, key=lambda p: p.x)
def leftmost_point(self) -> Point:
return min(self.points, key=lambda p: p.x)
def highest_point(self) -> Point:
return max(self.points, key=lambda p: p.y)
def lowest_point(self) -> Point:
return min(self.points, key=lambda p: p.y)
def area(self) -> float:
"""
Returns the area of the polygon
"""
return abs(self._signed_area())
def is_ordered_clockwise(self) -> bool:
"""
Returns True if the points of the polygon are ordered clockwise and
False if counterclockwise.
"""
return self._signed_area() < 0
def is_filled(self) -> bool:
"""
Returns True if the interior of the polygon is filled.
"""
return not self.is_ordered_clockwise()
def _signed_area(self) -> float:
"""
Returns the signed area of the polygon.
"""
points = self.points
area = (points[0].x - points[-1].x) * (points[0].y + points[-1].y)
for i in range(len(self.points) - 1):
area += (points[i + 1].x - points[i].x) * (points[i + 1].y + points[i].y)
return area / 2
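# A minimal sketch (illustrative helper and points) of the area/orientation
# helpers above: for the unit square below, _signed_area() evaluates to +1,
# so area() is 1.0, is_ordered_clockwise() is False and the polygon counts
# as filled.
def _example_polygon_orientation():
    square = Polygon([Point(0, 0), Point(0, 1), Point(1, 1), Point(1, 0)])
    assert square.area() == 1.0
    assert not square.is_ordered_clockwise()
    assert square.is_filled()
    return square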
class Top10Time(object):
"""
Represents a top10 time.
Attributes:
time (int): The finished time in hundredths.
kuski (string): The name of the first player.
kuski2 (string): The name of the second player. Should equal kuski for
singleplayer times.
is_multi (boolean): Whether or not the time is a multiplayer time.
"""
def __init__(self,
time: int,
kuski: str,
kuski2: Optional[str] = None,
is_multi: bool = False) -> None:
self.time = time
self.kuski = kuski
self.kuski2 = kuski if kuski2 is None else kuski2
self.is_multi = is_multi
def __repr__(self) -> str:
if self.is_multi:
return ('Top10Time(time: %s, kuski: %s, kuski2: %s)' %
(self.time, self.kuski, self.kuski2))
else:
return 'Top10Time(time: %s, kuski: %s)' % (self.time, self.kuski)
def __eq__(self, other_time: object) -> bool:
if not isinstance(other_time, Top10Time):
return NotImplemented
return (self.time == other_time.time and
self.kuski == other_time.kuski and
self.kuski2 == other_time.kuski2)
class Top10(object):
"""
Represents the complete top10 of a level with both singleplayer
and multiplayer times, up to 10 times of each.
Attributes:
single (list): A list of up to 10 singleplayer Top10Times.
multi (list): A list of up to 10 multiplayer Top10Times.
"""
def __init__(self) -> None:
self.single: List[Top10Time] = []
self.multi: List[Top10Time] = []
def __repr__(self) -> str:
return 'Top10(single: %s, multi: %s)' % (self.single, self.multi)
def sort(self) -> None:
self.single = sorted(self.single, key=lambda t: t.time)[:10]
self.multi = sorted(self.multi, key=lambda t: t.time)[:10]
def merge(self, other_top10: Top10) -> None:
for s in self.single:
for o in other_top10.single:
if s.time == o.time and s.kuski == o.kuski:
other_top10.single.remove(o)
break
self.single.extend([o for o in other_top10.single])
for s in self.multi:
for o in other_top10.multi:
if (s.time == o.time and s.kuski == o.kuski and
s.kuski2 == o.kuski2):
other_top10.multi.remove(o)
break
self.multi.extend([o for o in other_top10.multi])
self.sort()
def to_buffer(self) -> bytes:
self.sort()
return b''.join([
struct.pack('I', len(self.single)),
b''.join([struct.pack('I', t.time) for t in self.single]),
b''.join([struct.pack('I', 0)
for _ in range(10 - len(self.single))]),
b''.join([null_padded(t.kuski, 15) for t in self.single]),
b''.join([null_padded('', 15)
for _ in range(10 - len(self.single))]),
b''.join([null_padded(t.kuski2, 15) for t in self.single]),
b''.join([null_padded('', 15)
for _ in range(10 - len(self.single))]),
struct.pack('I', len(self.multi)),
b''.join([struct.pack('I', t.time) for t in self.multi]),
b''.join([struct.pack('I', 0)
for _ in range(10 - len(self.multi))]),
b''.join([null_padded(t.kuski, 15) for t in self.multi]),
b''.join([null_padded('', 15)
for _ in range(10 - len(self.multi))]),
b''.join([null_padded(t.kuski2, 15) for t in self.multi]),
b''.join([null_padded('', 15)
for _ in range(10 - len(self.multi))]),
])
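# A short sketch (illustrative helper, times and names) of merging two top10
# lists: an entry shared by both is kept only once, and sort() orders the
# result by time and caps it at 10 entries.
def _example_top10_merge():
    mine = Top10()
    mine.single = [Top10Time(1612, "Kuski A"), Top10Time(1700, "Kuski B")]
    theirs = Top10()
    theirs.single = [Top10Time(1612, "Kuski A"), Top10Time(1590, "Kuski C")]
    mine.merge(theirs)
    return [t.time for t in mine.single]  # [1590, 1612, 1700]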
class Level(object):
"""
Represents an Elasto Mania level.
Attributes:
version (string): VERSION_ELMA ('POT14') or VERSION_ACROSS ('POT06').
polygons (list): A list of Polygons in the level.
objects (list): A list of Objects in the level.
pictures (list): A list of Pictures in the level.
level_id (int): A unique unsigned 32bit integer level identifier.
name (string): The name of the level, which should be no longer than 50
characters.
lgr (string): The name of the LGR used for this level, which should be
no longer than 10 characters.
ground_texture (string): The name of the ground texture used for this
level, which should be no longer than 10 characters.
sky_texture (string): The name of the sky texture used for this level,
which should be no longer than 10 characters.
top10 (Top10): A Top10 for the level.
preserve_integrity_values (boolean): Whether or not to unpack and
preserve the existing integrity values, instead of recomputing
them when packing.
integrity (list): A list of four integrity values read from an existing
level. Empty, if preserve_integrity_values is False.
"""
def __init__(self) -> None:
self.version = VERSION_ELMA
self.polygons: List[Polygon] = []
self.objects: List[Obj] = []
self.pictures: List[Picture] = []
self.level_id = random.randint(0, (2 ** 32) - 1)
self.name = 'Unnamed'
self.lgr = 'DEFAULT'
self.ground_texture = 'ground'
self.sky_texture = 'sky'
self.top10 = Top10()
self.preserve_integrity_values = False
self.integrity: List[float] = []
@property
def ground_polygons(self) -> List[Polygon]:
"""
Returns all non-grass polygons of the level.
"""
return [polygon for polygon in self.polygons if not polygon.grass]
@property
def grass_polygons(self) -> List[Polygon]:
"""
Returns all grass polygons of the level.
"""
return [polygon for polygon in self.polygons if polygon.grass]
def as_image(self,
*,
max_width: Optional[int] = LevelRenderer.DEFAULT_WIDTH,
max_height: Optional[int] = LevelRenderer.DEFAULT_HEIGHT,
scale: Optional[float] = None,
padding: int = LevelRenderer.DEFAULT_PADDING,
render_objects: bool = True,
show: bool = False) -> Image.Image:
"""
Render image of the level.
Args:
max_width: max width of the image (ignored if scale is provided)
max_height: max height of the image (ignored if scale is provided)
scale: scaling factor to convert level coordinates to pixels,
overrides max_width and max_height
padding: space around the image in pixels
render_objects: render both objects and polygons if True,
else render only polygons
show: show rendered image if True
"""
if scale:
renderer = LevelRenderer.with_scale(level=self, scale=scale, padding=padding)
else:
renderer = LevelRenderer(level=self, max_width=max_width, max_height=max_height, padding=padding)
if show:
renderer.show(render_objects=render_objects)
return renderer.render(render_objects=render_objects)
def min_x(self) -> float:
"""
Returns the minimum x coordinate of all vertices, pictures and objects.
"""
return min([p.x for p in self._all_points()])
def max_x(self) -> float:
"""
Returns the maximum x coordinate of all vertices, pictures and objects.
"""
return max([p.x for p in self._all_points()])
def min_y(self) -> float:
"""
Returns the minimum y coordinate of all vertices, pictures and objects.
"""
return min([p.y for p in self._all_points()])
def max_y(self) -> float:
"""
Returns the maximum y coordinate of all vertices, pictures and objects.
"""
return max([p.y for p in self._all_points()])
def bounding_box(self) -> BoundingBox:
"""
Returns the bounding box of the level.
"""
return BoundingBox(self.min_x(), self.max_x(), self.min_y(), self.max_y())
def save(self, file: Union[str, Path], allow_overwrite: bool = False, create_dirs: bool = False) -> None:
"""
Save level to a file
Args:
file: path to the file
allow_overwrite: allow overwriting an existing file
create_dirs: create non-existing parent directories of the file
Raises:
FileExistsError: if file exists and allow_overwrite = False
FileNotFoundError: if the parent directory of the file does not exist
and create_dirs = False
"""
file = Path(file)
check_writable_file(file, exist_ok=allow_overwrite, create_dirs=create_dirs)
file.write_bytes(self.pack())
@classmethod
def load(cls, file: Union[str, Path]) -> Level:
"""
Load level from a file
Args:
file: path to a file containing an Elasto Mania level
Returns:
Level object unpacked from the file
Raises:
FileNotFoundError: if the file does not exist
"""
file = Path(file)
if not file.exists():
raise FileNotFoundError(f"File {file} not found.")
level = cls.unpack(file.read_bytes())
return level
def pack(self) -> bytes:
"""
Pack level to its binary representation readable by Elasto Mania
Returns:
Packed level as bytes
"""
is_elma = self.version == VERSION_ELMA
packed_level = elma.packing.pack_level(self, is_elma=is_elma)
return packed_level
@classmethod
def unpack(cls, packed_level: bytes) -> Level:
"""
Unpack level from its binary representation readable by Elasto Mania
Args:
packed_level: packed level as bytes
Returns:
Unpacked Level object
"""
level = elma.packing.unpack_level(packed_level)
return level
def __repr__(self) -> str:
return (('Level(level_id: %s, name: %s, lgr: %s, ' +
'ground_texture: %s, sky_texture: %s)') %
(self.level_id, self.name, self.lgr,
self.ground_texture, self.sky_texture))
def __eq__(self, other_level: object) -> bool:
if not isinstance(other_level, Level):
return NotImplemented
# level_id, integrity and name can differ
return (self.polygons == other_level.polygons and
self.objects == other_level.objects and
self.pictures == other_level.pictures)
def _all_points(self) -> List[Point]:
"""
Returns all coordinate points of vertices, pictures and level objects.
"""
vertex_points = list(itertools.chain(*[polygon.points for polygon in self.polygons]))
picture_points = [pic.point for pic in self.pictures]
object_points = [obj.point for obj in self.objects]
return vertex_points + picture_points + object_points
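# A compact sketch (illustrative helper and geometry, not a validated playable
# level) of putting the pieces above together: one ground polygon plus a start
# object and a flower.
def _example_level():
    level = Level()
    level.name = 'Example'
    level.polygons = [
        Polygon([Point(-10, 0), Point(-10, 10), Point(10, 10), Point(10, 0)]),
    ]
    level.objects = [
        Obj(Point(-8, 5), Obj.START),
        Obj(Point(8, 5), Obj.FLOWER),
    ]
    return level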
class Frame(object):
"""
Represents a single replay frame.
Attributes:
position (Point): The position of the kuski in this frame in level
coordinates.
left_wheel_position (Point): The position of the bike's left wheel in
this frame relative to the position of the kuski.
right_wheel_position (Point): The position of the bike's right wheel in
this frame relative to the position of the kuski.
head_position (Point): The position of the kuski's head in this frame
relative to the position of the kuski.
rotation (int): The rotation of the kuski in 10000ths of a radian.
left_wheel_rotation (int): The rotation of the bike's left wheel in
249/2/pi-ths of a radian.
right_wheel_rotation (int): The rotation of the bike's right wheel in
249/2/pi-ths of a radian.
is_gasing (boolean): Whether or not the bike is gasing in this frame.
is_turned_right (boolean): Whether or not the bike is turned right in
this frame.
spring_sound_effect_volume (int): The spring sound effect volume for
this frame.
"""
def __init__(self):
self.position = Point(0, 0)
self.left_wheel_position = Point(0, 0)
self.right_wheel_position = Point(0, 0)
self.head_position = Point(0, 0)
self.rotation = 0
self.left_wheel_rotation = 0
self.right_wheel_rotation = 0
self.is_gasing = False
self.is_turned_right = False
self.spring_sound_effect_volume = 0
# Needed to preserve unknown bits from rec files
self._gas_and_turn_state = 0
def __repr__(self) -> str:
return ('Frame(position: %s, left_wheel_position: %s, ' +
'right_wheel_position: %s, head_position: %s, ' +
'rotation: %s, left_wheel_rotation: %s, ' +
'right_wheel_rotation: %s, is_gasing: %s, ' +
'is_turned_right: %s, spring_sound_effect_volume: %s)') % (
self.position,
self.left_wheel_position,
self.right_wheel_position,
self.head_position,
self.rotation,
self.left_wheel_rotation,
self.right_wheel_rotation,
self.is_gasing,
self.is_turned_right,
self.spring_sound_effect_volume)
class Event(object):
"""
Abstract base representation of a single replay event.
Attributes:
time (float): The time at which the event occurs, given in
0.001/(0.182*0.0024)ths of a second.
"""
__metaclass__ = ABCMeta
def __init__(self) -> None:
self.time = 0.0
def __repr__(self) -> str:
return '%s(time: %s)' % (self.__class__.__name__, self.time)
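# Taking the unit description of Event.time above at face value, a sketch of
# the conversion to seconds would be (illustrative only):
#
#     seconds = event.time * (0.182 * 0.0024) / 0.001
#
# e.g. an event at time 100.0 would sit roughly 43.7 seconds into the replay.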
class ObjectTouchEvent(Event):
"""
Represents a single replay object touch event.
Attributes:
object_number (int): Index number of the touched object
"""
def __init__(self) -> None:
super().__init__()
self.object_number = 0
def __repr__(self) -> str:
return '%s(time: %s, object_number: %s)' % (
self.__class__.__name__, self.time, self.object_number)
class TurnEvent(Event):
"""
Represents a single replay turn event.
"""
class LeftVoltEvent(Event):
"""
Represents a single replay left volt event.
"""
class RightVoltEvent(Event):
"""
Represents a single replay right volt event.
"""
class GroundTouchEvent(Event):
"""
Represents a single replay ground touch event.
Attributes:
event_sound_volume (float): The volume of the sound caused by the
impact of touching the ground, within the range [0, 0.99].
"""
def __init__(self) -> None:
super().__init__()
self.event_sound_volume = 0.0
class AppleTouchEvent(Event):
"""
Represents an apple touch event.
This is always generated together with the ObjectTouchEvent when touching an apple.
"""
class Replay(object):
"""
Represents an Elasto Mania replay.
Attributes:
is_finished (boolean): Whether or not the replay is (probably) finished.
is_multi (boolean): Whether or not the replay is a multiplayer replay.
is_flagtag (boolean): Whether or not the replay is a flagtag replay.
level_id (int): The unique identifier of the level this replay is from.
level_name (string): The name of the level this replay is from.
frames (list): The frames of this replay.
events (list): The events of this replay.
time (float): The time of this replay in seconds.
"""
def __init__(self) -> None:
self.is_finished = False
self.is_multi = False
self.is_flagtag = False
self.level_id = 0
self.level_name = ''
self.frames: List[Frame] = []
self.events: List[Event] = []
self.time = 0.0
def save(self, file: Union[str, Path], allow_overwrite: bool = False, create_dirs: bool = False) -> None:
"""
Save replay to a file
Args:
file: path to the file
allow_overwrite: allow overwriting an existing file
create_dirs: create non-existing parent directories of the file
Raises:
FileExistsError: if file exists and allow_overwrite = False
FileNotFoundError: if the parent directory of the file does not exist
and create_dirs = False
"""
file = Path(file)
check_writable_file(file, exist_ok=allow_overwrite, create_dirs=create_dirs)
file.write_bytes(self.pack())
@classmethod
def load(cls, file: Union[str, Path]) -> Replay:
"""
Load replay from a file
Args:
file: path to a file containing an Elasto Mania replay
Returns:
Replay object unpacked from the file
Raises:
FileNotFoundError: if the file does not exist
"""
file = Path(file)
if not file.exists():
raise FileNotFoundError(f"File {file} not found.")
replay = cls.unpack(file.read_bytes())
return replay
def pack(self) -> bytes:
"""
Pack replay to its binary representation readable by Elasto Mania
Returns:
Packed replay as bytes
"""
packed_replay = elma.packing.pack_replay(self)
return packed_replay
@classmethod
def unpack(cls, packed_replay: bytes) -> Replay:
"""
Unpack replay from its binary representation readable by Elasto Mania
Args:
packed_replay: packed replay as bytes
Returns:
Unpacked Replay object
"""
replay = elma.packing.unpack_replay(packed_replay)
return replay
def __repr__(self) -> str:
return (
'Replay(is_multi: %s, is_flagtag: %s, level_id: %s, ' +
'level_name: %s, len(frames): %s, len(events): %s)') % (
self.is_multi, self.is_flagtag, self.level_id, self.level_name,
len(self.frames), len(self.events))
|
|
# coding: utf-8
from __future__ import unicode_literals, print_function
from copy import deepcopy
import oar.kao.scheduling
from oar.lib import config
from oar.lib.interval import (intersec, itvs_size, extract_n_scattered_block_itv,
aggregate_itvs, ordered_ids2itvs)
import pickle
import os
try:
import zerorpc
except ImportError:
zerorpc = None
# Set undefined config value to default one
config.setdefault_config({
'COORM_DEFAULT_TIMEOUT': 10,
})
def find_default(itvs_avail, hy_res_rqts, hy, beginning, *find_args, **find_kwargs):
"""Simple wrap function to default function for test purpose"""
return oar.kao.scheduling.find_resource_hierarchies_job(itvs_avail, hy_res_rqts, hy)
def assign_default(slots_set, job, hy, min_start_time, *assign_args, **assign_kwargs):
"""Simple wrap function to default function for test purpose"""
return oar.kao.scheduling.assign_resources_mld_job_split_slots(slots_set, job, hy, min_start_time)
def find_begin(itvs_avail, hy_res_rqts, hy, beginning, *find_args, **find_kwargs):
"""Simple function to test beginning value which is set to True is the slot begins the slotset (slotset.begin == slots[1].b).
It's only for test/example purpose"""
if beginning:
return oar.kao.scheduling.find_resource_hierarchies_job(itvs_avail, hy_res_rqts, hy)
else:
return [(1,16)]
def find_treematch(itvs_avail, hy_res_rqts, hy, beginning, matrix_file ):
res_set = oar.kao.scheduling.find_resource_hierarchies_job(itvs_avail,hy_res_rqts,hy)
if res_set == []:
return []
if beginning:
here_path = os.path.dirname(os.path.realpath(__file__))
mapping_exe = " {}/../../treematch-0.3.0/src/treematch/mapping".format(here_path)
topo_file = " {}/../../treematch-0.3.0/topo.tgt".format(here_path)
restriction_file = "{}/../../treematch-0.3.0/tmp/restrictions.bind".format(here_path)
#create a temporary restriction file
with open(restriction_file, "w") as rf:
for (a,b) in itvs_avail:
for i in range(a,b+1):
rf.write(str(i) + " ")
cmd = "{} -t {} -c {} -b {}".format(mapping_exe,topo_file,matrix_file,restriction_file)
row_result = os.popen(cmd).read()
os.remove(restriction_file)
if(row_result == ""):
print("find_treematch : invalid arguments")
return []
elif (row_result[0:9] != "TreeMatch"):
print(row_result)
return []
#condition the row result from treematch
row_result = (row_result.split(':')[1]).split(',')
row_result = [int(val) for val in row_result]
return ordered_ids2itvs(row_result)
else:
return res_set
def find_contiguous_1h(itvs_avail, hy_res_rqts, hy, beginning):
# NOT FOR PRODUCTION USE
# Note: supports only one resource group and an ordered resource_id hierarchy level
result = []
hy_level_nbs, constraints = hy_res_rqts[0] # one resource group
l_name, n = hy_level_nbs[0] # one hierarchy level
# hy_level = hy[l_name]
itvs_cts_slots = aggregate_itvs(intersec(constraints, itvs_avail))
if l_name == "resource_id":
for itv in itvs_cts_slots:
if (itv[1] - itv[0] + 1) >= n:
result = [(itv[0], itv[0]+n-1)]
break
return result
def find_contiguous_sorted_1h(itvs_avail, hy_res_rqts, hy, beginning):
# NOT FOR PRODUCTION USE
# Note: supports only one resource group and an ordered resource_id hierarchy level
result = []
hy_level_nbs, constraints = hy_res_rqts[0] # one resource group
l_name, n = hy_level_nbs[0] # one hierarchy level
# hy_level = hy[l_name]
itvs_unsorted = aggregate_itvs(intersec(constraints, itvs_avail))
lg = len(itvs_unsorted)
ids_sorted = sorted(range(lg), key=lambda k: itvs_unsorted[k][1] - itvs_unsorted[k][0])
if l_name == "resource_id":
for i in ids_sorted:
itv = itvs_unsorted[i]
if (itv[1] - itv[0] + 1) >= n:
result = [(itv[0], itv[0]+n-1)]
break
return result
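# A small sketch (illustrative helper and values, assuming intersec and
# aggregate_itvs behave as plain interval intersection/merge) of the shapes the
# find_* helpers above expect: resources are (begin, end) id intervals, and
# each resource request is a (hierarchy_levels, constraints) pair, e.g.
# "4 resource_id out of ids 1-32".
def _example_find_contiguous():
    itvs_avail = [(1, 8), (17, 32)]
    hy_res_rqts = [([("resource_id", 4)], [(1, 32)])]
    # expected to return [(1, 4)]
    return find_contiguous_1h(itvs_avail, hy_res_rqts, {}, True)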
#
# LOCAL
#
def find_resource_n_h_local(itvs, hy, rqts, top, h, h_bottom):
n = rqts[h+1]
size_bks = []
avail_bks = []
for top_itvs in top:
avail_itvs = intersec(top_itvs, itvs)
avail_bks.append(avail_itvs)
size_bks.append(itvs_size(avail_itvs))
sorted_ids = sorted(range(len(avail_bks)), key=lambda k: size_bks[k])
for i, idx in enumerate(sorted_ids):
if size_bks[idx] >= n:
res_itvs = []
k = 0
for itv in avail_bks[idx]:
size_itv = itv[1] - itv[0] + 1
if (k + size_itv) < n:
res_itvs.append(itv)
k += size_itv
else:
res_itvs.append((itv[0], itv[0] + (n-k-1)))
return res_itvs
return []
def find_resource_hierarchies_scattered_local(itvs, hy, rqts):
l_hy = len(hy)
# print "find itvs: ", itvs, rqts[0]
if (l_hy == 1):
return extract_n_scattered_block_itv(itvs, hy[0], rqts[0])
else:
return find_resource_n_h_local(itvs, hy, rqts, hy[0], 0, l_hy)
def find_local(itvs_slots, hy_res_rqts, hy, beginning):
""" 2 Level of Hierarchy supported with sorting by increasing blocks' size"""
result = []
for hy_res_rqt in hy_res_rqts:
(hy_level_nbs, constraints) = hy_res_rqt
hy_levels = []
hy_nbs = []
for hy_l_n in hy_level_nbs:
(l_name, n) = hy_l_n
hy_levels.append(hy[l_name])
hy_nbs.append(n)
itvs_cts_slots = intersec(constraints, itvs_slots)
res = find_resource_hierarchies_scattered_local(itvs_cts_slots, hy_levels, hy_nbs)
if res:
result.extend(res)
else:
return []
return result
def assign_one_time_find_mld(slots_set, job, hy, min_start_time):
"""Assign where the custom find function is used only for the first moldable request (test helper, not for production use)."""
# NOT FOR PRODUCTION USE
flag_find = True
prev_t_finish = 2 ** 32 - 1 # large enough
prev_res_set = []
prev_res_rqt = []
slots = slots_set.slots
prev_start_time = slots[1].b
for res_rqt in job.mld_res_rqts:
mld_id, walltime, hy_res_rqts = res_rqt
res_set, sid_left, sid_right = oar.kao.scheduling.find_first_suitable_contiguous_slots(
slots_set, job, res_rqt, hy, min_start_time)
if res_set == []: # no suitable time*resources found
job.res_set = []
job.start_time = -1
job.moldable_id = -1
return
# print("after find fisrt suitable")
t_finish = slots[sid_left].b + walltime
if (t_finish < prev_t_finish):
prev_start_time = slots[sid_left].b
prev_t_finish = t_finish
prev_res_set = res_set
prev_res_rqt = res_rqt
prev_sid_left = sid_left
prev_sid_right = sid_right
if flag_find:
# The next round will use the default find function
# oar.kao.scheduling.find_resource_hierarchies_job
flag_find = False
job.find = False
job.find = True  # in case the job is not reloaded for the next schedule round
(mld_id, walltime, hy_res_rqts) = prev_res_rqt
job.moldable_id = mld_id
job.res_set = prev_res_set
job.start_time = prev_start_time
job.walltime = walltime
# Take advantage of job.starttime = slots[prev_sid_left].b
# print(prev_sid_left, prev_sid_right, job.moldable_id , job.res_set,)
# job.start_time , job.walltime, job.mld_id
slots_set.split_slots(prev_sid_left, prev_sid_right, job)
# return a value other than None to indicate a successful assignment
return prev_sid_left, prev_sid_right, job
def assign_one_time_find(slots_set, job, hy, min_start_time):
"""Assign that runs find once with the custom function and once with the default one, keeping the earliest finishing result (test helper, not for production use)."""
# NOT FOR PRODUCTION USE
flag_find = True
prev_t_finish = 2 ** 32 - 1 # large enough
prev_res_set = []
prev_res_rqt = []
slots = slots_set.slots
prev_start_time = slots[1].b
res_rqt = job.mld_res_rqts[0]
res_rqt_copy = deepcopy(res_rqt) # to keep set of intervals
while True:
mld_id, walltime, hy_res_rqts = res_rqt
res_set, sid_left, sid_right = oar.kao.scheduling.find_first_suitable_contiguous_slots(
slots_set, job, res_rqt, hy, min_start_time)
if res_set == []: # no suitable time*resources found
job.res_set = []
job.start_time = -1
job.moldable_id = -1
return
# print("after find fisrt suitable")
t_finish = slots[sid_left].b + walltime
if (t_finish < prev_t_finish):
prev_start_time = slots[sid_left].b
prev_t_finish = t_finish
prev_res_set = res_set
prev_res_rqt = res_rqt
prev_sid_left = sid_left
prev_sid_right = sid_right
if flag_find:
# The next round will use the default find function
# oar.kao.scheduling.find_resource_hierarchies_job
flag_find = False
job.find = False
res_rqt = res_rqt_copy
else:
break
job.find = True  # in case the job is not reloaded for the next schedule round
(mld_id, walltime, hy_res_rqts) = prev_res_rqt
job.moldable_id = mld_id
job.res_set = prev_res_set
job.start_time = prev_start_time
job.walltime = walltime
# Take advantage of job.starttime = slots[prev_sid_left].b
# print(prev_sid_left, prev_sid_right, job.moldable_id , job.res_set,)
# job.start_time , job.walltime, job.mld_id
slots_set.split_slots(prev_sid_left, prev_sid_right, job)
# return a value other than None to indicate a successful assignment
return prev_sid_left, prev_sid_right, job
def find_coorm(itvs_avail, hy_res_rqts, hy, beginning, *find_args, **find_kwargs):
if zerorpc is None:
return find_default(itvs_avail, hy_res_rqts, hy, beginning)
c = zerorpc.Client()
protocol, ip, port = find_args[:3]
c.connect("%s://%s:%s" % (protocol, ip, port))
return c.find_resource_hierarchies(
itvs_avail,
hy_res_rqts,
hy,
)
def assign_coorm(slots_set, job, hy, min_start_time,
*assign_args, **assign_kwargs):
if zerorpc is None:
return assign_default(slots_set, job, hy, min_start_time)
timeout = assign_kwargs.get('timeout', None)
if timeout is not None and timeout.isdigit():
default_timeout = int(timeout)
if config['COORM_DEFAULT_TIMEOUT'] < default_timeout:
default_timeout = config['COORM_DEFAULT_TIMEOUT']
else:
default_timeout = None
# Init connection with the COORM application
c = zerorpc.Client(timeout=default_timeout)
protocol, ip, port = assign_args[:3]
c.connect("%s://%s:%s" % (protocol, ip, port))
# Convert the job to dict, to preserve values after de/serialisation
missing = object()
dict_job = job.to_dict()
for k in ('mld_res_rqts', 'key_cache', 'ts', 'ph', 'find'):
if getattr(job, k, missing) is not missing:
dict_job[k] = getattr(job, k)
# Remote call
prev_sid_left, prev_sid_right, dict_job = c.assign_resources(
pickle.dumps(slots_set),
dict_job,
hy,
min_start_time
)
# Propagate modified job values outside
for k in ('moldable_id', 'res_set', 'start_time', 'walltime'):
if k in dict_job:
setattr(job, k, dict_job.get(k))
# Split SlotSet to add our reservation
slots_set.split_slots(prev_sid_left, prev_sid_right, job)
return prev_sid_left, prev_sid_right, job
#
#
#
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the MapVectorization optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.data.python.ops import optimization
from tensorflow.python.client import session
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class MapVectorizationTest(test_base.DatasetTestBase, parameterized.TestCase):
def _get_test_datasets(self,
base_dataset,
map_fn,
num_parallel_calls=None,
expect_optimized=True):
"""Given base dataset and map fn, creates test datasets.
Returns a tuple of (unoptimized dataset, optimized dataset). The
unoptimized dataset has the assertion that Batch follows Map. The optimized
dataset has the assertion that Map follows Batch, and has the
"map_vectorization" optimization applied.
Args:
base_dataset: Input dataset to map->batch
map_fn: Map function to use
num_parallel_calls: (Optional.) num_parallel_calls argument for map
expect_optimized: (Optional.) Whether we expect the optimization to take
place, in which case we will assert that Batch is followed by Map,
otherwise Map followed by Batch. Defaults to True.
Returns:
Tuple of (unoptimized dataset, optimized dataset).
"""
map_node_name = "Map" if num_parallel_calls is None else "ParallelMap"
batch_size = 100
def _make_dataset(node_names):
return base_dataset.apply(optimization.assert_next(node_names)).map(
map_fn, num_parallel_calls=num_parallel_calls).batch(batch_size)
unoptimized = _make_dataset([map_node_name, "Batch"])
optimized = _make_dataset(["Batch", map_node_name] if expect_optimized else
[map_node_name, "Batch"]).apply(
optimization.optimize(["map_vectorization"]))
return unoptimized, optimized
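# As a sketch of the rewrite this optimization performs on such a pipeline:
# an input written as
#     dataset.map(fn).batch(batch_size)
# is turned by "map_vectorization" into (roughly)
#     dataset.batch(batch_size).map(vectorized_fn)
# so that fn is applied once per batch instead of once per element.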
@parameterized.named_parameters(
("Basic", lambda x: (x, x + 1), None),
("Parallel", lambda x: (x, x + 1), 12),
("Gather", lambda x: array_ops.gather(x, 0), 12),
)
def testOptimization(self, map_fn, num_parallel_calls):
base_dataset = dataset_ops.Dataset.from_tensor_slices([[1, 2],
[3, 4]]).repeat(5)
unoptimized, optimized = self._get_test_datasets(base_dataset, map_fn,
num_parallel_calls)
self.assertDatasetsEqual(unoptimized, optimized)
def testOptimizationBadMapFn(self):
# Test map functions that give an error
def map_fn(x):
# x has leading dimension 5, this will raise an error
return array_ops.gather(x, 10)
base_dataset = dataset_ops.Dataset.range(5).repeat(5).batch(
5, drop_remainder=True)
_, optimized = self._get_test_datasets(base_dataset, map_fn)
nxt = optimized.make_one_shot_iterator().get_next()
with self.assertRaisesRegexp(errors.InvalidArgumentError,
r"indices = 10 is not in \[0, 5\)"):
self.evaluate(nxt)
def testOptimizationWithCapturedInputs(self):
# Tests that vectorization works with captured inputs
def map_fn(x):
return x + y
y = constant_op.constant(1, shape=(2,))
base_dataset = dataset_ops.Dataset.from_tensor_slices([[1, 2],
[3, 4]]).repeat(5)
# TODO(rachelim): when this optimization works, turn on expect_optimized
unoptimized, optimized = self._get_test_datasets(
base_dataset, map_fn, expect_optimized=False)
self.assertDatasetsEqual(optimized, unoptimized)
def testOptimizationIgnoreStateful(self):
def map_fn(x):
with ops.control_dependencies([check_ops.assert_equal(x, 0)]):
return array_ops.identity(x)
base_dataset = dataset_ops.Dataset.from_tensor_slices([[1, 2],
[3, 4]]).repeat(5)
unoptimized, optimized = self._get_test_datasets(
base_dataset, map_fn, expect_optimized=False)
self.assertDatasetsRaiseSameError(
unoptimized, optimized, errors.InvalidArgumentError,
[("OneShotIterator", "OneShotIterator_1", 1),
("IteratorGetNext", "IteratorGetNext_1", 1)])
def testOptimizationIgnoreRagged(self):
# Make sure we ignore inputs that might not be uniformly sized
def map_fn(x):
return array_ops.gather(x, 0)
# output_shape = (?,)
base_dataset = dataset_ops.Dataset.range(20).batch(3, drop_remainder=False)
unoptimized, optimized = self._get_test_datasets(
base_dataset, map_fn, expect_optimized=False)
self.assertDatasetsEqual(unoptimized, optimized)
def testOptimizationIgnoreRaggedMap(self):
# Don't optimize when the output of the map fn shapes are unknown.
def map_fn(x):
return array_ops.tile(x, x)
base_dataset = dataset_ops.Dataset.range(20).batch(1, drop_remainder=True)
unoptimized, optimized = self._get_test_datasets(
base_dataset, map_fn, expect_optimized=False)
self.assertDatasetsRaiseSameError(
unoptimized, optimized, errors.InvalidArgumentError,
[("OneShotIterator", "OneShotIterator_1", 1),
("IteratorGetNext", "IteratorGetNext_1", 1)])
class MapVectorizationBenchmark(test.Benchmark):
# TODO(rachelim): Add a benchmark for more expensive transformations, such as
# vgg_preprocessing.
def _run(self, x, num_iters=100, name=None):
deltas = []
with session.Session() as sess:
for _ in range(5):
# Warm up session...
sess.run(x)
for _ in range(num_iters):
start = time.time()
sess.run(x)
end = time.time()
deltas.append(end - start)
median_time = np.median(deltas)
self.report_benchmark(iters=num_iters, wall_time=median_time, name=name)
return median_time
def _compare(self, input_dataset, map_fn, batch_size, input_size, str_id):
num_elems = np.prod(input_size)
name_template = "{}__batch_size_{}_input_size_{}_{}"
unoptimized = input_dataset.map(map_fn).batch(batch_size)
unoptimized_op = unoptimized.make_one_shot_iterator().get_next()
optimized = unoptimized.apply(optimization.optimize(["map_vectorization"]))
optimized_op = optimized.make_one_shot_iterator().get_next()
unoptimized_time = self._run(
unoptimized_op,
name=name_template.format(str_id, batch_size, num_elems, "unoptimized"))
optimized_time = self._run(
optimized_op,
name=name_template.format(str_id, batch_size, num_elems, "optimized"))
print("Batch size: {}\n"
"Input size: {}\n"
"Transformation: {}\n"
"Speedup: {}\n".format(batch_size, input_size, str_id,
(unoptimized_time / optimized_time)))
# Known cheap functions
def benchmarkIdentity(self):
self._benchmark_helper(lambda *args: [array_ops.identity(x) for x in args],
"identity")
def benchmarkAddConst(self):
self._benchmark_helper(lambda *args: [x + 1 for x in args], "add_const")
def benchmarkSelect(self):
self._benchmark_helper(lambda *args: args[0], "select")
def benchmarkCast(self):
self._benchmark_helper(
lambda *args: [math_ops.cast(x, dtypes.float64) for x in args], "cast")
def _benchmark_helper(self, map_fn, str_id):
input_sizes = [(10, 10, 3), (10, 100, 300)]
batch_size = 1000
for input_size in input_sizes:
input_dataset = dataset_ops.Dataset.from_tensor_slices(
(np.random.rand(*input_size), np.random.rand(*input_size))).repeat()
self._compare(input_dataset, map_fn, batch_size, input_size, str_id)
if __name__ == "__main__":
test.main()
|
|
"""My purpose in life is to harvest the WEPP env (erosion output) files
The arguments to call me require the SCENARIO to be provided, but you can
also do any of the following:
# See usage
python env2database.py -h
"""
import os
import re
import argparse
import datetime
from multiprocessing import Pool
import sys
import numpy as np
import pandas as pd
from tqdm import tqdm
import geopandas as gpd
from rasterstats import zonal_stats
from affine import Affine
from pyiem import dep as dep_utils
from pyiem.util import get_dbconn, logger
LOG = logger()
PRECIP_AFF = Affine(0.01, 0.0, dep_utils.WEST, 0.0, -0.01, dep_utils.NORTH)
CONFIG = {"subset": False}
# Maximum precip value allowed, will alert otherwise, see dailyerosion/dep#65
PRECIP_CEILING = 750.0
def find_huc12s(scenario):
"""yield a listing of huc12s with output!"""
if os.path.isfile("myhucs.txt"):
LOG.warning("Using myhucs.txt to guide processing...")
CONFIG["subset"] = True
return [s.strip() for s in open("myhucs.txt").readlines()]
res = []
for huc8 in os.listdir("/i/%s/env" % (scenario,)):
for huc12 in os.listdir("/i/%s/env/%s" % (scenario, huc8)):
res.append(huc8 + huc12)
return res
def readfile(fn, lengths):
"""Our env reader."""
try:
df = dep_utils.read_env(fn)
except Exception as exp:
LOG.info("ABORT: Attempting to read: %s resulted in: %s", fn, exp)
return None
key = int(fn.split("/")[-1].split(".")[0].split("_")[1])
df["delivery"] = df["sed_del"] / lengths[key]
return df
def determine_dates(args):
"""Convert what `argparser` provided us into a list of dates."""
res = []
dateformat = re.compile("^(?P<yr>[0-9]{4})-(?P<mo>[0-9]+)-(?P<dy>[0-9]+)$")
monthformat = re.compile("^(?P<yr>[0-9]{4})-(?P<mo>[0-9]+)$")
lastdate = datetime.date.today() - datetime.timedelta(days=1)
for date in args.date:
# Option 1: YYYY-m-d
m = dateformat.match(date)
if m:
d = m.groupdict()
dt = pd.Timestamp(
year=int(d["yr"]), month=int(d["mo"]), day=int(d["dy"])
)
if dt not in res:
res.append(dt)
LOG.debug("conv %s to %s", date, res[-1])
continue
# Option 2: all
if date == "all":
res.extend(pd.date_range("2007/01/01", lastdate))
LOG.debug("all dates size %s", len(res))
continue
# Option 3: A month's worth
m = monthformat.match(date)
if m:
d = m.groupdict()
sts = datetime.date(int(d["yr"]), int(d["mo"]), 1)
ets = sts + datetime.timedelta(days=35)
ets = ets.replace(day=1) - datetime.timedelta(days=1)
res.extend(pd.date_range(sts, min([lastdate, ets])))
return res
def compute_res(df, date, slopes, qc_precip):
"""Compute things"""
allhits = slopes == len(df.index)
slopes = float(slopes)
# NB: code was added to WEPP to output every precipitation/runoff event,
# so the average precip here is more accurate than before.
return dict(
date=date,
count=len(df.index),
min_precip=(df.precip.min() if allhits else 0),
avg_precip=(df.precip.sum() / slopes),
max_precip=df.precip.max(),
min_loss=(df.av_det.min() if allhits else 0),
avg_loss=(df.av_det.sum() / slopes),
max_loss=df.av_det.max(),
min_runoff=(df.runoff.min() if allhits else 0),
avg_runoff=(df.runoff.sum() / slopes),
max_runoff=df.runoff.max(),
min_delivery=(df.delivery.min() if allhits else 0),
avg_delivery=(df.delivery.sum() / slopes),
max_delivery=df.delivery.max(),
qc_precip=qc_precip,
)
def load_precip(dates, huc12s):
"""Compute the HUC12 spatially averaged precip
This provides the `qc_precip` value stored in the database for each HUC12,
so that we have complete precipitation accounting as the other database
table fields are based on WEPP output, which does not report all precip
events.
Args:
dates (list<pd.Timestamp>): the dates we need precip data for
huc12s (list): listing of huc12s of interest, use if CONFIG['subset']
Returns:
dict of huc12 -> list of precip values, one per entry in dates
"""
# 1. Build GeoPandas DataFrame of HUC12s of interest
idep = get_dbconn("idep")
huc12df = gpd.GeoDataFrame.from_postgis(
"""
SELECT huc_12, ST_Transform(simple_geom, 4326) as geo
from huc12 WHERE scenario = 0
""",
idep,
index_col="huc_12",
geom_col="geo",
)
if CONFIG["subset"]:
huc12df = huc12df.loc[huc12s]
# 2. Loop over dates
res = {}
progress = tqdm(dates, disable=(not sys.stdout.isatty()))
for date in progress:
progress.set_description(date.strftime("%Y-%m-%d"))
fn = date.strftime("/mnt/idep2/data/dailyprecip/%Y/%Y%m%d.npy")
if not os.path.isfile(fn):
LOG.info("Missing precip: %s", fn)
for huc12 in huc12df.index.values:
d = res.setdefault(huc12, [])
d.append(0)
continue
pcp = np.flipud(np.load(fn))
# nodata here represents the value that is set to missing within the
# source dataset!, setting it to zero has strange side effects
zs = zonal_stats(
huc12df["geo"], pcp, affine=PRECIP_AFF, nodata=-1, all_touched=True
)
i = 0
for huc12, _ in huc12df.itertuples():
d = res.setdefault(huc12, [])
if zs[i]["mean"] > PRECIP_CEILING:
LOG.info("%s precip %.2f > QC, zeroing", huc12, zs[i]["mean"])
zs[i]["mean"] = 0.0
d.append(zs[i]["mean"])
i += 1
return res
def load_lengths(scenario):
"""Build out our flowpath lengths."""
sdf = dep_utils.load_scenarios()
idep = get_dbconn("idep")
icursor = idep.cursor()
res = {}
icursor.execute(
"""
SELECT huc_12, fpath, ST_Length(geom) from flowpaths where
scenario = %s
""",
(int(sdf.loc[scenario, "flowpath_scenario"]),),
)
for row in icursor:
d = res.setdefault(row[0], dict())
d[row[1]] = row[2]
return res
def delete_previous_entries(icursor, scenario, huc12, dates):
"""Remove whatever previous data we have for this huc12 and dates"""
if len(dates) > 366:
# Means we are running for 'all'
icursor.execute(
"""
DELETE from results_by_huc12 WHERE
scenario = %s and huc_12 = %s
""",
(scenario, huc12),
)
else:
icursor.execute(
"""
DELETE from results_by_huc12 WHERE
valid in %s and scenario = %s and huc_12 = %s
""",
(tuple(dates), scenario, huc12),
)
return icursor.rowcount
def save_results(icursor, scenario, huc12, df, dates):
"""Save our output to the database"""
inserts = 0
skipped = len(dates) - len(df.index)
for _, row in df.iterrows():
# test both sides of the coin to see that we can indeed skip dumping
# this date to the database.
if row["qc_precip"] < 0.254 and row["count"] == 0:
skipped += 1
continue
inserts += 1
icursor.execute(
"""
INSERT into results_by_huc12
(huc_12, valid, scenario,
min_precip, avg_precip, max_precip,
min_loss, avg_loss, max_loss,
min_runoff, avg_runoff, max_runoff,
min_delivery, avg_delivery, max_delivery,
qc_precip) VALUES
(%s, %s, %s,
coalesce(%s, 0), coalesce(%s, 0), coalesce(%s, 0),
coalesce(%s, 0), coalesce(%s, 0), coalesce(%s, 0),
coalesce(%s, 0), coalesce(%s, 0), coalesce(%s, 0),
coalesce(%s, 0), coalesce(%s, 0), coalesce(%s, 0), %s)
""",
(
huc12,
row["date"],
scenario,
row["min_precip"],
row["avg_precip"],
row["max_precip"],
row["min_loss"],
row["avg_loss"],
row["max_loss"],
row["min_runoff"],
row["avg_runoff"],
row["max_runoff"],
row["min_delivery"],
row["avg_delivery"],
row["max_delivery"],
row["qc_precip"],
),
)
return inserts, skipped
def update_metadata(scenario, dates):
"""Update database property for this scenario."""
pgconn = get_dbconn("idep")
icursor = pgconn.cursor()
maxdate = max(dates)
icursor.execute(
"""
SELECT value from properties where
key = 'last_date_%s'
"""
% (scenario,)
)
if icursor.rowcount == 0:
icursor.execute(
"""
INSERT into properties(key, value)
values ('last_date_%s', '%s')
"""
% (scenario, maxdate.strftime("%Y-%m-%d"))
)
icursor.execute(
"""
UPDATE properties
SET value = '%s' WHERE key = 'last_date_%s' and value < '%s'
"""
% (
maxdate.strftime("%Y-%m-%d"),
scenario,
maxdate.strftime("%Y-%m-%d"),
)
)
icursor.close()
pgconn.commit()
def do_huc12(arg):
"""Process a huc12's worth of WEPP output files"""
scenario, huc12, lengths, dates, precip = arg
pgconn = get_dbconn("idep", connect_timeout=60, allow_failover=False)
icursor = pgconn.cursor()
basedir = "/i/%s/env/%s/%s" % (scenario, huc12[:8], huc12[8:])
frames = [
readfile(basedir + "/" + f, lengths) for f in os.listdir(basedir)
]
if not frames or any([f is None for f in frames]):
return huc12, None, None, None
# Push all dataframes into one
df = pd.concat(frames)
if df.empty:
LOG.info("FAIL huc12: %s resulted in empty data frame", huc12)
return huc12, None, None, None
df.fillna(0, inplace=True)
hillslopes = len(frames)
rows = []
deleted = delete_previous_entries(icursor, scenario, huc12, dates)
for i, date in enumerate(dates):
# df['date'] is datetime64, so need to cast
df2 = df[df["date"] == pd.Timestamp(date)]
# We have no data, any previous entries were deleted above already
qc_precip = precip[i]
if df2.empty and qc_precip == 0:
continue
# Do computation
rows.append(compute_res(df2, date, hillslopes, qc_precip))
if not rows:
icursor.close()
pgconn.commit()
return huc12, 0, len(dates), deleted
# save results
df = pd.DataFrame(rows)
# Prevent any NaN values
df.fillna(0, inplace=True)
inserts, skipped = save_results(icursor, scenario, huc12, df, dates)
icursor.close()
pgconn.commit()
return huc12, inserts, skipped, deleted
def usage():
"""Create the argparse instance."""
parser = argparse.ArgumentParser("Send WEPP env info to the database")
parser.add_argument("-s", "--scenario", required=True, type=int)
parser.add_argument("-d", "--date", required=True, action="append")
return parser
def main(argv):
"""Go Main Go."""
parser = usage()
args = parser.parse_args(argv[1:])
lengths = load_lengths(args.scenario)
dates = determine_dates(args)
huc12s = find_huc12s(args.scenario)
precip = load_precip(dates, huc12s)
jobs = []
for huc12 in huc12s:
if huc12 not in precip:
LOG.info("Skipping huc12 %s with no precip", huc12)
continue
jobs.append(
[args.scenario, huc12, lengths[huc12], dates, precip[huc12]]
)
# Begin the processing work now!
# NB: Usage of a ThreadPool here ended in tears (so slow)
totalinserts = 0
totalskipped = 0
totaldeleted = 0
with Pool() as pool:
for huc12, inserts, skipped, deleted in tqdm(
pool.imap_unordered(do_huc12, jobs),
total=len(jobs),
disable=(not sys.stdout.isatty()),
):
if inserts is None:
LOG.info("ERROR: huc12 %s returned 0 data", huc12)
continue
totalinserts += inserts
totalskipped += skipped
totaldeleted += deleted
LOG.info(
"env2database.py inserts: %s skips: %s deleted: %s",
totalinserts,
totalskipped,
totaldeleted,
)
update_metadata(args.scenario, dates)
if __name__ == "__main__":
main(sys.argv)
def test_dohuc12():
"""Can we process a huc12"""
lengths = load_lengths(0)
myhuc = "102400130105"
res, _, _, _ = do_huc12(
[0, myhuc, lengths[myhuc], [datetime.date(2014, 9, 9)], [0]]
)
assert res == myhuc
def test_one_date():
"""Can we properly parse dates."""
parser = usage()
args = parser.parse_args(["-s", "0", "--date", "2019-12-03"])
dates = determine_dates(args)
assert len(dates) == 1
def test_dup_date():
"""Can we properly parse dates."""
parser = usage()
args = parser.parse_args(
["-s", "0", "--date", "2019-12-03", "--date", "2019-12-03"]
)
dates = determine_dates(args)
assert len(dates) == 1
def test_one_month():
"""Can we properly one month."""
parser = usage()
args = parser.parse_args(["-s", "0", "--date", "2019-11"])
dates = determine_dates(args)
assert len(dates) == 30
assert dates[0] == pd.Timestamp("2019/11/01")
assert dates[-1] == pd.Timestamp("2019/11/30")
def test_all_dates():
"""Can we properly do all."""
parser = usage()
args = parser.parse_args(["-s", "0", "--date", "all"])
dates = determine_dates(args)
assert len(dates) > 600 # arb
assert dates[0] == pd.Timestamp("2007/01/01")
|
|
services = {
"tcpmux": 1,
"echo": 7,
"discard": 9,
"systat": 11,
"daytime": 13,
"netstat": 15,
"qotd": 17,
"msp": 18,
"chargen": 19,
"ftp-data": 20,
"ftp": 21,
"ssh": 22,
"telnet": 23,
"smtp": 25,
"time": 37,
"nameserver": 42,
"whois": 43,
"tacacs": 49,
"re-mail-ck": 50,
"domain": 53,
"mtp": 57,
"tacacs-ds": 65,
"bootps": 67,
"bootpc": 68,
"gopher": 70,
"rje": 77,
"finger": 79,
"http": 80,
"link": 87,
"kerberos": 88,
"supdup": 95,
"hostnames": 101,
"iso-tsap": 102,
"acr-nema": 104,
"csnet-ns": 105,
"rtelnet": 107,
"pop2": 109,
"pop3": 110,
"sunrpc": 111,
"auth": 113,
"sftp": 115,
"uucp-path": 117,
"nntp": 119,
"ntp": 123,
"pwdgen": 129,
"loc-srv": 135,
"netbios-ns": 137,
"netbios-dgm": 138,
"netbios-ssn": 139,
"imap2": 143,
"snmp": 161,
"snmp-trap": 162,
"cmip-man": 163,
"cmip-agent": 164,
"mailq": 174,
"xdmcp": 177,
"nextstep": 178,
"bgp": 179,
"prospero": 191,
"irc": 194,
"smux": 199,
"at-rtmp": 201,
"at-nbp": 202,
"at-echo": 204,
"at-zis": 206,
"qmtp": 209,
"z3950": 210,
"ipx": 213,
"imap3": 220,
"pawserv": 345,
"zserv": 346,
"fatserv": 347,
"rpc2portmap": 369,
"codaauth2": 370,
"clearcase": 371,
"ulistserv": 372,
"ldap": 389,
"imsp": 406,
"svrloc": 427,
"https": 443,
"snpp": 444,
"microsoft-ds": 445,
"kpasswd": 464,
"urd": 465,
"saft": 487,
"isakmp": 500,
"rtsp": 554,
"nqs": 607,
"npmp-local": 610,
"npmp-gui": 611,
"hmmp-ind": 612,
"qmqp": 628,
"ipp": 631,
"exec": 512,
"login": 513,
"shell": 514,
"printer": 515,
"tempo": 526,
"courier": 530,
"conference": 531,
"netnews": 532,
"gdomap": 538,
"uucp": 540,
"klogin": 543,
"kshell": 544,
"dhcpv6-client": 546,
"dhcpv6-server": 547,
"afpovertcp": 548,
"idfp": 549,
"remotefs": 556,
"nntps": 563,
"submission": 587,
"ldaps": 636,
"tinc": 655,
"silc": 706,
"kerberos-adm": 749,
"webster": 765,
"rsync": 873,
"ftps-data": 989,
"ftps": 990,
"telnets": 992,
"imaps": 993,
"ircs": 994,
"pop3s": 995,
"socks": 1080,
"proofd": 1093,
"rootd": 1094,
"openvpn": 1194,
"rmiregistry": 1099,
"kazaa": 1214,
"nessus": 1241,
"lotusnote": 1352,
"ms-sql-s": 1433,
"ms-sql-m": 1434,
"ingreslock": 1524,
"prospero-np": 1525,
"datametrics": 1645,
"sa-msg-port": 1646,
"kermit": 1649,
"groupwise": 1677,
"l2f": 1701,
"radius": 1812,
"radius-acct": 1813,
"msnp": 1863,
"unix-status": 1957,
"log-server": 1958,
"remoteping": 1959,
"cisco-sccp": 2000,
"search": 2010,
"pipe-server": 2010,
"nfs": 2049,
"gnunet": 2086,
"rtcm-sc104": 2101,
"gsigatekeeper": 2119,
"gris": 2135,
"cvspserver": 2401,
"venus": 2430,
"venus-se": 2431,
"codasrv": 2432,
"codasrv-se": 2433,
"mon": 2583,
"dict": 2628,
"f5-globalsite": 2792,
"gsiftp": 2811,
"gpsd": 2947,
"gds-db": 3050,
"icpv2": 3130,
"iscsi-target": 3260,
"mysql": 3306,
"nut": 3493,
"distcc": 3632,
"daap": 3689,
"svn": 3690,
"suucp": 4031,
"sysrqd": 4094,
"sieve": 4190,
"epmd": 4369,
"remctl": 4373,
"f5-iquery": 4353,
"iax": 4569,
"mtn": 4691,
"radmin-port": 4899,
"rfe": 5002,
"mmcc": 5050,
"sip": 5060,
"sip-tls": 5061,
"aol": 5190,
"xmpp-client": 5222,
"xmpp-server": 5269,
"cfengine": 5308,
"mdns": 5353,
"postgresql": 5432,
"freeciv": 5556,
"amqps": 5671,
"amqp": 5672,
"ggz": 5688,
"x11": 6000,
"x11-1": 6001,
"x11-2": 6002,
"x11-3": 6003,
"x11-4": 6004,
"x11-5": 6005,
"x11-6": 6006,
"x11-7": 6007,
"gnutella-svc": 6346,
"gnutella-rtr": 6347,
"sge-qmaster": 6444,
"sge-execd": 6445,
"mysql-proxy": 6446,
"afs3-fileserver": 7000,
"afs3-callback": 7001,
"afs3-prserver": 7002,
"afs3-vlserver": 7003,
"afs3-kaserver": 7004,
"afs3-volser": 7005,
"afs3-errors": 7006,
"afs3-bos": 7007,
"afs3-update": 7008,
"afs3-rmtsys": 7009,
"font-service": 7100,
"http-alt": 8080,
"bacula-dir": 9101,
"bacula-fd": 9102,
"bacula-sd": 9103,
"xmms2": 9667,
"nbd": 10809,
"zabbix-agent": 10050,
"zabbix-trapper": 10051,
"amanda": 10080,
"dicom": 11112,
"hkp": 11371,
"bprd": 13720,
"bpdbm": 13721,
"bpjava-msvc": 13722,
"vnetd": 13724,
"bpcd": 13782,
"vopied": 13783,
"db-lsp": 17500,
"dcap": 22125,
"gsidcap": 22128,
"wnn6": 22273,
"kerberos4": 750,
"kerberos-master": 751,
"krb-prop": 754,
"krbupdate": 760,
"swat": 901,
"kpop": 1109,
"knetd": 2053,
"eklogin": 2105,
"kx": 2111,
"iprop": 2121,
"supfilesrv": 871,
"supfiledbg": 1127,
"linuxconf": 98,
"poppassd": 106,
"moira-db": 775,
"moira-update": 777,
"spamd": 783,
"omirr": 808,
"customs": 1001,
"skkserv": 1178,
"rmtcfg": 1236,
"wipld": 1300,
"xtel": 1313,
"xtelw": 1314,
"support": 1529,
"cfinger": 2003,
"frox": 2121,
"ninstall": 2150,
"zebrasrv": 2600,
"zebra": 2601,
"ripd": 2602,
"ripngd": 2603,
"ospfd": 2604,
"bgpd": 2605,
"ospf6d": 2606,
"ospfapi": 2607,
"isisd": 2608,
"afbackup": 2988,
"afmbackup": 2989,
"xtell": 4224,
"fax": 4557,
"hylafax": 4559,
"distmp3": 4600,
"munin": 4949,
"enbd-cstatd": 5051,
"enbd-sstatd": 5052,
"pcrd": 5151,
"noclog": 5354,
"hostmon": 5355,
"nrpe": 5666,
"nsca": 5667,
"mrtd": 5674,
"bgpsim": 5675,
"canna": 5680,
"syslog-tls": 6514,
"sane-port": 6566,
"ircd": 6667,
"zope-ftp": 8021,
"tproxy": 8081,
"omniorb": 8088,
"clc-build-daemon": 8990,
"xinetd": 9098,
"git": 9418,
"zope": 9673,
"webmin": 10000,
"kamanda": 10081,
"amandaidx": 10082,
"amidxtape": 10083,
"smsqp": 11201,
"xpilot": 15345,
"sgi-cad": 17004,
"isdnlog": 20011,
"vboxd": 20012,
"binkp": 24554,
"asp": 27374,
"csync2": 30865,
"dircproxy": 57000,
"tfido": 60177,
"fido": 60179,
}
|
|
from django import forms
from model_utils import Choices
from xyberville.apps.users.models import User
from xyberville.apps.keluarga.models import Keluarga
from xyberville.apps.profiles.models import Profile
from xyberville.apps.pekerjaan.models import Pekerjaan
from xyberville.core.utils import (generate_random_string, generate_username)
from xyberville.apps.logs.models import UserLogs
from xyberville.apps.regions.models import Province, Regency, District, Village
from dal import autocomplete, forward
class BaseUserForm(forms.ModelForm):
name = forms.CharField(
label='Nama',
widget=forms.TextInput(
attrs={
'class': 'form-control',
'placeholder': 'Nama Lengkap'
}
)
)
nik = forms.CharField(
required=False,
label='NIK',
widget=forms.TextInput(
attrs={
'class': 'form-control',
'placeholder': 'NIK'
}
)
)
alamat = forms.CharField(
required=False,
widget=forms.Textarea(
attrs={
'class': 'form-control',
'placeholder': 'Alamat'
}
)
)
rt = forms.CharField(
required=False,
label='RT',
widget=forms.TextInput(
attrs={
'class': 'form-control',
'placeholder': 'RT'
}
)
)
rw = forms.CharField(
required=False,
label='RW',
widget=forms.TextInput(
attrs={
'class': 'form-control',
'placeholder': 'RW'
}
)
)
province = forms.ModelChoiceField(
label='Provinsi',
queryset=Province.objects.all(),
widget=autocomplete.ModelSelect2(
url='backoffice:province_autocomplete')
)
regency = forms.ModelChoiceField(
label='Kabupaten / Kota',
queryset=Regency.objects.all(),
widget=autocomplete.ModelSelect2(
url='backoffice:regency_autocomplete',
forward=(forward.Field('province', 'province'),)
)
)
district = forms.ModelChoiceField(
label='Kecamatan',
queryset=District.objects.all(),
widget=autocomplete.ModelSelect2(
url='backoffice:district_autocomplete',
forward=(forward.Field('regency', 'regency'),)
)
)
village = forms.ModelChoiceField(
label='Kelurahan',
queryset=Village.objects.all(),
widget=autocomplete.ModelSelect2(
url='backoffice:village_autocomplete',
forward=(forward.Field('district', 'district'),)
)
)
kode_pos = forms.CharField(
required=False,
label='Kode Pos',
widget=forms.TextInput(
attrs={
'class': 'form-control',
'placeholder': 'Kode Pos'
}
)
)
kewarganegaraan = forms.ChoiceField(
required=False,
choices=Profile.WNI,
widget=forms.Select(
attrs={
'class': 'form-control',
}
)
)
wna = forms.CharField(
required=False,
label='WNA',
widget=forms.TextInput(
attrs={
'class': 'form-control',
'placeholder': 'Negara Asal'
}
)
)
paspor = forms.CharField(
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control',
'placeholder': 'Paspor'
}
)
)
paspor_kadaluarsa = forms.DateField(
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control',
'id': 'paspor_kadaluarsa',
'placeholder': 'YYYY/MM/DD'
}
)
)
status_penduduk = forms.ChoiceField(
required=False,
label='Penduduk Tetap',
choices=Profile.STATUS_PENDUDUK,
widget=forms.Select(
attrs={
'class': 'form-control',
}
)
)
status_tempat_tinggal = forms.ChoiceField(
required=False,
label='Status Tempat Tinggal',
choices=Profile.STATUS_TEMPAT_TINGGAL,
widget=forms.Select(
attrs={
'class': 'form-control',
}
)
)
kelamin = forms.ChoiceField(
required=False,
choices=Profile.KELAMIN,
widget=forms.Select(
attrs={
'class': 'form-control',
}
)
)
tempat_lahir = forms.ModelChoiceField(
label='Tempat Lahir',
queryset=Regency.objects.all(),
widget=autocomplete.ModelSelect2(
url='backoffice:regency_autocomplete'
)
)
tanggal_lahir = forms.DateField(
required=False,
label='Tanggal Lahir',
widget=forms.TextInput(
attrs={
'class': 'form-control',
'id': 'tanggal_lahir',
'data-date-format': 'yyyy-mm-dd'
}
)
)
nomor_akta_lahir = forms.CharField(
required=False,
label='No. Akta Lahir',
widget=forms.TextInput(
attrs={
'class': 'form-control',
'placeholder': 'Nomor Akta Lahir'
}
)
)
golongan_darah = forms.ChoiceField(
required=False,
label='Gol. Darah',
choices=Profile.GOLONGAN_DARAH,
widget=forms.Select(
attrs={
'class': 'form-control',
}
)
)
agama = forms.ChoiceField(
required=False,
choices=Profile.AGAMA,
label='Agama',
widget=forms.Select(
attrs={
'class': 'form-control',
}
)
)
status_pernikahan = forms.ChoiceField(
required=False,
label='Status Pernikahan',
choices=Profile.STATUS_PERNIKAHAN,
widget=forms.Select(
attrs={
'class': 'form-control',
}
)
)
nomor_akta_nikah = forms.CharField(
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control',
'placeholder': 'Nomor Akta Nikah'
}
)
)
tanggal_pernikahan = forms.DateField(
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control',
'id': 'tanggal_pernikahan',
'placeholder': 'YYYY-MM-DD',
'data-date-format': 'yyyy-mm-dd'
}
)
)
nomor_akta_cerai = forms.CharField(
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control',
'placeholder': 'Nomor Akta Cerai'
}
)
)
tanggal_perceraian = forms.DateField(
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control',
'id': 'tanggal_perceraian',
'placeholder': 'YYYY-MM-DD',
'data-date-format': 'yyyy-mm-dd'
}
)
)
hubungan_keluarga = forms.ChoiceField(
required=False,
choices=Profile.HUBUNGAN_KELUARGA,
widget=forms.Select(
attrs={
'class': 'form-control',
}
)
)
penyandang_cacat = forms.ChoiceField(
required=False,
choices=Profile.CACAT,
widget=forms.Select(
attrs={
'class': 'form-control',
}
)
)
pendidikan = forms.ChoiceField(
required=False,
choices=Profile.PENDIDIKAN,
widget=forms.Select(
attrs={
'class': 'form-control',
}
)
)
pekerjaan = forms.ModelChoiceField(
required=False,
queryset=Pekerjaan.objects.all(),
widget=autocomplete.ModelSelect2(
url='backoffice:pekerjaan_autocomplete')
)
telepon = forms.CharField(
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control',
'id': 'telpon',
'placeholder': 'Telpon'
}
)
)
mobile = forms.CharField(
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control',
'id': 'mobile',
'placeholder': 'Mobile'
}
)
)
keluarga = forms.ModelChoiceField(
required=False,
queryset=Keluarga.objects.all(),
widget=autocomplete.ModelSelect2(
url='backoffice:keluarga_autocomplete')
)
ayah = forms.ModelChoiceField(
required=False,
queryset=User.objects.all(),
widget=autocomplete.ModelSelect2(
url='backoffice:user_autocomplete')
)
ibu = forms.ModelChoiceField(
required=False,
queryset=User.objects.all(),
widget=autocomplete.ModelSelect2(
url='backoffice:user_autocomplete')
)
email = forms.CharField(
required=False,
widget=forms.EmailInput(
attrs={
'class': 'form-control',
'type': 'email',
'placeholder': 'Email'
}
)
)
photo = forms.ImageField(
required=False,
widget=forms.FileInput(
attrs={
'class': 'btn btn-default image-preview-clear'
}
)
)
class Meta:
model = User
fields = ('email', 'name')
def clean(self):
data = super(BaseUserForm, self).clean()
if self.errors:
return data
message = 'Mohon isi Field ini'
kewarganegaraan = data['kewarganegaraan']
wna = data['wna']
paspor = data['paspor']
paspor_kadaluarsa = data['paspor_kadaluarsa']
if kewarganegaraan == '2' and wna == '':
self.add_error('wna', message)
if kewarganegaraan == '2' and paspor == '':
self.add_error('paspor', message)
        # an empty DateField cleans to None, not '', so test for falsiness
        if paspor and not paspor_kadaluarsa:
self.add_error('paspor_kadaluarsa', message)
return data
    def clean_email(self):
        email = self.cleaned_data.get('email')
        # Normalise an empty string to None so it is stored as NULL.
        if email == '':
            return None
        return email
class WargaCreationForm(BaseUserForm):
def clean(self):
data = super(WargaCreationForm, self).clean()
if self.errors:
return data
return data
def save(self, employee, *args, **kwargs):
kwargs['commit'] = False
data = self.cleaned_data
user = super(WargaCreationForm, self).save(*args, **kwargs)
try:
username = generate_username(name=data['name'],
tanggal_lahir=data['tanggal_lahir'])
except IndexError:
username = generate_random_string(8)
user.username = username
user.set_password(generate_random_string())
user.name = data['name']
user.email = data['email']
user.save()
Profile.objects.create(
user=user, nik=data['nik'], alamat=data['alamat'], rt=data['rt'],
rw=data['rw'], province=data['province'], regency=data['regency'],
district=data['district'], village=data['village'],
kode_pos=data['kode_pos'], kewarganegaraan=data['kewarganegaraan'],
wna=data['wna'], paspor=data['paspor'],
paspor_kadaluarsa=data['paspor_kadaluarsa'],
kelamin=data['kelamin'], tempat_lahir=data['tempat_lahir'],
tanggal_lahir=data['tanggal_lahir'],
nomor_akta_lahir=data['nomor_akta_lahir'],
golongan_darah=data['golongan_darah'], agama=data['agama'],
status_pernikahan=data['status_pernikahan'],
nomor_akta_nikah=data['nomor_akta_nikah'],
tanggal_pernikahan=data['tanggal_pernikahan'],
nomor_akta_cerai=data['nomor_akta_cerai'],
tanggal_perceraian=data['tanggal_perceraian'],
hubungan_keluarga=data['hubungan_keluarga'],
penyandang_cacat=data['penyandang_cacat'], ibu=data['ibu'],
ayah=data['ayah'], pendidikan=data['pendidikan'],
pekerjaan=data['pekerjaan'], telepon=data['telepon'],
mobile=data['mobile'], keluarga=data['keluarga'],
status_penduduk=data['status_penduduk'],
status_tempat_tinggal=data['status_tempat_tinggal'],
photo=data['photo']
)
UserLogs.objects.create(
user=employee,
user_edited=user,
action=UserLogs.ACTION.created
)
return user
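# Example usage (a sketch; the request handling below is an assumption and not
# part of this module):
#
#   form = WargaCreationForm(request.POST, request.FILES)
#   if form.is_valid():
#       user = form.save(employee=request.user)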
class WargaEditForm(BaseUserForm):
username = forms.CharField(
required=False,
widget=forms.TextInput(
attrs={
'class': 'form-control',
'id': 'username',
'placeholder': 'Username'
}
)
)
def save(self, *args, **kwargs):
kwargs['commit'] = False
data = self.cleaned_data
user = super(WargaEditForm, self).save(*args, **kwargs)
user.username = data['username']
user.name = data['name']
user.email = data['email']
user.save()
user.profile.nik = data['nik']
user.profile.alamat = data['alamat']
user.profile.rt = data['rt']
user.profile.rw = data['rw']
user.profile.province = data['province']
user.profile.district = data['district']
user.profile.regency = data['regency']
user.profile.village = data['village']
user.profile.kode_pos = data['kode_pos']
user.profile.kewarganegaraan = data['kewarganegaraan']
user.profile.wna = data['wna']
user.profile.paspor = data['paspor']
user.profile.paspor_kadaluarsa = data['paspor_kadaluarsa']
user.profile.kelamin = data['kelamin']
user.profile.tempat_lahir = data['tempat_lahir']
user.profile.tanggal_lahir = data['tanggal_lahir']
user.profile.nomor_akta_lahir = data['nomor_akta_lahir']
user.profile.golongan_darah = data['golongan_darah']
user.profile.agama = data['agama']
user.profile.status_pernikahan = data['status_pernikahan']
user.profile.nomor_akta_nikah = data['nomor_akta_nikah']
user.profile.tanggal_pernikahan = data['tanggal_pernikahan']
user.profile.nomor_akta_cerai = data['nomor_akta_cerai']
user.profile.tanggal_perceraian = data['tanggal_perceraian']
user.profile.hubungan_keluarga = data['hubungan_keluarga']
user.profile.penyandang_cacat = data['penyandang_cacat']
user.profile.ibu = data['ibu']
user.profile.ayah = data['ayah']
user.profile.pendidikan = data['pendidikan']
user.profile.pekerjaan = data['pekerjaan']
user.profile.telepon = data['telepon']
user.profile.mobile = data['mobile']
user.profile.keluarga = data['keluarga']
user.profile.status_penduduk = data['status_penduduk']
user.profile.status_tempat_tinggal = data['status_tempat_tinggal']
user.profile.photo = data['photo']
user.profile.save()
return user
class WargaFilterForm(forms.Form):
    start = forms.DateField(input_formats=["%Y/%m/%d"], label="Start Date")
    end = forms.DateField(input_formats=["%Y/%m/%d"], label="End Date")
    pekerjaan = forms.ModelChoiceField(
        required=False,
        queryset=Pekerjaan.objects.all(),
        widget=autocomplete.ModelSelect2(
            url='backoffice:users:pekerjaan_autocomplete')
    )
    def clean(self):
        cleaned_data = super(WargaFilterForm, self).clean()
        if self.errors:
            return cleaned_data
        if cleaned_data['start'] > cleaned_data['end']:
            self.add_error('start', "Start time can't be greater than end time")
        return cleaned_data
|
|
#!/usr/bin/env python
''' script for filtering insertions vs. the human reference GRCh37/hg19;
    may be useful as a template for extension to other species '''
import pysam
import sys
import os
import logging
import argparse
import numpy as np
import subprocess
from uuid import uuid4
verbose=False
FORMAT = '%(asctime)s %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
if verbose:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
tebreak_dir = os.path.dirname(os.path.realpath(__file__))
def load_falib(infa):
seqdict = {}
with open(infa, 'r') as fa:
seqid = ''
seq = ''
for line in fa:
if line.startswith('>'):
if seq != '':
seqdict[seqid] = seq
seqid = line.lstrip('>').strip().split()[0]
seq = ''
else:
assert seqid != ''
seq = seq + line.strip()
if seqid not in seqdict and seq != '':
seqdict[seqid] = seq
return seqdict
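# Example: for a FASTA file containing (file name and IDs below are illustrative)
#   >ALU:AluYa5
#   GGCCGGGCGC...
# load_falib('insref.fa') returns {'ALU:AluYa5': 'GGCCGGGCGC...'}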
def ref_filter(chrom, start, end, superfams, tbx, extend=0):
start = int(start)
end = int(end)
if extend > 0:
start -= extend
end += extend
if start < 0: start = 0
    for sf in superfams.split(','):
        # L1, ALU and SVA share the same masking logic; treat them uniformly
        if sf in ('L1', 'ALU', 'SVA'):
            if chrom not in tbx[sf].contigs: return True
            for ins in tbx[sf].fetch(chrom, start, end): return True
return False
def len_filter(rec):
telen = int(rec['TE_Align_End']) - int(rec['TE_Align_Start'])
if 'ALU' in rec['Superfamily'] and telen < 250: return True
if 'SVA' in rec['Superfamily'] and telen < 1000: return True
if 'L1' in rec['Superfamily'] and int(rec['TE_Align_End']) < 5950: return True
return False
def avgmap(maptabix, chrom, start, end):
''' return average mappability across chrom:start-end region; maptabix = pysam.Tabixfile'''
scores = []
if None in (start, end): return None
if chrom in maptabix.contigs:
for rec in maptabix.fetch(chrom, int(start), int(end)):
mchrom, mstart, mend, mscore = rec.strip().split()
mstart, mend = int(mstart), int(mend)
mscore = float(mscore)
            # walk the interval base-by-base, keeping scores that fall inside start..end
            while mstart < mend:
mstart += 1
if mstart >= int(start) and mstart <= int(end):
scores.append(mscore)
if len(scores) > 0:
return sum(scores) / float(len(scores))
else:
return 0.0
else:
return 0.0
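# Example (a sketch): avgmap(map_tbx, 'chr1', 1000, 2000) returns the mean
# mappability score of all bases in chr1:1000-2000 covered by the tabix
# records, or 0.0 if the chromosome (or any overlap) is absent.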
def rc(dna):
''' reverse complement '''
complements = maketrans('acgtrymkbdhvACGTRYMKBDHV', 'tgcayrkmvhdbTGCAYRKMVHDB')
return dna.translate(complements)[::-1]
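# Example: rc('AACG') == 'CGTT'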
def realign_filter(rec, inslib):
seqn = rec['Superfamily'] + ':' + rec['Subfamily']
    if seqn not in inslib:
        # return an empty match list so callers can safely take len() of the result
        return []
seq_headers = ['Genomic_Consensus_5p', 'Genomic_Consensus_3p', 'Insert_Consensus_5p', 'Insert_Consensus_3p']
matches = []
for seqtype in seq_headers:
if rec[seqtype] == 'NA':
continue
alignment = align(rec[seqtype], inslib[seqn], rec['Subfamily'])
if alignment:
matches.append([seqtype] + alignment)
return matches
def align(qryseq, refseq, elt):
rnd = str(uuid4())
tgtfa = 'tmp.' + rnd + '.tgt.fa'
qryfa = 'tmp.' + rnd + '.qry.fa'
tgt = open(tgtfa, 'w')
qry = open(qryfa, 'w')
tgt.write('>ref' + '\n' + refseq + '\n')
qry.write('>qry' + '\n' + qryseq + '\n')
tgt.close()
qry.close()
cmd = ['exonerate', '--bestn', '1', '-m', 'ungapped', '--showalignment','0', '--ryo', elt + '\t%s\t%qab\t%qae\t%tab\t%tae\t%pi\n', qryfa, tgtfa]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
best = []
topscore = 0
for pline in p.stdout.readlines():
if pline.startswith(elt):
c = pline.strip().split()
if int(c[1]) > topscore:
topscore = int(c[1])
best = c
os.remove(tgtfa)
os.remove(qryfa)
return best
def overlap(iv1, iv2):
if min(iv1[1], iv2[1]) - max(iv1[0], iv2[0]) > 0: # is there overlap?
return [max(iv1[0], iv2[0]), min(iv1[1], iv2[1])]
return None
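# Examples: overlap([0, 10], [5, 15]) == [5, 10]
#           overlap([0, 5], [6, 10]) is None   (no overlap)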
def main(args):
l1_ref = tebreak_dir + '/../lib/mask.L1.hg19.bed.gz'
alu_ref = tebreak_dir + '/../lib/mask.Alu.hg19.bed.gz'
sva_ref = tebreak_dir + '/../lib/mask.SVA.hg19.bed.gz'
map_ref = tebreak_dir + '/../lib/wgEncodeCrgMapabilityAlign100mer.bed.gz'
inslib = None
if args.insref:
inslib = load_falib(args.insref)
for fn in (l1_ref, alu_ref, sva_ref):
if not os.path.exists(fn): sys.exit('reference %s not found' % fn)
if not os.path.exists(fn + '.tbi'): sys.exit('index for reference %s not found' %fn)
tbx = {}
tbx['L1'] = pysam.Tabixfile(l1_ref)
tbx['ALU'] = pysam.Tabixfile(alu_ref)
tbx['SVA'] = pysam.Tabixfile(sva_ref)
map_tbx = pysam.Tabixfile(map_ref)
header = []
with open(args.tabfile, 'r') as tab:
for i, line in enumerate(tab):
if i == 0: # header
header = line.strip().split('\t')
if args.realign and args.insref:
header += ['ExonerateRealign']
if args.chimera:
header += ['ChimeraBaseCount', 'ChimeraMatchIns', 'ChimeraMatchRef', 'InsSiteHomology', 'PossibleRefEltChimera']
print '\t'.join(header)
else:
rec = {}
out = True
for n, field in enumerate(line.strip().split('\t')):
rec[header[n]] = field
#logger.debug(rec['UUID'])
if int(rec['3p_Cons_Len']) < 120 and int(rec['5p_Cons_Len']) < 120:
logger.debug('Filtered %s: consensus length < %d' % (rec['UUID'], 120))
out = False
if 'NA' in (rec['TE_Align_Start'], rec['TE_Align_End']):
logger.debug('Filtered %s: TE_Align_Start or TE_Align_End is "NA"' % rec['UUID'])
out = False
ref_present = False
if args.wideref:
ref_present = ref_filter(rec['Chromosome'], rec['Left_Extreme'], rec['Right_Extreme'], rec['Superfamily'], tbx, extend=10000)
else:
ref_present = ref_filter(rec['Chromosome'], rec['Left_Extreme'], rec['Right_Extreme'], rec['Superfamily'], tbx)
if ref_present and not args.ignore_ref_filter:
logger.debug('Filtered %s: proximity to reference TE of same superfamily' % rec['UUID'])
out = False
if max(float(rec['5p_Elt_Match']), float(rec['3p_Elt_Match'])) < 0.95:
logger.debug('Filtered %s: max(5p_Elt_Match, 3p_Elt_Match) < 0.95' % rec['UUID'])
out = False
if max(float(rec['5p_Genome_Match']), float(rec['3p_Genome_Match'])) < 0.98:
logger.debug('Filtered %s: max(5p_Genome_Match, 3p_Genome_Match) < 0.98' % rec['UUID'])
out = False
mapscore = avgmap(map_tbx, rec['Chromosome'], rec['Left_Extreme'], rec['Right_Extreme'])# * (max(int(rec['3p_Cons_Len']), int(rec['5p_Cons_Len']))/100.)
if mapscore < 0.1:
logger.debug('Filtered %s: mappability of %f < 0.1' % (rec['UUID'], mapscore))
out = False
if float(rec['Remapped_Discordant']) < 4:
logger.debug('Filtered %s: low discordant evidence (< 4 reads)' % rec['UUID'])
out = False
if float(rec['Remap_Disc_Fraction']) < 0.5:
logger.debug('Filtered %s: low discordant evidence (< 50pct supporting)' % rec['UUID'])
out = False
if rec['Insert_Consensus_5p'] == rec['Insert_Consensus_3p'] == 'NA':
logger.debug('Filtered %s: no insertion consensus mapped to insertion reference' % rec['UUID'])
out = False
if args.lenfilter and out and len_filter(rec):
logger.debug('Filtered %s: TE length filter' % rec['UUID'])
out = False
align_info = 'NA'
if out and args.realign and args.insref:
align_info = realign_filter(rec, inslib)
if len(align_info) == 0:
out = False
well_aligned = False
for alignment in align_info:
seqtype, _, score, qstart, qend, tstart, tend, pi = alignment
tstart = int(tstart)
tend = int(tend)
pi = float(pi)
if pi >= 95.0 and abs(tend-tstart) >= 100:
well_aligned = True
if not well_aligned: out = False
ins_site_homlen = 0 # insertion site homology length
ins_site_homseq = 'NA' # sequence of overlapped region
ch_ref_present = False
ins_pct_match = 0.0
ref_pct_match = 0.0
if out and args.chimera:
if not args.refgenome:
sys.exit('--refgenome required in conjunction with --chimera')
if not args.insref:
sys.exit('--insref required in conjunction with --chimera')
ref = pysam.Fastafile(args.refgenome)
left = int(rec['Left_Extreme']) - 1000
right = int(rec['Right_Extreme']) + 1000
if left < 0: left = 0
ref_seq = ref.fetch(rec['Chromosome'], left, right)
seqn = rec['Superfamily'] + ':' + rec['Subfamily']
ins_seq = inslib[seqn]
alignside = ''
ins_align = []
gen_align = []
if rec['Genomic_Consensus_3p'] != 'NA':
ins_align = align(rec['Genomic_Consensus_3p'], ins_seq, rec['Subfamily'])
gen_align = align(rec['Genomic_Consensus_3p'], ref_seq, 'Genomic')
alignside = 'Genomic_Consensus_3p'
else:
ins_align = align(rec['Genomic_Consensus_5p'], ins_seq, rec['Subfamily'])
gen_align = align(rec['Genomic_Consensus_5p'], ref_seq, 'Genomic')
alignside = 'Genomic_Consensus_5p'
ins_subcoords = None
if ins_align:
ins_subcoords = map(int, ins_align[2:4])
gen_subcoords = None
if gen_align:
gen_subcoords = map(int, gen_align[2:4])
else:
out = False
ol = None
if gen_subcoords is not None and ins_subcoords is not None:
ol = overlap(ins_subcoords, gen_subcoords)
if ol is not None:
ins_site_homlen = ol[1]-ol[0]
ins_site_homseq = rec[alignside][ol[0]:ol[1]]
ch_align_ins = align(ins_site_homseq, ins_seq, 'Ins')
ch_align_ref = align(ins_site_homseq, ref_seq, 'Ref')
if ch_align_ins:
ins_pct_match = ch_align_ins[-1]
if ch_align_ref:
ref_pct_match = ch_align_ref[-1]
# chimera with adjacent ref element check
ch_ref_present = ref_filter(rec['Chromosome'], rec['Left_Extreme'], rec['Right_Extreme'], rec['Superfamily'], tbx, extend=10000)
if out:
fields = line.strip().split()
if args.insref and args.realign:
fields.append(','.join([';'.join(alignment) for alignment in align_info]))
if args.chimera:
fields.append(str(ins_site_homlen))
fields.append(str(ins_pct_match))
fields.append(str(ref_pct_match))
fields.append(ins_site_homseq)
fields.append(str(ch_ref_present))
print '\t'.join(fields)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='filter script for TEs on hg19')
parser.add_argument('--tabfile', required=True, help='tabular output from resolve.py, requires header to be present')
parser.add_argument('--insref', default=None, help='req. alignment to insertion sequence reference')
    parser.add_argument('--ignore_ref_filter', default=False, action='store_true', help='turn off filtering vs. reference elements')
parser.add_argument('--lenfilter', default=False, action='store_true', help='turn on filter by insertion length')
parser.add_argument('--refgenome', default=None)
parser.add_argument('--realign', default=False, action='store_true')
parser.add_argument('--chimera', default=False, action='store_true')
parser.add_argument('--wideref', default=False, action='store_true')
args = parser.parse_args()
main(args)
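# Example invocation (a sketch; the script and file names are illustrative
# assumptions):
#
#   python filter_insertions.py --tabfile resolved.tab --insref teref.human.fa \
#       --realign --lenfilter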
|
|
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from enum import Enum
import yaml
import json
from attr._make import fields
try:
from functools import singledispatch
except ImportError:
from singledispatch import singledispatch
@singledispatch
def to_dict(obj, **kwargs):
"""
    Convert an object into a dictionary. Uses singledispatch to allow for
    clean extensions for custom class types.
Reference: https://pypi.python.org/pypi/singledispatch
:param obj: object instance
:param kwargs: keyword arguments such as suppress_private_attr,
suppress_empty_values, dict_factory
:return: converted dictionary.
"""
# if is_related, then iterate attrs.
if is_model(obj.__class__):
return related_obj_to_dict(obj, **kwargs)
# else, return obj directly. register a custom to_dict if you need to!
# reference: https://pypi.python.org/pypi/singledispatch
else:
return obj
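# Example of extending to_dict for a custom type via singledispatch (a sketch;
# Decimal support is an assumption, not something this module provides):
#
#   from decimal import Decimal
#
#   @to_dict.register(Decimal)
#   def _(obj, **kwargs):
#       return str(obj)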
def related_obj_to_dict(obj, **kwargs):
""" Covert a known related object to a dictionary. """
# Explicitly discard formatter kwarg, should not be cascaded down.
kwargs.pop('formatter', None)
# If True, remove fields that start with an underscore (e.g. _secret)
suppress_private_attr = kwargs.get("suppress_private_attr", False)
# if True, don't store fields with None values into dictionary.
suppress_empty_values = kwargs.get("suppress_empty_values", False)
# get list of attrs fields
attrs = fields(obj.__class__)
# instantiate return dict, use OrderedDict type by default
return_dict = kwargs.get("dict_factory", OrderedDict)()
for a in attrs:
# skip if private attr and flag tells you to skip
if suppress_private_attr and a.name.startswith("_"):
continue
metadata = a.metadata or {}
# formatter is a related-specific `attrs` meta field
# see fields.DateField
formatter = metadata.get('formatter')
# get value and call to_dict on it, passing the kwargs/formatter
value = getattr(obj, a.name)
value = to_dict(value, formatter=formatter, **kwargs)
# check flag, skip None values
if suppress_empty_values and value is None:
continue
# field name can be overridden by the metadata field
key_name = a.metadata.get('key') or a.name
# store converted / formatted value into return dictionary
return_dict[key_name] = value
return return_dict
def to_model(cls, value):
"""
Coerce a value into a model object based on a class-type (cls).
:param cls: class type to coerce into
:param value: value to be coerced
:return: original value or coerced value (value')
"""
if isinstance(value, cls) or value is None:
pass # skip if right type or value is None
elif issubclass(cls, Enum):
value = cls(value)
elif is_model(cls) and isinstance(value, dict):
value = convert_key_to_attr_names(cls, value)
value = cls(**value)
else:
value = cls(value)
return value
def convert_key_to_attr_names(cls, original):
""" convert key names to their corresponding attribute names """
attrs = fields(cls)
updated = {}
keys_pulled = set()
for a in attrs:
key_name = a.metadata.get('key') or a.name
if key_name in original:
updated[a.name] = original.get(key_name)
keys_pulled.add(key_name)
if getattr(cls, '__related_strict__', False):
extra = set(original.keys()) - keys_pulled
if len(extra):
raise ValueError("Extra keys (strict mode): {}".format(extra))
return updated
def is_model(cls):
"""
Check whether *cls* is a class with ``attrs`` attributes.
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:rtype: :class:`bool`
"""
return getattr(cls, "__attrs_attrs__", None) is not None
def to_yaml(obj, stream=None, dumper_cls=yaml.Dumper, default_flow_style=False,
**kwargs):
"""
Serialize a Python object into a YAML stream with OrderedDict and
default_flow_style defaulted to False.
If stream is None, return the produced string instead.
OrderedDict reference: http://stackoverflow.com/a/21912744
default_flow_style reference: http://stackoverflow.com/a/18210750
    :param obj: python object to be serialized
    :param stream: stream to serialize to; if None, the produced string is returned
    :param dumper_cls: base Dumper class to extend.
    :param kwargs: arguments to pass to to_dict
    :return: stream if provided, string if stream is None
"""
class OrderedDumper(dumper_cls):
pass
def dict_representer(dumper, data):
return dumper.represent_mapping(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
data.items())
OrderedDumper.add_representer(OrderedDict, dict_representer)
obj_dict = to_dict(obj, **kwargs)
return yaml.dump(obj_dict, stream, OrderedDumper,
default_flow_style=default_flow_style)
def from_yaml(stream, cls=None, loader_cls=yaml.Loader,
object_pairs_hook=OrderedDict, **extras):
"""
Convert a YAML stream into a class via the OrderedLoader class.
"""
class OrderedLoader(loader_cls):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
yaml_dict = yaml.load(stream, OrderedLoader) or {}
yaml_dict.update(extras)
return cls(**yaml_dict) if cls else yaml_dict
def to_json(obj, indent=4, sort_keys=True, **kwargs):
"""
:param obj: object to convert to dictionary and then output to json
:param indent: indent json by number of spaces
:param sort_keys: sort json output by key if true
:param kwargs: arguments to pass to to_dict
:return: json string
"""
obj_dict = to_dict(obj, **kwargs)
return json.dumps(obj_dict, indent=indent, sort_keys=sort_keys)
def from_json(stream, cls=None, object_pairs_hook=OrderedDict, **extras):
"""
Convert a JSON string or stream into specified class.
"""
stream = stream.read() if hasattr(stream, 'read') else stream
json_dict = json.loads(stream, object_pairs_hook=object_pairs_hook)
if extras:
json_dict.update(extras) # pragma: no cover
return to_model(cls, json_dict) if cls else json_dict
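# Round-trip example (a minimal sketch using only this module; without a cls
# argument from_json simply returns an OrderedDict):
#
#   data = from_json('{"name": "Ada"}')      # -> OrderedDict([('name', 'Ada')])
#   text = to_json(data)                     # -> pretty-printed JSON string
#   assert from_json(text)['name'] == 'Ada'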
|
|
import pytest, py, os
from _pytest.core import PluginManager
from _pytest.core import MultiCall, HookRelay, varnames
class TestBootstrapping:
def test_consider_env_fails_to_import(self, monkeypatch):
pluginmanager = PluginManager()
monkeypatch.setenv('PYTEST_PLUGINS', 'nonexisting', prepend=",")
pytest.raises(ImportError, "pluginmanager.consider_env()")
def test_preparse_args(self):
pluginmanager = PluginManager()
pytest.raises(ImportError, """
pluginmanager.consider_preparse(["xyz", "-p", "hello123"])
""")
def test_plugin_prevent_register(self):
pluginmanager = PluginManager()
pluginmanager.consider_preparse(["xyz", "-p", "no:abc"])
l1 = pluginmanager.getplugins()
pluginmanager.register(42, name="abc")
l2 = pluginmanager.getplugins()
assert len(l2) == len(l1)
    def test_plugin_prevent_register_unregistered_already_registered(self):
pluginmanager = PluginManager()
pluginmanager.register(42, name="abc")
l1 = pluginmanager.getplugins()
assert 42 in l1
pluginmanager.consider_preparse(["xyz", "-p", "no:abc"])
l2 = pluginmanager.getplugins()
assert 42 not in l2
def test_plugin_double_register(self):
pm = PluginManager()
pm.register(42, name="abc")
pytest.raises(ValueError, lambda: pm.register(42, name="abc"))
def test_plugin_skip(self, testdir, monkeypatch):
p = testdir.makepyfile(skipping1="""
import pytest
pytest.skip("hello")
""")
p.copy(p.dirpath("skipping2.py"))
monkeypatch.setenv("PYTEST_PLUGINS", "skipping2")
result = testdir.runpytest("-p", "skipping1", "--traceconfig")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*hint*skipping1*hello*",
"*hint*skipping2*hello*",
])
def test_consider_env_plugin_instantiation(self, testdir, monkeypatch):
pluginmanager = PluginManager()
testdir.syspathinsert()
testdir.makepyfile(xy123="#")
monkeypatch.setitem(os.environ, 'PYTEST_PLUGINS', 'xy123')
l1 = len(pluginmanager.getplugins())
pluginmanager.consider_env()
l2 = len(pluginmanager.getplugins())
assert l2 == l1 + 1
assert pluginmanager.getplugin('xy123')
pluginmanager.consider_env()
l3 = len(pluginmanager.getplugins())
assert l2 == l3
def test_consider_setuptools_instantiation(self, monkeypatch):
pkg_resources = py.test.importorskip("pkg_resources")
def my_iter(name):
assert name == "pytest11"
class EntryPoint:
name = "pytest_mytestplugin"
dist = None
def load(self):
class PseudoPlugin:
x = 42
return PseudoPlugin()
return iter([EntryPoint()])
monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter)
pluginmanager = PluginManager()
pluginmanager.consider_setuptools_entrypoints()
plugin = pluginmanager.getplugin("mytestplugin")
assert plugin.x == 42
def test_consider_setuptools_not_installed(self, monkeypatch):
monkeypatch.setitem(py.std.sys.modules, 'pkg_resources',
py.std.types.ModuleType("pkg_resources"))
pluginmanager = PluginManager()
pluginmanager.consider_setuptools_entrypoints()
# ok, we did not explode
def test_pluginmanager_ENV_startup(self, testdir, monkeypatch):
x500 = testdir.makepyfile(pytest_x500="#")
p = testdir.makepyfile("""
import pytest
def test_hello(pytestconfig):
plugin = pytestconfig.pluginmanager.getplugin('pytest_x500')
assert plugin is not None
""")
monkeypatch.setenv('PYTEST_PLUGINS', 'pytest_x500', prepend=",")
result = testdir.runpytest(p)
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed in*"])
def test_import_plugin_importname(self, testdir):
pluginmanager = PluginManager()
pytest.raises(ImportError, 'pluginmanager.import_plugin("qweqwex.y")')
pytest.raises(ImportError, 'pluginmanager.import_plugin("pytest_qweqwx.y")')
reset = testdir.syspathinsert()
pluginname = "pytest_hello"
testdir.makepyfile(**{pluginname: ""})
pluginmanager.import_plugin("pytest_hello")
len1 = len(pluginmanager.getplugins())
pluginmanager.import_plugin("pytest_hello")
len2 = len(pluginmanager.getplugins())
assert len1 == len2
plugin1 = pluginmanager.getplugin("pytest_hello")
assert plugin1.__name__.endswith('pytest_hello')
plugin2 = pluginmanager.getplugin("pytest_hello")
assert plugin2 is plugin1
def test_import_plugin_dotted_name(self, testdir):
pluginmanager = PluginManager()
pytest.raises(ImportError, 'pluginmanager.import_plugin("qweqwex.y")')
pytest.raises(ImportError, 'pluginmanager.import_plugin("pytest_qweqwex.y")')
reset = testdir.syspathinsert()
testdir.mkpydir("pkg").join("plug.py").write("x=3")
pluginname = "pkg.plug"
pluginmanager.import_plugin(pluginname)
mod = pluginmanager.getplugin("pkg.plug")
assert mod.x == 3
def test_consider_module(self, testdir):
pluginmanager = PluginManager()
testdir.syspathinsert()
testdir.makepyfile(pytest_p1="#")
testdir.makepyfile(pytest_p2="#")
mod = py.std.types.ModuleType("temp")
mod.pytest_plugins = ["pytest_p1", "pytest_p2"]
pluginmanager.consider_module(mod)
assert pluginmanager.getplugin("pytest_p1").__name__ == "pytest_p1"
assert pluginmanager.getplugin("pytest_p2").__name__ == "pytest_p2"
def test_consider_module_import_module(self, testdir):
mod = py.std.types.ModuleType("x")
mod.pytest_plugins = "pytest_a"
aplugin = testdir.makepyfile(pytest_a="#")
pluginmanager = PluginManager()
reprec = testdir.getreportrecorder(pluginmanager)
#syspath.prepend(aplugin.dirpath())
py.std.sys.path.insert(0, str(aplugin.dirpath()))
pluginmanager.consider_module(mod)
call = reprec.getcall(pluginmanager.hook.pytest_plugin_registered.name)
assert call.plugin.__name__ == "pytest_a"
# check that it is not registered twice
pluginmanager.consider_module(mod)
l = reprec.getcalls("pytest_plugin_registered")
assert len(l) == 1
def test_config_sets_conftesthandle_onimport(self, testdir):
config = testdir.parseconfig([])
assert config._conftest._onimport == config._onimportconftest
def test_consider_conftest_deps(self, testdir):
mod = testdir.makepyfile("pytest_plugins='xyz'").pyimport()
pp = PluginManager()
pytest.raises(ImportError, "pp.consider_conftest(mod)")
def test_pm(self):
pp = PluginManager()
class A: pass
a1, a2 = A(), A()
pp.register(a1)
assert pp.isregistered(a1)
pp.register(a2, "hello")
assert pp.isregistered(a2)
l = pp.getplugins()
assert a1 in l
assert a2 in l
assert pp.getplugin('hello') == a2
pp.unregister(a1)
assert not pp.isregistered(a1)
pp.unregister(name="hello")
assert not pp.isregistered(a2)
def test_pm_ordering(self):
pp = PluginManager()
class A: pass
a1, a2 = A(), A()
pp.register(a1)
pp.register(a2, "hello")
l = pp.getplugins()
assert l.index(a1) < l.index(a2)
a3 = A()
pp.register(a3, prepend=True)
l = pp.getplugins()
assert l.index(a3) == 0
def test_register_imported_modules(self):
pp = PluginManager()
mod = py.std.types.ModuleType("x.y.pytest_hello")
pp.register(mod)
assert pp.isregistered(mod)
l = pp.getplugins()
assert mod in l
pytest.raises(ValueError, "pp.register(mod)")
mod2 = py.std.types.ModuleType("pytest_hello")
#pp.register(mod2) # double pm
pytest.raises(ValueError, "pp.register(mod)")
#assert not pp.isregistered(mod2)
assert pp.getplugins() == l
def test_canonical_import(self, monkeypatch):
mod = py.std.types.ModuleType("pytest_xyz")
monkeypatch.setitem(py.std.sys.modules, 'pytest_xyz', mod)
pp = PluginManager()
pp.import_plugin('pytest_xyz')
assert pp.getplugin('pytest_xyz') == mod
assert pp.isregistered(mod)
def test_register_mismatch_method(self):
pp = PluginManager(load=True)
class hello:
def pytest_gurgel(self):
pass
pytest.raises(Exception, "pp.register(hello())")
def test_register_mismatch_arg(self):
pp = PluginManager(load=True)
class hello:
def pytest_configure(self, asd):
pass
excinfo = pytest.raises(Exception, "pp.register(hello())")
def test_notify_exception(self, capfd):
pp = PluginManager()
excinfo = pytest.raises(ValueError, "raise ValueError(1)")
pp.notify_exception(excinfo)
out, err = capfd.readouterr()
assert "ValueError" in err
class A:
def pytest_internalerror(self, excrepr):
return True
pp.register(A())
pp.notify_exception(excinfo)
out, err = capfd.readouterr()
assert not err
def test_register(self):
pm = PluginManager(load=False)
class MyPlugin:
pass
my = MyPlugin()
pm.register(my)
assert pm.getplugins()
my2 = MyPlugin()
pm.register(my2)
assert pm.getplugins()[1:] == [my, my2]
assert pm.isregistered(my)
assert pm.isregistered(my2)
pm.unregister(my)
assert not pm.isregistered(my)
assert pm.getplugins()[1:] == [my2]
def test_listattr(self):
plugins = PluginManager()
class api1:
x = 41
class api2:
x = 42
class api3:
x = 43
plugins.register(api1())
plugins.register(api2())
plugins.register(api3())
l = list(plugins.listattr('x'))
assert l == [41, 42, 43]
def test_hook_tracing(self):
pm = PluginManager()
saveindent = []
class api1:
x = 41
def pytest_plugin_registered(self, plugin):
saveindent.append(pm.trace.root.indent)
raise ValueError(42)
l = []
pm.trace.root.setwriter(l.append)
indent = pm.trace.root.indent
p = api1()
pm.register(p)
assert pm.trace.root.indent == indent
assert len(l) == 1
assert 'pytest_plugin_registered' in l[0]
pytest.raises(ValueError, lambda: pm.register(api1()))
assert pm.trace.root.indent == indent
assert saveindent[0] > indent
class TestPytestPluginInteractions:
def test_addhooks_conftestplugin(self, testdir):
newhooks = testdir.makepyfile(newhooks="""
def pytest_myhook(xyz):
"new hook"
""")
conf = testdir.makeconftest("""
import sys ; sys.path.insert(0, '.')
import newhooks
def pytest_addhooks(pluginmanager):
pluginmanager.addhooks(newhooks)
def pytest_myhook(xyz):
return xyz + 1
""")
config = testdir.Config()
config._conftest.importconftest(conf)
print(config.pluginmanager.getplugins())
res = config.hook.pytest_myhook(xyz=10)
assert res == [11]
def test_addhooks_nohooks(self, testdir):
conf = testdir.makeconftest("""
import sys
def pytest_addhooks(pluginmanager):
pluginmanager.addhooks(sys)
""")
res = testdir.runpytest()
assert res.ret != 0
res.stderr.fnmatch_lines([
"*did not find*sys*"
])
def test_namespace_early_from_import(self, testdir):
p = testdir.makepyfile("""
from pytest import Item
from pytest import Item as Item2
assert Item is Item2
""")
result = testdir.runpython(p)
assert result.ret == 0
def test_do_ext_namespace(self, testdir):
testdir.makeconftest("""
def pytest_namespace():
return {'hello': 'world'}
""")
p = testdir.makepyfile("""
from py.test import hello
import py
def test_hello():
assert hello == "world"
assert 'hello' in py.test.__all__
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*1 passed*"
])
def test_do_option_postinitialize(self, testdir):
config = testdir.parseconfigure()
assert not hasattr(config.option, 'test123')
p = testdir.makepyfile("""
def pytest_addoption(parser):
parser.addoption('--test123', action="store_true",
default=True)
""")
config._conftest.importconftest(p)
assert config.option.test123
def test_configure(self, testdir):
config = testdir.parseconfig()
l = []
class A:
def pytest_configure(self, config):
l.append(self)
config.pluginmanager.register(A())
assert len(l) == 0
config.pluginmanager.do_configure(config=config)
assert len(l) == 1
config.pluginmanager.register(A()) # leads to a configured() plugin
assert len(l) == 2
assert l[0] != l[1]
config.pluginmanager.do_unconfigure(config=config)
config.pluginmanager.register(A())
assert len(l) == 2
# lower level API
def test_listattr(self):
pluginmanager = PluginManager()
class My2:
x = 42
pluginmanager.register(My2())
assert not pluginmanager.listattr("hello")
assert pluginmanager.listattr("x") == [42]
def test_listattr_tryfirst(self):
class P1:
@pytest.mark.tryfirst
def m(self):
return 17
class P2:
def m(self):
return 23
class P3:
def m(self):
return 19
pluginmanager = PluginManager()
p1 = P1()
p2 = P2()
p3 = P3()
pluginmanager.register(p1)
pluginmanager.register(p2)
pluginmanager.register(p3)
methods = pluginmanager.listattr('m')
assert methods == [p2.m, p3.m, p1.m]
# listattr keeps a cache and deleting
# a function attribute requires clearing it
pluginmanager._listattrcache.clear()
del P1.m.__dict__['tryfirst']
pytest.mark.trylast(getattr(P2.m, 'im_func', P2.m))
methods = pluginmanager.listattr('m')
assert methods == [p2.m, p1.m, p3.m]
def test_namespace_has_default_and_env_plugins(testdir):
p = testdir.makepyfile("""
import pytest
pytest.mark
""")
result = testdir.runpython(p)
assert result.ret == 0
def test_varnames():
def f(x):
i = 3
class A:
def f(self, y):
pass
class B(object):
def __call__(self, z):
pass
assert varnames(f) == ("x",)
assert varnames(A().f) == ('y',)
assert varnames(B()) == ('z',)
class TestMultiCall:
def test_uses_copy_of_methods(self):
l = [lambda: 42]
mc = MultiCall(l, {})
repr(mc)
l[:] = []
res = mc.execute()
        assert res == [42]
def test_call_passing(self):
class P1:
def m(self, __multicall__, x):
assert len(__multicall__.results) == 1
assert not __multicall__.methods
return 17
class P2:
def m(self, __multicall__, x):
assert __multicall__.results == []
assert __multicall__.methods
return 23
p1 = P1()
p2 = P2()
multicall = MultiCall([p1.m, p2.m], {'x': 23})
assert "23" in repr(multicall)
reslist = multicall.execute()
assert len(reslist) == 2
# ensure reversed order
assert reslist == [23, 17]
def test_keyword_args(self):
def f(x):
return x + 1
class A:
def f(self, x, y):
return x + y
multicall = MultiCall([f, A().f], dict(x=23, y=24))
assert "'x': 23" in repr(multicall)
assert "'y': 24" in repr(multicall)
reslist = multicall.execute()
assert reslist == [24+23, 24]
assert "2 results" in repr(multicall)
def test_keyword_args_with_defaultargs(self):
def f(x, z=1):
return x + z
reslist = MultiCall([f], dict(x=23, y=24)).execute()
assert reslist == [24]
reslist = MultiCall([f], dict(x=23, z=2)).execute()
assert reslist == [25]
def test_tags_call_error(self):
multicall = MultiCall([lambda x: x], {})
pytest.raises(TypeError, "multicall.execute()")
def test_call_subexecute(self):
def m(__multicall__):
subresult = __multicall__.execute()
return subresult + 1
def n():
return 1
call = MultiCall([n, m], {}, firstresult=True)
res = call.execute()
assert res == 2
def test_call_none_is_no_result(self):
def m1():
return 1
def m2():
return None
res = MultiCall([m1, m2], {}, firstresult=True).execute()
assert res == 1
res = MultiCall([m1, m2], {}).execute()
assert res == [1]
class TestHookRelay:
def test_happypath(self):
pm = PluginManager()
class Api:
def hello(self, arg):
"api hook 1"
mcm = HookRelay(hookspecs=Api, pm=pm, prefix="he")
assert hasattr(mcm, 'hello')
assert repr(mcm.hello).find("hello") != -1
class Plugin:
def hello(self, arg):
return arg + 1
pm.register(Plugin())
l = mcm.hello(arg=3)
assert l == [4]
assert not hasattr(mcm, 'world')
def test_only_kwargs(self):
pm = PluginManager()
class Api:
def hello(self, arg):
"api hook 1"
mcm = HookRelay(hookspecs=Api, pm=pm, prefix="he")
pytest.raises(TypeError, "mcm.hello(3)")
def test_firstresult_definition(self):
pm = PluginManager()
class Api:
def hello(self, arg):
"api hook 1"
hello.firstresult = True
mcm = HookRelay(hookspecs=Api, pm=pm, prefix="he")
class Plugin:
def hello(self, arg):
return arg + 1
pm.register(Plugin())
res = mcm.hello(arg=3)
assert res == 4
class TestTracer:
def test_simple(self):
from _pytest.core import TagTracer
rootlogger = TagTracer()
log = rootlogger.get("pytest")
log("hello")
l = []
rootlogger.setwriter(l.append)
log("world")
assert len(l) == 1
assert l[0] == "world [pytest]\n"
sublog = log.get("collection")
sublog("hello")
assert l[1] == "hello [pytest:collection]\n"
def test_indent(self):
from _pytest.core import TagTracer
rootlogger = TagTracer()
log = rootlogger.get("1")
l = []
log.root.setwriter(lambda arg: l.append(arg))
log("hello")
log.root.indent += 1
log("line1")
log("line2")
log.root.indent += 1
log("line3")
log("line4")
log.root.indent -= 1
log("line5")
log.root.indent -= 1
log("last")
assert len(l) == 7
names = [x[:x.rfind(' [')] for x in l]
assert names == ['hello', ' line1', ' line2',
' line3', ' line4', ' line5', 'last']
def test_setprocessor(self):
from _pytest.core import TagTracer
rootlogger = TagTracer()
log = rootlogger.get("1")
log2 = log.get("2")
assert log2.tags == tuple("12")
l = []
rootlogger.setprocessor(tuple("12"), lambda *args: l.append(args))
log("not seen")
log2("seen")
assert len(l) == 1
tags, args = l[0]
assert "1" in tags
assert "2" in tags
assert args == ("seen",)
l2 = []
rootlogger.setprocessor("1:2", lambda *args: l2.append(args))
log2("seen")
tags, args = l2[0]
assert args == ("seen",)
def test_setmyprocessor(self):
from _pytest.core import TagTracer
rootlogger = TagTracer()
log = rootlogger.get("1")
log2 = log.get("2")
l = []
log2.setmyprocessor(lambda *args: l.append(args))
log("not seen")
assert not l
log2(42)
assert len(l) == 1
tags, args = l[0]
assert "1" in tags
assert "2" in tags
assert args == (42,)
def test_default_markers(testdir):
result = testdir.runpytest("--markers")
result.stdout.fnmatch_lines([
"*tryfirst*first*",
"*trylast*last*",
])
|
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime, timedelta
from resource_management import Execute, File
from tempfile import mkstemp
import os
import ambari_simplejson as json  # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
from resource_management.core.source import StaticFile
FILE_TYPE_XML = 'XML'
FILE_TYPE_PROPERTIES = 'PROPERTIES'
FILE_TYPE_JAAS_CONF = 'JAAS_CONF'
# The property name used by the hadoop credential provider
HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME = 'hadoop.security.credential.provider.path'
# Copy JCEKS provider to service specific location and update the ACL
def update_credential_provider_path(config, config_type, dest_provider_path, file_owner, file_group):
"""
Copies the JCEKS file for the specified config from the default location to the given location,
and sets the ACLs for the specified owner and group. Also updates the config type's configuration
hadoop credential store provider with the copied file name.
:param config: configurations['configurations'][config_type]
:param config_type: Like hive-site, oozie-site, etc.
:param dest_provider_path: The full path to the file where the JCEKS provider file is to be copied to.
:param file_owner: File owner
:param file_group: Group
:return: A copy of the config that was modified or the input config itself if nothing was modified.
"""
# Get the path to the provider <config_type>.jceks
if HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME in config:
provider_path = config[HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME]
src_provider_path = provider_path[len('jceks://file'):]
File(dest_provider_path,
owner = file_owner,
group = file_group,
mode = 0640,
content = StaticFile(src_provider_path)
)
# make a copy of the config dictionary since it is read-only
config_copy = config.copy()
# overwrite the provider path with the path specified
config_copy[HADOOP_CREDENTIAL_PROVIDER_PROPERTY_NAME] = 'jceks://file{0}'.format(dest_provider_path)
return config_copy
return config
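# Example usage (a sketch; the config lookup, paths, owner and group below are
# illustrative assumptions):
#
#   hive_site = update_credential_provider_path(
#       configurations['configurations']['hive-site'], 'hive-site',
#       '/etc/hive/conf/hive-site.jceks', 'hive', 'hadoop')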
def validate_security_config_properties(params, configuration_rules):
"""
Generic security configuration validation based on a set of rules and operations
:param params: The structure where the config parameters are held
:param configuration_rules: A structure containing rules and expectations,
Three types of checks are currently supported by this method:
1. value_checks - checks that a certain value must be set
2. empty_checks - checks that the property values must not be empty
3. read_checks - checks that the value represented by the property describes a readable file on the filesystem
:return: Issues found - should be empty if all is good
"""
issues = {}
for config_file, rule_sets in configuration_rules.iteritems():
# Each configuration rule set may have 0 or more of the following rule sets:
# - value_checks
# - empty_checks
# - read_checks
try:
# Each rule set has at least a list of relevant property names to check in some way
# The rule set for the operation of 'value_checks' is expected to be a dictionary of
# property names to expected values
actual_values = params[config_file] if config_file in params else {}
# Process Value Checks
# The rules are expected to be a dictionary of property names to expected values
rules = rule_sets['value_checks'] if 'value_checks' in rule_sets else None
if rules:
for property_name, expected_value in rules.iteritems():
actual_value = get_value(actual_values, property_name, '')
if actual_value != expected_value:
issues[config_file] = "Property %s contains an unexpected value. " \
"Expected/Actual: %s/%s" \
% (property_name, expected_value, actual_value)
# Process Empty Checks
# The rules are expected to be a list of property names that should not have empty values
rules = rule_sets['empty_checks'] if 'empty_checks' in rule_sets else None
if rules:
for property_name in rules:
actual_value = get_value(actual_values, property_name, '')
if not actual_value:
issues[config_file] = "Property %s must exist and must not be empty" % property_name
# Process Read Checks
# The rules are expected to be a list of property names that resolve to files names and must
# exist and be readable
rules = rule_sets['read_checks'] if 'read_checks' in rule_sets else None
if rules:
for property_name in rules:
actual_value = get_value(actual_values, property_name, None)
if not actual_value:
issues[config_file] = "Property %s does not exist" % property_name
elif not os.path.isfile(actual_value):
issues[config_file] = "Property %s points to an inaccessible file - %s" % (property_name, actual_value)
except Exception as e:
issues[config_file] = "Exception occurred while validating the config file\nCauses: %s" % str(e)
return issues
def build_expectations(config_file, value_checks, empty_checks, read_checks):
"""
Helper method used to build the check expectations dict
:return:
"""
configs_expectations = {}
configs_expectations[config_file] = {}
if value_checks:
configs_expectations[config_file]['value_checks'] = value_checks
if empty_checks:
configs_expectations[config_file]['empty_checks'] = empty_checks
if read_checks:
configs_expectations[config_file]['read_checks'] = read_checks
return configs_expectations
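# Example (property names below are illustrative assumptions):
#
#   build_expectations('hive-site',
#                      {'hive.server2.authentication': 'KERBEROS'},
#                      ['hive.server2.authentication.kerberos.principal'],
#                      ['hive.server2.authentication.kerberos.keytab'])
#
# returns:
#
#   {'hive-site': {'value_checks': {'hive.server2.authentication': 'KERBEROS'},
#                  'empty_checks': ['hive.server2.authentication.kerberos.principal'],
#                  'read_checks': ['hive.server2.authentication.kerberos.keytab']}}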
def get_params_from_filesystem(conf_dir, config_files):
"""
  Used to retrieve properties from config files and build a dict.
  The dictionary of configuration files to file types should contain one of the following values:
  'XML'
  'PROPERTIES'
  'JAAS_CONF'
  :param conf_dir: directory where the configuration files sit
  :param config_files: dictionary of configuration file names to (supported) file types
  :return: a dictionary of config-type to a dictionary of key/value pairs for each file
"""
result = {}
from xml.etree import ElementTree as ET
import ConfigParser, StringIO
import re
for config_file, file_type in config_files.iteritems():
file_name, file_ext = os.path.splitext(config_file)
config_filepath = conf_dir + os.sep + config_file
if not os.path.isfile(config_filepath):
continue
if file_type == FILE_TYPE_XML:
configuration = ET.parse(config_filepath)
props = configuration.getroot().getchildren()
config_file_id = file_name if file_name else config_file
result[config_file_id] = {}
for prop in props:
result[config_file_id].update({prop[0].text: prop[1].text})
elif file_type == FILE_TYPE_PROPERTIES:
with open(config_filepath, 'r') as f:
config_string = '[root]\n' + f.read()
ini_fp = StringIO.StringIO(re.sub(r'\\\s*\n', '\\\n ', config_string))
config = ConfigParser.RawConfigParser()
config.readfp(ini_fp)
props = config.items('root')
result[file_name] = {}
for key, value in props:
result[file_name].update({key : value})
elif file_type == FILE_TYPE_JAAS_CONF:
      section_header = re.compile(r'^(\w+)\s+\{\s*$')
      section_data = re.compile(r'^\s*([^ \s\=\}\{]+)\s*=?\s*"?([^ ";]+)"?;?\s*$')
      section_footer = re.compile(r'^\}\s*;?\s*$')
section_name = "root"
result[file_name] = {}
with open(config_filepath, 'r') as f:
for line in f:
if line:
line = line.strip()
m = section_header.search(line)
if m:
section_name = m.group(1)
if section_name not in result[file_name]:
result[file_name][section_name] = {}
else:
m = section_footer.search(line)
if m:
section_name = "root"
else:
m = section_data.search(line)
if m:
result[file_name][section_name][m.group(1)] = m.group(2)
return result
def cached_kinit_executor(kinit_path, exec_user, keytab_file, principal, hostname, temp_dir,
expiration_time=5):
"""
Main cached kinit executor - Uses a temporary file on the FS to cache executions. Each command
will have its own file and only one entry (last successful execution) will be stored
"""
key = str(hash("%s|%s" % (principal, keytab_file)))
filename = key + "_tmp.txt"
file_path = temp_dir + os.sep + "kinit_executor_cache"
output = None
# First execution scenario dir file existence check
if not os.path.exists(file_path):
os.makedirs(file_path)
file_path += os.sep + filename
# If the file does not exist create before read
if not os.path.isfile(file_path):
with open(file_path, 'w+') as new_file:
new_file.write("{}")
try:
with open(file_path, 'r') as cache_file:
output = json.load(cache_file)
except:
    # In the extraordinary case that the temporary file gets corrupted, reset the cache to avoid an error loop
with open(file_path, 'w+') as cache_file:
cache_file.write("{}")
if (not output) or (key not in output) or ("last_successful_execution" not in output[key]):
new_cached_exec(key, file_path, kinit_path, temp_dir, exec_user, keytab_file, principal, hostname)
else:
last_run_time = output[key]["last_successful_execution"]
now = datetime.now()
if (now - datetime.strptime(last_run_time, "%Y-%m-%d %H:%M:%S.%f") > timedelta(minutes=expiration_time)):
new_cached_exec(key, file_path, kinit_path, temp_dir, exec_user, keytab_file, principal, hostname)
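# Example usage (a sketch; the kinit path, keytab, principal and temp dir are
# illustrative assumptions):
#
#   cached_kinit_executor('/usr/bin/kinit', 'hdfs',
#                         '/etc/security/keytabs/hdfs.headless.keytab',
#                         'hdfs-cluster@EXAMPLE.COM', 'host1.example.com',
#                         '/var/lib/ambari-agent/tmp')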
def new_cached_exec(key, file_path, kinit_path, temp_dir, exec_user, keytab_file, principal, hostname):
"""
Entry point of an actual execution - triggered when timeout on the cache expired or on fresh execution
"""
now = datetime.now()
temp_kinit_cache_fd, temp_kinit_cache_filename = mkstemp(dir=temp_dir)
command = "%s -c %s -kt %s %s" % \
(kinit_path, temp_kinit_cache_filename, keytab_file,
principal.replace("_HOST", hostname))
os.close(temp_kinit_cache_fd)
try:
# Ensure the proper user owns this file
File(temp_kinit_cache_filename, owner=exec_user, mode=0600)
# Execute the kinit
Execute(command, user=exec_user)
with open(file_path, 'w+') as cache_file:
result = {key: {"last_successful_execution": str(now)}}
json.dump(result, cache_file)
finally:
File(temp_kinit_cache_filename, action='delete')
def get_value(values, property_path, default_value):
names = property_path.split('/')
current_dict = values
for name in names:
if name in current_dict:
current_dict = current_dict[name]
else:
return default_value
return current_dict
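# Examples: get_value({'a': {'b': 1}}, 'a/b', None) returns 1,
#           get_value({'a': {'b': 1}}, 'a/c', 'missing') returns 'missing'.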
|
|
from collections import OrderedDict as dict
import pandas as pd
import numpy as np
__all__ = ['CatalogueGroup',
'Catalogue']
class CatalogueGroup(object):
""" CatalogueGroup class.
This is basically a dictionary used by Catalogue class. The idea is
to be able to store different types of data inside the main catalogue
instance.
Since it's a Sorted Dictionary all the data remain in order as they
are inserted, and the performances are better than standard lists.
CatalogueGroup class store similar items or relationship. This collections of items
could be seen as a group. The group could be defined by the type
of the class, name, tag or anything. It's will depend on the needs
and this will be managed for the main catalog class.
This class will also manage the callbacks so the catalogue will be able
to know whether an element has been added, removed or modified.
Each CatalogueGroup has a (name) and a function (callback) when action is
performed inside the dictionary.
The function callback need to be implemented as follows:
def callback_name(id, key, option):
Where:
name: name of the current GRoup or dictionary
key: the item that has been, added, removed or modifed
option: action performed added, removed or modifed
"""
ADDED = 0
REMOVED = 1
MODIFIED = 2
@property
def items(self):
return self._items
@property
def name(self):
return self._name
def __init__(self, name, callback=None):
""" Initialize all the variables
"""
self._name = name
self._items = dict()
self._callback = callback
    def _notify(self, key, option):
        """Notify the callback that an element has been added,
        removed or modified.
        """
if self._callback is not None:
self._callback(self._name, key, option)
def __setitem__(self, key, value):
"""Add a new items into the items list.
"""
if key in self._items:
option = CatalogueGroup.MODIFIED
else:
option = CatalogueGroup.ADDED
#Set the current item (added or modified)
self._items[key] = value
# Finally callback to the function
self._notify(key, option)
def __delitem__(self, key):
""" Delete current item for the list
"""
del self._items[key]
# Finally notify to the callback
self._notify(key, CatalogueGroup.REMOVED)
def __getitem__(self, key):
"""Retrieve the items with the given key
"""
return self._items[key]
def __contains__(self, key):
"""Returns whether the key is in items or not.
"""
return key in self._items
    def __iter__(self):
        """Iterate over the group's items using loop statements.
        This is usually more efficient in terms of memory.
        """
for item in self._items:
yield item
    def __str__(self):
        """ Build a user-friendly string representation of the
        catalogue group.
        """
result = ""
for index, item in enumerate(self._items):
result += " item {} <{}> : {} \n".format(index, item, str(self._items[item]))
return result
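# A minimal sketch of the callback protocol described in the class docstring
# (the callback name and printed text are illustrative; nothing here runs on
# import):
#
#   def on_change(name, key, option):
#       labels = {CatalogueGroup.ADDED: 'added',
#                 CatalogueGroup.REMOVED: 'removed',
#                 CatalogueGroup.MODIFIED: 'modified'}
#       print('group {}: {} was {}'.format(name, key, labels[option]))
#
#   group = CatalogueGroup('Entity', callback=on_change)
#   group['entity01'] = 'entity01'  # group Entity: entity01 was added
#   group['entity01'] = 'other'     # group Entity: entity01 was modified
#   del group['entity01']           # group Entity: entity01 was removed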
class Catalogue(object):
""" Catalogue Class
This class will be used to store different types of data and
bind them. The class will manage different groups or dictionaries
depending on the types to be stored.
Catalogue works as a database. For this particular reason it will need
columns and indexes in order to works properly. In the constructor
we have to pass he index name of the group that will be used to
indexing the data.
ej. You have three groups: Entity, Component_type2 and Component_type2.
(The same catalog should be shared between the three groups)
The index will be "Entity", this means we can bind and Entity with a
component_typeXX. Then we can use the pandas dataframe to pick
the elements you need.
>> #Get the dataframe from the catalogue
>> df = Catalogue.dataframe
>> # Get the current entity-components
>> entity_component = df.loc[:,component_types].dropna(axis=0)
>> print(df.head())
Component_type1 Component_type2
entity02 NaN NaN
entity03 Transform03 NaN
entity05 NaN NaN
The index is given in the constructor of the class, where index is
the name of an existing group in the catalogue.
However, this class can be used for any other purpose, such as managing
resources, references or any map-reduce system that requires a hash
table to store data and relate pieces of data to each other.
Basically, the Catalogue will store:
- Entities created (depending on the type)
- Components created (depending on the type)
- Systems created (depending on the type)
- Mappings between entities, components and systems
The mapping between items is performed by calling the bind function of
the catalogue, e.g. to link entities and components.
To show the data stored in the catalogue, and the bindings, you can use
either of two ways.
# Create the main Catalogue
catalogue = Catalogue(index = catalogue_index)
print(catalogue)
print(repr(catalogue.head()))
or
print(catalogue.dataframe.head(10))
Basic example
# Test
catalogue_index = "Entity"
catalogue_col1 = "Transform"
catalogue_col2 = "Position"
catalogue_col3 = "Health"
catalogue_col4 = "Renderable"
entities = ["entity01", "entity02", "entity03", "entity04","entity05"]
components = [catalogue_col1, catalogue_col2, catalogue_col3,catalogue_col4]
entity01_comp = ["Transform01", None , None , "Renderable01" ]
entity02_comp = ["Transform02", None , "Health02" , "Renderable02" ]
entity03_comp = ["Transform03", "Position03", None , None ]
entity04_comp = ["Transform04", "Position04", "Health04" , None ]
entity05_comp = ["Transform05", None , "Health05" , "Renderable05" ]
entities_comp = [entity01_comp, entity02_comp, entity03_comp, entity04_comp, entity05_comp ]
# Create the main Catalogue
catalogue = Catalogue(index = catalogue_index)
# Add all the entities into the catalogue
for index, entity in enumerate(entities):
# Add current entity
catalogue[catalogue_index][entity] = entity
# Add component for the current entity
for cindex, ctype in enumerate(components):
comp_instance = entities_comp[index][cindex]
if comp_instance is not None:
# Add current component to the catalogue
catalogue[ctype][comp_instance] = comp_instance
# Bind the current comp with it's entity
catalogue.bind(entity, ctype, comp_instance)
print(catalogue)
print(catalogue.dataframe.head(10))
Output:
GROUP Entity:
item 0 <entity01> : entity01
item 1 <entity02> : entity02
item 2 <entity03> : entity03
item 3 <entity04> : entity04
item 4 <entity05> : entity05
GROUP Transform:
item 0 <Transform01> : Transform01
item 1 <Transform02> : Transform02
...
item 1 <Health04> : Health04
item 2 <Health05> : Health05
GROUP Position:
item 0 <Position03> : Position03
item 1 <Position04> : Position04
Dataframe:
Transform Renderable Health Position
entity01 Transform01 Renderable01 NaN NaN
entity02 Transform02 Renderable02 Health02 NaN
entity03 Transform03 NaN NaN Position03
entity04 Transform04 NaN Health04 Position04
entity05 Transform05 Renderable05 Health05 NaN
# Delete indexed entities
del catalogue[catalogue_index]["entity01"]
del catalogue[catalogue_index]["entity04"]
print(catalogue.dataframe.head(10))
# Delete components
del catalogue[catalogue_col1]["Transform02"]
del catalogue[catalogue_col4]["Renderable05"]
del catalogue[catalogue_col2]["Position04"]
Transform Renderable Health Position
entity02 NaN NaN NaN NaN
entity03 Transform03 NaN NaN Position03
entity05 NaN NaN NaN NaN
"""
@property
def dataframe(self):
return self._dataframe
@property
def index(self):
return self._index
@index.setter
def index(self, value):
self._index = value
def __init__(self, index):
"""Initialize variables and objects.
"""
# Create a catalogue with all the entities and components
self._groups = dict()
self._items = dict()
self._index = index
# Create the dataframe that maps entities to components
self._dataframe = pd.DataFrame()
def __setitem__(self, key, value):
""" Catlogue doesn't allow to create new items manually
"""
pass
def __getitem__(self, key):
"""Retrieve the items with the given key
"""
# Create the group on first access if it does not exist yet.
if key not in self._groups:
self._groups[key] = CatalogueGroup(key, self._callback_group)
return self._groups[key]
def __contains__(self, key):
"""Returns whether the key is in items or not.
"""
return key in self._groups
def __iter__(self):
"""Retrieve the items elements using loops statements.
This usually are more efficent in terms of memory
"""
for group in self._groups:
yield group
def __getattr__(self, key):
""" Fall back to the pandas dataframe for unknown attributes.
__getattr__ is only invoked after normal attribute lookup fails,
so delegating directly to the dataframe is safe here.
"""
return getattr(self._dataframe, key)
def __str__(self):
""" Build the default function to represent the catalogue
user friendly
"""
result = ""
for key, value in self._groups.items():
result += "GROUP {}:\n".format(key)
result += str(self._groups[key])
return result
def __repr__(self):
""" Prepare the catalogue to be represented as an object that can
be saved and loaded later on. Returning a string also avoids the
TypeError raised when __repr__ returns None.
"""
return "{}(index={!r})".format(self.__class__.__name__, self._index)
def _item_added(self, key, item):
""" Iten has been added
"""
#print("Element added in {}: {}".format(id,key))
# Add current iten into the all list
self._items[item] = self[key][item]
def _item_removed(self, key, item):
""" Item has been removed from the Dict
"""
#print("Element removed in {}: {}".format(id,key))
if key == self._index and key in self._dataframe.index:
# Remove the current row
self._dataframe.drop(item, inplace=True)
else:
# Remove the element from the curren col
self.unbind(key, item, None)
# Remove the item from the full list
del self._items[item]
def _item_modified(self, key, item):
""" Item has been modified
"""
# Replace the item
self._items[item] = self[key][item]
def _callback_group(self, key, item, option):
""" Function call-call back when new element is
inserted into a list.
"""
if option == CatalogueGroup.ADDED:
# Create new mapping based on the added item
self._item_added(key, item)
elif option == CatalogueGroup.REMOVED:
# Create new mapping based on the added item
self._item_removed(key, item)
else:
# Create new mapping based on the added item
self._item_modified(key, item)
def get(self, item):
""" This function will look for the current key item
inside all the items stored
"""
return self._items[item]
def bind(self, index, column, item):
""" This function will map the current item with the
given index and col.
"""
# Bind the current index, col using dataframe
if self._dataframe.empty:
# Create the dataframe with the first binding
self._dataframe = pd.DataFrame([item], index = [index], columns=[column])
else:
# Set the cell at the given index and column
self._dataframe.loc[index,column] = item
def unbind(self, index, column, item):
""" This function will unbind the current key from the catalogue.
"""
if column in self._dataframe.columns and index in self._dataframe.index:
# Remove the element from the curren col
self._dataframe.loc[index,column] = np.NaN
|
|
"""distutils.dir_util
Utility functions for manipulating directories and directory trees."""
import os, sys
import errno
from distutils.errors import DistutilsFileError, DistutilsInternalError
from distutils import log
# cache for mkpath() -- in addition to cheapening redundant calls,
# eliminates redundant "creating /foo/bar/baz" messages in dry-run mode
_path_created = {}
# I don't use os.makedirs because a) it's new to Python 1.5.2, and
# b) it blows up if the directory already exists (I want to silently
# succeed in that case).
def mkpath(name, mode=0o777, verbose=1, dry_run=0):
"""Create a directory and any missing ancestor directories.
If the directory already exists (or if 'name' is the empty string, which
means the current directory, which of course exists), then do nothing.
Raise DistutilsFileError if unable to create some directory along the way
(eg. some sub-path exists, but is a file rather than a directory).
If 'verbose' is true, print a one-line summary of each mkdir to stdout.
Return the list of directories actually created.
"""
global _path_created
# Detect a common bug -- name is None
if not isinstance(name, str):
raise DistutilsInternalError(
"mkpath: 'name' must be a string (got %r)" % (name,))
# XXX what's the better way to handle verbosity? print as we create
# each directory in the path (the current behaviour), or only announce
# the creation of the whole path? (quite easy to do the latter since
# we're not using a recursive algorithm)
name = os.path.normpath(name)
created_dirs = []
if os.path.isdir(name) or name == '':
return created_dirs
if _path_created.get(os.path.abspath(name)):
return created_dirs
(head, tail) = os.path.split(name)
tails = [tail] # stack of lone dirs to create
while head and tail and not os.path.isdir(head):
(head, tail) = os.path.split(head)
tails.insert(0, tail) # push next higher dir onto stack
# now 'head' contains the deepest directory that already exists
# (that is, the child of 'head' in 'name' is the highest directory
# that does *not* exist)
for d in tails:
#print "head = %s, d = %s: " % (head, d),
head = os.path.join(head, d)
abs_head = os.path.abspath(head)
if _path_created.get(abs_head):
continue
if verbose >= 1:
log.info("creating %s", head)
if not dry_run:
try:
os.mkdir(head, mode)
except OSError as exc:
if not (exc.errno == errno.EEXIST and os.path.isdir(head)):
raise DistutilsFileError(
"could not create '%s': %s" % (head, exc.args[-1]))
created_dirs.append(head)
_path_created[abs_head] = 1
return created_dirs
def create_tree(base_dir, files, mode=0o777, verbose=1, dry_run=0):
"""Create all the empty directories under 'base_dir' needed to put 'files'
there.
'base_dir' is just the name of a directory which doesn't necessarily
exist yet; 'files' is a list of filenames to be interpreted relative to
'base_dir'. 'base_dir' + the directory portion of every file in 'files'
will be created if it doesn't already exist. 'mode', 'verbose' and
'dry_run' flags are as for 'mkpath()'.
"""
# First get the list of directories to create
need_dir = set()
for file in files:
need_dir.add(os.path.join(base_dir, os.path.dirname(file)))
# Now create them
for dir in sorted(need_dir):
mkpath(dir, mode, verbose=verbose, dry_run=dry_run)
def copy_tree(src, dst, preserve_mode=1, preserve_times=1,
preserve_symlinks=0, update=0, verbose=1, dry_run=0):
"""Copy an entire directory tree 'src' to a new location 'dst'.
Both 'src' and 'dst' must be directory names. If 'src' is not a
directory, raise DistutilsFileError. If 'dst' does not exist, it is
created with 'mkpath()'. The end result of the copy is that every
file in 'src' is copied to 'dst', and directories under 'src' are
recursively copied to 'dst'. Return the list of files that were
copied or might have been copied, using their output name. The
return value is unaffected by 'update' or 'dry_run': it is simply
the list of all files under 'src', with the names changed to be
under 'dst'.
'preserve_mode' and 'preserve_times' are the same as for
'copy_file'; note that they only apply to regular files, not to
directories. If 'preserve_symlinks' is true, symlinks will be
copied as symlinks (on platforms that support them!); otherwise
(the default), the destination of the symlink will be copied.
'update' and 'verbose' are the same as for 'copy_file'.
"""
from distutils.file_util import copy_file
if not dry_run and not os.path.isdir(src):
raise DistutilsFileError(
"cannot copy tree '%s': not a directory" % src)
try:
names = os.listdir(src)
except OSError as e:
if dry_run:
names = []
else:
raise DistutilsFileError(
"error listing files in '%s': %s" % (src, e.strerror))
if not dry_run:
mkpath(dst, verbose=verbose)
outputs = []
for n in names:
src_name = os.path.join(src, n)
dst_name = os.path.join(dst, n)
if n.startswith('.nfs'):
# skip NFS rename files
continue
if preserve_symlinks and os.path.islink(src_name):
link_dest = os.readlink(src_name)
if verbose >= 1:
log.info("linking %s -> %s", dst_name, link_dest)
if not dry_run:
os.symlink(link_dest, dst_name)
outputs.append(dst_name)
elif os.path.isdir(src_name):
outputs.extend(
copy_tree(src_name, dst_name, preserve_mode,
preserve_times, preserve_symlinks, update,
verbose=verbose, dry_run=dry_run))
else:
copy_file(src_name, dst_name, preserve_mode,
preserve_times, update, verbose=verbose,
dry_run=dry_run)
outputs.append(dst_name)
return outputs
def _build_cmdtuple(path, cmdtuples):
"""Helper for remove_tree()."""
for f in os.listdir(path):
real_f = os.path.join(path,f)
if os.path.isdir(real_f) and not os.path.islink(real_f):
_build_cmdtuple(real_f, cmdtuples)
else:
cmdtuples.append((os.remove, real_f))
cmdtuples.append((os.rmdir, path))
def remove_tree(directory, verbose=1, dry_run=0):
"""Recursively remove an entire directory tree.
Any errors are ignored (apart from being reported to stdout if 'verbose'
is true).
"""
from distutils.util import grok_environment_error
global _path_created
if verbose >= 1:
log.info("removing '%s' (and everything under it)", directory)
if dry_run:
return
cmdtuples = []
_build_cmdtuple(directory, cmdtuples)
for cmd in cmdtuples:
try:
cmd[0](cmd[1])
# remove dir from cache if it's already there
abspath = os.path.abspath(cmd[1])
if abspath in _path_created:
del _path_created[abspath]
except OSError as exc:
log.warn(grok_environment_error(
exc, "error removing %s: " % directory))
def ensure_relative(path):
"""Take the full path 'path', and make it a relative path.
This is useful to make 'path' the second argument to os.path.join().
"""
drive, path = os.path.splitdrive(path)
if path[0:1] == os.sep:
path = drive + path[1:]
return path
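# A minimal usage sketch for the helpers above (the /tmp paths are
# illustrative; run only somewhere it is safe to create and delete
# directories):
#
#   mkpath('/tmp/demo/a/b/c')            # creates every missing ancestor
#   create_tree('/tmp/demo/out', ['pkg/mod.py', 'docs/index.rst'])
#   copied = copy_tree('/tmp/demo/a', '/tmp/demo/copy', verbose=0)
#   remove_tree('/tmp/demo', verbose=0)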
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# name: website.py
# author: Harold Bradley III
# email: harold@bradleystudio.net
# date: 11/11/2015
#
# pylint: disable=line-too-long
"""
ww.website
~~~~~~~~~~
A class to manage websites
"""
from __future__ import absolute_import, print_function
from subprocess import check_output, CalledProcessError
try:
from ext_pylib.files import Dir, File, TemplateFile
from ext_pylib.input import prompt
except ImportError:
raise ImportError('ext_pylib must be installed to run ww')
from . import settings as s
from .htaccess import Htaccess
from .vhost import Vhost
from .website_domain import WebsiteDomain
def localhost(function):
"""A python decorator that creates a temporary host entry before
the function is called and removes it after it is completed."""
def function_wrapper(self, *args, **kwargs):
"""Adds a hostentry for self.domain to resolve to localhost."""
print('Adding temporary host entry.')
remove_entry = True
cmd = "echo '127.0.0.1 " + self.domain + "' | cat >> /etc/hosts"
try:
check_output(cmd, shell=True)
except CalledProcessError:
print('[WARN] Error adding temporary host entry')
remove_entry = False
result = function(self, *args, **kwargs)
if remove_entry:
print('Removing temporary host entry.')
cmd = "sed -i '/^127\.0\.0\.1 " + self.domain + "$/d' /etc/hosts" # pylint: disable=anomalous-backslash-in-string
if check_output(cmd, shell=True) != 0:
print('[WARN] Error removing temporary host entry.')
return result
return function_wrapper
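# A minimal sketch of applying the decorator above; the Checker class is
# illustrative, and any method on an object with a 'domain' attribute
# qualifies:
#
#   class Checker(object):
#       def __init__(self, domain):
#           self.domain = domain
#
#       @localhost
#       def probe(self):
#           # self.domain resolves to 127.0.0.1 while this method runs
#           return check_output(['curl', '-s', 'http://' + str(self.domain)])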
def merge_atts(atts, new_atts):
"""Merges two dictionaries with the second overwriting the corresponding
values of the first and returns the result."""
if not atts:
return new_atts
if not new_atts:
return atts
for k in new_atts:
if (k in atts and isinstance(atts[k], dict)
and isinstance(new_atts[k], dict)):
atts[k] = merge_atts(atts[k], new_atts[k])
else:
atts[k] = new_atts[k]
return atts
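# A minimal usage sketch for merge_atts (the values are illustrative):
#
#   defaults = {'root': {'path': '/var/www', 'perms': 0o775}}
#   overrides = {'root': {'perms': 0o755}, 'extra': True}
#   merge_atts(defaults, overrides)
#   # -> {'root': {'path': '/var/www', 'perms': 0o755}, 'extra': True}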
class Website(object): # pylint: disable=too-many-instance-attributes
"""A class that describes a generic website with the following properties:
domain Website domain
htdocs Website root directory
assets Website assets directory
log Website log directory
access_log Website access log file
error_log Website error log file
vhost Apache vhost config file
htaccess Website root htaccess file
"""
def __init__(self, domain, atts=None):
"""Initializes a new Website instance."""
atts = atts or {}
print('[*] SSL not yet implemented in Website class.')
self.domain = WebsiteDomain(domain)
self.dirs = {}
self.files = {}
default_atts = {
'root' : {
'path' : s.WWW_DIR + self.domain,
'perms' : 0o775,
'owner' : s.WWW_ADMIN,
'group' : s.WWW_ADMIN,
},
'htdocs' : {
'path' : s.WWW_DIR + self.domain + '/htdocs/',
'perms' : 0o775,
'owner' : s.WWW_USR,
'group' : s.WWW_USR,
},
'assets' : {
'path' : s.WWW_DIR + self.domain + '/assets/',
'perms' : 0o775,
'owner' : 'root',
'group' : s.WWW_ADMIN,
},
'log' : {
'path' : s.WWW_DIR + self.domain + '/log/',
'perms' : 0o770,
'owner' : 'root',
'group' : s.WWW_ADMIN,
},
'access_log' : {
'path' : s.WWW_DIR + self.domain + '/log/access_log',
'perms' : 0o750,
'owner' : 'root',
'group' : s.WWW_ADMIN,
},
'error_log' : {
'path' : s.WWW_DIR + self.domain + '/log/error_log',
'perms' : 0o750,
'owner' : 'root',
'group' : s.WWW_ADMIN,
},
'vhost' : {
'path' : s.VHOST_PATH + self.domain + '.conf',
'perms' : 0o644,
'owner' : 'root',
'group' : 'root',
'domain': self.domain,
},
'htaccess' : {
'path' : s.WWW_DIR + self.domain + '/htdocs/.htaccess',
'perms' : 0o664,
'owner' : s.WWW_USR,
'group' : s.WWW_USR,
'sections' : [
{'identifier' : 'h5g', 'path' : s.HTA_5G_TEMPLATE},
]
},
}
atts = merge_atts(default_atts, atts)
# Initialize Vhost first, in order to possibly parse atts from it
self.vhost = Vhost(atts['vhost'])
if self.vhost.exists() and prompt(str(self.vhost) + ' already exists.\n' + \
'Use existing vhost configuration for htdocs directory and log file location?'):
atts = merge_atts(atts, self.vhost.parse())
else: # Otherwise, use the template
self.vhost.data = TemplateFile({'path' : s.VHOST_TEMPLATE}).apply_using({
'#DOMAIN#' : self.domain.name,
'#HTDOCS#' : s.WWW_DIR + self.domain + '/htdocs/',
'#EMAIL#' : s.SITE_ADMIN_EMAIL,
'#ACCESS_LOG#' : s.WWW_DIR + self.domain + '/log/access_log',
'#ERROR_LOG#' : s.WWW_DIR + self.domain + '/log/error_log',
})
# Initialize Directories
self.root = Dir(atts['root'])
self.htdocs = Dir(atts['htdocs'])
self.assets = Dir(atts['assets'])
self.log = Dir(atts['log'])
# Initialize Files
self.htaccess = Htaccess(atts['htaccess'])
self.access_log = File(atts['access_log'])
self.error_log = File(atts['error_log'])
def __str__(self):
"""Returns a string with relevant instance information."""
string = '\n\n----------------------------------------------------------'
string += '\n - Website -'
string += '\n----------------------------------------------------------'
string += '\n domain: ' + str(self.domain)
string += '\n vhost config: ' + str(self.vhost)
string += '\n htdocs directory: ' + str(self.htdocs)
string += '\n assets: ' + str(self.assets)
string += '\n log: ' + str(self.log)
string += '\n----------------------------------------------------------\n'
return string
def __repr__(self):
"""Returns a python string that evaluates to the object instance."""
return self.__class__.__name__ + "('" + \
self.domain.name + "', {" + \
"'htdocs' : " + str(self.htdocs.get_atts()) + \
", 'assets' : " + str(self.assets.get_atts()) + \
", 'log' : " + str(self.log.get_atts()) + \
", 'access_log' : " + str(self.access_log.get_atts()) + \
", 'error_log' : " + str(self.error_log.get_atts()) + \
", 'htaccess' : " + str(self.htaccess.get_atts()) + \
", 'vhost' : " + str(self.vhost.get_atts()) + "})"
def install(self):
"""Installs website to server"""
# Check if domain is already installed
if self.is_installed():
print(self.domain + ' is already installed.')
return
self.create_directories()
self.create_files()
self.vhost.create()
self.domain.set_ip()
print(str(self))
print('Installation complete.')
def create_directories(self):
"""Creates website directories."""
self.root.create()
self.htdocs.create()
self.assets.create()
self.log.create()
def create_files(self):
"""Creates website log files and htaccess file."""
self.access_log.create()
self.error_log.create()
self.htaccess.create()
def remove(self, ask=True):
"""Removes website from server"""
self.vhost.disable(ask)
self.vhost.remove(ask)
self.access_log.remove(ask)
self.error_log.remove(ask)
self.htaccess.remove(ask)
self.log.remove(ask)
self.htdocs.remove(ask)
self.assets.remove(ask)
self.root.remove(ask)
self.domain.set_ip()
def pack(self, tarball=None):
"""Packs the htdocs, assets, and vhost files into a tarball."""
import tarfile
if not tarball:
tarball = '/tmp/packed-' + self.domain.name + '.tar.gz'
with tarfile.open(tarball, 'w:gz') as tar:
tar.add(self.htdocs.path, arcname='htdocs')
tar.add(self.assets.path, arcname='assets')
tar.add(self.vhost.path, arcname='vhost.conf')
def unpack(self, tarball=None, location=None, use_vhost=True):
"""Unpacks the previously packed htdocs, assets, and vhost files."""
import tarfile
if not tarball:
tarball = '/tmp/packed-' + self.domain.name + '.tar.gz'
if not location:
location = '/tmp/unpacked-' + self.domain.name
with tarfile.open(tarball, 'r') as tar:
tar.extractall(location)
if use_vhost:
self.create_directories()
self.create_files()
self.vhost.create(File({'path' : location + '/vhost.conf'}).read())
else:
self.install()
self.htdocs.fill(Dir({'path' : location + '/htdocs/'}))
self.assets.fill(Dir({'path' : location + '/assets/'}))
def migrate(self, new_domain):
"""Migrates a website from one domain to another.
This is essentially making an exact copy of the website using a
different domain."""
self.domain.name = new_domain
# if not old_website:
# old_website = self.existing
# make sure there's nothing conflicting
# install() new
# this overwrites vhostconf...
# for dirs and files in old copy
# this includes vhost...
def verify(self, repair=False):
"""Verifies website installation"""
result = all([self.vhost.verify(repair),
self.access_log.verify(repair),
self.error_log.verify(repair),
self.htaccess.verify(repair),
self.log.verify(repair),
self.htdocs.verify(repair),
self.assets.verify(repair),
self.root.verify(repair),
self.domain.verify(repair)])
print(self)
return result
def repair(self):
"""Repairs website installation"""
print('Repairing ' + self.domain + '...')
if self.verify(True):
print('[OK] Repair completed successfully.')
else:
print('[ERROR] Repair resulted in errors.')
def is_installed(self):
"""Returns true if vhost is enabled"""
return self.vhost.is_enabled()
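# A minimal usage sketch for the Website class (the domain is illustrative;
# install/remove require appropriate privileges and configured settings):
#
#   site = Website('example.com')
#   site.install()          # create directories, files and the vhost
#   site.verify()           # report on the installation
#   site.pack()             # tarball at /tmp/packed-example.com.tar.gz
#   site.remove(ask=False)  # tear everything down again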
|
|
# -*- coding: utf-8 -*-
import inspect
import functools
import marshmallow as ma
from marshmallow import validate, fields
from sqlalchemy.dialects import postgresql, mysql, mssql
import sqlalchemy as sa
from .exceptions import ModelConversionError
from .fields import Related
def _is_field(value):
return (
isinstance(value, type) and
issubclass(value, fields.Field)
)
def _has_default(column):
return (
column.default is not None or
column.server_default is not None or
_is_auto_increment(column)
)
def _is_auto_increment(column):
return (
column.table is not None and
column is column.table._autoincrement_column
)
def _postgres_array_factory(converter, data_type):
return functools.partial(
fields.List,
converter._get_field_class_for_data_type(data_type.item_type),
)
def _should_exclude_field(column, fields=None, exclude=None):
if fields and column.key not in fields:
return True
if exclude and column.key in exclude:
return True
return False
class ModelConverter(object):
"""Class that converts a SQLAlchemy model into a dictionary of corresponding
marshmallow `Fields <marshmallow.fields.Field>`.
"""
SQLA_TYPE_MAPPING = {
sa.Enum: fields.Field,
postgresql.BIT: fields.Integer,
postgresql.UUID: fields.UUID,
postgresql.MACADDR: fields.String,
postgresql.INET: fields.String,
postgresql.JSON: fields.Raw,
postgresql.JSONB: fields.Raw,
postgresql.HSTORE: fields.Raw,
postgresql.ARRAY: _postgres_array_factory,
mysql.BIT: fields.Integer,
mysql.YEAR: fields.Integer,
mysql.SET: fields.List,
mysql.ENUM: fields.Field,
mssql.BIT: fields.Integer,
}
DIRECTION_MAPPING = {
'MANYTOONE': False,
'MANYTOMANY': True,
'ONETOMANY': True,
}
def __init__(self, schema_cls=None):
self.schema_cls = schema_cls
@property
def type_mapping(self):
if self.schema_cls:
return self.schema_cls.TYPE_MAPPING
else:
return ma.Schema.TYPE_MAPPING
def fields_for_model(self, model, include_fk=False, fields=None, exclude=None, dict_cls=dict):
result = dict_cls()
for prop in model.__mapper__.iterate_properties:
if _should_exclude_field(prop, fields=fields, exclude=exclude):
continue
if hasattr(prop, 'columns'):
if not include_fk:
# Skip the property only when every one of its columns has a
# foreign key; an overriding column without a foreign key keeps
# the property included.
for column in prop.columns:
if not column.foreign_keys:
break
else:
continue
field = self.property2field(prop)
if field:
result[prop.key] = field
return result
def fields_for_table(self, table, include_fk=False, fields=None, exclude=None, dict_cls=dict):
result = dict_cls()
for column in table.columns:
if _should_exclude_field(column, fields=fields, exclude=exclude):
continue
if not include_fk and column.foreign_keys:
continue
field = self.column2field(column)
if field:
result[column.key] = field
return result
def property2field(self, prop, instance=True, **kwargs):
field_class = self._get_field_class_for_property(prop)
if not instance:
return field_class
field_kwargs = self._get_field_kwargs_for_property(prop)
field_kwargs.update(kwargs)
ret = field_class(**field_kwargs)
if (
hasattr(prop, 'direction') and
self.DIRECTION_MAPPING[prop.direction.name] and
prop.uselist is True
):
ret = fields.List(ret, **kwargs)
return ret
def column2field(self, column, instance=True, **kwargs):
field_class = self._get_field_class_for_column(column)
if not instance:
return field_class
field_kwargs = self.get_base_kwargs()
self._add_column_kwargs(field_kwargs, column)
field_kwargs.update(kwargs)
return field_class(**field_kwargs)
def field_for(self, model, property_name, **kwargs):
prop = model.__mapper__.get_property(property_name)
return self.property2field(prop, **kwargs)
def _get_field_class_for_column(self, column):
return self._get_field_class_for_data_type(column.type)
def _get_field_class_for_data_type(self, data_type):
field_cls = None
types = inspect.getmro(type(data_type))
# First search for a field class from self.SQLA_TYPE_MAPPING
for col_type in types:
if col_type in self.SQLA_TYPE_MAPPING:
field_cls = self.SQLA_TYPE_MAPPING[col_type]
if callable(field_cls) and not _is_field(field_cls):
field_cls = field_cls(self, data_type)
break
else:
# Try to find a field class based on the column's python_type
try:
python_type = data_type.python_type
except NotImplementedError:
python_type = None
if python_type in self.type_mapping:
field_cls = self.type_mapping[python_type]
else:
raise ModelConversionError(
'Could not find field converter for column of type {0}.'.format(types[0]))
return field_cls
def _get_field_class_for_property(self, prop):
if hasattr(prop, 'direction'):
field_cls = Related
else:
column = prop.columns[0]
field_cls = self._get_field_class_for_column(column)
return field_cls
def _get_field_kwargs_for_property(self, prop):
kwargs = self.get_base_kwargs()
if hasattr(prop, 'columns'):
column = prop.columns[0]
self._add_column_kwargs(kwargs, column)
if hasattr(prop, 'direction'): # Relationship property
self._add_relationship_kwargs(kwargs, prop)
if getattr(prop, 'doc', None): # Useful for documentation generation
kwargs['description'] = prop.doc
return kwargs
def _add_column_kwargs(self, kwargs, column):
"""Add keyword arguments to kwargs (in-place) based on the passed in
`Column <sqlalchemy.schema.Column>`.
"""
if column.nullable:
kwargs['allow_none'] = True
kwargs['required'] = not column.nullable and not _has_default(column)
if hasattr(column.type, 'enums'):
kwargs['validate'].append(validate.OneOf(choices=column.type.enums))
# Add a length validator if a max length is set on the column
if hasattr(column.type, 'length'):
kwargs['validate'].append(validate.Length(max=column.type.length))
if hasattr(column.type, 'scale'):
kwargs['places'] = getattr(column.type, 'scale', None)
def _add_relationship_kwargs(self, kwargs, prop):
"""Add keyword arguments to kwargs (in-place) based on the passed in
relationship `Property`.
"""
nullable = True
for pair in prop.local_remote_pairs:
if not pair[0].nullable:
if prop.uselist is True:
nullable = False
break
kwargs.update({
'allow_none': nullable,
'required': not nullable,
})
def get_base_kwargs(self):
return {
'validate': []
}
default_converter = ModelConverter()
fields_for_model = default_converter.fields_for_model
"""Generate a dict of field_name: `marshmallow.fields.Field` pairs for the
given model.
:param model: The SQLAlchemy model
:param bool include_fk: Whether to include foreign key fields in the output.
:return: dict of field_name: Field instance pairs
"""
property2field = default_converter.property2field
"""Convert a SQLAlchemy `Property` to a field instance or class.
:param Property prop: SQLAlchemy Property.
:param bool instance: If `True`, return `Field` instance, computing relevant kwargs
from the given property. If `False`, return the `Field` class.
:param kwargs: Additional keyword arguments to pass to the field constructor.
:return: A `marshmallow.fields.Field` class or instance.
"""
column2field = default_converter.column2field
"""Convert a SQLAlchemy `Column <sqlalchemy.schema.Column>` to a field instance or class.
:param sqlalchemy.schema.Column column: SQLAlchemy Column.
:param bool instance: If `True`, return `Field` instance, computing relevant kwargs
from the given column. If `False`, return the `Field` class.
:return: A `marshmallow.fields.Field` class or instance.
"""
field_for = default_converter.field_for
"""Convert a property for a mapped SQLAlchemy class to a marshmallow `Field`.
Example: ::
date_created = field_for(Author, 'date_created', dump_only=True)
author = field_for(Book, 'author')
:param type model: A SQLAlchemy mapped class.
:param str property_name: The name of the property to convert.
:param kwargs: Extra keyword arguments to pass to `property2field`
:return: A `marshmallow.fields.Field` class or instance.
"""
|
|
"""Phonopy loader."""
# Copyright (C) 2018 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import phonopy.cui.load_helper as load_helper
from phonopy.api_phonopy import Phonopy
from phonopy.interface.calculator import get_default_physical_units
from phonopy.interface.phonopy_yaml import PhonopyYaml
from phonopy.structure.cells import get_primitive_matrix
def load(
phonopy_yaml=None, # phonopy.yaml-like must be the first argument.
supercell_matrix=None,
primitive_matrix=None,
is_nac=True,
calculator=None,
unitcell=None,
supercell=None,
nac_params=None,
unitcell_filename=None,
supercell_filename=None,
born_filename=None,
force_sets_filename=None,
force_constants_filename=None,
fc_calculator=None,
fc_calculator_options=None,
factor=None,
frequency_scale_factor=None,
produce_fc=True,
is_symmetry=True,
symmetrize_fc=True,
is_compact_fc=True,
store_dense_svecs=False,
symprec=1e-5,
log_level=0,
) -> Phonopy:
"""Create Phonopy instance from parameters and/or input files.
"phonopy_yaml"-like file is parsed unless crystal structure information
is given by unitcell_filename, supercell_filename, unitcell
(PhonopyAtoms-like), or supercell (PhonopyAtoms-like).
Even when "phonopy_yaml"-like file is parse, parameters except for
crystal structure can be overwritten.
Phonopy default files of 'FORCE_SETS' and 'BORN' are parsed when they
are found in current directory and those data are not yet provided by
other means.
Crystal structure
-----------------
Means to provide crystal structure(s) and their priority:
1. unitcell_filename (with supercell_matrix)
2. supercell_filename
3. unitcell (with supercell_matrix)
4. supercell.
5. phonopy_yaml
Force sets or force constants
-----------------------------
Optional. Means to provide information to generate force constants
and their priority:
1. force_constants_filename
2. force_sets_filename
3. phonopy_yaml if force constants are found in phonopy_yaml.
4. phonopy_yaml if forces are found in phonopy_yaml.dataset.
5. 'FORCE_CONSTANTS' is searched in current directory.
6. 'force_constants.hdf5' is searched in current directory.
7. 'FORCE_SETS' is searched in current directory.
When both 3 and 4 are satisfied but none of the others, force constants
and the dataset are stored in the Phonopy instance, but force constants
are not produced from the dataset.
Parameters for non-analytical term correction (NAC)
---------------------------------------------------
Optional. Means to provide NAC parameters and their priority:
1. born_filename
2. nac_params
3. phonopy_yaml.nac_params if existed and is_nac=True.
4. 'BORN' is searched in current directory when is_nac=True.
Parameters
----------
phonopy_yaml : str, optional
Filename of "phonopy.yaml"-like file. If this is given, the data
in the file are parsed. Default is None.
supercell_matrix : array_like, optional
Supercell matrix multiplied to input cell basis vectors.
shape=(3, ) or (3, 3), where the former is considered a diagonal
matrix. Default is the unit matrix.
dtype=int
primitive_matrix : array_like or str, optional
Primitive matrix multiplied to input cell basis vectors. Default is
None, which is equivalent to 'auto'.
For array_like, shape=(3, 3), dtype=float.
When 'F', 'I', 'A', 'C', or 'R' is given instead of a 3x3 matrix,
the primitive matrix for the character found at
https://spglib.github.io/spglib/definition.html
is used.
is_nac : bool, optional
If True, look for 'BORN' file. If False, NAC is turned off.
Default is True.
calculator : str, optional.
Calculator used for computing forces. This is used to switch the set
of physical units. Default is None, which is equivalent to "vasp".
unitcell : PhonopyAtoms, optional
Input unit cell. Default is None.
supercell : PhonopyAtoms, optional
Input supercell. When given, the default value of primitive_matrix is
set to 'auto' (can be overwritten). supercell_matrix is ignored.
Default is None.
nac_params : dict, optional
Parameters required for non-analytical term correction. Default is
None.
{'born': Born effective charges
(array_like, shape=(primitive cell atoms, 3, 3), dtype=float),
'dielectric': Dielectric constant matrix
(array_like, shape=(3, 3), dtype=float),
'factor': unit conversion factor (float)}
unitcell_filename : str, optional
Input unit cell filename. Default is None.
supercell_filename : str, optional
Input supercell filename. When this is specified, supercell_matrix is
ignored. Default is None.
born_filename : str, optional
Filename corresponding to 'BORN', a file that contains non-analytical
term correction parameters.
force_sets_filename : str, optional
Filename of a file corresponding to 'FORCE_SETS', a file that contains
sets of forces and displacements. Default is None.
force_constants_filename : str, optional
Filename of a file corresponding to 'FORCE_CONSTANTS' or
'force_constants.hdf5', a file that contains force constants. Default
is None.
fc_calculator : str, optional
Force constants calculator. Currently only 'alm'. Default is None.
fc_calculator_options : str, optional
Optional parameters that are passed to the external fc-calculator.
This is given as one text string. How to parse this depends on the
fc-calculator. For alm, parameters are split by commas ',',
and each key-value pair is written as 'key = value'.
factor : float, optional
Phonon frequency unit conversion factor. Unless specified, default
unit conversion factor for each calculator is used.
frequency_scale_factor : float, optional
Factor multiplied to calculated phonon frequency. Default is None,
i.e., effectively 1.
produce_fc : bool, optional
When False, force constants are not calculated from displacements
and forces. Default is True.
is_symmetry : bool, optional
When False, crystal symmetry except for lattice translation is not
considered. Default is True.
symmetrize_fc : bool, optional
When False, force constants are not symmetrized when creating
force constants from displacements and forces. Default is True.
is_compact_fc : bool, optional
Force constants are produced in the array whose shape is
True: (primitive, supercell, 3, 3)
False: (supercell, supercell, 3, 3)
where 'supercell' and 'primitive' indicate number of atoms in these
cells. Default is True.
store_dense_svecs : bool, optional
This is for test use only. Do not set True.
Default is False.
symprec : float, optional
Tolerance used to find crystal symmetry. Default is 1e-5.
log_level : int, optional
Verbosity control. Default is 0.
"""
if (
supercell is not None
or supercell_filename is not None
or unitcell is not None
or unitcell_filename is not None
): # noqa E129
cell, smat, pmat = load_helper.get_cell_settings(
supercell_matrix=supercell_matrix,
primitive_matrix=primitive_matrix,
unitcell=unitcell,
supercell=supercell,
unitcell_filename=unitcell_filename,
supercell_filename=supercell_filename,
calculator=calculator,
symprec=symprec,
log_level=log_level,
)
_calculator = calculator
_nac_params = nac_params
_dataset = None
_fc = None
elif phonopy_yaml is not None:
phpy_yaml = PhonopyYaml()
phpy_yaml.read(phonopy_yaml)
cell = phpy_yaml.unitcell
smat = phpy_yaml.supercell_matrix
if smat is None:
smat = np.eye(3, dtype="intc", order="C")
if primitive_matrix is not None:
pmat = get_primitive_matrix(primitive_matrix, symprec=symprec)
else:
pmat = phpy_yaml.primitive_matrix
if nac_params is not None:
_nac_params = nac_params
elif is_nac:
_nac_params = phpy_yaml.nac_params
else:
_nac_params = None
_dataset = phpy_yaml.dataset
_fc = phpy_yaml.force_constants
if calculator is None:
_calculator = phpy_yaml.calculator
else:
_calculator = calculator
else:
msg = "Cell information could not found. " "Phonopy instance loading failed."
raise RuntimeError(msg)
if log_level and _calculator is not None:
print('Set "%s" mode.' % _calculator)
# units keywords: factor, nac_factor, distance_to_A
units = get_default_physical_units(_calculator)
if factor is None:
_factor = units["factor"]
else:
_factor = factor
phonon = Phonopy(
cell,
smat,
primitive_matrix=pmat,
factor=_factor,
frequency_scale_factor=frequency_scale_factor,
symprec=symprec,
is_symmetry=is_symmetry,
store_dense_svecs=store_dense_svecs,
calculator=_calculator,
log_level=log_level,
)
# NAC params
if born_filename is not None or _nac_params is not None or is_nac:
ret_nac_params = load_helper.get_nac_params(
primitive=phonon.primitive,
nac_params=_nac_params,
born_filename=born_filename,
is_nac=is_nac,
nac_factor=units["nac_factor"],
log_level=log_level,
)
if ret_nac_params is not None:
phonon.nac_params = ret_nac_params
# Displacements, forces, and force constants
load_helper.set_dataset_and_force_constants(
phonon,
_dataset,
_fc,
force_constants_filename=force_constants_filename,
force_sets_filename=force_sets_filename,
fc_calculator=fc_calculator,
fc_calculator_options=fc_calculator_options,
produce_fc=produce_fc,
symmetrize_fc=symmetrize_fc,
is_compact_fc=is_compact_fc,
log_level=log_level,
)
return phonon
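# A minimal usage sketch for load() (the filenames are illustrative and must
# exist on disk):
#
#   phonon = load("phonopy_disp.yaml")
#   # or build the instance from a unit cell file instead:
#   phonon = load(unitcell_filename="POSCAR",
#                 supercell_matrix=[2, 2, 2],
#                 primitive_matrix='auto')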
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to manage the GIFT launchpad PPA and l2tbinaries."""
import argparse
import csv
import gzip
import io
import json
import logging
import os
import platform
import re
import sys
import zlib
from xml.etree import ElementTree
from l2tdevtools import projects
from l2tdevtools import versions
from l2tdevtools.download_helpers import interface
from l2tdevtools.lib import definitions
class COPRProjectManager(object):
"""Defines a COPR project manager."""
_COPR_BASE_URL = 'https://copr.fedorainfracloud.org{0:s}'
_COPR_URL = (
'https://copr.fedorainfracloud.org/api_2/projects?group={name:s}&'
'name={project:s}')
_COPR_REPO_URL = (
'https://copr-be.cloud.fedoraproject.org/results/%40{name:s}/'
'{project:s}/fedora-{fedora_version:s}-x86_64')
_PRIMARY_XML_XPATH = (
'./{http://linux.duke.edu/metadata/repo}data[@type="primary"]/'
'{http://linux.duke.edu/metadata/repo}location')
def __init__(self, name, distribution=None):
"""Initializes a COPR manager.
Args:
name (str): name of the group.
distribution (Optional[str]): name of the distribution.
"""
super(COPRProjectManager, self).__init__()
self._distribution = distribution or definitions.DEFAULT_FEDORA_DISTRIBUTION
self._download_helper = interface.DownloadHelper('')
self._name = name
def GetPackages(self, project):
"""Retrieves a list of packages of a specific project.
Args:
project (str): project name.
Returns:
dict[str, str]: package names and versions as values or None if
the packages cannot be determined.
"""
# TODO: do not use builds information, it is incomplete
# instead use https://copr-be.cloud.fedoraproject.org/results/%40gift/
# testing/fedora-26-x86_64/repodata/repomd.xml
# to find primary.xml.gz or primary.sqlite.bz2
kwargs = {
'fedora_version': self._distribution,
'name': self._name,
'project': project}
copr_repo_url = self._COPR_REPO_URL.format(**kwargs)
download_url = '/'.join([copr_repo_url, 'repodata', 'repomd.xml'])
page_content = self._download_helper.DownloadPageContent(download_url)
if not page_content:
logging.error('Unable to retrieve repomd.xml.')
return None
repomd_xml = ElementTree.fromstring(page_content)
xml_elements = repomd_xml.findall(self._PRIMARY_XML_XPATH)
if not xml_elements or not xml_elements[0].items():
logging.error('Primary data type missing from repomd.xml.')
return None
href_value_tuple = xml_elements[0].items()[0]
if not href_value_tuple[1]:
logging.error('Primary data type missing from repomd.xml.')
return None
download_url = '/'.join([copr_repo_url, href_value_tuple[1]])
page_content = self._download_helper.DownloadPageContent(
download_url, encoding=None)
if not page_content:
_, _, filename = download_url.rpartition('/')
logging.error('Unable to retrieve {0:s}.'.format(filename))
return None
with gzip.GzipFile(fileobj=io.BytesIO(page_content)) as file_object:
page_content = file_object.read()
primary_xml = ElementTree.fromstring(page_content)
# Note: explicitly compare the xml.Element against None, since truth-value
# testing of elements triggers a deprecation warning.
if primary_xml is None:
logging.error('Packages missing from primary.xml.')
return None
packages = {}
for project_xml in primary_xml:
arch_xml = project_xml.find('{http://linux.duke.edu/metadata/common}arch')
if arch_xml is None or arch_xml.text != 'src':
continue
package_name_xml = project_xml.find(
'{http://linux.duke.edu/metadata/common}name')
package_version_xml = project_xml.find(
'{http://linux.duke.edu/metadata/common}version')
if package_name_xml is None or package_version_xml is None:
continue
package_name = package_name_xml.text
package_version = package_version_xml.attrib['ver']
if not package_name or not package_version:
continue
if package_name in packages:
package_version_tuple = package_version.split('.')
version_tuple = packages[package_name].split('.')
compare_result = versions.CompareVersions(
package_version_tuple, version_tuple)
if compare_result < 0:
continue
packages[package_name] = package_version
return packages
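# A minimal usage sketch for COPRProjectManager (the group and project names
# are illustrative):
#
#   manager = COPRProjectManager('gift')
#   packages = manager.GetPackages('testing')
#   # -> {'some-package': '1.2.3', ...} or None when download or parsing fails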
class GithubRepoManager(object):
"""Defines a GitHub repository manager."""
_GITHUB_REPO_API_URL = (
'https://api.github.com/repos/log2timeline/l2tbinaries')
_GITHUB_REPO_URL = (
'https://github.com/log2timeline/l2tbinaries')
def __init__(self):
"""Initializes a GitHub repository manager."""
super(GithubRepoManager, self).__init__()
self._download_helper = interface.DownloadHelper('')
def _GetDownloadURL(self, sub_directory, track, use_api=False):
"""Retrieves the download URL.
Args:
sub_directory (str): machine type sub directory.
track (str): track name.
use_api (Optional[bool]): True if the API should be used.
Returns:
str: download URL or None if sub directory is missing.
"""
if not sub_directory:
return None
if track == 'stable':
branch = 'main'
else:
branch = track
if use_api:
download_url = '{0:s}/contents/{1:s}?ref={2:s}'.format(
self._GITHUB_REPO_API_URL, sub_directory, branch)
else:
download_url = '{0:s}/tree/{1:s}/{2:s}'.format(
self._GITHUB_REPO_URL, branch, sub_directory)
return download_url
def GetPackages(self, sub_directory, track, use_api=False):
"""Retrieves a list of packages of a specific sub directory.
Args:
sub_directory (str): machine type sub directory.
track (str): track name.
use_api (Optional[bool]): True if the API should be used.
Returns:
dict[str, str]: package names and versions as values or None if
the packages cannot be determined.
"""
if not sub_directory:
logging.info('Missing machine type sub directory.')
return None
download_url = self._GetDownloadURL(sub_directory, track, use_api=use_api)
if not download_url:
logging.info('Missing download URL.')
return None
page_content = self._download_helper.DownloadPageContent(download_url)
if not page_content:
return None
filenames = []
if use_api:
# The page content consists of JSON data that contains a list of dicts.
# Each dict consists of:
# {
# "name":"PyYAML-3.11.win-amd64-py2.7.msi",
# "path":"win64/PyYAML-3.11.win-amd64-py2.7.msi",
# "sha":"8fca8c1e2549cf54bf993c55930365d01658f418",
# "size":196608,
# "url":"https://api.github.com/...",
# "html_url":"https://github.com/...",
# "git_url":"https://api.github.com/...",
# "download_url":"https://raw.githubusercontent.com/...",
# "type":"file",
# "_links":{
# "self":"https://api.github.com/...",
# "git":"https://api.github.com/...",
# "html":"https://github.com/..."
# }
# }
for directory_entry in json.loads(page_content):
filename = directory_entry.get('name', None)
if filename:
filenames.append(filename)
else:
# The format of the download URL is:
# <a class="js-navigation-open" title="{title}" id="{id}" href="{path}"
expression_string = (
'<a class="js-navigation-open" title="[^"]*" id="[^"]*" '
'href="([^"]*)"')
matches = re.findall(expression_string, page_content)
for match in matches:
_, _, filename = match.rpartition('/')
filenames.append(filename)
packages = {}
for filename in filenames:
if not filename or not filename.endswith('.msi'):
continue
if sub_directory == 'win32':
filename, _, _ = filename.rpartition('.win32')
elif sub_directory == 'win64':
filename, _, _ = filename.rpartition('.win-amd64')
else:
continue
name, _, version = filename.rpartition('-')
packages[name] = version
return packages
class LaunchpadPPAManager(object):
"""Defines a Launchpad PPA manager."""
_LAUNCHPAD_URL = (
'http://ppa.launchpad.net/{name:s}/{track:s}/ubuntu/dists'
'/{distribution:s}/main/source/Sources.gz')
def __init__(self, name, distribution=None):
"""Initializes a Launchpad PPA manager.
Args:
name (str): name of the PPA.
distribution (Optional[str]): name of the distribution.
"""
super(LaunchpadPPAManager, self).__init__()
self._distribution = distribution or definitions.DEFAULT_UBUNTU_DISTRIBUTION
self._download_helper = interface.DownloadHelper('')
self._name = name
def CopyPackages(self):
"""Copies packages."""
# TODO: implement:
# send post to https://launchpad.net/~gift/+archive/ubuntu/testing
# /+copy-packages
return
def GetPackages(self, track):
"""Retrieves a list of packages of a specific PPA track.
Args:
track (str): PPA track name.
Returns:
dict[str, str]: package names and versions as values or None if
the packages cannot be determined.
"""
kwargs = {
'distribution': self._distribution,
'name': self._name,
'track': track}
download_url = self._LAUNCHPAD_URL.format(**kwargs)
ppa_sources = self._download_helper.DownloadPageContent(
download_url, encoding=None)
if not ppa_sources:
logging.error('Unable to retrieve PPA sources list.')
return None
ppa_sources = zlib.decompress(ppa_sources, 16 + zlib.MAX_WBITS)
try:
ppa_sources = ppa_sources.decode('utf-8')
except UnicodeDecodeError as exception:
logging.error(
'Unable to decode PPA sources list with error: {0!s}'.format(
exception))
return None
packages = {}
for line in ppa_sources.split('\n'):
if line.startswith('Package: '):
_, _, package = line.rpartition('Package: ')
elif line.startswith('Version: '):
_, _, version = line.rpartition('Version: ')
version, _, _ = version.rpartition('-')
packages[package] = version
return packages
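# A minimal sketch of the Sources list parsing above; given these
# (illustrative) lines in the decompressed Sources file:
#
#   Package: some-package
#   Version: 1.2.3-1ppa1~focal
#
# GetPackages returns {'some-package': '1.2.3'}, since the trailing
# '-1ppa1~focal' suffix is stripped by rpartition('-').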
class OpenSuseBuildServiceManager(object):
"""Defines an OpenSuse build service manager object."""
# http://download.opensuse.org/repositories/home:/joachimmetz:/testing/
# Fedora_22/src/
class PyPIManager(object):
"""Defines a PyPI manager."""
_PYPI_URL = 'https://pypi.python.org/pypi/{package_name:s}'
def __init__(self, projects_file):
"""Initializes a PyPI manager.
Args:
projects_file (str): path to the projects.ini file.
"""
super(PyPIManager, self).__init__()
self._download_helper = interface.DownloadHelper('')
self._package_names = []
self._pypi_package_names = {}
if projects_file:
with io.open(projects_file, 'r', encoding='utf-8') as file_object:
project_definition_reader = projects.ProjectDefinitionReader()
for project_definition in project_definition_reader.Read(file_object):
self._package_names.append(project_definition.name)
if project_definition.pypi_name:
self._pypi_package_names[project_definition.pypi_name] = (
project_definition.name)
def CopyPackages(self):
"""Copies packages."""
# TODO: implement:
# send post to https://launchpad.net/~gift/+archive/ubuntu/testing
# /+copy-packages
return
def GetPackages(self):
"""Retrieves a list of packages.
Returns:
dict[str, str]: package names and versions as values or None if
the packages cannot be determined.
"""
packages = {}
for package_name in self._package_names:
pypi_package_name = self._pypi_package_names.get(
package_name, package_name)
kwargs = {'package_name': pypi_package_name}
download_url = self._PYPI_URL.format(**kwargs)
page_content = self._download_helper.DownloadPageContent(download_url)
if not page_content:
logging.error('Unable to retrieve PyPI package: {0:s} page.'.format(
pypi_package_name))
continue
try:
page_content = page_content.decode('utf-8')
except UnicodeDecodeError as exception:
logging.error((
'Unable to decode PyPI package: {0:s} page with error: '
'{1!s}').format(pypi_package_name, exception))
continue
expression_string = (
'<title>{0:s} ([^ ]*) : Python Package Index</title>'.format(
pypi_package_name))
matches = re.findall(expression_string, page_content)
if not matches or len(matches) != 1:
logging.warning(
'Unable to determine PyPI package: {0:s} information.'.format(
pypi_package_name))
continue
packages[package_name] = matches[0]
return packages
class PackagesManager(object):
"""Manages packages across various repositories."""
def __init__(self, projects_file, distribution=None):
"""Initializes a packages manager.
Args:
projects_file (str): path to the projects.ini file.
distribution (Optional[str]): name of the distribution.
"""
fedora_distribution = (
distribution or definitions.DEFAULT_FEDORA_DISTRIBUTION)
ubuntu_distribution = (
distribution or definitions.DEFAULT_UBUNTU_DISTRIBUTION)
super(PackagesManager, self).__init__()
self._copr_project_manager = COPRProjectManager(
'gift', distribution=fedora_distribution)
self._github_repo_manager = GithubRepoManager()
self._launchpad_ppa_manager = LaunchpadPPAManager(
'gift', distribution=ubuntu_distribution)
self._pypi_manager = PyPIManager(projects_file)
self._ubuntu_distribution = ubuntu_distribution
def _ComparePackages(self, reference_packages, packages):
"""Compares the packages.
Args:
reference_packages (dict[str, str]): reference package names and versions.
packages (dict[str, str]): package names and versions.
Returns:
tuple: containing:
dict[str, str]: new package names and versions. New packages are those
that are present in the reference packages but not in the packages.
dict[str, str]: newer existing package names and versions. Newer
existing packages are those that have a newer version in the
reference packages.
"""
new_packages = {}
new_versions = {}
for name, version in iter(reference_packages.items()):
if not packages or name not in packages:
new_packages[name] = version
continue
version_tuple = packages[name].split('.')
new_version_tuple = version.split('.')
compare_result = versions.CompareVersions(
version_tuple, new_version_tuple)
if compare_result < 0:
new_versions[name] = version
return new_packages, new_versions
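# A minimal sketch of the comparison above (the names and versions are
# illustrative):
#
#   reference = {'plaso': '2.1.0', 'dfvfs': '1.4.0'}
#   current = {'plaso': '2.0.0'}
#   self._ComparePackages(reference, current)
#   # -> ({'dfvfs': '1.4.0'}, {'plaso': '2.1.0'})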
def CompareDirectoryWithCOPRProject(self, reference_directory, project):
"""Compares a directory containing source rpm packages with a COPR project.
Args:
reference_directory (str): path of the reference directory that contains
dpkg source packages.
project (str): name of the COPR project.
Returns:
tuple: containing:
dict[str, str]: new package names and versions. New packages are those
that are present in the reference directory but not in the project.
dict[str, str]: newer existing package names and versions. Newer
existing packages are those that have a newer version in the
reference directory.
"""
reference_packages = {}
for directory_entry in os.listdir(reference_directory):
# The directory contains various files and we are only interested
# in the source RPM packages that use the naming convention:
# package-version-#.src.rpm
if not directory_entry.endswith('.src.rpm'):
continue
package_name, _, _ = directory_entry.rpartition('-')
package_name, _, package_version = package_name.rpartition('-')
if package_name in reference_packages:
package_version_tuple = package_version.split('.')
version_tuple = reference_packages[package_name].split('.')
compare_result = versions.CompareVersions(
package_version_tuple, version_tuple)
if compare_result < 0:
continue
reference_packages[package_name] = package_version
packages = self._copr_project_manager.GetPackages(project)
return self._ComparePackages(reference_packages, packages)
def CompareDirectoryWithCSV(self, reference_directory, csv_file):
"""Compares a directory containing source packages with a CSV file.
Args:
reference_directory (str): path of the reference directory that contains
dpkg source packages.
csv_file (str): path of the CSV file.
Returns:
tuple: containing:
dict[str, str]: new package names and versions. New packages are those
that are present in the reference directory but not in the project.
dict[str, str]: newer existing package names and versions. Newer
existing packages are those that have a newer version in the
reference directory.
"""
reference_packages = {}
for directory_entry in os.listdir(reference_directory):
# The directory contains various files and we are only interested
# in the source packages that use the naming convention:
# package-version-#.tar.gz
# package-version-#.zip
if (not directory_entry.endswith('.tar.gz') and
not directory_entry.endswith('.zip')):
continue
if (directory_entry.endswith('.debian.tar.gz') or
directory_entry.endswith('.orig.tar.gz') or
directory_entry.endswith('-1.tar.gz')):
continue
package_name, _, _ = directory_entry.rpartition('.')
if package_name.endswith('.tar'):
package_name, _, _ = package_name.rpartition('.')
package_name, _, package_version = package_name.rpartition('-')
if (package_name.endswith('-alpha') or
package_name.endswith('-beta') or
package_name.endswith('-experimental')):
package_name, _, _ = package_name.rpartition('-')
if package_name in reference_packages:
package_version_tuple = package_version.split('.')
version_tuple = reference_packages[package_name].split('.')
compare_result = versions.CompareVersions(
package_version_tuple, version_tuple)
if compare_result < 0:
continue
reference_packages[package_name] = package_version
packages = {}
with open(csv_file, 'r') as file_object:
for row in csv.DictReader(file_object):
packages[row['project']] = row['version']
return self._ComparePackages(reference_packages, packages)
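  # The CSV file read above is assumed to have a header row with at least the
  # 'project' and 'version' columns, for example (hypothetical content):
  #
  #   project,version
  #   dfvfs,20180510
  #   libyal,20180101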
def CompareDirectoryWithGithubRepo(
self, reference_directory, sub_directory, track):
"""Compares a directory containing msi or dmg packages with a GitHub repo.
Args:
reference_directory (str): path of the reference directory that contains
msi or dmg packages.
sub_directory (str): name of the machine type sub directory.
track (str): name of the track.
Returns:
tuple: containing:
dict[str, str]: new package names and versions. New packages are those
that are present in the reference directory but not in the sub
directory.
dict[str, str]: newer existing package names and versions. Newer
existing packages are those that have a newer version in the
reference directory.
"""
reference_packages = {}
for directory_entry in os.listdir(reference_directory):
if directory_entry.endswith('.dmg'):
directory_entry, _, _ = directory_entry.rpartition('.dmg')
elif directory_entry.endswith('.msi'):
if sub_directory == 'win32':
directory_entry, _, _ = directory_entry.rpartition('.win32')
elif sub_directory == 'win64':
directory_entry, _, _ = directory_entry.rpartition('.win-amd64')
else:
continue
package_name, _, package_version = directory_entry.rpartition('-')
if package_name in reference_packages:
package_version_tuple = package_version.split('.')
version_tuple = reference_packages[package_name].split('.')
compare_result = versions.CompareVersions(
package_version_tuple, version_tuple)
if compare_result < 0:
continue
reference_packages[package_name] = package_version
packages = self._github_repo_manager.GetPackages(sub_directory, track)
return self._ComparePackages(reference_packages, packages)
def CompareDirectoryWithLaunchpadPPATrack(
self, reference_directory, track):
"""Compares a directory containing dpkg packages with a Launchpad PPA track.
Args:
reference_directory (str): path of the reference directory that contains
dpkg source packages.
track (str): name of the track.
Returns:
tuple: containing:
dict[str, str]: new package names and versions. New packages are those
that are present in the reference directory but not in the track.
dict[str, str]: newer existing package names and versions. Newer
existing packages are those that have a newer version in the
reference directory.
"""
reference_packages = {}
for directory_entry in os.listdir(reference_directory):
# The directory contains various files and we are only interested
# in the source dpkg packages that use the naming convention:
# package_version-#ppa1~distribution_source.changes
name_suffix = 'ppa1~{0:s}_source.changes'.format(
self._ubuntu_distribution)
if not directory_entry.endswith(name_suffix):
continue
package_name, _, _ = directory_entry.rpartition('-')
package_name, _, package_version = package_name.rpartition('_')
if package_name in reference_packages:
package_version_tuple = package_version.split('.')
version_tuple = reference_packages[package_name].split('.')
compare_result = versions.CompareVersions(
package_version_tuple, version_tuple)
if compare_result < 0:
continue
reference_packages[package_name] = package_version
packages = self._launchpad_ppa_manager.GetPackages(track)
return self._ComparePackages(reference_packages, packages)
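  # A sketch of the parsing above for a hypothetical source.changes file name,
  # assuming the Ubuntu distribution is 'bionic':
  #
  #   directory_entry = 'plaso_20180630-1ppa1~bionic_source.changes'
  #   # rpartition('-') strips '1ppa1~bionic_source.changes', after which
  #   # rpartition('_') yields:
  #   # package_name == 'plaso', package_version == '20180630'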
def CompareCOPRProjects(self, reference_project, project):
"""Compares two COPR projects.
Args:
reference_project (str): name of the reference project.
project (str): name of the project.
Returns:
tuple: containing:
dict[str, str]: new package names and versions. New packages are those
that are present in the reference project but not in the project.
dict[str, str]: newer existing package names and versions. Newer
existing packages are those that have a newer version in the
reference project.
"""
reference_packages = self._copr_project_manager.GetPackages(
reference_project)
packages = self._copr_project_manager.GetPackages(project)
return self._ComparePackages(reference_packages, packages)
def CompareGithubRepos(self, sub_directory, reference_track, track):
"""Compares two GitHub repos PPA tracks.
Args:
sub_directory (str): name of the machine type sub directory.
reference_track (str): name of the reference track.
track (str): name of the track.
Returns:
tuple: containing:
dict[str, str]: new package names and versions. New packages are those
that are present in the reference track but not in the track.
dict[str, str]: newer existing package names and versions. Newer
existing packages are those that have a newer version in the
reference track.
"""
reference_packages = self._github_repo_manager.GetPackages(
sub_directory, reference_track)
packages = self._github_repo_manager.GetPackages(sub_directory, track)
return self._ComparePackages(reference_packages, packages)
def CompareLaunchpadPPATracks(self, reference_track, track):
"""Compares two Launchpad PPA tracks.
Args:
reference_track (str): name of the reference track.
track (str): name of the track.
Returns:
tuple: containing:
dict[str, str]: new package names and versions. New packages are those
that are present in the reference track but not in the track.
dict[str, str]: newer existing package names and versions. Newer
existing packages are those that have a newer version in the
reference track.
"""
reference_packages = self._launchpad_ppa_manager.GetPackages(
reference_track)
packages = self._launchpad_ppa_manager.GetPackages(track)
return self._ComparePackages(reference_packages, packages)
def CompareDirectoryWithPyPI(self, reference_directory):
"""Compares a directory containing .tar.gz packages with PyPI.
Args:
      reference_directory (str): path of the reference directory that
          contains .tar.gz source packages.
Returns:
tuple: containing:
dict[str, str]: new package names and versions. New packages are those
that are present in the reference directory but not on PyPI.
dict[str, str]: newer existing package names and versions. Newer
existing packages are those that have a newer version in the
reference directory.
"""
reference_packages = {}
for directory_entry in os.listdir(reference_directory):
if not directory_entry.endswith('.tar.gz'):
continue
directory_entry, _, _ = directory_entry.rpartition('.tar.gz')
name, _, version = directory_entry.rpartition('-')
if (name.endswith('-alpha') or name.endswith('-beta') or
name.endswith('-experimental')):
name, _, _ = name.rpartition('-')
reference_packages[name] = version
packages = self._pypi_manager.GetPackages()
return self._ComparePackages(reference_packages, packages)
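  # A sketch of the file name parsing above for a hypothetical sdist:
  #
  #   directory_entry = 'plaso-20180630.tar.gz'
  #   # after rpartition('.tar.gz') and rpartition('-'):
  #   # name == 'plaso', version == '20180630'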
def GetMachineTypeSubDirectory(
self, preferred_machine_type=None, preferred_operating_system=None):
"""Retrieves the machine type sub directory.
Args:
      preferred_machine_type (Optional[str]): preferred machine type, where
          None will auto-detect the current machine type.
      preferred_operating_system (Optional[str]): preferred operating system,
          where None will auto-detect the current operating system.
Returns:
str: machine type sub directory or None if system configuration is not
supported.
"""
if preferred_operating_system:
operating_system = preferred_operating_system
else:
operating_system = platform.system()
if preferred_machine_type:
cpu_architecture = preferred_machine_type
else:
cpu_architecture = platform.machine().lower()
sub_directory = None
if operating_system == 'Windows':
if cpu_architecture == 'x86':
sub_directory = 'win32'
elif cpu_architecture == 'amd64':
sub_directory = 'win64'
else:
logging.error('CPU architecture: {0:s} not supported.'.format(
cpu_architecture))
return None
else:
logging.error('Operating system: {0:s} not supported.'.format(
operating_system))
return None
return sub_directory
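  # For reference, the mapping implemented above (only Windows is currently
  # supported):
  #
  #   operating system | cpu architecture | sub directory
  #   Windows          | x86              | win32
  #   Windows          | amd64            | win64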
def Main():
"""The main program function.
Returns:
bool: True if successful or False if not.
"""
actions = frozenset([
'copr-diff-dev', 'copr-diff-stable', 'copr-diff-staging',
'copr-diff-testing', 'csv-diff', 'l2tbinaries-diff-dev',
'l2tbinaries-diff-stable', 'l2tbinaries-diff-staging',
'l2tbinaries-diff-testing', 'launchpad-diff-dev', 'launchpad-diff-stable',
'launchpad-diff-staging', 'launchpad-diff-testing', 'pypi-diff'])
argument_parser = argparse.ArgumentParser(description=(
'Manages the GIFT copr, launchpad PPA and l2tbinaries.'))
argument_parser.add_argument(
'action', choices=sorted(actions), action='store',
metavar='ACTION', default=None, help='The action.')
argument_parser.add_argument(
'--build-directory', '--build_directory', action='store',
metavar='DIRECTORY', dest='build_directory', type=str,
default=os.path.join('..', 'l2tbuilds'), help=(
'The location of the build directory.'))
argument_parser.add_argument(
'-c', '--config', dest='config_path', action='store',
metavar='CONFIG_PATH', default=None, help=(
'path of the directory containing the build configuration '
'files e.g. projects.ini.'))
argument_parser.add_argument(
'--csv-file', '--csv_file', action='store', metavar='FILE',
dest='csv_file', type=str, default='', help=(
'The location of the CSV file.'))
argument_parser.add_argument(
'--distribution', action='store', metavar='NAME', dest='distribution',
type=str, default=None, help='The name or version of the distribution.')
argument_parser.add_argument(
'--machine-type', '--machine_type', action='store', metavar='TYPE',
dest='machine_type', type=str, default=None, help=(
          'Manually sets the machine type instead of using the value returned '
          'by platform.machine(). Usage of this argument is not recommended '
          'unless you want to force the installation of one machine type, '
          'e.g. \'x86\', onto another, e.g. \'amd64\'.'))
options = argument_parser.parse_args()
if not options.action:
print('Missing action.')
print('')
argument_parser.print_help()
print('')
return False
config_path = options.config_path
if not config_path:
config_path = os.path.dirname(__file__)
config_path = os.path.dirname(config_path)
config_path = os.path.join(config_path, 'data')
projects_file = os.path.join(config_path, 'projects.ini')
if not os.path.exists(projects_file):
print('No such config file: {0:s}.'.format(projects_file))
print('')
return False
logging.basicConfig(
level=logging.INFO, format='[%(levelname)s] %(message)s')
# TODO: add action to upload files to PPA.
# TODO: add action to copy files between PPA tracks.
# TODO: add pypi support.
packages_manager = PackagesManager(
projects_file, distribution=options.distribution)
action_tuple = options.action.split('-')
if action_tuple[0] == 'copr' and action_tuple[1] == 'diff':
track = action_tuple[2]
if track == 'testing':
reference_directory = options.build_directory
new_packages, new_versions = (
packages_manager.CompareDirectoryWithCOPRProject(
reference_directory, track))
diff_header = (
'Difference between: {0:s} and COPR project: {1:s}'.format(
reference_directory, track))
else:
if track == 'dev':
reference_track = 'testing'
elif track == 'staging':
reference_track = 'dev'
else:
reference_track = 'staging'
new_packages, new_versions = packages_manager.CompareCOPRProjects(
reference_track, track)
diff_header = (
'Difference between COPR project: {0:s} and {1:s}'.format(
reference_track, track))
elif action_tuple[0] == 'csv' and action_tuple[1] == 'diff':
reference_directory = options.build_directory
new_packages, new_versions = (
packages_manager.CompareDirectoryWithCSV(
reference_directory, options.csv_file))
diff_header = (
'Difference between: {0:s} and CSV'.format(reference_directory))
elif action_tuple[0] == 'l2tbinaries' and action_tuple[1] == 'diff':
track = action_tuple[2]
sub_directory = packages_manager.GetMachineTypeSubDirectory(
preferred_machine_type=options.machine_type)
if track == 'testing':
reference_directory = options.build_directory
new_packages, new_versions = (
packages_manager.CompareDirectoryWithGithubRepo(
reference_directory, sub_directory, track))
diff_header = (
'Difference between: {0:s} and testing for: {1:s}'.format(
reference_directory, sub_directory))
else:
if track == 'dev':
reference_track = 'testing'
elif track == 'staging':
reference_track = 'dev'
else:
reference_track = 'staging'
new_packages, new_versions = packages_manager.CompareGithubRepos(
sub_directory, reference_track, track)
diff_header = (
'Difference between l2tbinaries tracks: {0:s} and {1:s} for: '
'{2:s}').format(reference_track, track, sub_directory)
elif action_tuple[0] == 'launchpad' and action_tuple[1] == 'diff':
track = action_tuple[2]
if track == 'testing':
reference_directory = options.build_directory
new_packages, new_versions = (
packages_manager.CompareDirectoryWithLaunchpadPPATrack(
reference_directory, track))
diff_header = (
'Difference between: {0:s} and Launchpad track: {1:s}'.format(
reference_directory, track))
else:
if track == 'dev':
reference_track = 'testing'
elif track == 'staging':
reference_track = 'dev'
else:
reference_track = 'staging'
new_packages, new_versions = packages_manager.CompareLaunchpadPPATracks(
reference_track, track)
diff_header = (
'Difference between Launchpad tracks: {0:s} and {1:s}'.format(
reference_track, track))
# elif action_tuple[0] == 'osb' and action_tuple[1] == 'diff':
elif action_tuple[0] == 'pypi' and action_tuple[1] == 'diff':
reference_directory = options.build_directory
new_packages, new_versions = (
packages_manager.CompareDirectoryWithPyPI(reference_directory))
diff_header = (
        'Difference between: {0:s} and PyPI'.format(reference_directory))
if action_tuple[1] == 'diff':
print(diff_header)
print('')
print('New packages:')
for package in sorted(new_packages.keys()):
print(' {0:s}'.format(package))
print('')
print('New versions:')
for package in sorted(new_versions.keys()):
print(' {0:s}'.format(package))
print('')
return True
if __name__ == '__main__':
if not Main():
sys.exit(1)
else:
sys.exit(0)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitConnectionsOperations(object):
"""ExpressRouteCircuitConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified Express Route Circuit Connection from the specified express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
connection_name=connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
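    # A minimal usage sketch (assuming an already-configured
    # NetworkManagementClient named `client`; resource names are illustrative):
    #
    #   poller = client.express_route_circuit_connections.begin_delete(
    #       'my-resource-group', 'my-circuit', 'AzurePrivatePeering',
    #       'my-connection')
    #   poller.result()  # blocks until the long-running delete completes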
def get(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCircuitConnection"
"""Gets the specified Express Route Circuit Connection from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_10_01.models.ExpressRouteCircuitConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
connection_name, # type: str
express_route_circuit_connection_parameters, # type: "_models.ExpressRouteCircuitConnection"
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCircuitConnection"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(express_route_circuit_connection_parameters, 'ExpressRouteCircuitConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
connection_name, # type: str
express_route_circuit_connection_parameters, # type: "_models.ExpressRouteCircuitConnection"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ExpressRouteCircuitConnection"]
"""Creates or updates a Express Route Circuit Connection in the specified express route circuits.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit connection.
:type connection_name: str
:param express_route_circuit_connection_parameters: Parameters supplied to the create or update
express route circuit connection operation.
:type express_route_circuit_connection_parameters: ~azure.mgmt.network.v2018_10_01.models.ExpressRouteCircuitConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRouteCircuitConnection or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_10_01.models.ExpressRouteCircuitConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
connection_name=connection_name,
express_route_circuit_connection_parameters=express_route_circuit_connection_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
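    # A minimal usage sketch (assuming a configured client; the address prefix
    # and resource names are illustrative):
    #
    #   connection = _models.ExpressRouteCircuitConnection(
    #       address_prefix='10.0.0.0/29')
    #   poller = client.express_route_circuit_connections.begin_create_or_update(
    #       'my-resource-group', 'my-circuit', 'AzurePrivatePeering',
    #       'my-connection', connection)
    #   result = poller.result()  # the created or updated connection resource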
def list(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ExpressRouteCircuitConnectionListResult"]
"""Gets all global reach connections associated with a private peering in an express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCircuitConnectionListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_10_01.models.ExpressRouteCircuitConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections'} # type: ignore
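    # A minimal usage sketch of the pager returned by list() (assuming a
    # configured client; resource names are illustrative):
    #
    #   for connection in client.express_route_circuit_connections.list(
    #       'my-resource-group', 'my-circuit', 'AzurePrivatePeering'):
    #       print(connection.name)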
|
|
# Copyright 2018 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from concurrent.futures import ThreadPoolExecutor
import json
import numbers
import time
import os
from functools import partial
import logging
import six
from six import string_types as basestring
from six.moves import zip
from reference_models.common import mpool
import sas
import sas_testcase
import test_harness_objects
from full_activity_dump import FullActivityDump
from full_activity_dump_helper import getFullActivityDumpSasTestHarness, getFullActivityDumpSasUut
import common_strings
from util import winnforum_testcase, configurable_testcase, getCertificateFingerprint, writeConfig, \
loadConfig, makePpaAndPalRecordsConsistent, getFqdnLocalhost, \
getUnusedPort, getCertFilename, json_load
from sas_test_harness import SasTestHarnessServer, generateCbsdRecords, \
generatePpaRecords, generateCbsdReferenceId
from reference_models.dpa import dpa_mgr
from reference_models.common import data
from reference_models.pre_iap_filtering import pre_iap_filtering
from reference_models.iap import iap
from reference_models.interference import aggregate_interference, interference
from reference_models.geo import utils as geoutils
from reference_models.geo import drive
DELTA_IAP = 1 # Threshold value in dBm
ONE_MHZ = 1000000
LOW_FREQUENCY_LIMIT_HZ = 3550000000
class McpXprCommonTestcase(sas_testcase.SasTestCase):
def getEmptyCbsdRequestsWithDomainProxies(self, number_of_elements):
empty_cbsd_records_domain_proxy = {
'registrationRequests': [],
'grantRequests': [],
'conditionalRegistrationData': []
}
return [empty_cbsd_records_domain_proxy] * number_of_elements
def checkMcpConfig(self, config, test_type):
self.assertIn(test_type, ('MCP', 'xPR1', 'xPR2'))
def checkFadConfiguration(fad_config):
self.assertValidConfig(fad_config, {
'cbsdRecords': list
})
self.assertValidConfig(
config, {
'initialCbsdRequestsWithDomainProxies': list,
'initialCbsdRecords': list,
'iterationData': list,
'sasTestHarnessConfigs': list,
'domainProxyConfigs': list
}, {'dpas': dict})
# Check each iteration's data.
for iteration_data in config['iterationData']:
self.assertValidConfig(
iteration_data, {
'cbsdRequestsWithDomainProxies': list,
'cbsdRecords': list,
'protectedEntities': dict,
'dpaActivationList': list,
'dpaDeactivationList': list,
'sasTestHarnessData': list
})
self.assertEqual(
len(iteration_data['cbsdRequestsWithDomainProxies']),
len(config['domainProxyConfigs']),
'Mismatch in the number of domain proxies and the configuration for this iteration: %s.'
% str(iteration_data))
self.assertEqual(
len(config['initialCbsdRequestsWithDomainProxies']),
len(config['domainProxyConfigs']),
          'Mismatch in the number of domain proxies and the configuration of initial CBSD requests.')
for domain_proxy in iteration_data['cbsdRequestsWithDomainProxies']:
self.assertValidConfig(
domain_proxy, {
'registrationRequests': list,
'grantRequests': list,
'conditionalRegistrationData': list
})
for cbsd_record in iteration_data['cbsdRecords']:
self.assertValidConfig(
cbsd_record, {
'registrationRequest': dict,
'grantRequest': dict,
'clientCert': basestring,
'clientKey': basestring
}, {'conditionalRegistrationData': dict})
if 'conditionalRegistrationData' in cbsd_record:
self.assertIn('fccId', cbsd_record['conditionalRegistrationData'])
self.assertIn('cbsdSerialNumber',
cbsd_record['conditionalRegistrationData'])
self.assertValidConfig(
iteration_data['protectedEntities'],
{},
{
# All fields are optional.
'palRecords': list,
'ppaRecords': list,
'escRecords': list,
'gwpzRecords': list,
'gwblRecords': list,
'fssRecords': list,
})
if 'dpas' in config:
for dpa in iteration_data['dpaActivationList'] + iteration_data['dpaDeactivationList']:
self.assertValidConfig(dpa, {
'dpaId': basestring,
'frequencyRange': dict
})
self.assertEqual(
len(iteration_data['sasTestHarnessData']),
len(config['sasTestHarnessConfigs']),
'Mismatch in the number of SAS test harnesses and the configuration for this iteration: %s.'
% str(iteration_data))
for sas_test_harness_data in iteration_data['sasTestHarnessData']:
checkFadConfiguration(sas_test_harness_data)
# Check SAS test harnesses.
for sas_test_harness_config in config['sasTestHarnessConfigs']:
self.assertValidConfig(
sas_test_harness_config, {
'sasTestHarnessName': basestring,
'hostName': basestring,
'port': int,
'serverCert': basestring,
'serverKey': basestring,
'caCert': basestring
})
# Check domain proxies.
for domain_proxy_config in config['domainProxyConfigs']:
self.assertValidConfig(domain_proxy_config, {
'cert': basestring,
'key': basestring
})
# Special xPR-only checks.
if test_type == 'MCP':
return
# Max. 1 iteration.
self.assertEqual(
len(config['iterationData']), 1,
'XPR test cases only have one iteration.')
# No DPAs.
if 'dpas' in config:
self.assertEqual(
len(config['dpas']), 0,
'XPR does not permit the use of DPAs.')
# Max. one protected entity.
protected_entities = config['iterationData'][0]['protectedEntities']
def getNumberOfEntities(entity_type):
if entity_type not in protected_entities:
return 0
else:
return len(protected_entities[entity_type])
if getNumberOfEntities('ppaRecords') > 0 or getNumberOfEntities(
'palRecords') > 0:
self.assertGreaterEqual(
getNumberOfEntities('ppaRecords'), 1,
'Must define at least one PPA record.')
self.assertGreaterEqual(
getNumberOfEntities('palRecords'), 1,
'Must define at least one PAL record if a PPA is to be injected.')
for entity_type in [
'escRecords', 'gwpzRecords', 'gwblRecords', 'fssRecords'
]:
self.assertEqual(
getNumberOfEntities(entity_type), 0,
'May not define any additional protected entities in an xPR test case; found extra entities in "%s".'
% entity_type)
else:
entity_count = 0
for entity_type in [
'escRecords', 'gwpzRecords', 'gwblRecords', 'fssRecords'
]:
entity_count += getNumberOfEntities(entity_type)
self.assertEqual(
entity_count, 1,
'Must define exactly one protected entity for an xPR test case.')
if test_type == 'xPR1':
# No SAS test harnesses.
self.assertEqual(
len(config['sasTestHarnessConfigs']), 0,
'xPR.1 does not permit the use of SAS Test Harnesses.')
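  # A minimal sketch of the configuration shape validated above (illustrative
  # placeholder values, not a working test configuration):
  #
  #   config = {
  #       'initialCbsdRequestsWithDomainProxies': [{
  #           'registrationRequests': [],
  #           'grantRequests': [],
  #           'conditionalRegistrationData': []
  #       }],
  #       'initialCbsdRecords': [],
  #       'iterationData': [{
  #           'cbsdRequestsWithDomainProxies': [{
  #               'registrationRequests': [],
  #               'grantRequests': [],
  #               'conditionalRegistrationData': []
  #           }],
  #           'cbsdRecords': [],
  #           'protectedEntities': {},
  #           'dpaActivationList': [],
  #           'dpaDeactivationList': [],
  #           'sasTestHarnessData': []
  #       }],
  #       'sasTestHarnessConfigs': [],
  #       'domainProxyConfigs': [{'cert': 'dp.cert', 'key': 'dp.key'}]
  #   }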
def executeMcpTestSteps(self, config, test_type):
"""Execute all teststeps for MCP testcase and for xPR testcases till Step22
Args:
config: Testcase configuration
test_type: A string which indicates the type of testcase to be invoked("MCP"/"xPR1"/"xPR2")
"""
self.checkMcpConfig(config, test_type)
# Initialize test-wide variables, and state variables.
self.config = config
self.active_dpas = []
self.test_type = test_type
self.sas_test_harness_objects = []
self.domain_proxy_objects = []
self.protected_entity_records = {}
self.num_peer_sases = len(config['sasTestHarnessConfigs'])
self.cpas_executor = ThreadPoolExecutor(max_workers=1)
self.agg_interf_check_executor = ThreadPoolExecutor(max_workers=1)
logging.info('Running test type "%s" with %d SAS test harnesses.',
self.test_type, self.num_peer_sases)
self.sas_uut_fad = None
self.test_harness_fads = []
self.all_dpa_checks_succeeded = True
logging.info('Creating domain proxies.')
for domain_proxy in config['domainProxyConfigs']:
self.domain_proxy_objects.append(test_harness_objects.DomainProxy(
self,
domain_proxy['cert'],
domain_proxy['key']))
if test_type == 'MCP':
# Step 1: Load DPAs
logging.info('Step 1: load DPAs')
self._sas_admin.TriggerLoadDpas()
# Step 2: ESC informs SAS about inactive DPAs
logging.info('Step 2: deactivate all DPAs')
self._sas_admin.TriggerBulkDpaActivation({'activate': False})
else:
logging.info('Skipping steps 1 and 2 because this is an xPR test.')
# Step 3: creates multiple SAS THs.
    logging.info('Step 3: create %d SAS test harnesses', self.num_peer_sases)
if self.num_peer_sases:
for test_harness in config['sasTestHarnessConfigs']:
logging.info('Creating SAS TH with config %s', test_harness)
# Initialize SAS Test Harness Server instance to dump FAD records
sas_test_harness_object = SasTestHarnessServer(test_harness['sasTestHarnessName'],
test_harness['hostName'],
test_harness['port'],
test_harness['serverCert'],
test_harness['serverKey'],
test_harness['caCert'])
# Start the server
sas_test_harness_object.start()
# informing SAS UUT about SAS Test Harnesses
certificate_hash = getCertificateFingerprint(test_harness['serverCert'])
self._sas_admin.InjectPeerSas({'certificateHash': certificate_hash,
'url': sas_test_harness_object.getBaseUrl()})
self.sas_test_harness_objects.append(sas_test_harness_object)
# Set the cert/key to use when requesting FADs from SAS UUT.
self.fad_cert = config['sasTestHarnessConfigs'][0]['serverCert']
self.fad_key = config['sasTestHarnessConfigs'][0]['serverKey']
# Step 4: Register + Grant.
logging.info('Step 4: Register + Grant initial CBSDs.')
self.registerAndGrantCbsds(config['initialCbsdRequestsWithDomainProxies'],
config['initialCbsdRecords'])
# Step 5: Complete each iteration of the testcase.
for iteration_number, iteration_content in enumerate(config['iterationData']):
logging.info('Step 5: execute iteration number %d', iteration_number)
# Execute steps for single iteration
self.executeSingleMCPIteration(iteration_content)
# Fail now (at the end) if any DPA check failed anywhere in the test.
self.assertTrue(
self.all_dpa_checks_succeeded,
'At least one DPA check failed; please see logs for details.')
# Stop test harness servers.
for test_harness in self.sas_test_harness_objects:
test_harness.shutdown()
del test_harness
self.cpas_executor.shutdown()
self.agg_interf_check_executor.shutdown()
def executeSingleMCPIteration(self, iteration_content):
"""Executes the steps from Step 6 to [end] for MCP and XPR testcases
Args:
iteration_content: A dictionary with multiple key-value pairs that contain iteration data
"""
# Step 6: Inject protected entities into UUT
logging.info('Step 6: inject new protected entities into SAS UUT')
if 'fssRecords' in iteration_content['protectedEntities']:
logging.info('Injecting FSS records.')
for index, fss_record in enumerate(iteration_content['protectedEntities']['fssRecords']):
try:
logging.info('Injecting FSS record #%d', index)
self._sas_admin.InjectFss(fss_record)
except Exception:
logging.error(common_strings.CONFIG_ERROR_SUSPECTED)
raise
if 'gwblRecords' in iteration_content['protectedEntities']:
logging.info('Injecting GWBL records.')
for index, gwbl_record in enumerate(iteration_content['protectedEntities']['gwblRecords']):
try:
logging.info('Injecting GWBL record #%d', index)
self._sas_admin.InjectWisp(gwbl_record)
except Exception:
logging.error(common_strings.CONFIG_ERROR_SUSPECTED)
raise
if 'gwpzRecords' in iteration_content['protectedEntities']:
logging.info('Injecting GWPZ records.')
for index, gwpz_record in enumerate(iteration_content['protectedEntities']['gwpzRecords']):
try:
logging.info('Injecting GWPZ record #%d', index)
self._sas_admin.InjectWisp(gwpz_record)
except Exception:
logging.error(common_strings.CONFIG_ERROR_SUSPECTED)
raise
grid_points = geoutils.GridPolygon(
gwpz_record['zone']['features'][0]['geometry'], res_arcsec=1)
gwpz_record['landCategory'] = drive.nlcd_driver.RegionNlcdVote(
[(pt[1], pt[0]) for pt in grid_points])
logging.info('Land category: %s', gwpz_record['landCategory'])
if 'escRecords' in iteration_content['protectedEntities']:
logging.info('Injecting ESC records.')
for index, esc_record in enumerate(iteration_content['protectedEntities']['escRecords']):
try:
logging.info('Injecting ESC record #%d', index)
self._sas_admin.InjectEscSensorDataRecord({'record': esc_record})
except Exception:
logging.error(common_strings.CONFIG_ERROR_SUSPECTED)
raise
if 'palRecords' in iteration_content['protectedEntities']:
logging.info('Injecting PAL records.')
for index, pal_record in enumerate(iteration_content['protectedEntities']['palRecords']):
try:
logging.info('Injecting PAL record #%d', index)
self._sas_admin.InjectPalDatabaseRecord(pal_record)
except Exception:
logging.error(common_strings.CONFIG_ERROR_SUSPECTED)
raise
if 'ppaRecords' in iteration_content['protectedEntities']:
logging.info('Injecting PPA records.')
for index, ppa_record in enumerate(iteration_content['protectedEntities']['ppaRecords']):
try:
logging.info('Injecting PPA record #%d', index)
self._sas_admin.InjectZoneData({'record': ppa_record})
except Exception:
logging.error(common_strings.CONFIG_ERROR_SUSPECTED)
raise
for key in iteration_content['protectedEntities']:
      if key not in self.protected_entity_records:
self.protected_entity_records[key] = iteration_content[
'protectedEntities'][key]
else:
self.protected_entity_records[key].extend(
iteration_content['protectedEntities'][key])
# Steps 7, 8: Creating FAD Object and Pull FAD records from SAS UUT
logging.info('Steps 7 + 8: retrieve FAD from SAS UUT.')
if self.num_peer_sases:
logging.info('Retrieving SAS UUT FAD.')
self.sas_uut_fad = getFullActivityDumpSasUut(self._sas,
self._sas_admin,
self.fad_cert,
self.fad_key)
# Step 9: load SAS test harnesses and have the main test harness retrieve
# those FADs for use in the reference models.
if self.num_peer_sases:
# Configure SAS Test Harnesses with new FAD information.
logging.info('Step 9: re-configuring SAS test harnesses.')
for test_harness, test_harness_data in zip(
self.sas_test_harness_objects,
iteration_content['sasTestHarnessData']):
self.InjectTestHarnessFccIds(test_harness_data['cbsdRecords'])
test_harness.writeFadRecords([test_harness_data['cbsdRecords']])
# Pull FAD from SAS Test Harnesses; a FAD object is created for each SAS Test Harness.
logging.info('Collecting SAS TH FADs.')
self.test_harness_fads = []
for test_harness in self.sas_test_harness_objects:
self.test_harness_fads.append(
getFullActivityDumpSasTestHarness(
test_harness.getSasTestHarnessInterface()))
# Step 10: Trigger CPAS and wait until completion.
logging.info('Step 10: Triggering CPAS.')
self.cpas = self.cpas_executor.submit(self.TriggerDailyActivitiesImmediatelyAndWaitUntilComplete)
if self.num_peer_sases:
# Step 11: Invoke IAP reference model
logging.info('Step 11: Performing pre-IAP filtering.')
pre_iap_filtering.preIapReferenceModel(self.protected_entity_records,
self.sas_uut_fad, self.test_harness_fads)
logging.info('Step 11: Performing IAP.')
self.performIap()
# Step 12: Invoke DPA ML reference model for currently-active and
# will-be-active DPAs.
logging.info('Step 12: invoke the DPA ML reference model.')
self.dpa_managers = {}
self.dpa_margins = {}
for active_dpa in self.active_dpas + iteration_content['dpaActivationList']:
dpa_config = self.config['dpas'][active_dpa['dpaId']]
dpa = dpa_mgr.BuildDpa(active_dpa['dpaId'], dpa_config['points_builder'])
low_freq_mhz = active_dpa['frequencyRange']['lowFrequency'] // ONE_MHZ
high_freq_mhz = active_dpa['frequencyRange']['highFrequency'] // ONE_MHZ
dpa.ResetFreqRange([(low_freq_mhz, high_freq_mhz)])
if self.num_peer_sases:
logging.info('Calculating move list for DPA: %s', active_dpa)
dpa.SetGrantsFromFad(self.sas_uut_fad, self.test_harness_fads)
dpa.ComputeMoveLists()
dpa_combined_id = '%s,%s,%s' % (
active_dpa['dpaId'], active_dpa['frequencyRange']['lowFrequency'],
active_dpa['frequencyRange']['highFrequency'])
self.dpa_managers[dpa_combined_id] = dpa
self.dpa_margins[dpa_combined_id] = dpa_config['movelistMargin']
logging.info('Waiting for CPAS to complete (Started in step 10).')
self.cpas.result()
logging.info('CPAS started in step 10 complete.')
# Steps 13, 14, 15, and 16: send heartbeat request for the grants,
# relinquish the grant, grant request and heartbeat for new grants.
logging.info('Steps 13 - 16: heartbeat, relinquish, grant, heartbeat.')
for index, domain_proxy_object in enumerate(self.domain_proxy_objects):
logging.info('Working on domain proxy %d.', index)
domain_proxy_object.performHeartbeatAndUpdateGrants()
# Steps 17, 18, and CHECK
logging.info('Steps 17, 18, and CHECK: calculating reference DPA move list,'
' performing DPA aggregate interference check and performing'
' aggregate interference check.')
self.performIapAndDpaChecks()
# Step 19: DP Test Harnesses register N(2,k) CBSDs with SAS UUT.
logging.info('Step 19: registering and granting new CBSDs.')
self.registerAndGrantCbsds(
iteration_content['cbsdRequestsWithDomainProxies'],
iteration_content['cbsdRecords'])
# Step 20: Send heartbeat request for all CBSDs managed by SAS UUT.
logging.info('Step 20: heartbeating all CBSDs.')
for index, domain_proxy_object in enumerate(self.domain_proxy_objects):
logging.info('Heartbeating for domain proxy %d.', index)
domain_proxy_object.heartbeatForAllActiveGrants()
# Step 21: ESC Test harness deactivates previously-activated DPAs.
logging.info('Step 21: activating and deactivating DPAs.')
for dpa in iteration_content['dpaActivationList']:
if dpa['frequencyRange']['lowFrequency'] < LOW_FREQUENCY_LIMIT_HZ:
logging.warning('DPA %s is always-active in frequency, so there is no need to activate.', dpa['dpaId'])
if dpa not in self.active_dpas:
self.active_dpas.append(dpa)
continue
if dpa in self.active_dpas:
logging.warning('DPA is already active, skipping activation: %s', dpa)
continue
logging.info('Activating: %s', dpa)
self._sas_admin.TriggerDpaActivation(dpa)
self.active_dpas.append(dpa)
for dpa in iteration_content['dpaDeactivationList']:
if dpa['frequencyRange']['lowFrequency'] < LOW_FREQUENCY_LIMIT_HZ:
logging.warning('DPA %s is always-active in frequency, so there is no need to deactivate.', dpa['dpaId'])
self.active_dpas.remove(dpa)
continue
logging.info('Deactivating: %s', dpa)
self._sas_admin.TriggerDpaDeactivation(dpa)
self.active_dpas.remove(dpa)
# Step 22: wait for 240 sec if any DPA is activated in Step 21, else 15 sec.
logging.info('Step 22: waiting.')
if iteration_content['dpaActivationList']:
time.sleep(240)
else:
time.sleep(15)
# Step 23: Send heartbeat request for all CBSDs managed by SAS UUT.
logging.info('Step 23: heartbeating all CBSDs.')
for index, domain_proxy_object in enumerate(self.domain_proxy_objects):
logging.info('Heartbeating for domain proxy %d.', index)
domain_proxy_object.heartbeatForAllActiveGrants()
# Steps 24, 25, and CHECK
logging.info('Steps 24, 25, and CHECK: calculating reference DPA move list,'
' performing DPA aggregate interference check and performing'
' aggregate interference check.')
self.performIapAndDpaChecks()
def performIapAndDpaChecks(self):
"""Checks aggregate interference to all protected entities and active DPAs.
"""
# Check DPA movelist + Check IAP interference (can happen in parallel).
self.aggregate_interference_check = self.agg_interf_check_executor.submit(self.performAggregateInterferenceCheck)
# Get list of authorized grants, in the format required from DPA movelist checking.
grant_info = data.getAuthorizedGrantsFromDomainProxies(self.domain_proxy_objects)
# Calculate DPA movelist.
for active_dpa in self.active_dpas:
# Check SAS UUT authorized grants do not exceed allowed threshold as calculated by the DPA reference model.
logging.info('CHECK: DPA aggregate interference check for DPA %s.',
active_dpa['dpaId'])
dpa_combined_id = '%s,%s,%s' % (
active_dpa['dpaId'], active_dpa['frequencyRange']['lowFrequency'],
active_dpa['frequencyRange']['highFrequency'])
dpa = self.dpa_managers[dpa_combined_id]
this_dpa_check_succeeded = dpa.CheckInterference(
sas_uut_active_grants=grant_info,
margin_db=self.dpa_margins[dpa_combined_id],
do_abs_check_single_uut=(self.num_peer_sases == 0))
if not this_dpa_check_succeeded:
logging.error('Check for DPA %s FAILED.', active_dpa['dpaId'])
self.all_dpa_checks_succeeded = False
logging.info('Waiting for aggregate interference check to complete')
self.aggregate_interference_check.result()
logging.info('Aggregate interference check is now COMPLETE.')
def performIap(self):
self.ppa_ap_iap_ref_values_list = []
self.gwpz_ap_iap_ref_values_list = []
self.fss_blocking_ap_iap_ref_values_list = []
self.fss_cochannel_ap_iap_ref_values_list = []
self.esc_ap_iap_ref_values_list = []
# Calculate the interference value for all PPAs.
if 'ppaRecords' in self.protected_entity_records:
for ppa_record in self.protected_entity_records['ppaRecords']:
pal_records = self.protected_entity_records['palRecords']
# Call IAP reference model for PPA.
logging.info('Calling the IAP reference model for PPA (%s) with PAL records (%s)', ppa_record, pal_records)
ppa_ap_iap_ref_values = iap.performIapForPpa(
ppa_record,
self.sas_uut_fad,
self.test_harness_fads,
pal_records)
# Store the IAP results for future comparison.
logging.debug('IAP reference model results: %s' % str(ppa_ap_iap_ref_values))
self.ppa_ap_iap_ref_values_list.append(ppa_ap_iap_ref_values)
# Calculate the interference value for all GWPZs.
if 'gwpzRecords' in self.protected_entity_records:
for gwpz_record in self.protected_entity_records['gwpzRecords']:
# Call IAP reference model for GWPZ.
logging.info('Calling the IAP reference model for GWPZ (%s).', gwpz_record)
gwpz_ap_iap_ref_values = iap.performIapForGwpz(
gwpz_record,
self.sas_uut_fad,
self.test_harness_fads)
# Store the IAP results for future comparison.
logging.debug('IAP reference model results: %s' % str(gwpz_ap_iap_ref_values))
self.gwpz_ap_iap_ref_values_list.append(gwpz_ap_iap_ref_values)
# Calculate the interference value for all FSS sites.
if 'fssRecords' in self.protected_entity_records:
for fss_record in self.protected_entity_records['fssRecords']:
        fss_freq_range = fss_record['record']['deploymentParam'][0][
            'operationParam']['operationFrequencyRange']
fss_low_freq = fss_freq_range['lowFrequency']
fss_high_freq = fss_freq_range['highFrequency']
fss_ttc_flag = fss_record['ttc']
if (fss_low_freq >= interference.FSS_LOW_FREQ_HZ and
fss_low_freq < interference.CBRS_HIGH_FREQ_HZ):
# Call IAP for FSS blocking and FSS cochannel.
logging.info('Calling the cochannel IAP reference model for FSS (%s).', fss_record)
          fss_cochannel_ap_iap_ref_values = iap.performIapForFssCochannel(
              fss_record, self.sas_uut_fad, self.test_harness_fads)
logging.debug('IAP reference model results: %s' % str(fss_cochannel_ap_iap_ref_values))
logging.info('Calling the blocking IAP reference model for FSS (%s).', fss_record)
          fss_blocking_ap_iap_ref_values = iap.performIapForFssBlocking(
              fss_record, self.sas_uut_fad, self.test_harness_fads)
logging.debug('IAP reference model results: %s' % str(fss_blocking_ap_iap_ref_values))
# Store the IAP results for future comparison.
self.fss_cochannel_ap_iap_ref_values_list.append(fss_cochannel_ap_iap_ref_values)
self.fss_blocking_ap_iap_ref_values_list.append(fss_blocking_ap_iap_ref_values)
elif (fss_low_freq >= interference.FSS_TTC_LOW_FREQ_HZ and
fss_high_freq <= interference.FSS_TTC_HIGH_FREQ_HZ and
fss_ttc_flag is True):
# Call IAP reference model for FSS blocking.
logging.info('Calling the blocking IAP reference model for FSS (%s).', fss_record)
fss_blocking_ap_iap_ref_values = iap.\
performIapForFssBlocking(fss_record, self.sas_uut_fad, self.test_harness_fads)
logging.debug('IAP reference model results: %s' % str(fss_blocking_ap_iap_ref_values))
# Store the IAP results for future comparison.
self.fss_cochannel_ap_iap_ref_values_list.append(None)
self.fss_blocking_ap_iap_ref_values_list.append(fss_blocking_ap_iap_ref_values)
else:
self.fss_cochannel_ap_iap_ref_values_list.append(None)
self.fss_blocking_ap_iap_ref_values_list.append(None)
# Calculate the interference value for all ESCs.
if 'escRecords' in self.protected_entity_records:
for esc_record in self.protected_entity_records['escRecords']:
# Call IAP reference model for ESC.
logging.info('Calling the IAP reference model for ESC (%s).' % str(esc_record))
esc_ap_iap_ref_values = iap.performIapForEsc(
esc_record,
self.sas_uut_fad,
self.test_harness_fads)
# Store the IAP results for future comparison.
logging.debug('IAP reference model results: %s' % str(esc_ap_iap_ref_values))
self.esc_ap_iap_ref_values_list.append(esc_ap_iap_ref_values)
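# A sketch of the protected_entity_records shape that performIap and
# performAggregateInterferenceCheck assume (all keys optional; record
# contents come from the JSON test data, so the values shown are
# placeholders):
#   self.protected_entity_records = {
#       'palRecords': [...],   # required whenever 'ppaRecords' is present
#       'ppaRecords': [...],
#       'gwpzRecords': [...],
#       'fssRecords': [...],   # each with ['record']['deploymentParam'] and ['ttc']
#       'escRecords': [...],
#   }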
def performAggregateInterferenceCheck(self):
authorized_grants = None
if any(key in self.protected_entity_records
for key in ['gwpzRecords', 'fssRecords', 'escRecords']):
# Get Grant info for all CBSDs with grants from SAS UUT.
authorized_grants = data.getAuthorizedGrantsFromDomainProxies(
self.domain_proxy_objects)
# Calculate and compare the interference value for PPA protected entity
if 'ppaRecords' in self.protected_entity_records:
for index, ppa_record in enumerate(self.protected_entity_records['ppaRecords']):
pal_records = self.protected_entity_records['palRecords']
# Get grant info for all CBSDs with grants from the SAS UUT that are not
# part of the current PPA.
logging.info('Checking aggregate interference for PPA (%s) with PAL records (%s)', ppa_record, pal_records)
ppa_authorized_grants = data.getAuthorizedGrantsFromDomainProxies(
self.domain_proxy_objects, ppa_record=ppa_record)
# Call the aggregate interference reference model for the PPA.
ppa_aggr_interference = aggregate_interference.calculateAggregateInterferenceForPpa(
ppa_record, pal_records, ppa_authorized_grants)
ppa_ap_iap_ref_values = None
if self.num_peer_sases > 0:
ppa_ap_iap_ref_values = self.ppa_ap_iap_ref_values_list[index]
else:
ppa_ap_iap_ref_values = interference.dbToLinear(iap.THRESH_PPA_DBM_PER_IAPBW)
# Compare the interference values calculated from both models
self.compareIapAndAggregateResults(ppa_ap_iap_ref_values, ppa_aggr_interference, 'area')
# Calculate and compare the interference value for GWPZ protected entity
if 'gwpzRecords' in self.protected_entity_records:
for index, gwpz_record in enumerate(self.protected_entity_records['gwpzRecords']):
logging.info('Checking aggregate interference for GWPZ (%s).', gwpz_record)
# Call aggregate interference reference model for GWPZ.
gwpz_aggr_interference = aggregate_interference.calculateAggregateInterferenceForGwpz(
gwpz_record,
authorized_grants)
gwpz_ap_iap_ref_values = None
if self.num_peer_sases > 0:
gwpz_ap_iap_ref_values = self.gwpz_ap_iap_ref_values_list[index]
else:
gwpz_ap_iap_ref_values = interference.dbToLinear(iap.THRESH_GWPZ_DBM_PER_IAPBW)
# Compare the interference values calculated from both models
self.compareIapAndAggregateResults(gwpz_ap_iap_ref_values, gwpz_aggr_interference, 'area')
# Calculate and compare the interference value for FSS site.
if 'fssRecords' in self.protected_entity_records:
for index, fss_record in enumerate(self.protected_entity_records['fssRecords']):
logging.info('Checking aggregate interference for FSS (%s).', fss_record)
fss_freq_range = fss_record['record']['deploymentParam'][0]\
['operationParam']['operationFrequencyRange']
fss_low_freq = fss_freq_range['lowFrequency']
fss_high_freq = fss_freq_range['highFrequency']
fss_ttc_flag = fss_record['ttc']
if (fss_low_freq >= interference.FSS_LOW_FREQ_HZ and
fss_low_freq < interference.CBRS_HIGH_FREQ_HZ):
# Call aggregate interference reference model for FSS blocking and FSS cochannel.
fss_cochannel_aggr_interference = aggregate_interference.\
calculateAggregateInterferenceForFssCochannel(fss_record, authorized_grants)
fss_blocking_aggr_interference = aggregate_interference.\
calculateAggregateInterferenceForFssBlocking(fss_record, authorized_grants)
fss_cochannel_ap_iap_ref_values = None
fss_blocking_ap_iap_ref_values = None
if self.num_peer_sases > 0:
fss_cochannel_ap_iap_ref_values = self.fss_cochannel_ap_iap_ref_values_list[index]
fss_blocking_ap_iap_ref_values = self.fss_blocking_ap_iap_ref_values_list[index]
else:
fss_cochannel_ap_iap_ref_values = interference.dbToLinear(iap.THRESH_FSS_CO_CHANNEL_DBM_PER_IAPBW)
fss_blocking_ap_iap_ref_values = interference.dbToLinear(iap.THRESH_FSS_BLOCKING_DBM_PER_RBW)
# Check and compare interference for FSS entity
logging.info('Checking cochannel.')
self.compareIapAndAggregateResults(fss_cochannel_ap_iap_ref_values, fss_cochannel_aggr_interference, 'point')
logging.info('Checking blocking.')
self.compareIapAndAggregateResults(fss_blocking_ap_iap_ref_values, fss_blocking_aggr_interference, 'point')
elif (fss_low_freq >= interference.FSS_TTC_LOW_FREQ_HZ and
fss_high_freq <= interference.FSS_TTC_HIGH_FREQ_HZ and
fss_ttc_flag is True):
# Call aggregate interference model for FSS blocking.
fss_blocking_aggr_interference = aggregate_interference.\
calculateAggregateInterferenceForFssBlocking(fss_record, authorized_grants)
fss_blocking_ap_iap_ref_values = None
if self.num_peer_sases > 0:
fss_blocking_ap_iap_ref_values = self.fss_blocking_ap_iap_ref_values_list[index]
else:
fss_blocking_ap_iap_ref_values = interference.dbToLinear(iap.THRESH_FSS_BLOCKING_DBM_PER_RBW)
# Compare the interference values calculated from both models
logging.info('Checking blocking.')
self.compareIapAndAggregateResults(fss_blocking_ap_iap_ref_values,
fss_blocking_aggr_interference, 'point')
# Calculate and compare the interference value for ESC.
if 'escRecords' in self.protected_entity_records:
for index, esc_record in enumerate(self.protected_entity_records['escRecords']):
logging.info('Checking aggregate interference for ESC (%s).', esc_record)
# Call aggregate interference model for ESC.
esc_aggr_interference = aggregate_interference.\
calculateAggregateInterferenceForEsc(esc_record, authorized_grants)
esc_ap_iap_ref_values = None
if self.num_peer_sases > 0:
esc_ap_iap_ref_values = self.esc_ap_iap_ref_values_list[index]
else:
esc_ap_iap_ref_values = interference.dbToLinear(iap.THRESH_ESC_DBM_PER_IAPBW)
# Compare the interference values calculated from both models
self.compareIapAndAggregateResults(esc_ap_iap_ref_values, esc_aggr_interference, 'point')
def compareIapAndAggregateResults(self, ap_iap_ref_values, aggr_interference, entity_type):
"""Verify aggregate interference is less than or equal to ap_iap_ref value calculated
by IAP model plus a delta for each FSS and ESC sensor protected point, and for at
least 95% of the protected points of each PPA and GWPZ.
Args:
ap_iap_ref_values : Aggregate interference output calculated from IAP
reference model
aggr_interference : Aggregate interference output calculated from Aggregate
Inteference reference model
entity_type : To identify its a point protected or area protected entity
Returns:
Pass or Fail based on the comparison of the values within range
"""
iter_cnt = 0 # Variable to count the number of interference entries
match_cnt = 0 # Variable to count the number of matching interference entries
iap_margin_lin = interference.dbToLinear(DELTA_IAP)
scalar = isinstance(ap_iap_ref_values, numbers.Number)
logging.info('IAP aggr interference checking - Entity: %s Thresh: %s',
entity_type, str(ap_iap_ref_values) if scalar else 'from IAP')
for lat_val, lat_dict in six.iteritems(aggr_interference):
for long_val, interf_list in six.iteritems(lat_dict):
if scalar:
# iter(callable, sentinel) produces an infinite generator of the scalar;
# since zip iterates until one of the passed iterators is exhausted, this
# yields the same result as a list of len(interf_list) with the scalar in
# every field, but without having to construct the list.
ref_interf_list = iter((lambda : ap_iap_ref_values), 1)
else:
ref_interf_list = ap_iap_ref_values[lat_val][long_val]
self.assertEqual(len(interf_list), len(ref_interf_list))
if entity_type == 'area':
# For area entities, a point only counts as a match if every channel at
# that point is below the allowed threshold.
iter_cnt += 1
below_threshold = True
list_index = 0
logging.debug('Check lat %s long %s: %s',
lat_val, long_val,
list(zip(interf_list, ref_interf_list)))
for interf, ref_interf in zip(interf_list, ref_interf_list):
list_index += 1
if interf > ref_interf * iap_margin_lin:
below_threshold = False
logging.info('Threshold exceeded for chan %d: interf=%s ref_interf=%s',
list_index, interf, ref_interf)
if below_threshold:
match_cnt += 1
else:
list_index = 0
logging.debug('Check lat %s long %s: %s',
lat_val, long_val,
list(zip(interf_list, ref_interf_list)))
for interf, ref_interf in zip(interf_list, ref_interf_list):
iter_cnt += 1
list_index += 1
if interf <= ref_interf * iap_margin_lin:
match_cnt += 1
else:
logging.info('Threshold exceeded for chan %d: interf=%s ref_interf=%s',
list_index, interf, ref_interf)
# All types of protection must have at least one point.
self.assertGreater(iter_cnt, 0, msg=common_strings.CONFIG_ERROR_SUSPECTED)
logging.info('Protection type: "%s"; point count: %d; match count: %d.' % (entity_type, iter_cnt, match_cnt))
if entity_type == 'area':
# For protected areas, at least 95 percent of the points must be below
# the threshold.
match_percentage = (100.0 * match_cnt) / iter_cnt
self.assertGreaterEqual(
match_percentage, 95,
'Expected 95 percent of locations to be below the threshold but only %2.3f percent were.'
% match_percentage)
else:
# For protection points, the match must be 100 percent.
self.assertEqual(
match_cnt, iter_cnt,
'Expected all constraints to be below the threshold but %d were not.' %
(iter_cnt - match_cnt))
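# A self-contained sketch of the iter(callable, sentinel) / zip idiom used in
# compareIapAndAggregateResults above: comparing a per-channel list against a
# scalar threshold without materializing a reference list. The names and
# values here are illustrative only.
def _zip_against_scalar_demo():
  threshold = 0.5
  interf_list = [0.1, 0.7, 0.3]
  # iter(f, sentinel) calls f() until it returns the sentinel; f() never
  # returns this unique sentinel object, so the stream is endless and zip
  # stops after len(interf_list) pairs.
  ref_interf_list = iter(lambda: threshold, object())
  return [interf <= ref for interf, ref in zip(interf_list, ref_interf_list)]
# _zip_against_scalar_demo() == [True, False, True]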
def registerAndGrantCbsds(self, cbsd_requests_with_domain_proxy,
individual_cbsd_records):
"""Registers new CBSDs with existing domain proxies and new individual
CBSDs. Sends a Grant request for each.
"""
dp_index = 0
for domain_proxy_object, cbsdRequestsWithDomainProxy in zip(
self.domain_proxy_objects, cbsd_requests_with_domain_proxy):
registration_requests = cbsdRequestsWithDomainProxy[
'registrationRequests']
grant_requests = cbsdRequestsWithDomainProxy['grantRequests']
conditional_registration_data = cbsdRequestsWithDomainProxy[
'conditionalRegistrationData']
# Initiation of Registration and Grant procedures for the CBSDs using domain proxies.
logging.info('Registering and Granting CBSDs which use domain proxy %d.',
dp_index)
dp_index += 1
domain_proxy_object.registerCbsdsAndRequestGrants(
registration_requests,
grant_requests,
conditional_registration_data=conditional_registration_data)
# Register individual CBSDs (each via a Domain Proxy wrapping a single CBSD).
for cbsd_index, cbsd_record in enumerate(individual_cbsd_records):
logging.info('Registering and Granting CBSD %d.', cbsd_index)
proxy = test_harness_objects.DomainProxy(self, cbsd_record['clientCert'],
cbsd_record['clientKey'])
conditionalRegistrationData = [
cbsd_record['conditionalRegistrationData']
] if 'conditionalRegistrationData' in cbsd_record else []
proxy.registerCbsdsAndRequestGrants([cbsd_record['registrationRequest']],
[cbsd_record['grantRequest']],
conditionalRegistrationData)
self.domain_proxy_objects.append(proxy)
class MultiConstraintProtectionTestcase(McpXprCommonTestcase):
def setUp(self):
self._sas, self._sas_admin = sas.GetTestingSas()
self._sas_admin.Reset()
def tearDown(self):
self.ShutdownServers()
def generate_MCP_1_default_config(self, filename):
""" Generates the WinnForum configuration for MCP.1. """
# Load devices for the SAS UUT, used across multiple iterations through multiple domain proxies.
device_1 = json_load(
os.path.join('testcases', 'testdata', 'device_a.json'))
device_2 = json_load(
os.path.join('testcases', 'testdata', 'device_b.json'))
device_3 = json_load(
os.path.join('testcases', 'testdata', 'device_c.json'))
device_4 = json_load(
os.path.join('testcases', 'testdata', 'device_d.json'))
device_5 = json_load(
os.path.join('testcases', 'testdata', 'device_e.json'))
device_6 = json_load(
os.path.join('testcases', 'testdata', 'device_f.json'))
device_7 = json_load(
os.path.join('testcases', 'testdata', 'device_g.json'))
device_8 = json_load(
os.path.join('testcases', 'testdata', 'device_h.json'))
# Load Grant requests
grant_request_1 = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_request_2 = json_load(
os.path.join('testcases', 'testdata', 'grant_1.json'))
grant_request_3 = json_load(
os.path.join('testcases', 'testdata', 'grant_2.json'))
grant_request_4 = json_load(
os.path.join('testcases', 'testdata', 'grant_3.json'))
grant_request_5 = json_load(
os.path.join('testcases', 'testdata', 'grant_0.json'))
grant_request_6 = json_load(
os.path.join('testcases', 'testdata', 'grant_1.json'))
grant_request_7 = json_load(
os.path.join('testcases', 'testdata', 'grant_2.json'))
grant_request_8 = json_load(
os.path.join('testcases', 'testdata', 'grant_3.json'))
# device_b, device_d and device_h are of Category B
# Load Conditional Data
self.assertEqual(device_2['cbsdCategory'], 'B')
conditionals_device_2 = {
'cbsdCategory': device_2['cbsdCategory'],
'fccId': device_2['fccId'],
'cbsdSerialNumber': device_2['cbsdSerialNumber'],
'airInterface': device_2['airInterface'],
'installationParam': device_2['installationParam'],
'measCapability': device_2['measCapability']
}
self.assertEqual(device_4['cbsdCategory'], 'B')
conditionals_device_4 = {
'cbsdCategory': device_4['cbsdCategory'],
'fccId': device_4['fccId'],
'cbsdSerialNumber': device_4['cbsdSerialNumber'],
'airInterface': device_4['airInterface'],
'installationParam': device_4['installationParam'],
'measCapability': device_4['measCapability']
}
self.assertEqual(device_8['cbsdCategory'], 'B')
conditionals_device_8 = {
'cbsdCategory': device_8['cbsdCategory'],
'fccId': device_8['fccId'],
'cbsdSerialNumber': device_8['cbsdSerialNumber'],
'airInterface': device_8['airInterface'],
'installationParam': device_8['installationParam'],
'measCapability': device_8['measCapability']
}
# Remove conditionals from registration
del device_2['cbsdCategory']
del device_2['airInterface']
del device_2['installationParam']
del device_2['measCapability']
del device_4['cbsdCategory']
del device_4['airInterface']
del device_4['installationParam']
del device_4['measCapability']
del device_8['cbsdCategory']
del device_8['airInterface']
del device_8['installationParam']
del device_8['measCapability']
# Load GWPZ Record
gwpz_record_1 = json_load(
os.path.join('testcases', 'testdata', 'gwpz_record_0.json'))
# Load FSS record
fss_record_1 = json_load(
os.path.join('testcases', 'testdata', 'fss_record_0.json'))
# Load ESC record
esc_record_1 = json_load(
os.path.join('testcases', 'testdata', 'esc_sensor_record_0.json'))
# Load PPA and PAL record
ppa_record = json_load(
os.path.join('testcases', 'testdata', 'ppa_record_0.json'))
pal_record = json_load(
os.path.join('testcases', 'testdata', 'pal_record_0.json'))
pal_low_frequency = 3550000000
pal_high_frequency = 3560000000
ppa_record_1, pal_records_1 = makePpaAndPalRecordsConsistent(ppa_record,
[pal_record],
pal_low_frequency,
pal_high_frequency,
'test_user_1')
# Define DPAs
dpa_1 = {
'dpaId': 'East4',
'frequencyRange': {'lowFrequency': 3550000000, 'highFrequency': 3650000000}
}
dpa_2 = {
'dpaId': 'East5',
'frequencyRange': {'lowFrequency': 3550000000, 'highFrequency': 3650000000}
}
dpa_3 = {
'dpaId': 'East6',
'frequencyRange': {'lowFrequency': 3550000000, 'highFrequency': 3650000000}
}
dpa_generic = {
'East4': {
'points_builder': 'default (25, 10, 10, 10)',
'movelistMargin': 10
},
'East5': {
'points_builder': 'default (25, 10, 10, 10)',
'movelistMargin': 10
},
'East6': {
'points_builder': 'default (25, 10, 10, 10)',
'movelistMargin': 10
}
}
# Registration and grant records for multiple iterations
cbsd_records_iteration_0_domain_proxy_0 = {
'registrationRequests': [device_1, device_2],
'grantRequests': [grant_request_1, grant_request_2],
'conditionalRegistrationData': [conditionals_device_2]
}
cbsd_records_iteration_0_domain_proxy_1 = {
'registrationRequests': [device_3],
'grantRequests': [grant_request_3],
'conditionalRegistrationData': []
}
cbsd_records_iteration_1_domain_proxy_0 = {
'registrationRequests': [device_4],
'grantRequests': [grant_request_4],
'conditionalRegistrationData': [conditionals_device_4]
}
cbsd_records_iteration_1_domain_proxy_1 = {
'registrationRequests': [device_5, device_6],
'grantRequests': [grant_request_5, grant_request_6],
'conditionalRegistrationData': []
}
# Protected entities records for multiple iterations
protected_entities_iteration_0 = {
'palRecords': pal_records_1,
'ppaRecords': [ppa_record_1],
'escRecords': [esc_record_1]
}
protected_entities_iteration_1 = {
'gwpzRecords': [gwpz_record_1],
'fssRecords': [fss_record_1]
}
# SAS Test Harness configurations:
# the following configurations are for two SAS test harnesses across two iterations.
sas_test_harness_device_1 = json_load(
os.path.join('testcases', 'testdata', 'device_a.json'))
sas_test_harness_device_1['fccId'] = 'test_fcc_id_g'
sas_test_harness_device_1['userId'] = 'test_user_id_g'
sas_test_harness_device_2 = json_load(
os.path.join('testcases', 'testdata', 'device_b.json'))
sas_test_harness_device_2['fccId'] = 'test_fcc_id_h'
sas_test_harness_device_2['userId'] = 'test_user_id_h'
sas_test_harness_device_3 = json_load(
os.path.join('testcases', 'testdata', 'device_c.json'))
sas_test_harness_device_3['fccId'] = 'test_fcc_id_i'
sas_test_harness_device_3['userId'] = 'test_user_id_i'
sas_test_harness_device_4 = json_load(
os.path.join('testcases', 'testdata', 'device_d.json'))
sas_test_harness_device_4['fccId'] = 'test_fcc_id_j'
sas_test_harness_device_4['userId'] = 'test_user_id_j'
sas_test_harness_device_5 = json_load(
os.path.join('testcases', 'testdata', 'device_e.json'))
sas_test_harness_device_5['fccId'] = 'test_fcc_id_k'
sas_test_harness_device_5['userId'] = 'test_user_id_k'
sas_test_harness_device_6 = json_load(
os.path.join('testcases', 'testdata', 'device_f.json'))
sas_test_harness_device_6['fccId'] = 'test_fcc_id_l'
sas_test_harness_device_6['userId'] = 'test_user_id_l'
# Generate Cbsd FAD Records for SAS Test Harness 0, iteration 0
cbsd_fad_records_iteration_0_sas_test_harness_0 = generateCbsdRecords([sas_test_harness_device_1], [[grant_request_1]])
# Generate Cbsd FAD Records for SAS Test Harness 1, iteration 0
cbsd_fad_records_iteration_0_sas_test_harness_1 = generateCbsdRecords([sas_test_harness_device_4, sas_test_harness_device_5],
[[grant_request_2, grant_request_3], [grant_request_4]])
# Generate Cbsd FAD Records for SAS Test Harness 0, iteration 1
cbsd_fad_records_iteration_1_sas_test_harness_0 = generateCbsdRecords([sas_test_harness_device_2, sas_test_harness_device_3],
[[grant_request_1], [grant_request_2]])
# Generate Cbsd FAD Records for SAS Test Harness 1, iteration 1
cbsd_fad_records_iteration_1_sas_test_harness_1 = generateCbsdRecords([sas_test_harness_device_6], [[grant_request_5, grant_request_6]])
# Generate SAS Test Harnesses dump records for multiple iterations
dump_records_iteration_0_sas_test_harness_0 = {
'cbsdRecords': cbsd_fad_records_iteration_0_sas_test_harness_0
}
dump_records_iteration_1_sas_test_harness_0 = {
'cbsdRecords': cbsd_fad_records_iteration_1_sas_test_harness_0
}
dump_records_iteration_0_sas_test_harness_1 = {
'cbsdRecords': cbsd_fad_records_iteration_0_sas_test_harness_1
}
dump_records_iteration_1_sas_test_harness_1 = {
'cbsdRecords': cbsd_fad_records_iteration_1_sas_test_harness_1
}
# SAS Test Harnesses configuration
sas_test_harness_0_config = {
'sasTestHarnessName': 'SAS-TH-1',
'hostName': getFqdnLocalhost(),
'port': getUnusedPort(),
'serverCert': getCertFilename('sas.cert'),
'serverKey': getCertFilename('sas.key'),
'caCert': getCertFilename('ca.cert'),
}
sas_test_harness_1_config = {
'sasTestHarnessName': 'SAS-TH-2',
'hostName': getFqdnLocalhost(),
'port': getUnusedPort(),
'serverCert': getCertFilename('sas_1.cert'),
'serverKey': getCertFilename('sas_1.key'),
'caCert': getCertFilename('ca.cert'),
}
# Create the actual config.
iteration0_config = {
'cbsdRequestsWithDomainProxies': [cbsd_records_iteration_0_domain_proxy_0, cbsd_records_iteration_0_domain_proxy_1],
'cbsdRecords': [{
'registrationRequest': device_7,
'grantRequest': grant_request_7,
'clientCert': getCertFilename('device_g.cert'),
'clientKey': getCertFilename('device_g.key')
}],
'protectedEntities': protected_entities_iteration_0,
'dpaActivationList': [dpa_1, dpa_2],
'dpaDeactivationList': [],
'sasTestHarnessData': [dump_records_iteration_0_sas_test_harness_0, dump_records_iteration_0_sas_test_harness_1]
}
iteration1_config = {
'cbsdRequestsWithDomainProxies': [cbsd_records_iteration_1_domain_proxy_0, cbsd_records_iteration_1_domain_proxy_1],
'cbsdRecords': [{
'registrationRequest': device_8,
'grantRequest': grant_request_8,
'conditionalRegistrationData': conditionals_device_8,
'clientCert': getCertFilename('device_h.cert'),
'clientKey': getCertFilename('device_h.key')
}],
'protectedEntities': protected_entities_iteration_1,
'dpaActivationList': [dpa_3],
'dpaDeactivationList': [dpa_1],
'sasTestHarnessData': [dump_records_iteration_1_sas_test_harness_0, dump_records_iteration_1_sas_test_harness_1]
}
config = {
'initialCbsdRequestsWithDomainProxies': self.getEmptyCbsdRequestsWithDomainProxies(2),
'initialCbsdRecords': [],
'iterationData': [iteration0_config, iteration1_config],
'sasTestHarnessConfigs': [sas_test_harness_0_config, sas_test_harness_1_config],
'domainProxyConfigs': [{'cert': getCertFilename('domain_proxy.cert'),
'key': getCertFilename('domain_proxy.key')},
{'cert': getCertFilename('domain_proxy_1.cert'),
'key': getCertFilename('domain_proxy_1.key')}],
'dpas': dpa_generic
}
writeConfig(filename, config)
@configurable_testcase(generate_MCP_1_default_config)
def test_WINNF_FT_S_MCP_1(self, config_filename):
"""SAS manages a mix of GAA and PAL Grants in 3550 MHz
to 3700 MHz to protect configurable IAP-protected entities and DPAs
"""
config = loadConfig(config_filename)
# Invoke MCP test steps
self.executeMcpTestSteps(config, 'MCP')
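# A minimal sketch (hypothetical driver, not part of the harness) showing how
# the default config above is produced and consumed:
#   tc = MultiConstraintProtectionTestcase('test_WINNF_FT_S_MCP_1')
#   tc.generate_MCP_1_default_config('mcp_1_default.config')
#   config = loadConfig('mcp_1_default.config')
#   tc.executeMcpTestSteps(config, 'MCP')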
#!/usr/bin/python
# Copyright (c) 2014 Quanta Research Cambridge, Inc
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import ply.lex as lex
import AST
import json, os, re, sys
import bsvpreprocess
import globalv
import cppgen, bsvgen
scripthome = os.path.dirname(os.path.abspath(__file__))
noisyFlag=True
parseDebugFlag=False
parseTrace=False
tokens = (
'AMPER',
'AMPERAMPER',
'AMPERAMPERAMPER',
'APOSTROPHE',
'BANG',
'BAR',
'BARBAR',
'BUILTINVAR',
'CARET',
'COLON',
'COLONCOLON',
'COMMA',
'DOT',
'EQEQ',
'EQUAL',
'GEQ',
'GREATER',
'GREATERGREATER',
'HASH',
'LARROW',
'LBRACE',
'LBRACKET',
'LEQ',
'LESS',
'LESSLESS',
'LPAREN',
'LPARENSTAR',
'MINUS',
'NEQ',
'NUM',
'PERCENT',
'PLUS',
'QUESTION',
'RBRACE',
'RBRACKET',
'RPAREN',
'RPARENSTAR',
'SEMICOLON',
'SLASH',
'STAR',
'STARSTAR',
'STR',
'TILDE',
'TILDEAMPER',
'TILDEBAR',
'TILDECARET',
'VAR'
)
reserved = {
'action': 'TOKACTION',
'Action': 'TOKUACTION',
'actionvalue': 'TOKACTIONVALUE',
'BDPI': 'TOKBDPI',
'begin': 'TOKBEGIN',
'BVI': 'TOKBVI',
'C': 'TOKC',
'case': 'TOKCASE',
'CF': 'TOKCF',
'clocked_by': 'TOKCLOCKED_BY',
'default': 'TOKDEFAULT',
'default_clock': 'TOKDEFAULT_CLOCK',
'default_reset': 'TOKDEFAULT_RESET',
'`define': 'TOKTICKDEFINE',
'dependencies': 'TOKDEPENDENCIES',
'deriving': 'TOKDERIVING',
'determines': 'TOKDETERMINES',
'else': 'TOKELSE',
'enable': 'TOKENABLE',
'end': 'TOKEND',
'endaction': 'TOKENDACTION',
'endactionvalue': 'TOKENDACTIONVALUE',
'endcase': 'TOKENDCASE',
'endfunction': 'TOKENDFUNCTION',
'endinstance': 'TOKENDINSTANCE',
'endinterface': 'TOKENDINTERFACE',
'endmethod': 'TOKENDMETHOD',
'endmodule': 'TOKENDMODULE',
'endpackage': 'TOKENDPACKAGE',
'endpar': 'TOKENDPAR',
'endrule': 'TOKENDRULE',
'endrules': 'TOKENDRULES',
'endseq': 'TOKENDSEQ',
'endtypeclass': 'TOKENDTYPECLASS',
'enum': 'TOKENUM',
'export': 'TOKEXPORT',
'for': 'TOKFOR',
'function': 'TOKFUNCTION',
'if': 'TOKIF',
'import': 'TOKIMPORT',
# 'in': 'TOKIN',
'input_clock': 'TOKINPUT_CLOCK',
'input_reset': 'TOKINPUT_RESET',
'instance': 'TOKINSTANCE',
'interface': 'TOKINTERFACE',
'let': 'TOKLET',
'match': 'TOKMATCH',
'matches': 'TOKMATCHES',
'method': 'TOKMETHOD',
'module': 'TOKMODULE',
'no_reset': 'TOKNO_RESET',
'numeric': 'TOKNUMERIC',
'output_clock': 'TOKOUTPUT_CLOCK',
'output_reset': 'TOKOUTPUT_RESET',
'package': 'TOKPACKAGE',
'par': 'TOKPAR',
'port': 'TOKPORT',
'parameter': 'TOKPARAMETER',
'provisos': 'TOKPROVISOS',
'ready': 'TOKREADY',
'reset_by': 'TOKRESET_BY',
'return': 'TOKRETURN',
'rule': 'TOKRULE',
'rules': 'TOKRULES',
'SB': 'TOKSB',
'SBR': 'TOKSBR',
'schedule': 'TOKSCHEDULE',
'seq': 'TOKSEQ',
'_when_': 'TOKWHEN',
'Stmt' : 'TOKSTMT',
'struct': 'TOKSTRUCT',
'tagged': 'TOKTAGGED',
'type': 'TOKTYPE',
'typeclass': 'TOKTYPECLASS',
'typedef': 'TOKTYPEDEF',
'union': 'TOKUNION',
'while': 'TOKWHILE',
}
for tok in reserved.values():
tokens = tokens + (tok,)
t_AMPER = r'&'
t_AMPERAMPER = r'&&'
t_AMPERAMPERAMPER = r'&&&'
t_APOSTROPHE = r'\''
t_BANG = r'!'
t_BAR = r'\|'
t_BARBAR = r'\|\|'
t_CARET = r'\^'
t_COLON = r':'
t_COLONCOLON = r'::'
t_COMMA = r','
t_DOT = r'[\.]'
t_EQEQ = r'=='
t_EQUAL = r'='
t_GEQ = r'>='
t_GREATER = r'>'
t_GREATERGREATER = r'>>'
t_HASH = r'\#'
t_LARROW = r'<-'
t_LBRACE = r'{'
t_LBRACKET = r'\['
t_LEQ = r'<='
t_LESS = r'<'
t_LESSLESS = r'<<'
t_LPAREN = r'\('
t_LPARENSTAR = r'\(\*'
t_MINUS = r'[-]'
t_NEQ = r'!='
t_NUM = r'(([0-9]+\'?[bdh\.]?[0-9a-zA-Z?]*)|(\'[bdh\.]?[0-9a-zA-Z?]+))'
t_PERCENT = r'%'
t_PLUS = r'\+'
t_QUESTION = r'\?'
t_RBRACE = r'}'
t_RBRACKET = r'\]'
t_RPAREN = r'\)'
t_RPARENSTAR = r'\*\)'
t_SEMICOLON = r';'
t_SLASH = r'/'
t_STAR = r'\*'
t_STARSTAR = r'\*\*'
t_STR = r'"[^\"]*"'
t_TILDE = r'~'
t_TILDEAMPER = r'~\&'
t_TILDEBAR = r'~\|'
t_TILDECARET = r'~^'
t_ignore = ' \t\f'
def t_error(t):
print "Illegal character '%s' in file '%s'" % (t.value[0], globalfilename)
t.lexer.skip(1)
def p_error(errtoken):
if hasattr(errtoken, 'lineno'):
sys.stderr.write("%s:%d: Syntax error, token=%s\n" % (globalfilename, errtoken.lineno, errtoken.type))
else:
sys.stderr.write("%s: Syntax error, token=%s\n" % (globalfilename, errtoken))
return None
def t_VAR(t):
r'`?([a-zA-Z_][$a-zA-Z0-9_]*)|(\\[-+*/|^&][*]?)'
t.type = reserved.get(t.value,'VAR')
return t
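# Identifiers are matched as VAR first and then promoted to keyword tokens via
# the reserved map; this is the standard PLY idiom for keyword handling.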
t_BUILTINVAR = r'\$[a-zA-Z_][a-zA-Z0-9_]*'
def t_newline(t):
r'\n+'
t.lexer.lineno += len(t.value)
def t_COMMENT(t):
r'//.*'
pass
def t_MCOMMENT(t):
r'/\*(.|\n)*?\*/'
#print t.value, t.value.count('\n'), t.lineno
t.lineno += t.value.count('\n')
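# A minimal sketch of driving just the lexer above in isolation (assumes PLY
# is installed; the input string is illustrative):
#   bsvlexer = lex.lex()
#   bsvlexer.input('module mkTop; endmodule')
#   for tok in iter(bsvlexer.token, None):
#       print tok.type, tok.value    # e.g. TOKMODULE, VAR, SEMICOLON, TOKENDMODULE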
import ply.yacc as yacc
def p_goal(p):
'goal : package '
p[0] = p[1]
def p_typeParams(p):
'''typeParams :
| type
| typeParams COMMA type'''
if len(p) == 2:
p[0] = [p[1]]
elif len(p) == 4:
p[0] = p[1] + [p[3]]
else:
p[0] = []
def p_type(p):
'''type : VAR
| VAR COLONCOLON VAR
| NUM
| TOKUACTION
| VAR HASH LPAREN typeParams RPAREN
| VAR COLONCOLON VAR HASH LPAREN typeParams RPAREN'''
if len(p) == 2:
p[0] = AST.Type(p[1], [])
elif len(p) == 4:
p[0] = p[3]
elif len(p) == 8:
p[0] = AST.Type(p[3], p[6])
else:
p[0] = AST.Type(p[1], p[4])
def p_expressions(p):
'''expressions : expression
|
| expressions COMMA expression'''
precedence = (
('left', 'STAR', 'SLASH', 'PERCENT'),
('left', 'PLUS', 'MINUS'),
('left', 'GREATERGREATER', 'LESSLESS'),
('left', 'LEQ', 'GEQ', 'LESS', 'GREATER'),
('left', 'EQEQ', 'NEQ'),
('left', 'AMPER'),
('left', 'CARET'),
('left', 'TILDECARET'),
('left', 'BAR'),
('left', 'AMPERAMPER'),
('left', 'BARBAR'),
('left', 'AMPERAMPERAMPER')
)
def p_colonVar(p):
'''colonVar :
| COLON VAR'''
def p_expression(p):
'''expression : caseExpr
| binaryExpression'''
p[0] = p[1]
def p_caseExprItem(p):
'''caseExprItem : pattern COLON expression SEMICOLON'''
def p_caseExprItems(p):
'''caseExprItems :
| caseExprItems caseExprItem'''
def p_defaultExprItem(p):
'''defaultExprItem :
| TOKDEFAULT expression SEMICOLON
| TOKDEFAULT COLON expression SEMICOLON'''
def p_caseExpr(p):
'''caseExpr : TOKCASE LPAREN expression RPAREN caseExprItems defaultExprItem TOKENDCASE
| TOKCASE LPAREN expression RPAREN TOKMATCHES caseExprItems defaultExprItem TOKENDCASE'''
def p_binaryExpression(p):
'''binaryExpression : unaryExpression
| binaryExpression AMPERAMPERAMPER binaryExpression
| binaryExpression MINUS binaryExpression
| binaryExpression PLUS binaryExpression
| binaryExpression STAR binaryExpression
| binaryExpression STARSTAR binaryExpression
| binaryExpression APOSTROPHE binaryExpression
| binaryExpression SLASH binaryExpression
| binaryExpression CARET binaryExpression
| binaryExpression LESS binaryExpression
| binaryExpression GREATER binaryExpression
| binaryExpression GEQ binaryExpression
| binaryExpression LESSLESS binaryExpression
| binaryExpression LEQ binaryExpression
| binaryExpression GREATERGREATER binaryExpression
| binaryExpression EQEQ binaryExpression
| binaryExpression NEQ binaryExpression
| binaryExpression AMPER binaryExpression
| binaryExpression AMPERAMPER binaryExpression
| binaryExpression BAR binaryExpression
| binaryExpression BARBAR binaryExpression
| binaryExpression PERCENT binaryExpression'''
p[0] = p[1]
def p_unaryExpression(p):
'''unaryExpression : term
| PLUS term
| MINUS term
| BANG term
| TILDE term
| AMPER term
| TILDEAMPER term
| BAR term
| TILDEBAR term
| CARET term
| TILDECARET term
| TOKACTION colonVar expressionStmts TOKENDACTION colonVar
| TOKACTIONVALUE colonVar expressionStmts TOKENDACTIONVALUE colonVar
'''
p[0] = p[1]
def p_term(p):
'''term : type
| type LBRACKET expression RBRACKET
| type LBRACKET expression COLON expression RBRACKET
| STR
| QUESTION
| term QUESTION expression
| term QUESTION expression COLON expression
| LPAREN expression RPAREN
| TOKINTERFACE VAR interfaceHashParams SEMICOLON expressionStmts TOKENDINTERFACE colonVar
| TOKINTERFACE VAR COLONCOLON VAR interfaceHashParams SEMICOLON expressionStmts TOKENDINTERFACE colonVar
| TOKINTERFACE VAR expressionStmts TOKENDINTERFACE colonVar
| TOKINTERFACE VAR COLONCOLON VAR expressionStmts TOKENDINTERFACE colonVar
| BUILTINVAR
| TOKCLOCKED_BY expression
| TOKRESET_BY expression
| TOKTAGGED VAR
| TOKTAGGED VAR expression
| TOKTAGGED VAR LBRACE structInits RBRACE
| term LBRACE structInits RBRACE
| term TOKMATCHES pattern
| LBRACE expressions RBRACE
| term DOT VAR
| term LBRACKET expression RBRACKET DOT term
| term LBRACKET expression RBRACKET
| term LBRACKET expression COLON expression RBRACKET
| term LPAREN params RPAREN DOT term
| term LPAREN params RPAREN'''
if len(p) > 2 and type(p[1]) == str:
p[0] = p[2]
else:
p[0] = p[1]
def p_structInits(p):
'''structInits :
| structInits COMMA VAR COLON expression
| structInits COMMA VAR COLON DOT VAR
| VAR COLON expression
| VAR COLON DOT VAR'''
def p_structPatternElements(p):
'''structPatternElements : VAR COLON pattern
| structPatternElements COMMA VAR COLON pattern '''
def p_pattern(p):
'''pattern : TOKTAGGED VAR
| TOKTAGGED VAR DOT VAR
| TOKTAGGED VAR LBRACE structPatternElements RBRACE
| LBRACE patterns RBRACE
| DOT VAR
| DOT STAR
| NUM'''
def p_patterns(p):
'''patterns : pattern
| patterns COMMA pattern'''
def p_importDecl(p):
'importDecl : TOKIMPORT VAR COLONCOLON STAR SEMICOLON'
if not p[2] in globalimports:
globalimports.append(p[2])
p[0] = p[2]
def p_importDecls(p):
'''importDecls :
| importDecls importDecl'''
def p_exportDecl(p):
'''exportDecl : TOKEXPORT VAR LPAREN DOT DOT RPAREN SEMICOLON
| TOKEXPORT VAR SEMICOLON
| TOKEXPORT VAR COLONCOLON STAR SEMICOLON'''
p[0] = p[2]
def p_exportDecls(p):
'''exportDecls :
| exportDecls exportDecl'''
def p_interfaceFormalParam(p):
'''interfaceFormalParam : TOKTYPE VAR
| VAR interfaceHashParams
| NUM
| TOKNUMERIC TOKTYPE VAR'''
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[2]
else:
p[0] = p[3]
def p_interfaceFormalParams(p):
'''interfaceFormalParams : interfaceFormalParam
| interfaceFormalParams COMMA interfaceFormalParam'''
if len(p) == 2:
p[0] = [p[1]]
else:
p[0] = p[1] + [p[3]]
def p_interfaceHashParams(p):
'''interfaceHashParams :
| HASH LPAREN interfaceFormalParams RPAREN'''
if len(p) == 5:
p[0] = p[3]
else:
p[0] = []
def p_instanceAttributes(p):
'''instanceAttributes :
| instanceAttributes LPARENSTAR attrSpecs RPARENSTAR'''
def p_subinterfaceDecl(p):
'''subinterfaceDecl : instanceAttributes TOKINTERFACE type VAR SEMICOLON
| type VAR SEMICOLON'''
if len(p) == 6:
name = p[4]
t = p[3]
elif len(p) == 5:
name = p[3]
t = p[2]
else:
name = p[2]
t = p[1]
p[0] = AST.Interface(t.name, t.params, [], name, globalfilename)
def p_parenthesizedFormalParams(p):
'''parenthesizedFormalParams :
| LPAREN RPAREN
| LPAREN moduleFormalParams RPAREN'''
if len(p) < 4:
p[0] = []
else:
p[0] = p[2]
def p_methodDecl(p):
'''methodDecl : TOKMETHOD type VAR parenthesizedFormalParams SEMICOLON'''
p[0] = AST.Method(p[3], p[2], p[4])
def p_interfaceStmt(p):
'''interfaceStmt : subinterfaceDecl
| methodDecl '''
p[0] = p[1]
def p_interfaceStmts(p):
'''interfaceStmts :
| interfaceStmts interfaceStmt'''
if len(p) == 3:
p[0] = p[1] + [p[2]]
else:
p[0] = []
def p_interfaceDecl(p):
'''interfaceDecl : instanceAttributes TOKINTERFACE VAR interfaceHashParams SEMICOLON interfaceStmts TOKENDINTERFACE colonVar'''
interface = AST.Interface(p[3], p[4], p[6], None, globalfilename)
p[0] = interface
# the token '[' signifies an array type
def p_arrayDecl(p):
'''arrayDecl : type VAR LBRACKET NUM RBRACKET'''
arr_t = AST.Type(p[3],p[1])
p[0] = AST.Variable(p[2], arr_t, None)
def p_varDecl(p):
'''varDecl : arrayDecl
| type VAR'''
if len(p)==3:
p[0] = AST.Variable(p[2], p[1], None)
else:
p[0] = p[1]
def p_params(p):
'''params : expressions
| TOKSEQ fsmStmts TOKENDSEQ'''
def p_lvalue(p):
'''lvalue : VAR
| LPAREN lvalue RPAREN
| lvalue DOT VAR
| TOKACTION fsmStmts TOKENDACTION
| lvalue LBRACKET expression RBRACKET
| lvalue LBRACKET expression COLON expression RBRACKET
| TOKMATCH pattern'''
def p_varAssign1(p):
'''varAssign1 : TOKLET VAR EQUAL expression
| TOKLET VAR LARROW expression'''
p[0] = AST.Variable(p[2], None, p[4])
def p_varAssign2(p):
'''varAssign2 : type VAR EQUAL expression
| type VAR LBRACKET expression RBRACKET EQUAL expression
| type VAR LBRACKET expression RBRACKET LBRACKET NUM RBRACKET EQUAL expression
| type VAR LARROW expression'''
p[0] = AST.Variable(p[2], p[1], p[4])
def p_varAssign3(p):
'''varAssign3 : lvalue EQUAL expression
| lvalue LEQ expression
| lvalue LARROW expression'''
p[0] = AST.Variable(p[2], p[1], None)
def p_varAssign(p):
'''varAssign : varAssign1
| varAssign2
| varAssign3'''
def p_ruleCond(p):
'''ruleCond : LPAREN expression RPAREN'''
def p_implicitCond(p):
'''implicitCond :
| TOKIF LPAREN expression RPAREN'''
def p_rule(p):
'''rule : TOKRULE VAR implicitCond SEMICOLON expressionStmts TOKENDRULE colonVar
| TOKRULE VAR ruleCond implicitCond SEMICOLON expressionStmts TOKENDRULE colonVar'''
def p_ifStmt(p):
'''ifStmt : TOKIF LPAREN expression RPAREN fsmStmt
| TOKIF LPAREN expression RPAREN fsmStmt TOKELSE fsmStmt'''
def p_caseItem(p):
'''caseItem : expressions COLON expressionStmt'''
def p_caseItems(p):
'''caseItems :
| caseItems caseItem'''
def p_defaultItem(p):
'''defaultItem :
| TOKDEFAULT expressionStmt
| TOKDEFAULT COLON expressionStmt'''
def p_caseStmt(p):
'''caseStmt : TOKCASE LPAREN expression RPAREN caseItems defaultItem TOKENDCASE
| TOKCASE LPAREN expression RPAREN TOKMATCHES caseItems defaultItem TOKENDCASE'''
def p_forStmt(p):
'''forStmt : TOKFOR LPAREN varAssign SEMICOLON expression SEMICOLON varAssign RPAREN fsmStmt'''
def p_whenStmt(p):
'''whenStmt : TOKWHEN LPAREN expression RPAREN LPAREN expression RPAREN SEMICOLON'''
def p_beginStmt(p):
'''beginStmt : TOKBEGIN expressionStmts TOKEND'''
def p_expressionStmt(p):
'''expressionStmt : TOKRETURN expression SEMICOLON
| fsmStmtDef
| whenStmt
| lvalue SEMICOLON
| lvalue LPAREN params RPAREN DOT expression SEMICOLON
| lvalue LPAREN params RPAREN SEMICOLON
| BUILTINVAR LPAREN expressions RPAREN SEMICOLON
| varAssign SEMICOLON
| varDecl SEMICOLON
| beginStmt
| ifStmt
| caseStmt
| forStmt
| interfaceDef
| functionDef
| methodDef
| moduleDef
| TOKACTION colonVar expressionStmts TOKENDACTION colonVar
| TOKACTIONVALUE colonVar expressionStmts TOKENDACTIONVALUE colonVar
| typeDef
| instanceAttributes rule
| TOKACTION fsmStmts TOKENDACTION
'''
if parseTrace:
print 'ENDSTATEMENT', [pitem for pitem in p]
def p_expressionStmts(p):
'''expressionStmts : expressionStmts expressionStmt
| '''
def p_provisos(p):
'''provisos :
| TOKPROVISOS LPAREN typeParams RPAREN'''
if len(p) == 5:
p[0] = p[3]
else:
p[0] = []
def p_endFunction(p):
'''endFunction : TOKENDFUNCTION colonVar'''
def p_functionBody(p):
'''functionBody : SEMICOLON expressionStmts endFunction'''
def p_functionValue(p):
'''functionValue : EQUAL expression SEMICOLON'''
def p_functionFormal(p):
'''functionFormal : type VAR
| VAR'''
def p_functionFormals(p):
'''functionFormals :
| functionFormal
| functionFormals COMMA functionFormal '''
def p_fsmStmt(p):
'''fsmStmt : TOKSEQ fsmStmts TOKENDSEQ
| TOKPAR fsmStmts TOKENDPAR
| TOKWHILE ruleCond fsmStmt
| expressionStmt'''
def p_fsmStmts(p):
'''fsmStmts : fsmStmt fsmStmts
| fsmStmt'''
def p_fsmStmtDef(p):
'''fsmStmtDef : TOKSTMT VAR EQUAL fsmStmts SEMICOLON'''
def p_functionDef(p):
'''functionDef : instanceAttributes TOKFUNCTION type VAR LPAREN functionFormals RPAREN provisos functionBody
| instanceAttributes TOKFUNCTION VAR LPAREN functionFormals RPAREN provisos functionBody
| instanceAttributes TOKFUNCTION type VAR LPAREN functionFormals RPAREN provisos functionValue
| instanceAttributes TOKFUNCTION VAR LPAREN functionFormals RPAREN provisos functionValue
'''
if len(p) == 9:
# no type
p[0] = AST.Function(p[3], None, p[5])
else:
p[0] = AST.Function(p[4], p[3], p[6])
def p_methodDef(p):
'''methodDef : TOKMETHOD type VAR LPAREN functionFormals RPAREN implicitCond SEMICOLON methodBody
| TOKMETHOD type VAR implicitCond SEMICOLON methodBody
| TOKMETHOD type VAR EQUAL expression SEMICOLON
| TOKMETHOD type VAR LPAREN functionFormals RPAREN EQUAL expression SEMICOLON
| TOKMETHOD VAR LPAREN functionFormals RPAREN EQUAL expression SEMICOLON
| TOKMETHOD VAR EQUAL expression SEMICOLON'''
returnType = p[2]
name = p[3]
params = []
p[0] = AST.Method(name, returnType, params)
def p_methodBody(p):
'''methodBody : expressionStmts endMethod
| endMethod'''
def p_endMethod(p):
'''endMethod : TOKENDMETHOD colonVar'''
def p_unionMember(p):
'''unionMember : type VAR SEMICOLON
| subStruct VAR SEMICOLON
| subUnion VAR SEMICOLON'''
def p_subStruct(p):
'''subStruct : TOKSTRUCT LBRACE structMembers RBRACE'''
def p_structMembers(p):
'''structMembers :
| structMember
| structMembers structMember'''
if len(p) == 1:
p[0] = []
elif len(p) == 2:
p[0] = [p[1]]
elif len(p) == 3:
p[0] = p[1] + [p[2]]
def p_structMember(p):
'''structMember : type VAR SEMICOLON
| subUnion VAR SEMICOLON'''
p[0] = AST.StructMember(p[1], p[2])
def p_subUnion(p):
'''subUnion : TOKUNION TOKTAGGED LBRACE unionMembers RBRACE'''
def p_unionMembers(p):
'''unionMembers : unionMember
| unionMembers unionMember'''
def p_taggedUnionDef(p):
'''taggedUnionDef : TOKUNION TOKTAGGED LBRACE unionMembers RBRACE'''
def p_structDef(p):
'''structDef : TOKSTRUCT LBRACE structMembers RBRACE'''
p[0] = AST.Struct(p[3])
def p_enumRange(p):
'''enumRange :
| LBRACKET NUM RBRACKET
| LBRACKET NUM COLON NUM RBRACKET'''
def p_enumElement(p):
'''enumElement : VAR enumRange
| VAR enumRange EQUAL NUM'''
if len(p) == 3:
p[0] = [p[1], None]
else:
p[0] = [p[1], p[4]]
def p_enumElements(p):
'''enumElements : enumElement
| enumElements COMMA enumElement'''
if len(p) == 2:
p[0] = [p[1]]
else:
p[0] = p[1] + [p[3]]
def p_enumDef(p):
'''enumDef : TOKENUM LBRACE enumElements RBRACE'''
p[0] = AST.Enum(p[3])
def p_vardot(p):
'''vardot : VAR
| vardot DOT VAR'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[3]
def p_vars(p):
'''vars : vardot
| vars COMMA vardot'''
if len(p) == 2:
p[0] = [p[1]]
else:
p[0] = p[1] + [p[3]]
def p_deriving(p):
'''deriving :
| TOKDERIVING LPAREN vars RPAREN'''
if len(p) == 5:
p[0] = p[3]
else:
p[0] = []
def p_macroDef(p):
'''macroDef : TOKTICKDEFINE VAR expression'''
def p_typeDefBody(p):
'''typeDefBody : taggedUnionDef
| structDef
| enumDef
| type'''
p[0] = p[1]
def p_typeDef(p):
'''typeDef : TOKTYPEDEF typeDefBody VAR deriving SEMICOLON
| TOKTYPEDEF typeDefBody VAR interfaceHashParams deriving SEMICOLON'''
if len(p) == 6:
p[0] = AST.TypeDef(p[2], p[3], [])
else:
p[0] = AST.TypeDef(p[2], p[3], p[4])
def p_interfaceDef(p):
'''interfaceDef : TOKINTERFACE type VAR SEMICOLON expressionStmts TOKENDINTERFACE colonVar
| TOKINTERFACE type VAR EQUAL expression SEMICOLON
| TOKINTERFACE VAR EQUAL expression SEMICOLON'''
if parseTrace:
print 'ENDINTERFACE', [pitem for pitem in p]
def p_formalParam(p):
'''formalParam : type VAR'''
param = AST.Param(p[2], p[1])
p[0] = param
def p_moduleFormalParams(p):
'''moduleFormalParams : formalParam
| TOKFUNCTION type VAR parenthesizedFormalParams
| moduleFormalParams COMMA formalParam
|'''
if len(p) == 1:
p[0] = []
elif len(p) == 2:
p[0] = [p[1]]
elif len(p) == 5:
p[0] = p[2]
elif len(p) == 4:
p[0] = p[1] + [p[3]]
def p_moduleFormalArg(p):
'''moduleFormalArg : instanceAttributes type
| instanceAttributes type VAR'''
def p_moduleFormalArgs(p):
'''moduleFormalArgs :
| moduleFormalArg
| moduleFormalArgs COMMA moduleFormalArg'''
if len(p) == 2:
p[0] = [p[1]]
else:
p[0] = p[1] + [p[3]]
def p_moduleParamsArgs(p):
'''moduleParamsArgs :
| HASH LPAREN moduleFormalParams RPAREN
| HASH LPAREN moduleFormalParams RPAREN LPAREN moduleFormalArgs RPAREN
| LPAREN moduleFormalArgs RPAREN'''
if len(p) == 8:
p[0] = [ p[3], p[6] ]
elif len(p) == 5:
p[0] = [ p[3], None ]
else:
p[0] = [ None, p[2] ]
def p_attrSpec(p):
'''attrSpec : VAR
| VAR EQUAL expression'''
def p_attrSpecs(p):
'''attrSpecs : attrSpec
| attrSpecs COMMA attrSpec'''
def p_moduleContext(p):
'''moduleContext :
| LBRACKET VAR RBRACKET'''
if len(p) > 2:
p[0] = p[2]
def p_moduleDefHeader(p):
'''moduleDefHeader : instanceAttributes TOKMODULE moduleContext VAR moduleParamsArgs provisos SEMICOLON'''
p[0] = [p[3], p[4], p[5][0], p[5][1], p[6]]
def p_moduleDef(p):
'''moduleDef : moduleDefHeader expressionStmts TOKENDMODULE colonVar'''
if parseTrace:
print 'ENDMODULE', [pitem for pitem in p]
p[0] = AST.Module(p[1][0], p[1][1], p[1][2], p[1][3], p[1][4], p[2])
def p_importBviDef(p):
'''importBviDef : TOKIMPORT STR VAR EQUAL bviModuleDef
| TOKIMPORT STR TOKFUNCTION TOKUACTION VAR LPAREN functionFormals RPAREN SEMICOLON'''
p[0] = p[5]
if len(p) > 6:
p[0] = AST.Module(None, p[5], None, None, None, None)
def p_bviModuleDef(p):
'''bviModuleDef : instanceAttributes TOKMODULE moduleContext VAR moduleParamsArgs provisos SEMICOLON bviExpressionStmts TOKENDMODULE colonVar'''
p[0] = AST.Module(p[3], p[4], p[5][0], p[5][1], p[6], p[8])
def p_bviExpressionStmts(p):
'''bviExpressionStmts : bviExpressionStmts bviExpressionStmt
| bviExpressionStmt '''
def p_bviExpressionStmt(p):
'''bviExpressionStmt : TOKRETURN expression SEMICOLON
| fsmStmtDef
| whenStmt
| lvalue SEMICOLON
| lvalue LPAREN expressions RPAREN SEMICOLON
| BUILTINVAR LPAREN expressions RPAREN SEMICOLON
| varAssign SEMICOLON
| varDecl SEMICOLON
| beginStmt
| ifStmt
| caseStmt
| forStmt
| bviInterfaceDef
| functionDef
| bviMethodDef
| moduleDef
| TOKACTION colonVar expressionStmts TOKENDACTION colonVar
| typeDef
| instanceAttributes rule
| TOKSEQ fsmStmts TOKENDSEQ
| TOKPORT VAR EQUAL expression SEMICOLON
| TOKPARAMETER VAR EQUAL expression SEMICOLON
| TOKDEFAULT_CLOCK VAR LPAREN RPAREN SEMICOLON
| TOKDEFAULT_CLOCK VAR LPAREN VAR RPAREN SEMICOLON
| TOKDEFAULT_RESET VAR LPAREN RPAREN SEMICOLON
| TOKDEFAULT_RESET TOKNO_RESET SEMICOLON
| TOKDEFAULT_RESET VAR LPAREN VAR RPAREN SEMICOLON
| TOKINPUT_CLOCK VAR LPAREN VAR RPAREN EQUAL expression SEMICOLON
| TOKINPUT_RESET VAR LPAREN VAR RPAREN EQUAL expression SEMICOLON
| TOKINPUT_RESET VAR LPAREN RPAREN EQUAL expression SEMICOLON
| TOKOUTPUT_CLOCK VAR LPAREN VAR RPAREN SEMICOLON
| TOKOUTPUT_RESET VAR LPAREN VAR RPAREN SEMICOLON
| TOKSCHEDULE LPAREN vars RPAREN schedOp LPAREN vars RPAREN SEMICOLON'''
def p_schedOp(p):
'''schedOp : TOKCF
| TOKC
| TOKSB
| TOKSBR'''
def p_bviInterfaceDef(p):
'''bviInterfaceDef : TOKINTERFACE type VAR SEMICOLON bviExpressionStmts TOKENDINTERFACE colonVar
| TOKINTERFACE type VAR EQUAL expression SEMICOLON
| TOKINTERFACE VAR EQUAL expression SEMICOLON'''
def p_bviMethodAttributes(p):
'''bviMethodAttributes :
| bviMethodAttributes bviMethodAttribute'''
def p_bviMethodAttribute(p):
'''bviMethodAttribute :
| TOKENABLE LPAREN instanceAttributes VAR RPAREN
| TOKCLOCKED_BY LPAREN instanceAttributes VAR RPAREN
| TOKRESET_BY LPAREN instanceAttributes VAR RPAREN'''
def p_bviMethodDef(p):
'''bviMethodDef : TOKMETHOD VAR LPAREN VAR RPAREN bviMethodAttributes SEMICOLON
| TOKMETHOD VAR VAR LPAREN RPAREN bviMethodAttributes SEMICOLON'''
def p_instanceDeclStmt(p):
'''instanceDeclStmt : varAssign SEMICOLON
| functionDef
| moduleDef'''
p[0] = p[1]
def p_instanceDeclStmts(p):
'''instanceDeclStmts :
| instanceDeclStmt
| instanceDeclStmts instanceDeclStmt'''
def p_instanceDecl(p):
'''instanceDecl : TOKINSTANCE VAR HASH LPAREN typeParams RPAREN provisos SEMICOLON instanceDeclStmts TOKENDINSTANCE'''
p[0] = AST.TypeclassInstance(p[2], p[5], p[7], p[9])
def p_typeClassDeclStmts(p):
'''typeClassDeclStmts :
| moduleDefHeader'''
def p_typeClassDecl(p):
'''typeClassDecl : TOKTYPECLASS VAR HASH LPAREN interfaceFormalParams RPAREN provisos SEMICOLON typeClassDeclStmts TOKENDTYPECLASS'''
p[0] = AST.Typeclass(p[2])
globalimports = []
globalfilename = None
def p_packageStmt(p):
'''packageStmt : interfaceDecl
| typeClassDecl
| functionDef
| instanceDecl
| varDecl SEMICOLON
| varAssign SEMICOLON
| moduleDef
| macroDef
| typeDef
| importBviDef'''
globalv.add_new(p[1])
def p_packageStmts(p):
'''packageStmts :
| packageStmts packageStmt exportDecls'''
def p_beginPackage(p):
'''beginPackage :
| TOKPACKAGE VAR SEMICOLON'''
def p_endPackage(p):
'''endPackage :
| TOKENDPACKAGE colonVar'''
def p_package(p):
'''package : beginPackage exportDecls importDecls packageStmts exportDecls endPackage'''
p[0] = p[4]
def syntax_parse(argdata, inputfilename, bsvdefines, bsvpath):
global globalfilename
globalfilename = inputfilename
data = bsvpreprocess.preprocess(inputfilename, argdata + '\n', bsvdefines, bsvpath)
lexer = lex.lex(errorlog=lex.NullLogger())
parserdir=scripthome+'/syntax'
if not os.path.isdir(parserdir):
os.makedirs(parserdir)
if not (parserdir in sys.path):
sys.path.append(parserdir)
parser = yacc.yacc(optimize=1,errorlog=yacc.NullLogger(),outputdir=parserdir,debugfile=parserdir+'/parser.out')
if noisyFlag:
print 'Parsing:', inputfilename
if parseDebugFlag:
return parser.parse(data,debug=1)
return parser.parse(data)
def generate_bsvcpp(filelist, project_dir, bsvdefines, interfaces, bsvpath):
for inputfile in filelist:
syntax_parse(open(inputfile).read(), inputfile, bsvdefines, bsvpath)
## code generation pass
ilist = []
for i in interfaces:
ifc = globalv.globalvars.get(i)
if not ifc:
print 'Connectal: Unable to locate the interface:', i
for keys in globalv.globalvars:
print ' ', keys
sys.exit(1)
ifc = ifc.instantiate(dict(zip(ifc.params, ifc.params)))
ilist.append(ifc)
for ditem in ifc.decls:
for pitem in ditem.params:
thisType = pitem.type
p = globalv.globalvars.get(thisType.name)
if p and thisType.params and p.params:
myName = '%sL_%s_P' % (thisType.name, '_'.join([t.name for t in thisType.params if t]))
pitem.oldtype = pitem.type
pitem.type = AST.Type(myName, [])
if not globalv.globalvars.get(myName):
globalv.add_new(AST.TypeDef(p.tdtype.instantiate(dict(zip(p.params, thisType.params))), myName, []))
jsondata = AST.serialize_json(ilist, globalimports, bsvdefines)
if project_dir:
cppgen.generate_cpp(project_dir, noisyFlag, jsondata)
bsvgen.generate_bsv(project_dir, noisyFlag, False, jsondata)
if __name__=='__main__':
if len(sys.argv) == 1:
parserdir=scripthome+'/syntax'
sys.path.append(parserdir)
if not os.path.isdir(parserdir):
os.makedirs(parserdir)
parser = yacc.yacc(outputdir=parserdir,debugfile=parserdir+'/parser.out')
import parsetab
sys.exit(0)
ifitems = []
t = os.environ.get('INTERFACES')
if t:
t = t.split()
for item in t:
if item not in ifitems:
ifitems.append(item)
deflist = []
t = os.environ.get('BSVDEFINES_LIST')
if t:
deflist = t.split()
noisyFlag = os.environ.get('D') == '1'
if os.environ.get('D'):
parseDebugFlag=True
if noisyFlag:
parseTrace=True
project_dir = os.environ.get('DTOP')
tmp = os.environ.get('PROTODEBUG')
if tmp:
print 'JSONNN', tmp
j2file = open(tmp).read()
jsondata = json.loads(j2file)
cppgen.generate_cpp(project_dir, noisyFlag, jsondata)
bsvgen.generate_bsv(project_dir, noisyFlag, True, jsondata)
else:
bsvpath = os.environ.get('BSVPATH', '').split(':')  # default must be a string, not a list, for .split()
generate_bsvcpp(sys.argv[1:], project_dir, deflist, ifitems, bsvpath)
"""Tokenization help for Python programs.
tokenize(readline) is a generator that breaks a stream of bytes into
Python tokens. It decodes the bytes according to PEP-0263 for
determining source file encoding.
It accepts a readline-like method which is called repeatedly to get the
next line of input (or b"" for EOF). It generates 5-tuples with these
members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators. Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
'Michael Foord')
from builtins import open as _builtin_open
from codecs import lookup, BOM_UTF8
import collections
import functools
from io import TextIOWrapper
import itertools as _itertools
import re
import sys
from token import *
from token import EXACT_TOKEN_TYPES
cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
import token
__all__ = token.__all__ + ["tokenize", "generate_tokens", "detect_encoding",
"untokenize", "TokenInfo"]
del token
class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
def __repr__(self):
annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
self._replace(type=annotated_type))
@property
def exact_type(self):
if self.type == OP and self.string in EXACT_TOKEN_TYPES:
return EXACT_TOKEN_TYPES[self.string]
else:
return self.type
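# A short sketch of TokenInfo in use: exact_type refines generic OP tokens
# into their specific operator types (the literal values here are
# illustrative).
def _tokeninfo_demo():
    tok = TokenInfo(OP, '+', (1, 0), (1, 1), 'a + b\n')
    return tok.type == OP and tok.exact_type == PLUS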
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'
Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+'
Binnumber = r'0[bB](?:_?[01])+'
Octnumber = r'0[oO](?:_?[0-7])+'
Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*'
Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?',
r'\.[0-9](?:_?[0-9])*') + maybe(Exponent)
Expfloat = r'[0-9](?:_?[0-9])*' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Return the empty string, plus all of the valid string prefixes.
def _all_string_prefixes():
# The valid string prefixes. Only the lower case versions are listed here,
# and no permutations are included ('fr' is present, but not
# 'rf'); the various permutations are generated below.
_valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr']
# if we add binary f-strings, add: ['fb', 'fbr']
result = {''}
for prefix in _valid_string_prefixes:
for t in _itertools.permutations(prefix):
# create a list with upper and lower versions of each
# character
for u in _itertools.product(*[(c, c.upper()) for c in t]):
result.add(''.join(u))
return result
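# For illustration: the generated set contains the empty prefix plus every
# case permutation of each valid prefix, e.g. 'b', 'B', 'Rb', 'bR', 'FR'.
def _string_prefix_demo():
    prefixes = _all_string_prefixes()
    return '' in prefixes and 'Rb' in prefixes and 'bR' in prefixes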
@functools.lru_cache
def _compile(expr):
return re.compile(expr, re.UNICODE)
# Note that since _all_string_prefixes includes the empty string,
# StringPrefix can be the empty string (making it optional).
StringPrefix = group(*_all_string_prefixes())
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Sorting in reverse order puts the long operators before their prefixes.
# Otherwise if = came before ==, == would get recognized as two instances
# of =.
Special = group(*map(re.escape, sorted(EXACT_TOKEN_TYPES, reverse=True)))
Funny = group(r'\r?\n', Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
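# A small sketch of how the tokenizer's main loop consumes PseudoToken:
# group 1 is the token text that follows any leading whitespace.
def _pseudo_token_demo():
    m = _compile(PseudoToken).match('    answer = 42\n', 0)
    return m.group(1)  # 'answer'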
# For a given string prefix plus quotes, endpats maps it to a regex
# to match the remainder of that string. _prefix can be empty, for
# a normal single or triple quoted string (with no prefix).
endpats = {}
for _prefix in _all_string_prefixes():
endpats[_prefix + "'"] = Single
endpats[_prefix + '"'] = Double
endpats[_prefix + "'''"] = Single3
endpats[_prefix + '"""'] = Double3
# A set of all of the single and triple quoted string prefixes,
# including the opening quotes.
single_quoted = set()
triple_quoted = set()
for t in _all_string_prefixes():
for u in (t + '"', t + "'"):
single_quoted.add(u)
for u in (t + '"""', t + "'''"):
triple_quoted.add(u)
tabsize = 8
class TokenError(Exception): pass
class StopTokenizing(Exception): pass
class Untokenizer:
def __init__(self):
self.tokens = []
self.prev_row = 1
self.prev_col = 0
self.encoding = None
def add_whitespace(self, start):
row, col = start
if row < self.prev_row or row == self.prev_row and col < self.prev_col:
raise ValueError("start ({},{}) precedes previous end ({},{})"
.format(row, col, self.prev_row, self.prev_col))
row_offset = row - self.prev_row
if row_offset:
self.tokens.append("\\\n" * row_offset)
self.prev_col = 0
col_offset = col - self.prev_col
if col_offset:
self.tokens.append(" " * col_offset)
def untokenize(self, iterable):
it = iter(iterable)
indents = []
startline = False
for t in it:
if len(t) == 2:
self.compat(t, it)
break
tok_type, token, start, end, line = t
if tok_type == ENCODING:
self.encoding = token
continue
if tok_type == ENDMARKER:
break
if tok_type == INDENT:
indents.append(token)
continue
elif tok_type == DEDENT:
indents.pop()
self.prev_row, self.prev_col = end
continue
elif tok_type in (NEWLINE, NL):
startline = True
elif startline and indents:
indent = indents[-1]
if start[1] >= len(indent):
self.tokens.append(indent)
self.prev_col = len(indent)
startline = False
self.add_whitespace(start)
self.tokens.append(token)
self.prev_row, self.prev_col = end
if tok_type in (NEWLINE, NL):
self.prev_row += 1
self.prev_col = 0
return "".join(self.tokens)
def compat(self, token, iterable):
indents = []
toks_append = self.tokens.append
startline = token[0] in (NEWLINE, NL)
prevstring = False
for tok in _itertools.chain([token], iterable):
toknum, tokval = tok[:2]
if toknum == ENCODING:
self.encoding = tokval
continue
if toknum in (NAME, NUMBER):
tokval += ' '
# Insert a space between two consecutive strings
if toknum == STRING:
if prevstring:
tokval = ' ' + tokval
prevstring = True
else:
prevstring = False
if toknum == INDENT:
indents.append(tokval)
continue
elif toknum == DEDENT:
indents.pop()
continue
elif toknum in (NEWLINE, NL):
startline = True
elif startline and indents:
toks_append(indents[-1])
startline = False
toks_append(tokval)
def untokenize(iterable):
"""Transform tokens back into Python source code.
It returns a bytes object, encoded using the ENCODING
token, which is the first token sequence output by tokenize.
Each element returned by the iterable must be a token sequence
with at least two elements, a token number and token value. If
only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input:
Untokenized source will match input source exactly
Round-trip invariant for limited input:
# Output bytes will tokenize back to the input
t1 = [tok[:2] for tok in tokenize(f.readline)]
newcode = untokenize(t1)
readline = BytesIO(newcode).readline
t2 = [tok[:2] for tok in tokenize(readline)]
assert t1 == t2
"""
ut = Untokenizer()
out = ut.untokenize(iterable)
if ut.encoding is not None:
out = out.encode(ut.encoding)
return out
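# A round-trip sketch (illustrative, not part of the module): with full
# 5-tuples, untokenize() reproduces the source bytes exactly, including
# the whitespace between tokens.
#
#     >>> from io import BytesIO
#     >>> toks = list(tokenize(BytesIO(b"1 + 2\n").readline))
#     >>> untokenize(toks)
#     b'1 + 2\n'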
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present,
but disagree, a SyntaxError will be raised. If the encoding cookie is an
invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned.
"""
try:
filename = readline.__self__.name
except AttributeError:
filename = None
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return b''
def find_cookie(line):
try:
# Decode as UTF-8. Either the line is an encoding declaration,
# in which case it should be pure ASCII, or it must be UTF-8
# per default encoding.
line_string = line.decode('utf-8')
except UnicodeDecodeError:
msg = "invalid or missing encoding declaration"
if filename is not None:
msg = '{} for {!r}'.format(msg, filename)
raise SyntaxError(msg)
match = cookie_re.match(line_string)
if not match:
return None
encoding = _get_normal_name(match.group(1))
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
if filename is None:
msg = "unknown encoding: " + encoding
else:
msg = "unknown encoding for {!r}: {}".format(filename,
encoding)
raise SyntaxError(msg)
if bom_found:
if encoding != 'utf-8':
# This behaviour mimics the Python interpreter
if filename is None:
msg = 'encoding problem: utf-8'
else:
msg = 'encoding problem for {!r}: utf-8'.format(filename)
raise SyntaxError(msg)
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
if not blank_re.match(first):
return default, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
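# A minimal usage sketch (illustrative, not part of the module): feeding
# detect_encoding() the readline of a bytes buffer that carries a PEP 263
# coding cookie. Note the cookie's charset is normalized.
#
#     >>> from io import BytesIO
#     >>> buf = BytesIO(b"# -*- coding: latin-1 -*-\nx = 1\n")
#     >>> detect_encoding(buf.readline)
#     ('iso-8859-1', [b'# -*- coding: latin-1 -*-\n'])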
def open(filename):
"""Open a file in read only mode using the encoding detected by
detect_encoding().
"""
buffer = _builtin_open(filename, 'rb')
try:
encoding, lines = detect_encoding(buffer.readline)
buffer.seek(0)
text = TextIOWrapper(buffer, encoding, line_buffering=True)
text.mode = 'r'
return text
except:
buffer.close()
raise
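# A usage sketch (illustrative; the file name is hypothetical): this open()
# decodes a source file with its detected encoding, so reading it yields
# correctly decoded text even for non-UTF-8 sources.
#
#     with open('example_module.py') as f:   # `open` here is tokenize.open
#         source = f.read()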
def tokenize(readline):
"""
The tokenize() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as bytes. Alternatively, readline
can be a callable function terminating with StopIteration:
readline = open(myfile, 'rb').__next__ # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
physical line.
The first token sequence will always be an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""
encoding, consumed = detect_encoding(readline)
empty = _itertools.repeat(b"")
rl_gen = _itertools.chain(consumed, iter(readline, b""), empty)
return _tokenize(rl_gen.__next__, encoding)
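# A minimal usage sketch (illustrative, not part of the module): tokenizing
# a small bytes source. The stream starts with an ENCODING token ('utf-8'),
# followed by NAME 'x', OP '=', NUMBER '1', NEWLINE and finally ENDMARKER.
#
#     >>> from io import BytesIO
#     >>> for tok in tokenize(BytesIO(b"x = 1\n").readline):
#     ...     print(tok.type, repr(tok.string))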
def _tokenize(readline, encoding):
lnum = parenlev = continued = 0
numchars = '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
if encoding is not None:
if encoding == "utf-8-sig":
# BOM will already have been stripped.
encoding = "utf-8"
yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
last_line = b''
line = b''
while True: # loop over lines in stream
try:
# We capture the value of the line variable here because
# readline uses the empty string '' to signal end of input,
# hence `line` itself will always be overwritten at the end
# of this loop.
last_line = line
line = readline()
except StopIteration:
line = b''
if encoding is not None:
line = line.decode(encoding)
lnum += 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield TokenInfo(STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield TokenInfo(ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ':
column += 1
elif line[pos] == '\t':
column = (column//tabsize + 1)*tabsize
elif line[pos] == '\f':
column = 0
else:
break
pos += 1
if pos == max:
break
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
yield TokenInfo(COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
pos += len(comment_token)
yield TokenInfo(NL, line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)
else: # continued statement
if not line:
raise TokenError("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = _compile(PseudoToken).match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
if start == end:
continue
token, initial = line[start:end], line[start]
if (initial in numchars or # ordinary number
(initial == '.' and token != '.' and token != '...')):
yield TokenInfo(NUMBER, token, spos, epos, line)
elif initial in '\r\n':
if parenlev > 0:
yield TokenInfo(NL, token, spos, epos, line)
else:
yield TokenInfo(NEWLINE, token, spos, epos, line)
elif initial == '#':
assert not token.endswith("\n")
yield TokenInfo(COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = _compile(endpats[token])
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield TokenInfo(STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
# Check up to the first 3 chars of the token to see if
# they're in the single_quoted set. If so, they start
# a string.
# We're using the first 3, because we're looking for
# "rb'" (for example) at the start of the token. If
# we switch to longer prefixes, this needs to be
# adjusted.
# Note that initial == token[:1].
# Also note that single quote checking must come after
# triple quote checking (above).
elif (initial in single_quoted or
token[:2] in single_quoted or
token[:3] in single_quoted):
if token[-1] == '\n': # continued string
strstart = (lnum, start)
# Again, using the first 3 chars of the
# token. This is looking for the matching end
# regex for the correct type of quote
# character. So it's really looking for
# endpats["'"] or endpats['"'], by trying to
# skip string prefix characters, if any.
endprog = _compile(endpats.get(initial) or
endpats.get(token[1]) or
endpats.get(token[2]))
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield TokenInfo(STRING, token, spos, epos, line)
elif initial.isidentifier(): # ordinary name
yield TokenInfo(NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
continued = 1
else:
if initial in '([{':
parenlev += 1
elif initial in ')]}':
parenlev -= 1
yield TokenInfo(OP, token, spos, epos, line)
else:
yield TokenInfo(ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos += 1
# Add an implicit NEWLINE if the input doesn't end in one
if last_line and last_line[-1] not in '\r\n' and not last_line.strip().startswith("#"):
yield TokenInfo(NEWLINE, '', (lnum - 1, len(last_line)), (lnum - 1, len(last_line) + 1), '')
for indent in indents[1:]: # pop remaining indent levels
yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
def generate_tokens(readline):
"""Tokenize a source reading Python code as unicode strings.
This has the same API as tokenize(), except that it expects the *readline*
callable to return str objects instead of bytes.
"""
return _tokenize(readline, None)
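# A usage sketch (illustrative, not part of the module): generate_tokens()
# works on str input, so a StringIO readline suffices and no ENCODING token
# is produced.
#
#     >>> import io
#     >>> toks = list(generate_tokens(io.StringIO("x = 1\n").readline))
#     >>> [t.string for t in toks]
#     ['x', '=', '1', '\n', '']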
def main():
import argparse
# Helper error handling routines
def perror(message):
sys.stderr.write(message)
sys.stderr.write('\n')
def error(message, filename=None, location=None):
if location:
args = (filename,) + location + (message,)
perror("%s:%d:%d: error: %s" % args)
elif filename:
perror("%s: error: %s" % (filename, message))
else:
perror("error: %s" % message)
sys.exit(1)
# Parse the arguments and options
parser = argparse.ArgumentParser(prog='python -m tokenize')
parser.add_argument(dest='filename', nargs='?',
metavar='filename.py',
help='the file to tokenize; defaults to stdin')
parser.add_argument('-e', '--exact', dest='exact', action='store_true',
help='display token names using the exact type')
args = parser.parse_args()
try:
# Tokenize the input
if args.filename:
filename = args.filename
with _builtin_open(filename, 'rb') as f:
tokens = list(tokenize(f.readline))
else:
filename = "<stdin>"
tokens = _tokenize(sys.stdin.readline, None)
# Output the tokenization
for token in tokens:
token_type = token.type
if args.exact:
token_type = token.exact_type
token_range = "%d,%d-%d,%d:" % (token.start + token.end)
print("%-20s%-15s%-15r" %
(token_range, tok_name[token_type], token.string))
except IndentationError as err:
line, column = err.args[1][1:3]
error(err.args[0], filename, (line, column))
except TokenError as err:
line, column = err.args[1]
error(err.args[0], filename, (line, column))
except SyntaxError as err:
error(err, filename)
except OSError as err:
error(err)
except KeyboardInterrupt:
print("interrupted\n")
except Exception as err:
perror("unexpected error: %s" % err)
raise
if __name__ == "__main__":
main()
|
|
##
# Copyright (c) 2008-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twisted.internet.defer import inlineCallbacks, succeed
from twisted.python.filepath import FilePath
from twisted.python.reflect import namedClass
from twistedcaldav.config import config
from twistedcaldav.directory.calendaruserproxy import ProxySqliteDB
from twistedcaldav.directory.resourceinfo import ResourceInfoDatabase
from twistedcaldav.test.util import StoreTestCase
from twistedcaldav.upgrade import xattrname, upgradeData, updateFreeBusySet, \
removeIllegalCharacters, normalizeCUAddrs, \
loadDelegatesFromXMLintoProxyDB, migrateDelegatesToStore, \
upgradeResourcesXML, upgradeAugmentsXML, migrateAutoSchedule
from txdav.caldav.datastore.index_file import db_basename
from txdav.who.delegates import Delegates
from txdav.xml.parser import WebDAVDocument
import cPickle
import hashlib
import os
import zlib
freeBusyAttr = xattrname(
"{urn:ietf:params:xml:ns:caldav}calendar-free-busy-set"
)
cTagAttr = xattrname(
"{http:%2F%2Fcalendarserver.org%2Fns%2F}getctag"
)
md5Attr = xattrname(
"{http:%2F%2Ftwistedmatrix.com%2Fxml_namespace%2Fdav%2F}getcontentmd5"
)
OLDPROXYFILE = ".db.calendaruserproxy"
NEWPROXYFILE = "proxies.sqlite"
class UpgradeTests(StoreTestCase):
def doUpgrade(self, config):
"""
Perform the actual upgrade. (Hook for parallel tests.)
"""
return upgradeData(config, self.directory)
def setUpInitialStates(self):
self.setUpOldDocRoot()
self.setUpOldDocRootWithoutDB()
self.setUpNewDocRoot()
self.setUpNewDataRoot()
self.setUpDataRootWithProxyDB()
def setUpOldDocRoot(self):
# Set up doc root
self.olddocroot = os.path.abspath(self.mktemp())
os.mkdir(self.olddocroot)
principals = os.path.join(self.olddocroot, "principals")
os.mkdir(principals)
os.mkdir(os.path.join(principals, "__uids__"))
os.mkdir(os.path.join(principals, "users"))
os.mkdir(os.path.join(principals, "groups"))
os.mkdir(os.path.join(principals, "locations"))
os.mkdir(os.path.join(principals, "resources"))
os.mkdir(os.path.join(principals, "sudoers"))
open(os.path.join(principals, OLDPROXYFILE), "w").close()
def setUpOldDocRootWithoutDB(self):
# Set up doc root
self.olddocrootnodb = os.path.abspath(self.mktemp())
os.mkdir(self.olddocrootnodb)
principals = os.path.join(self.olddocrootnodb, "principals")
os.mkdir(principals)
os.mkdir(os.path.join(principals, "__uids__"))
os.mkdir(os.path.join(principals, "users"))
os.mkdir(os.path.join(principals, "groups"))
os.mkdir(os.path.join(principals, "locations"))
os.mkdir(os.path.join(principals, "resources"))
os.mkdir(os.path.join(principals, "sudoers"))
os.mkdir(os.path.join(self.olddocrootnodb, "calendars"))
def setUpNewDocRoot(self):
# Set up doc root
self.newdocroot = os.path.abspath(self.mktemp())
os.mkdir(self.newdocroot)
os.mkdir(os.path.join(self.newdocroot, "calendars"))
def setUpNewDataRoot(self):
# Set up data root
self.newdataroot = os.path.abspath(self.mktemp())
os.mkdir(self.newdataroot)
def setUpDataRootWithProxyDB(self):
# Set up data root
self.existingdataroot = os.path.abspath(self.mktemp())
os.mkdir(self.existingdataroot)
principals = os.path.join(self.existingdataroot, "principals")
os.mkdir(principals)
open(os.path.join(self.existingdataroot, NEWPROXYFILE), "w").close()
@inlineCallbacks
def test_normalUpgrade(self):
"""
Test the behavior of normal upgrade from old server to new.
"""
self.setUpInitialStates()
config.DocumentRoot = self.olddocroot
config.DataRoot = self.newdataroot
# Check pre-conditions
self.assertTrue(os.path.exists(os.path.join(config.DocumentRoot, "principals")))
self.assertTrue(os.path.isdir(os.path.join(config.DocumentRoot, "principals")))
self.assertTrue(os.path.exists(os.path.join(config.DocumentRoot, "principals", OLDPROXYFILE)))
self.assertFalse(os.path.exists(os.path.join(config.DataRoot, NEWPROXYFILE)))
(yield self.doUpgrade(config))
# Check post-conditions
self.assertFalse(os.path.exists(os.path.join(config.DocumentRoot, "principals",)))
self.assertTrue(os.path.exists(os.path.join(config.DataRoot, NEWPROXYFILE)))
@inlineCallbacks
def test_noUpgrade(self):
"""
Test the behavior of running on a new server (i.e. no upgrade needed).
"""
self.setUpInitialStates()
config.DocumentRoot = self.newdocroot
config.DataRoot = self.existingdataroot
# Check pre-conditions
self.assertFalse(os.path.exists(os.path.join(config.DocumentRoot, "principals")))
self.assertTrue(os.path.exists(os.path.join(config.DataRoot, NEWPROXYFILE)))
(yield self.doUpgrade(config))
# Check post-conditions
self.assertFalse(os.path.exists(os.path.join(config.DocumentRoot, "principals",)))
self.assertTrue(os.path.exists(os.path.join(config.DataRoot, NEWPROXYFILE)))
@inlineCallbacks
def test_freeBusyUpgrade(self):
"""
Test the updating of calendar-free-busy-set xattrs on inboxes
"""
self.setUpInitialStates()
directory = self.directory
#
# Verify these values require no updating:
#
# Uncompressed XML
value = "<?xml version='1.0' encoding='UTF-8'?>\r\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/__uids__/BB05932F-DCE7-4195-9ED4-0896EAFF3B0B/calendar</href>\r\n</calendar-free-busy-set>\r\n"
self.assertEquals((yield updateFreeBusySet(value, directory)), None)
# Zlib compressed XML
value = "<?xml version='1.0' encoding='UTF-8'?>\r\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/__uids__/BB05932F-DCE7-4195-9ED4-0896EAFF3B0B/calendar</href>\r\n</calendar-free-busy-set>\r\n"
value = zlib.compress(value)
self.assertEquals((yield updateFreeBusySet(value, directory)), None)
# Pickled XML
value = "<?xml version='1.0' encoding='UTF-8'?>\r\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/__uids__/BB05932F-DCE7-4195-9ED4-0896EAFF3B0B/calendar</href>\r\n</calendar-free-busy-set>\r\n"
doc = WebDAVDocument.fromString(value)
value = cPickle.dumps(doc.root_element)
self.assertEquals((yield updateFreeBusySet(value, directory)), None)
#
# Verify these values do require updating:
#
expected = "<?xml version='1.0' encoding='UTF-8'?>\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\r\n</calendar-free-busy-set>"
# Uncompressed XML
value = "<?xml version='1.0' encoding='UTF-8'?>\r\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/users/wsanchez/calendar</href>\r\n</calendar-free-busy-set>\r\n"
newValue = yield updateFreeBusySet(value, directory)
newValue = zlib.decompress(newValue)
self.assertEquals(newValue, expected)
# Zlib compressed XML
value = "<?xml version='1.0' encoding='UTF-8'?>\r\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/users/wsanchez/calendar</href>\r\n</calendar-free-busy-set>\r\n"
value = zlib.compress(value)
newValue = yield updateFreeBusySet(value, directory)
newValue = zlib.decompress(newValue)
self.assertEquals(newValue, expected)
# Pickled XML
value = "<?xml version='1.0' encoding='UTF-8'?>\r\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/users/wsanchez/calendar</href>\r\n</calendar-free-busy-set>\r\n"
doc = WebDAVDocument.fromString(value)
value = cPickle.dumps(doc.root_element)
newValue = yield updateFreeBusySet(value, directory)
newValue = zlib.decompress(newValue)
self.assertEquals(newValue, expected)
#
# Shortname not in directory, return empty string
#
expected = "<?xml version='1.0' encoding='UTF-8'?>\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'/>"
value = "<?xml version='1.0' encoding='UTF-8'?>\r\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/users/nonexistent/calendar</href>\r\n</calendar-free-busy-set>\r\n"
newValue = yield updateFreeBusySet(value, directory)
newValue = zlib.decompress(newValue)
self.assertEquals(newValue, expected)
@inlineCallbacks
def verifyDirectoryComparison(self, before, after, reverify=False):
"""
Verify that the hierarchy described by "before", when upgraded, matches
the hierarchy described by "after".
@param before: a dictionary of the format accepted by L{TestCase.createHierarchy}
@param after: a dictionary of the format accepted by L{TestCase.createHierarchy}
@param reverify: if C{True}, re-verify the hierarchy by upgrading a
second time and re-verifying the root again.
@raise twisted.trial.unittest.FailTest: if the test fails.
@return: C{None}
"""
root = self.createHierarchy(before)
config.DocumentRoot = root
config.DataRoot = root
(yield self.doUpgrade(config))
self.assertTrue(self.verifyHierarchy(root, after))
if reverify:
# Ensure that repeating the process doesn't change anything
(yield self.doUpgrade(config))
self.assertTrue(self.verifyHierarchy(root, after))
@inlineCallbacks
def test_removeNotificationDirectories(self):
"""
The upgrade process should remove unused notification directories in
users' calendar homes, as well as the XML files found therein.
"""
before = {
"calendars": {
"users": {
"wsanchez": {
"calendar": {
db_basename: {
"@contents": "",
},
},
"notifications": {
"sample-notification.xml": {
"@contents": "<?xml version='1.0'>\n<should-be-ignored />"
}
}
}
}
}
}
after = {
"calendars": {
"__uids__": {
"64": {
"23": {
"6423F94A-6B76-4A3A-815B-D52CFD77935D": {
"calendar": {
db_basename: {
"@contents": "",
},
},
}
}
}
}
},
".calendarserver_version": {
"@contents": "2",
},
}
(yield self.verifyDirectoryComparison(before, after))
@inlineCallbacks
def test_calendarsUpgradeWithTypes(self):
"""
Verify that calendar homes in the /calendars/<type>/<shortname>/ form
are upgraded to /calendars/__uids__/XX/YY/<guid> form
"""
before = {
"calendars":
{
"users":
{
"wsanchez":
{
"calendar":
{
db_basename: {
"@contents": "",
},
"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics":
{
"@contents": event01_before,
"@xattrs":
{
md5Attr: "12345",
},
},
"@xattrs":
{
cTagAttr: "12345",
},
},
"inbox":
{
db_basename: {
"@contents": "",
},
"@xattrs":
{
# Pickled XML Doc
freeBusyAttr: cPickle.dumps(WebDAVDocument.fromString("<?xml version='1.0' encoding='UTF-8'?>\r\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/users/wsanchez/calendar</href>\r\n</calendar-free-busy-set>\r\n").root_element),
},
},
},
},
"groups":
{
"managers":
{
"calendar":
{
db_basename: {
"@contents": "",
},
},
},
},
},
"principals":
{
OLDPROXYFILE:
{
"@contents": "",
}
}
}
after = {
".calendarserver_version":
{
"@contents": "2",
},
"calendars":
{
"__uids__":
{
"64":
{
"23":
{
"6423F94A-6B76-4A3A-815B-D52CFD77935D":
{
"calendar":
{
db_basename: {
"@contents": "",
},
"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics":
{
"@contents": event01_after,
"@xattrs":
{
md5Attr: zlib.compress("<?xml version='1.0' encoding='UTF-8'?>\r\n<getcontentmd5 xmlns='http://twistedmatrix.com/xml_namespace/dav/'>%s</getcontentmd5>\r\n" % (event01_after_md5,)),
},
},
"@xattrs":
{
cTagAttr: isValidCTag, # method below
},
},
"inbox":
{
db_basename: {
"@contents": "",
},
"@xattrs":
{
freeBusyAttr: zlib.compress("<?xml version='1.0' encoding='UTF-8'?>\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\r\n</calendar-free-busy-set>"),
},
},
},
},
},
"9F":
{
"F6":
{
"9FF60DAD-0BDE-4508-8C77-15F0CA5C8DD1":
{
"calendar":
{
db_basename: {
"@contents": "",
},
},
},
},
},
},
},
NEWPROXYFILE:
{
"@contents": None,
},
}
(yield self.verifyDirectoryComparison(before, after, reverify=True))
@inlineCallbacks
def test_calendarsUpgradeWithOrphans(self):
"""
Verify that calendar homes in the /calendars/<type>/<shortname>/ form
whose records don't exist are moved into dataroot/archived/
"""
before = {
"calendars":
{
"users":
{
"unknownuser":
{
},
},
"groups":
{
"unknowngroup":
{
},
},
},
"principals":
{
OLDPROXYFILE:
{
"@contents": "",
}
}
}
after = {
"archived":
{
"unknownuser":
{
},
"unknowngroup":
{
},
},
".calendarserver_version":
{
"@contents": "2",
},
"calendars":
{
"__uids__":
{
},
},
NEWPROXYFILE:
{
"@contents": None,
},
}
(yield self.verifyDirectoryComparison(before, after, reverify=True))
@inlineCallbacks
def test_calendarsUpgradeWithDuplicateOrphans(self):
"""
Verify that calendar homes in the /calendars/<type>/<shortname>/ form
whose records don't exist are moved into dataroot/archived/
"""
before = {
"archived":
{
"unknownuser":
{
},
"unknowngroup":
{
},
},
"calendars":
{
"users":
{
"unknownuser":
{
},
},
"groups":
{
"unknowngroup":
{
},
},
},
"principals":
{
OLDPROXYFILE:
{
"@contents": "",
}
}
}
after = {
"archived":
{
"unknownuser":
{
},
"unknowngroup":
{
},
"unknownuser.1":
{
},
"unknowngroup.1":
{
},
},
".calendarserver_version":
{
"@contents": "2",
},
"calendars":
{
"__uids__":
{
},
},
NEWPROXYFILE:
{
"@contents": None,
},
}
(yield self.verifyDirectoryComparison(before, after, reverify=True))
@inlineCallbacks
def test_calendarsUpgradeWithUnknownFiles(self):
"""
        Unknown files, including .DS_Store files at any point in the
        hierarchy, as well as non-directory files in a user's calendar home,
        are ignored and do not interrupt an upgrade.
"""
ignoredUIDContents = {
"64": {
"23": {
"6423F94A-6B76-4A3A-815B-D52CFD77935D": {
"calendar": {
db_basename: {
"@contents": "",
},
},
"garbage.ics": {
"@contents": "Oops, not actually an ICS file.",
},
"other-file.txt": {
"@contents": "Also not a calendar collection."
},
}
}
},
".DS_Store": {
"@contents": "",
}
}
before = {
".DS_Store":
{
"@contents": "",
},
"calendars":
{
".DS_Store":
{
"@contents": "",
},
"__uids__": ignoredUIDContents,
},
"principals":
{
".DS_Store":
{
"@contents": "",
},
OLDPROXYFILE:
{
"@contents": "",
}
}
}
after = {
".DS_Store":
{
"@contents": "",
},
".calendarserver_version":
{
"@contents": "2",
},
"calendars":
{
".DS_Store":
{
"@contents": "",
},
"__uids__": ignoredUIDContents,
},
NEWPROXYFILE:
{
"@contents": None,
},
}
(yield self.verifyDirectoryComparison(before, after, reverify=True))
@inlineCallbacks
def test_calendarsUpgradeWithNestedCollections(self):
"""
        Nested (non-calendar) collections inside a user's calendar home are
        renamed with a ".collection." prefix rather than interrupting the
        upgrade.
"""
beforeUIDContents = {
"64": {
"23": {
"6423F94A-6B76-4A3A-815B-D52CFD77935D": {
"calendar": {
db_basename: {
"@contents": "",
},
},
"nested1": {
"nested2": {},
},
}
}
},
".DS_Store": {
"@contents": "",
}
}
afterUIDContents = {
"64": {
"23": {
"6423F94A-6B76-4A3A-815B-D52CFD77935D": {
"calendar": {
db_basename: {
"@contents": "",
},
},
".collection.nested1": {
"nested2": {},
},
}
}
},
".DS_Store": {
"@contents": "",
}
}
before = {
".DS_Store":
{
"@contents": "",
},
"calendars":
{
".DS_Store":
{
"@contents": "",
},
"__uids__": beforeUIDContents,
},
"principals":
{
".DS_Store":
{
"@contents": "",
},
OLDPROXYFILE:
{
"@contents": "",
}
}
}
after = {
".DS_Store":
{
"@contents": "",
},
".calendarserver_version":
{
"@contents": "2",
},
"calendars":
{
".DS_Store":
{
"@contents": "",
},
"__uids__": afterUIDContents,
},
NEWPROXYFILE:
{
"@contents": None,
},
}
(yield self.verifyDirectoryComparison(before, after, reverify=True))
@inlineCallbacks
def test_calendarsUpgradeWithUIDs(self):
"""
Verify that calendar homes in the /calendars/__uids__/<guid>/ form
are upgraded to /calendars/__uids__/XX/YY/<guid>/ form
"""
before = {
"calendars":
{
"__uids__":
{
"6423F94A-6B76-4A3A-815B-D52CFD77935D":
{
"calendar":
{
db_basename: {
"@contents": "",
},
"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics":
{
"@contents": event01_before,
},
},
"inbox":
{
db_basename: {
"@contents": "",
},
"@xattrs":
{
# Plain XML
freeBusyAttr: "<?xml version='1.0' encoding='UTF-8'?>\r\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/users/wsanchez/calendar</href>\r\n</calendar-free-busy-set>\r\n",
},
},
},
},
},
"principals":
{
OLDPROXYFILE:
{
"@contents": "",
}
}
}
after = {
".calendarserver_version":
{
"@contents": "2",
},
"calendars":
{
"__uids__":
{
"64":
{
"23":
{
"6423F94A-6B76-4A3A-815B-D52CFD77935D":
{
"calendar":
{
db_basename: {
"@contents": "",
},
"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics":
{
"@contents": event01_after,
},
"@xattrs":
{
cTagAttr: isValidCTag, # method below
},
},
"inbox":
{
db_basename: {
"@contents": "",
},
"@xattrs":
{
freeBusyAttr: zlib.compress("<?xml version='1.0' encoding='UTF-8'?>\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\r\n</calendar-free-busy-set>"),
},
},
},
},
},
},
},
NEWPROXYFILE:
{
"@contents": None,
},
}
(yield self.verifyDirectoryComparison(before, after, reverify=True))
@inlineCallbacks
def test_calendarsUpgradeWithUIDsMultilevel(self):
"""
Verify that calendar homes in the /calendars/__uids__/XX/YY/<guid>/
form are upgraded correctly in place
"""
before = {
"calendars":
{
"__uids__":
{
"64":
{
"23":
{
"6423F94A-6B76-4A3A-815B-D52CFD77935D":
{
"calendar":
{
db_basename: {
"@contents": "",
},
"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics":
{
"@contents": event01_before,
"@xattrs":
{
md5Attr: "12345",
},
},
"@xattrs":
{
xattrname("ignore"): "extra",
cTagAttr: "12345",
},
},
"inbox":
{
db_basename: {
"@contents": "",
},
"@xattrs":
{
# Zlib compressed XML
freeBusyAttr: zlib.compress("<?xml version='1.0' encoding='UTF-8'?>\r\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/users/wsanchez/calendar</href>\r\n</calendar-free-busy-set>\r\n"),
},
},
},
},
},
},
},
NEWPROXYFILE:
{
"@contents": "",
}
}
after = {
".calendarserver_version":
{
"@contents": "2",
},
"calendars":
{
"__uids__":
{
"64":
{
"23":
{
"6423F94A-6B76-4A3A-815B-D52CFD77935D":
{
"calendar":
{
db_basename: {
"@contents": "",
},
"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics":
{
"@contents": event01_after,
"@xattrs":
{
md5Attr: zlib.compress("<?xml version='1.0' encoding='UTF-8'?>\r\n<getcontentmd5 xmlns='http://twistedmatrix.com/xml_namespace/dav/'>%s</getcontentmd5>\r\n" % (event01_after_md5,)),
},
},
"@xattrs":
{
xattrname("ignore"): "extra",
cTagAttr: isValidCTag, # method below
},
},
"inbox":
{
db_basename: {
"@contents": "",
},
"@xattrs":
{
freeBusyAttr: zlib.compress("<?xml version='1.0' encoding='UTF-8'?>\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\r\n</calendar-free-busy-set>"),
},
},
},
},
},
},
},
NEWPROXYFILE:
{
"@contents": None,
},
}
(yield self.verifyDirectoryComparison(before, after, reverify=True))
@inlineCallbacks
def test_calendarsUpgradeWithNoChange(self):
"""
Verify that calendar homes in the /calendars/__uids__/XX/YY/<guid>/
form which require no changes are untouched
"""
before = {
"calendars":
{
"__uids__":
{
"64":
{
"23":
{
"6423F94A-6B76-4A3A-815B-D52CFD77935D":
{
"calendar":
{
db_basename: {
"@contents": "",
},
"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics":
{
"@contents": event01_after,
"@xattrs":
{
md5Attr: zlib.compress("<?xml version='1.0' encoding='UTF-8'?>\r\n<getcontentmd5 xmlns='http://twistedmatrix.com/xml_namespace/dav/'>%s</getcontentmd5>\r\n" % (event01_after_md5,)),
},
},
"@xattrs":
{
xattrname("ignore"): "extra",
cTagAttr: zlib.compress("<?xml version='1.0' encoding='UTF-8'?>\r\n<getctag xmlns='http://calendarserver.org/ns/'>2009-02-25 14:34:34.703093</getctag>\r\n"),
},
},
"inbox":
{
db_basename: {
"@contents": "",
},
"@xattrs":
{
# Zlib compressed XML
freeBusyAttr: zlib.compress("<?xml version='1.0' encoding='UTF-8'?>\r\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\r\n</calendar-free-busy-set>\r\n"),
},
},
},
},
},
},
},
NEWPROXYFILE:
{
"@contents": "",
}
}
after = {
".calendarserver_version":
{
"@contents": "2",
},
"calendars":
{
"__uids__":
{
"64":
{
"23":
{
"6423F94A-6B76-4A3A-815B-D52CFD77935D":
{
"calendar":
{
db_basename: {
"@contents": "",
},
"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics":
{
"@contents": event01_after,
"@xattrs":
{
md5Attr: zlib.compress("<?xml version='1.0' encoding='UTF-8'?>\r\n<getcontentmd5 xmlns='http://twistedmatrix.com/xml_namespace/dav/'>%s</getcontentmd5>\r\n" % (event01_after_md5,)),
},
},
"@xattrs":
{
xattrname("ignore"): "extra",
cTagAttr: zlib.compress("<?xml version='1.0' encoding='UTF-8'?>\r\n<getctag xmlns='http://calendarserver.org/ns/'>2009-02-25 14:34:34.703093</getctag>\r\n"),
},
},
"inbox":
{
db_basename: {
"@contents": "",
},
"@xattrs":
{
freeBusyAttr: zlib.compress("<?xml version='1.0' encoding='UTF-8'?>\r\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\r\n</calendar-free-busy-set>\r\n"),
},
},
},
},
},
},
},
NEWPROXYFILE:
{
"@contents": None,
},
}
(yield self.verifyDirectoryComparison(before, after))
@inlineCallbacks
def test_calendarsUpgradeWithInboxItems(self):
"""
Verify that inbox items older than 60 days are deleted
"""
before = {
"calendars":
{
"__uids__":
{
"64":
{
"23":
{
"6423F94A-6B76-4A3A-815B-D52CFD77935D":
{
"inbox":
{
db_basename: {
"@contents": "",
},
"@xattrs":
{
# Zlib compressed XML
freeBusyAttr: zlib.compress("<?xml version='1.0' encoding='UTF-8'?>\r\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\r\n</calendar-free-busy-set>\r\n"),
},
"oldinboxitem": {
"@contents": "",
"@timestamp": 1, # really old file
},
"newinboxitem": {
"@contents": "",
},
},
},
},
},
},
},
NEWPROXYFILE:
{
"@contents": "",
}
}
after = {
".calendarserver_version":
{
"@contents": "2",
},
"inboxitems.txt":
{
"@contents": None, # ignore contents, the paths inside are random test directory paths
},
"calendars":
{
"__uids__":
{
"64":
{
"23":
{
"6423F94A-6B76-4A3A-815B-D52CFD77935D":
{
"inbox":
{
db_basename: {
"@contents": "",
},
"@xattrs":
{
freeBusyAttr: zlib.compress("<?xml version='1.0' encoding='UTF-8'?>\r\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\r\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\r\n</calendar-free-busy-set>\r\n"),
},
"newinboxitem": {
"@contents": "",
},
},
},
},
},
},
},
NEWPROXYFILE:
{
"@contents": None,
},
}
(yield self.verifyDirectoryComparison(before, after))
@inlineCallbacks
def test_calendarsUpgradeWithError(self):
"""
Verify that a problem with one resource doesn't stop the process, but
also doesn't write the new version file
"""
before = {
"calendars":
{
"__uids__":
{
"64":
{
"23":
{
"6423F94A-6B76-4A3A-815B-D52CFD77935E":
{
"calendar":
{
db_basename: {
"@contents": "",
},
"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics":
{
"@contents": event01_before,
},
"1E238CA1-3C95-4468-B8CD-C8A399F78C73.ics":
{
"@contents": event02_broken,
},
},
},
},
},
},
},
NEWPROXYFILE:
{
"@contents": "",
}
}
after = {
".calendarserver_version":
{
"@contents": "2",
},
"calendars":
{
"__uids__":
{
"64":
{
"23":
{
"6423F94A-6B76-4A3A-815B-D52CFD77935E":
{
"calendar":
{
db_basename: {
"@contents": "",
},
"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics":
{
"@contents": event01_after,
},
"1E238CA1-3C95-4468-B8CD-C8A399F78C73.ics":
{
"@contents": event02_broken,
},
},
},
},
},
},
},
NEWPROXYFILE:
{
"@contents": None,
},
}
root = self.createHierarchy(before)
config.DocumentRoot = root
config.DataRoot = root
(yield self.doUpgrade(config))
self.assertTrue(self.verifyHierarchy(root, after))
def test_removeIllegalCharacters(self):
"""
Control characters aside from NL and CR are removed.
"""
data = "Contains\x03 control\x06 characters\x12 some\x0a\x09allowed\x0d"
after, changed = removeIllegalCharacters(data)
self.assertEquals(after, "Contains control characters some\x0a\x09allowed\x0d")
self.assertTrue(changed)
data = "Contains\x09only\x0a legal\x0d"
after, changed = removeIllegalCharacters(data)
self.assertEquals(after, "Contains\x09only\x0a legal\x0d")
self.assertFalse(changed)
@inlineCallbacks
def test_normalizeCUAddrs(self):
"""
Ensure that calendar user addresses (CUAs) are cached so we can
reduce the number of principal lookup calls during upgrade.
"""
class StubRecord(object):
def __init__(self, fullNames, uid, cuas):
self.fullNames = fullNames
self.uid = uid
self.calendarUserAddresses = cuas
def getCUType(self):
return "INDIVIDUAL"
@property
def displayName(self):
return self.fullNames[0]
class StubDirectory(object):
def __init__(self):
self.count = 0
def recordWithCalendarUserAddress(self, cuaddr):
self.count += 1
record = records.get(cuaddr, None)
if record is not None:
return succeed(record)
else:
raise Exception
records = {
"mailto:a@example.com":
StubRecord(("User A",), u"123", ("mailto:a@example.com", "urn:x-uid:123")),
"mailto:b@example.com":
StubRecord(("User B",), u"234", ("mailto:b@example.com", "urn:x-uid:234")),
"/principals/users/a":
StubRecord(("User A",), u"123", ("mailto:a@example.com", "urn:x-uid:123")),
"/principals/users/b":
StubRecord(("User B",), u"234", ("mailto:b@example.com", "urn:x-uid:234")),
}
directory = StubDirectory()
cuaCache = {}
yield normalizeCUAddrs(normalizeEvent, directory, cuaCache)
yield normalizeCUAddrs(normalizeEvent, directory, cuaCache)
        # Ensure we only called recordWithCalendarUserAddress 3 times. It
        # would have been 8 times without the cuaCache.
self.assertEquals(directory.count, 3)
@inlineCallbacks
def test_migrateDelegates(self):
store = self.storeUnderTest()
record = yield self.directory.recordWithUID(u"mercury")
txn = store.newTransaction()
writeDelegates = yield Delegates.delegatesOf(txn, record, True)
self.assertEquals(len(writeDelegates), 0)
yield txn.commit()
# Load delegates from xml into sqlite
sqliteProxyService = ProxySqliteDB("proxies.sqlite")
proxyFile = os.path.join(config.DataRoot, "proxies.xml")
yield loadDelegatesFromXMLintoProxyDB(proxyFile, sqliteProxyService)
sqliteProxyService.close()
# Load delegates from sqlite into store
yield migrateDelegatesToStore(store)
# Check delegates in store
txn = store.newTransaction()
writeDelegates = yield Delegates.delegatesOf(txn, record, True)
self.assertEquals(len(writeDelegates), 1)
self.assertEquals(
set([d.uid for d in writeDelegates]),
set([u"left_coast"])
)
record = yield self.directory.recordWithUID(u"non_calendar_proxy")
readDelegates = yield Delegates.delegatesOf(txn, record, False)
self.assertEquals(len(readDelegates), 1)
self.assertEquals(
set([d.uid for d in readDelegates]),
set([u"recursive2_coasts"])
)
yield txn.commit()
@inlineCallbacks
def test_migrateAutoSchedule(self):
self.patch(config.AugmentService, "params", {"xmlFiles": ["augments.xml"]})
serviceClass = {
"xml": "twistedcaldav.directory.augment.AugmentXMLDB",
}
augmentClass = namedClass(serviceClass[config.AugmentService.type])
# Auto-schedule not currently set
augmentService = augmentClass(**config.AugmentService.params)
augmentRecord = (
yield augmentService.getAugmentRecord(
"mercury",
"locations"
)
)
self.assertEqual(augmentRecord.autoScheduleMode, "default")
# Create bogus record in resourceinfo db
resourceInfoDatabase = ResourceInfoDatabase(config.DataRoot)
resourceInfoDatabase._db_execute(
"insert into RESOURCEINFO (GUID, AUTOSCHEDULE) values (:1, :2)",
"mercury", 1,
)
resourceInfoDatabase._db_execute(
"insert into RESOURCEINFO (GUID, AUTOSCHEDULE) values (:1, :2)",
None, 1,
)
resourceInfoDatabase._db_commit()
# Migrate auto-schedule from sqlite into directory
yield migrateAutoSchedule(config, self.directory)
# Auto-schedule now set
augmentService = augmentClass(**config.AugmentService.params)
augmentRecord = (
yield augmentService.getAugmentRecord(
"mercury",
"locations"
)
)
self.assertEqual(augmentRecord.autoScheduleMode, "automatic")
def test_resourcesXML(self):
"""
Verify conversion of old resources.xml format to twext.who.xml format
"""
fileName = self.mktemp()
fp = FilePath(fileName)
fp.setContent(oldResourcesFormat)
upgradeResourcesXML(fp)
self.assertEquals(fp.getContent(), newResourcesFormat)
def test_augmentsXML(self):
"""
Verify conversion of old augments.xml auto-schedule related elements to twext.who format
"""
fileName = self.mktemp()
fp = FilePath(fileName)
fp.setContent(oldAugmentsFormat)
upgradeAugmentsXML(fp)
self.assertEquals(fp.getContent(), newAugmentsFormat)
oldResourcesFormat = """<accounts realm="/Search">
<location>
<uid>location1</uid>
<guid>C4F46062-9094-4D34-8591-61A42D993FAA</guid>
<name>location name</name>
</location>
<location>
<uid>5456580a-08ee-4288-8a87-2b4204a62a12</uid>
<guid>5456580a-08ee-4288-8a87-2b4204a62a12</guid>
<name>Fake Room</name>
</location>
<resource>
<uid>resource1</uid>
<guid>60B771CC-D727-4453-ACE0-0FE13CD7445A</guid>
<name>resource name</name>
</resource>
</accounts>
"""
newResourcesFormat = """<directory realm="/Search"><record type="location"><short-name>location1</short-name><guid>C4F46062-9094-4D34-8591-61A42D993FAA</guid><uid>C4F46062-9094-4D34-8591-61A42D993FAA</uid><full-name>location name</full-name></record><record type="location"><short-name>5456580A-08EE-4288-8A87-2B4204A62A12</short-name><guid>5456580A-08EE-4288-8A87-2B4204A62A12</guid><uid>5456580A-08EE-4288-8A87-2B4204A62A12</uid><full-name>Fake Room</full-name></record><record type="resource"><short-name>resource1</short-name><guid>60B771CC-D727-4453-ACE0-0FE13CD7445A</guid><uid>60B771CC-D727-4453-ACE0-0FE13CD7445A</uid><full-name>resource name</full-name></record></directory>"""
oldAugmentsFormat = """<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE augments SYSTEM "augments.dtd">
<augments>
<record>
<uid>9F3603DD-65D0-480D-A1D1-5D33CAC41A13</uid>
<enable>true</enable>
<enable-calendar>true</enable-calendar>
<enable-addressbook>true</enable-addressbook>
<enable-login>true</enable-login>
<auto-schedule>false</auto-schedule>
<auto-schedule-mode>default</auto-schedule-mode>
</record>
<record>
<uid>6A49C436-4CDB-4184-AD87-6F945040E37A</uid>
<enable>true</enable>
<enable-calendar>true</enable-calendar>
<enable-addressbook>true</enable-addressbook>
<enable-login>true</enable-login>
<auto-schedule>true</auto-schedule>
</record>
<record>
<uid>60B771CC-D727-4453-ACE0-0FE13CD7445A</uid>
<enable>true</enable>
<enable-calendar>true</enable-calendar>
<enable-addressbook>true</enable-addressbook>
<enable-login>true</enable-login>
<auto-schedule-mode>none</auto-schedule-mode>
</record>
<record>
<uid>E173AADC-4642-43CB-9745-8CE436A6FE4A</uid>
<enable>false</enable>
<enable-calendar>true</enable-calendar>
<enable-addressbook>true</enable-addressbook>
<enable-login>true</enable-login>
<auto-schedule>false</auto-schedule>
<auto-schedule-mode>automatic</auto-schedule-mode>
</record>
<record>
<uid>FC9A7F56-CCCA-4401-9160-903902880A37</uid>
<enable>false</enable>
<enable-calendar>true</enable-calendar>
<enable-addressbook>true</enable-addressbook>
<enable-login>true</enable-login>
<auto-schedule>true</auto-schedule>
<auto-schedule-mode>accept-if-free</auto-schedule-mode>
</record>
<record>
<uid>B35DDFCF-C5E2-475E-A57E-8AB7422E9BB8</uid>
<enable>true</enable>
<enable-calendar>true</enable-calendar>
<enable-addressbook>true</enable-addressbook>
<auto-schedule>false</auto-schedule>
</record>
</augments>
"""
newAugmentsFormat = """<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE augments SYSTEM "augments.dtd">
<augments>
<record>
<uid>9F3603DD-65D0-480D-A1D1-5D33CAC41A13</uid>
<enable-calendar>true</enable-calendar>
<enable-addressbook>true</enable-addressbook>
<enable-login>true</enable-login>
<auto-schedule-mode>none</auto-schedule-mode>
</record>
<record>
<uid>6A49C436-4CDB-4184-AD87-6F945040E37A</uid>
<enable-calendar>true</enable-calendar>
<enable-addressbook>true</enable-addressbook>
<enable-login>true</enable-login>
<auto-schedule-mode>automatic</auto-schedule-mode>
</record>
<record>
<uid>60B771CC-D727-4453-ACE0-0FE13CD7445A</uid>
<enable-calendar>true</enable-calendar>
<enable-addressbook>true</enable-addressbook>
<enable-login>true</enable-login>
<auto-schedule-mode>none</auto-schedule-mode>
</record>
<record>
<uid>E173AADC-4642-43CB-9745-8CE436A6FE4A</uid>
<enable-calendar>true</enable-calendar>
<enable-addressbook>true</enable-addressbook>
<enable-login>true</enable-login>
<auto-schedule-mode>none</auto-schedule-mode>
</record>
<record>
<uid>FC9A7F56-CCCA-4401-9160-903902880A37</uid>
<enable-calendar>true</enable-calendar>
<enable-addressbook>true</enable-addressbook>
<enable-login>true</enable-login>
<auto-schedule-mode>accept-if-free</auto-schedule-mode>
</record>
<record>
<uid>B35DDFCF-C5E2-475E-A57E-8AB7422E9BB8</uid>
<enable-calendar>true</enable-calendar>
<enable-addressbook>true</enable-addressbook>
<auto-schedule-mode>none</auto-schedule-mode>
</record>
</augments>
"""
normalizeEvent = """BEGIN:VCALENDAR
VERSION:2.0
BEGIN:VEVENT
TRANSP:OPAQUE
UID:1E238CA1-3C95-4468-B8CD-C8A399F78C71
DTSTART:20090203
DTEND:20090204
ORGANIZER;CN="User A":mailto:a@example.com
SUMMARY:New Event
DESCRIPTION:Foo
ATTENDEE;CN="User A";CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:mailto:a@example.com
ATTENDEE;CN="User B";CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:mailto:b@example.com
ATTENDEE;CN="Unknown";CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:mailto:unknown@example.com
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n")
event01_before = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Apple Inc.//iCal 3.0//EN
CALSCALE:GREGORIAN
BEGIN:VTIMEZONE
TZID:US/Pacific
BEGIN:DAYLIGHT
TZOFFSETFROM:-0800
TZOFFSETTO:-0700
DTSTART:20070311T020000
RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SU
TZNAME:PDT
END:DAYLIGHT
BEGIN:STANDARD
TZOFFSETFROM:-0700
TZOFFSETTO:-0800
DTSTART:20071104T020000
RRULE:FREQ=YEARLY;BYMONTH=11;BYDAY=1SU
TZNAME:PST
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
SEQUENCE:2
TRANSP:OPAQUE
UID:1E238CA1-3C95-4468-B8CD-C8A399F78C71
DTSTART;TZID=US/Pacific:20090203T120000
ORGANIZER;CN="Cyrus":mailto:cdaboo@example.com
DTSTAMP:20090203T181924Z
SUMMARY:New Event
DESCRIPTION:This has \\" Bad Quotes \\" in it
ATTENDEE;CN="Wilfredo";CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:mailto:wsanchez
@example.com
ATTENDEE;CN="Double";CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED:mailto:doublequotes
@example.com
ATTENDEE;CN="Cyrus";CUTYPE=INDIVIDUAL;PARTSTAT=ACCEPTED;ROLE=REQ-PARTICI
PANT:mailto:cdaboo@example.com
CREATED:20090203T181910Z
DTEND;TZID=US/Pacific:20090203T130000
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n")
event01_after = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Apple Inc.//iCal 3.0//EN
BEGIN:VTIMEZONE
TZID:US/Pacific
BEGIN:DAYLIGHT
DTSTART:20070311T020000
RRULE:FREQ=YEARLY;BYDAY=2SU;BYMONTH=3
TZNAME:PDT
TZOFFSETFROM:-0800
TZOFFSETTO:-0700
END:DAYLIGHT
BEGIN:STANDARD
DTSTART:20071104T020000
RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=11
TZNAME:PST
TZOFFSETFROM:-0700
TZOFFSETTO:-0800
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
UID:1E238CA1-3C95-4468-B8CD-C8A399F78C71
DTSTART;TZID=US/Pacific:20090203T120000
DTEND;TZID=US/Pacific:20090203T130000
ATTENDEE;CN=Wilfredo Sanchez-Vega;CUTYPE=INDIVIDUAL;EMAIL=wsanchez@example
.com;PARTSTAT=ACCEPTED:urn:x-uid:6423F94A-6B76-4A3A-815B-D52CFD77935D
ATTENDEE;CN=Double 'quotey' Quotes;CUTYPE=INDIVIDUAL;EMAIL=doublequotes@ex
ample.com;PARTSTAT=ACCEPTED:urn:x-uid:8E04787E-336D-41ED-A70B-D233AD0DCE6
F
ATTENDEE;CN=Cyrus Daboo;CUTYPE=INDIVIDUAL;EMAIL=cdaboo@example.com;PARTSTA
T=ACCEPTED;ROLE=REQ-PARTICIPANT:urn:x-uid:5A985493-EE2C-4665-94CF-4DFEA3A
89500
CREATED:20090203T181910Z
DESCRIPTION:This has " Bad Quotes " in it
DTSTAMP:20090203T181924Z
ORGANIZER;CN=Cyrus Daboo;EMAIL=cdaboo@example.com:urn:x-uid:5A985493-EE2C-
4665-94CF-4DFEA3A89500
SEQUENCE:2
SUMMARY:New Event
TRANSP:OPAQUE
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n")
event02_broken = "Invalid!"
event01_after_md5 = hashlib.md5(event01_after).hexdigest()
def isValidCTag(value):
"""
    Since the ctag is generated from datetime.now(), just verify that the
    value is zlib-compressed XML.
"""
try:
value = zlib.decompress(value)
except zlib.error:
return False
try:
WebDAVDocument.fromString(value)
return True
except ValueError:
return False
|
|
#!/usr/bin/env python2
# coding: utf-8
import thread
import time
import unittest
from pykit import cacheable
class TestLRU(unittest.TestCase):
def _assert_lru_list(self, lru):
size = lru.size
item_head = lru.head
item_tail = lru.tail
for i in range(size):
item_head = item_head['next']
item_tail = item_tail['pre']
self.assertIs(item_head, lru.tail)
self.assertIs(item_tail, lru.head)
def _assert_lru_items_order(self, lru, order_keys):
item = lru.head['next']
lru_keys = []
while item is not None:
lru_keys.append(item['key'])
item = item['next']
self.assertEqual(order_keys, lru_keys)
def test_lru_timeout(self):
cases = (
('k1', 'v1', 3, True),
('k2', 'v2', 1, False),
)
lru = cacheable.LRU(10, 2)
for key, val, sleep_time, is_timeout in cases:
lru[key] = val
time.sleep(sleep_time)
try:
lru[key]
self.assertFalse(is_timeout)
except KeyError:
self._assert_lru_list(lru)
self.assertTrue(is_timeout)
def test_lru_getitem(self):
capacity = 4
cases = (
(3,
[0, 1, 2],
[]),
(4,
[0, 1, 2, 3],
[]),
(5,
[0, 1, 2, 3, 4],
[]),
(6,
[0, 1, 2, 3, 4, 5],
[]),
(7,
[3, 4, 5, 6],
[0, 1, 2]),
)
for insert_count, exist_items, cleanup_items in cases:
lru = cacheable.LRU(capacity, 10)
for i in range(insert_count):
lru[i] = 'val%d' % (i)
for i in range(insert_count):
try:
val = lru[i]
self.assertEqual(val, 'val%d' % (i))
self.assertEqual(lru.tail['key'], i)
self.assertIn(i, exist_items)
except KeyError:
self.assertIn(i, cleanup_items)
def test_lru_setitem(self):
capacity = 4
cases = (
(3,
[0, 1, 2]),
(4,
[0, 1, 2, 3]),
(5,
[0, 1, 2, 3, 4]),
(6,
[0, 1, 2, 3, 4, 5]),
# size of lru > capacity*1.5
# clean items from head, until size=capacity
(7,
[3, 4, 5, 6]),
)
for insert_count, expect_order_keys in cases:
lru = cacheable.LRU(capacity, 10)
for i in range(insert_count):
lru[i] = 'val'
self._assert_lru_list(lru)
self._assert_lru_items_order(lru, expect_order_keys)
def test_lru_capacity(self):
cases = (
(0,
((0, 0), (1, 0))),
(1,
((1, 1), (2, 1))),
(10,
((9, 9), (10, 10), (13, 13), (15, 15), (16, 10))),
)
for capacity, case in cases:
for insert_count, expect_size in case:
lru = cacheable.LRU(capacity, 60)
for i in range(insert_count):
lru[i] = 'val'
self.assertEqual(lru.size, expect_size)
class TestCacheable(unittest.TestCase):
@cacheable.cache('method_cache_data', capacity=10, timeout=4, is_deepcopy=False)
def _method_cache_data(self, key):
data = need_cache_data.get(key, {})
data['tm'] = time.time()
return data
def test_get_items_from_cache(self):
cache_items = {}
for key in need_cache_data.iterkeys():
cache_items[key] = get_cache_data(key)
time.sleep(0.1)
for key in need_cache_data.iterkeys():
self.assertEqual(get_cache_data(key), cache_items[key])
def test_get_items_from_cache_use_method(self):
cache_items = {}
for key in need_cache_data.iterkeys():
cache_items[key] = self._method_cache_data(key)
time.sleep(0.1)
for key in need_cache_data.iterkeys():
self.assertEqual(self._method_cache_data(key), cache_items[key])
def test_cache_item_timeout_and_cache_again(self):
cases = (
(2, False),
(4, True),
)
tm = get_cache_data('key')['tm']
for sleep_time, is_timeout in cases:
time.sleep(sleep_time)
if is_timeout:
self.assertNotEqual(tm, get_cache_data('key')['tm'])
else:
self.assertEqual(tm, get_cache_data('key')['tm'])
def test_get_deepcopy_item_from_cache(self):
self.assertIsNot(get_deepcopy_of_cache_data('key1'),
get_deepcopy_of_cache_data('key1'))
def test_get_concurrent_update_cache_data(self):
result = {}
def _store_result(key):
result[key] = get_concurrent_update_cache_data()
thread.start_new_thread(_store_result, ('key1',))
time.sleep(1)
thread.start_new_thread(_store_result, ('key2',))
while True:
if 'key2' in result:
break
time.sleep(0.1)
self.assertIsNot(result['key1'], result['key2'])
    def test_get_mutex_update_cache_data(self):
result = {}
def _store_result(key):
result[key] = get_mutex_update_cache_data()
thread.start_new_thread(_store_result, ('key1',))
time.sleep(1)
thread.start_new_thread(_store_result, ('key2',))
while True:
if 'key2' in result:
break
time.sleep(0.1)
self.assertIs(result['key1'], result['key2'])
def test_generate_lru_key(self):
cases = (
((),
{},
"[(), []]"),
((1),
{1: 'val_1'},
"[1, [(1, 'val_1')]]"),
((1),
{'a': 'val_a'},
"[1, [('a', 'val_a')]]"),
(('c'),
{'b': 'val_b'},
"['c', [('b', 'val_b')]]"),
((1, 'c'),
{'a': 'val_a', 'b': 'val_b'},
"[(1, 'c'), [('a', 'val_a'), ('b', 'val_b')]]"),
(('c', 1),
{'b': 'val_b', 'a': 'val_a'},
"[('c', 1), [('a', 'val_a'), ('b', 'val_b')]]"),
)
for args, argkv, expect_str in cases:
self.assertEqual(cacheable.Cacheable()._arg_str(args, argkv),
expect_str)
@cacheable.cache('deepcopy_of_cache_data', capacity=100, timeout=60, is_deepcopy=True)
def get_deepcopy_of_cache_data(key):
return need_cache_data.get(key, {})
@cacheable.cache('cache_data', capacity=100, timeout=4, is_deepcopy=False)
def get_cache_data(key):
cache_data = need_cache_data.get(key, {})
cache_data['tm'] = time.time()
return cache_data
@cacheable.cache('concurrent_update_cache_data', capacity=100, timeout=60,
is_deepcopy=False, mutex_update=False)
def get_concurrent_update_cache_data():
time.sleep(3)
return {}
@cacheable.cache('mutex_update_cache_data', capacity=100, timeout=60,
is_deepcopy=False, mutex_update=True)
def get_mutex_update_cache_data():
time.sleep(3)
return {}
need_cache_data = {
'key1': {'tm': 0},
'key2': {'tm': 0},
'key3': {'tm': 0},
'key4': {'tm': 0},
}
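# Conventional entry point (added for convenience; assumes the standard
# unittest runner) so the tests above can be executed directly.
if __name__ == '__main__':
    unittest.main()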
|
|
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for Oppia feedback reports from both Android and web."""
from __future__ import annotations
import datetime
import enum
from core import feconf
from core import utils
from core.platform import models
from typing import Any, Dict, List, Optional, Sequence, TypeVar
SELF_REPORT_MODEL = TypeVar( # pylint: disable=invalid-name
'SELF_REPORT_MODEL', bound='AppFeedbackReportModel')
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import base_models
from mypy_imports import datastore_services
(base_models,) = models.Registry.import_models([models.NAMES.base_model])
datastore_services = models.Registry.import_datastore_services()
PLATFORM_CHOICE_ANDROID = 'android'
PLATFORM_CHOICE_WEB = 'web'
PLATFORM_CHOICES = [PLATFORM_CHOICE_ANDROID, PLATFORM_CHOICE_WEB]
GITHUB_REPO_CHOICES = PLATFORM_CHOICES
# TODO(#14419): Change the naming style of the Enum class from
# SCREAMING_SNAKE_CASE to PascalCase, and its values to UPPER_CASE, so that
# naming is consistent throughout the codebase per the coding style guide.
# https://github.com/oppia/oppia/wiki/Coding-style-guide
# The model field names that can be filtered / sorted for when maintainers
# triage feedback reports.
class FILTER_FIELD_NAMES(enum.Enum): # pylint: disable=invalid-name
"""Enum for the model field names that can be filtered"""
platform = 'platform' # pylint: disable=invalid-name
report_type = 'report_type' # pylint: disable=invalid-name
entry_point = 'entry_point' # pylint: disable=invalid-name
submitted_on = 'submitted_on' # pylint: disable=invalid-name
android_device_model = 'android_device_model' # pylint: disable=invalid-name
android_sdk_version = 'android_sdk_version' # pylint: disable=invalid-name
text_language_code = 'text_language_code' # pylint: disable=invalid-name
audio_language_code = 'audio_language_code' # pylint: disable=invalid-name
platform_version = 'platform_version' # pylint: disable=invalid-name
android_device_country_locale_code = 'android_device_country_locale_code' # pylint: disable=invalid-name
# An ID used for stats model entities tracking all unticketed reports.
UNTICKETED_ANDROID_REPORTS_STATS_TICKET_ID = (
'unticketed_android_reports_stats_ticket_id')
class AppFeedbackReportModel(base_models.BaseModel):
"""Model for storing feedback reports sent from learners.
Instances of this model contain information about learner's device and Oppia
app settings, as well as information provided by the user in the feedback
report.
    The id of each model instance is determined by concatenating the platform,
    the timestamp of the report's submission date (in milliseconds since
    epoch, in UTC), and a hash of a string representation of a random int.
"""
# We use the model id as a key in the Takeout dict.
ID_IS_USED_AS_TAKEOUT_KEY = True
# The platform (web or Android) that the report is sent from and that the
# feedback corresponds to.
platform = datastore_services.StringProperty(
required=True, indexed=True,
choices=PLATFORM_CHOICES)
# The ID of the user that scrubbed this report, if it has been scrubbed.
scrubbed_by = datastore_services.StringProperty(
required=False, indexed=True)
# Unique ID for the ticket this report is assigned to (see
# AppFeedbackReportTicketModel for how this is constructed). This defaults
# to None since initially, new reports received will not be assigned to a
# ticket.
ticket_id = datastore_services.StringProperty(required=False, indexed=True)
# The local datetime of when the report was submitted by the user on their
# device. This may be much earlier than the model entity's creation date if
# the report was locally cached for a long time on an Android device.
submitted_on = datastore_services.DateTimeProperty(
required=True, indexed=True)
    # The number of hours offset from UTC of the user's local timezone.
local_timezone_offset_hrs = datastore_services.IntegerProperty(
required=False, indexed=True)
# The type of feedback for this report; this can be an arbitrary string
# since future iterations of the report structure may introduce new types
# and we cannot rely on the backend updates to fully sync with the frontend
# report updates.
report_type = datastore_services.StringProperty(required=True, indexed=True)
# The category that this feedback is for. Possible categories include:
# suggestion_feature, suggestion_language, suggestion_other,
# issue_lesson_question, issue_general_language, issue_audio_language,
# issue_text_language, issue_topics, issue_profile, issue_other, crash.
category = datastore_services.StringProperty(required=True, indexed=True)
# The version of the app; on Android this is the package version name (e.g.
# 0.1-alpha-abcdef1234) and on web this is the release version (e.g. 3.0.8).
platform_version = datastore_services.StringProperty(
required=True, indexed=True)
# The entry point location that the user is accessing the feedback report
# from on both web & Android devices. Possible entry points include:
# navigation_drawer, lesson_player, revision_card, or crash.
entry_point = datastore_services.StringProperty(required=True, indexed=True)
# Additional topic / story / exploration IDs that may be collected depending
# on the entry_point used to send the report; a lesson player entry point
# will have topic_id, story_id, and exploration_id, while revision cards
# will have topic_id and subtopic_id.
entry_point_topic_id = datastore_services.StringProperty(
required=False, indexed=True)
entry_point_story_id = datastore_services.StringProperty(
required=False, indexed=True)
entry_point_exploration_id = datastore_services.StringProperty(
required=False, indexed=True)
entry_point_subtopic_id = datastore_services.IntegerProperty(
required=False, indexed=True)
# The text language on Oppia set by the user in its ISO-639 language code;
# this is set by the user in Oppia's app preferences on all platforms.
text_language_code = datastore_services.StringProperty(
required=True, indexed=True)
# The audio language ISO-639 code on Oppia set by the user; this is set in
# Oppia's app preferences on all platforms.
audio_language_code = datastore_services.StringProperty(
required=True, indexed=True)
# The user's country locale represented as a ISO-3166 code; the locale is
# determined by the user's Android device settings.
android_device_country_locale_code = datastore_services.StringProperty(
required=False, indexed=True)
# The Android device model used to submit the report.
android_device_model = datastore_services.StringProperty(
required=False, indexed=True)
# The Android SDK version on the user's device.
android_sdk_version = datastore_services.IntegerProperty(
required=False, indexed=True)
# The feedback collected for Android reports; None if the platform is 'web'.
android_report_info = datastore_services.JsonProperty(
required=False, indexed=False)
# The schema version for the feedback report info; None if the platform is
# 'web'.
android_report_info_schema_version = datastore_services.IntegerProperty(
required=False, indexed=True)
# The feedback collected for Web reports; None if the platform is 'android'.
web_report_info = datastore_services.JsonProperty(
required=False, indexed=False)
# The schema version for the feedback report info; None if the platform is
# 'android'.
web_report_info_schema_version = datastore_services.IntegerProperty(
required=False, indexed=True)
# TODO(#13523): Change 'android_report_info' and 'web_report_info' to domain
# objects/TypedDict to remove Any from type-annotation below.
@classmethod
def create(
cls,
entity_id: str,
platform: str,
submitted_on: datetime.datetime,
local_timezone_offset_hrs: int,
report_type: str,
category: str,
platform_version: str,
android_device_country_locale_code: Optional[str],
android_sdk_version: Optional[int],
android_device_model: Optional[str],
entry_point: str,
entry_point_topic_id: Optional[str],
entry_point_story_id: Optional[str],
entry_point_exploration_id: Optional[str],
        entry_point_subtopic_id: Optional[int],
text_language_code: str,
audio_language_code: str,
android_report_info: Optional[Dict[str, Any]],
web_report_info: Optional[Dict[str, Any]]
) -> str:
"""Creates a new AppFeedbackReportModel instance and returns its ID.
Args:
entity_id: str. The ID used for this entity.
platform: str. The platform the report is submitted on.
submitted_on: datetime.datetime. The date and time the report was
submitted, in the user's local time zone.
local_timezone_offset_hrs: int. The hours offset from UTC of the
user's local time zone.
report_type: str. The type of report.
category: str. The category the report is providing feedback on.
platform_version: str. The version of Oppia that the report was
submitted on.
android_device_country_locale_code: str|None. The ISO-3166 code for
the user's country locale or None if it's a web report.
android_sdk_version: int|None. The SDK version running when on the
device or None if it's a web report.
android_device_model: str|None. The device model of the Android
device, or None if it's a web report.
entry_point: str. The entry point used to start the report.
entry_point_topic_id: str|None. The current topic ID depending on
the type of entry point used.
entry_point_story_id: str|None. The current story ID depending on
the type of entry point used.
entry_point_exploration_id: str|None. The current exploration ID
depending on the type of entry point used.
entry_point_subtopic_id: int|None. The current subtopic ID depending
on the type of entry point used.
text_language_code: str. The ISO-639 language code for the text
language set by the user on the Oppia app.
audio_language_code: str. The language code for the audio language
set by the user on the Oppia app, as defined by Oppia (not
necessarily an ISO-639 code).
android_report_info: dict|None. The information collected as part
of the Android-specific feedback report.
web_report_info: dict|None. The information collected as part of the
web-specific feedback report.
Returns:
            str. The ID of the newly created AppFeedbackReportModel
            instance.
"""
android_schema_version = None
web_schema_version = None
if platform == PLATFORM_CHOICE_ANDROID:
android_schema_version = (
feconf.CURRENT_ANDROID_REPORT_SCHEMA_VERSION)
else:
web_schema_version = (
feconf.CURRENT_WEB_REPORT_SCHEMA_VERSION)
report_entity = cls(
id=entity_id, platform=platform, submitted_on=submitted_on,
local_timezone_offset_hrs=local_timezone_offset_hrs,
report_type=report_type, category=category,
platform_version=platform_version,
android_device_country_locale_code=(
android_device_country_locale_code),
android_sdk_version=android_sdk_version,
android_device_model=android_device_model, entry_point=entry_point,
entry_point_topic_id=entry_point_topic_id,
entry_point_exploration_id=entry_point_exploration_id,
entry_point_story_id=entry_point_story_id,
entry_point_subtopic_id=entry_point_subtopic_id,
text_language_code=text_language_code,
audio_language_code=audio_language_code,
android_report_info=android_report_info,
android_report_info_schema_version=android_schema_version,
web_report_info=web_report_info,
web_report_info_schema_version=web_schema_version)
report_entity.update_timestamps()
report_entity.put()
return entity_id
@classmethod
def generate_id(
cls,
platform: str,
submitted_on_datetime: datetime.datetime
) -> str:
"""Generates key for the instance of AppFeedbackReportModel class in the
required format with the arguments provided.
Args:
            platform: str. The platform the user is submitting the report from.
submitted_on_datetime: datetime.datetime. The datetime that the
report was submitted on in UTC.
Returns:
str. The generated ID for this entity using platform,
            submitted_on_msec, and a random string, of the form
'[platform].[submitted_on_msec].[random hash]'.
"""
submitted_datetime_in_msec = utils.get_time_in_millisecs(
submitted_on_datetime)
for _ in range(base_models.MAX_RETRIES):
random_hash = utils.convert_to_hash(
str(utils.get_random_int(base_models.RAND_RANGE)),
base_models.ID_LENGTH)
new_id = '%s.%s.%s' % (
platform, int(submitted_datetime_in_msec), random_hash)
if not cls.get_by_id(new_id):
return new_id
raise Exception(
'The id generator for AppFeedbackReportModel is producing too '
'many collisions.')
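    # Illustrative example: for platform 'android' and a submission time of
    # 1616173836.123 sec UTC, the generated ID has the shape
    # 'android.1616173836123.<random hash>'.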
@classmethod
def get_all_unscrubbed_expiring_report_models(
cls
) -> Sequence[AppFeedbackReportModel]:
"""Fetches the reports that are past their 90-days in storage and must
be scrubbed.
Returns:
list(AppFeedbackReportModel). A list of AppFeedbackReportModel
entities that need to be scrubbed.
"""
datetime_now = datetime.datetime.utcnow()
datetime_before_which_to_scrub = datetime_now - (
feconf.APP_FEEDBACK_REPORT_MAXIMUM_LIFESPAN +
datetime.timedelta(days=1))
# The below return checks for '== None' rather than 'is None' since
# the latter throws "Cannot filter a non-Node argument; received False".
report_models: Sequence[AppFeedbackReportModel] = cls.query(
cls.created_on < datetime_before_which_to_scrub,
cls.scrubbed_by == None # pylint: disable=singleton-comparison
).fetch()
return report_models
@classmethod
def get_filter_options_for_field(
cls, filter_field: FILTER_FIELD_NAMES
) -> List[str]:
"""Fetches values that can be used to filter reports by.
Args:
            filter_field: FILTER_FIELD_NAMES. The enum type of the field we want
to fetch all possible values for.
Returns:
list(str). The possible values that the field name can have.
"""
query = cls.query(projection=[filter_field.name], distinct=True)
filter_values = []
if filter_field == FILTER_FIELD_NAMES.report_type:
filter_values = [model.report_type for model in query]
elif filter_field == FILTER_FIELD_NAMES.platform:
filter_values = [model.platform for model in query]
elif filter_field == FILTER_FIELD_NAMES.entry_point:
filter_values = [model.entry_point for model in query]
elif filter_field == FILTER_FIELD_NAMES.submitted_on:
filter_values = [model.submitted_on.date() for model in query]
elif filter_field == FILTER_FIELD_NAMES.android_device_model:
filter_values = [model.android_device_model for model in query]
elif filter_field == FILTER_FIELD_NAMES.android_sdk_version:
filter_values = [model.android_sdk_version for model in query]
elif filter_field == FILTER_FIELD_NAMES.text_language_code:
filter_values = [model.text_language_code for model in query]
elif filter_field == FILTER_FIELD_NAMES.audio_language_code:
filter_values = [model.audio_language_code for model in query]
elif filter_field == FILTER_FIELD_NAMES.platform_version:
filter_values = [model.platform_version for model in query]
elif filter_field == (
FILTER_FIELD_NAMES.android_device_country_locale_code):
filter_values = [
model.android_device_country_locale_code for model in query]
else:
raise utils.InvalidInputException(
'The field %s is not a valid field to filter reports on' % (
filter_field.name))
return filter_values
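    # Illustrative example:
    #     AppFeedbackReportModel.get_filter_options_for_field(
    #         FILTER_FIELD_NAMES.platform)
    # returns the distinct platform values seen in stored reports,
    # e.g. ['android', 'web'].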
@staticmethod
def get_deletion_policy() -> base_models.DELETION_POLICY:
"""Model stores the user ID of who has scrubbed this report for auditing
purposes but otherwise does not contain data directly corresponding to
the user themselves.
"""
return base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE
@classmethod
def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
"""Model contains data referencing user and will be exported."""
return dict(super(cls, cls).get_export_policy(), **{
'platform': base_models.EXPORT_POLICY.EXPORTED,
'scrubbed_by': base_models.EXPORT_POLICY.EXPORTED,
'ticket_id': base_models.EXPORT_POLICY.EXPORTED,
'submitted_on': base_models.EXPORT_POLICY.EXPORTED,
'local_timezone_offset_hrs': base_models.EXPORT_POLICY.EXPORTED,
'report_type': base_models.EXPORT_POLICY.EXPORTED,
'category': base_models.EXPORT_POLICY.EXPORTED,
'platform_version': base_models.EXPORT_POLICY.EXPORTED,
'android_device_country_locale_code': (
base_models.EXPORT_POLICY.NOT_APPLICABLE),
'android_device_model': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'android_sdk_version': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'entry_point': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'entry_point_topic_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'entry_point_story_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'entry_point_exploration_id': (
base_models.EXPORT_POLICY.NOT_APPLICABLE),
'entry_point_subtopic_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'text_language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'audio_language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'android_report_info': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'android_report_info_schema_version':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'web_report_info': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'web_report_info_schema_version':
base_models.EXPORT_POLICY.NOT_APPLICABLE
})
@classmethod
def export_data(cls, user_id: str) -> Dict[str, Dict[str, str]]:
"""Exports the data from AppFeedbackReportModel into dict format for
Takeout.
Args:
user_id: str. The ID of the user whose data should be exported;
this would be the ID of the user who has scrubbed the report.
Returns:
dict. Dictionary of the data from AppFeedbackReportModel.
"""
user_data = {}
report_models: Sequence[AppFeedbackReportModel] = (
cls.get_all().filter(cls.scrubbed_by == user_id).fetch())
for report_model in report_models:
submitted_on_msec = utils.get_time_in_millisecs(
report_model.submitted_on)
user_data[report_model.id] = {
'scrubbed_by': report_model.scrubbed_by,
'platform': report_model.platform,
'ticket_id': report_model.ticket_id,
'submitted_on': utils.get_human_readable_time_string(
submitted_on_msec),
'local_timezone_offset_hrs': (
report_model.local_timezone_offset_hrs),
'report_type': report_model.report_type,
'category': report_model.category,
'platform_version': report_model.platform_version
}
return user_data
@staticmethod
def get_model_association_to_user(
) -> base_models.MODEL_ASSOCIATION_TO_USER:
"""Model is exported as multiple instances per user since there
are multiple reports relevant to a user.
"""
return base_models.MODEL_ASSOCIATION_TO_USER.MULTIPLE_INSTANCES_PER_USER
@staticmethod
def get_lowest_supported_role() -> str:
"""The lowest supported role for feedback reports will be moderator."""
return feconf.ROLE_ID_MODERATOR
@classmethod
def has_reference_to_user_id(cls, user_id: str) -> bool:
"""Check whether AppFeedbackReportModel exists for user.
Args:
user_id: str. The ID of the user whose data should be checked.
Returns:
bool. Whether a model is associated with the user.
"""
return cls.query(
cls.scrubbed_by == user_id).get(keys_only=True) is not None
class AppFeedbackReportTicketModel(base_models.BaseModel):
"""Model for storing tickets created to triage feedback reports.
Instances of this model contain information about ticket and associated
reports.
The id of each model instance is created by combining the entity's
ticket_name hash, creation timestamp, and a random 16-character string.
"""
# A name for the ticket given by the maintainer, limited to 100 characters.
ticket_name = datastore_services.StringProperty(required=True, indexed=True)
# The platform that the reports in this ticket pertain to.
platform = datastore_services.StringProperty(
required=True, indexed=True,
choices=PLATFORM_CHOICES)
# The Github repository that has the associated issue for this ticket. The
# possible values correspond to GITHUB_REPO_CHOICES. If None then the
# ticket has not yet been assigned to a Github issue.
github_issue_repo_name = datastore_services.StringProperty(
required=False, indexed=True,
choices=GITHUB_REPO_CHOICES)
# The Github issue number that applies to this ticket.
github_issue_number = datastore_services.IntegerProperty(
required=False, indexed=True)
# Whether this ticket has been archived.
archived = datastore_services.BooleanProperty(required=True, indexed=True)
# The datetime in UTC that the newest report in this ticket was created on,
# to help with sorting tickets. If all reports assigned to this ticket have
# been reassigned to a different ticket then this timestamp is None.
newest_report_timestamp = datastore_services.DateTimeProperty(
required=False, indexed=True)
# A list of report IDs associated with this ticket.
report_ids = datastore_services.StringProperty(indexed=True, repeated=True)
@classmethod
def create(
cls,
entity_id: str,
ticket_name: str,
platform: str,
github_issue_repo_name: Optional[str],
github_issue_number: Optional[int],
newest_report_timestamp: datetime.datetime,
report_ids: List[str]
) -> str:
"""Creates a new AppFeedbackReportTicketModel instance and returns its
ID.
Args:
entity_id: str. The ID used for this entity.
ticket_name: str. The name assigned to the ticket by the moderator.
platform: str. The platform that this ticket fixes an issue on,
corresponding to one of PLATFORM_CHOICES.
            github_issue_repo_name: str|None. The name of the Github repo with
                the associated Github issue for this ticket, if any.
github_issue_number: int|None. The Github issue number associated
with the ticket, if it has one.
newest_report_timestamp: datetime.datetime. The date and time of the
newest report that is a part of this ticket, by submission
datetime.
report_ids: list(str). The report_ids that are a part of this
ticket.
Returns:
            str. The ID of the newly created AppFeedbackReportTicketModel
            instance.
"""
ticket_entity = cls(
id=entity_id, ticket_name=ticket_name, platform=platform,
github_issue_repo_name=github_issue_repo_name,
github_issue_number=github_issue_number, archived=False,
newest_report_timestamp=newest_report_timestamp,
report_ids=report_ids)
ticket_entity.update_timestamps()
ticket_entity.put()
return entity_id
@classmethod
def generate_id(cls, ticket_name: str) -> str:
"""Generates key for the instance of AppFeedbackReportTicketModel
class in the required format with the arguments provided.
Args:
ticket_name: str. The name assigned to the ticket on creation.
Returns:
str. The generated ID for this entity using the current datetime in
milliseconds (as the entity's creation timestamp), a SHA1 hash of
the ticket_name, and a random string, of the form
            '[creation_datetime_msec].[hash(ticket_name)].[random hash]'.
"""
current_datetime_in_msec = utils.get_time_in_millisecs(
datetime.datetime.utcnow())
for _ in range(base_models.MAX_RETRIES):
name_hash = utils.convert_to_hash(
ticket_name, base_models.ID_LENGTH)
random_hash = utils.convert_to_hash(
str(utils.get_random_int(base_models.RAND_RANGE)),
base_models.ID_LENGTH)
new_id = '%s.%s.%s' % (
int(current_datetime_in_msec), name_hash, random_hash)
if not cls.get_by_id(new_id):
return new_id
raise Exception(
            'The id generator for AppFeedbackReportTicketModel is producing '
            'too many collisions.')
@staticmethod
def get_deletion_policy() -> base_models.DELETION_POLICY:
"""Model doesn't contain any information directly corresponding to a
user.
"""
return base_models.DELETION_POLICY.NOT_APPLICABLE
@classmethod
def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
"""Model doesn't contain any data directly corresponding to a user."""
return dict(super(cls, cls).get_export_policy(), **{
'ticket_name': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'platform': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'github_issue_repo_name': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'github_issue_number': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'archived': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'newest_report_timestamp': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'report_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE
})
@staticmethod
def get_model_association_to_user(
) -> base_models.MODEL_ASSOCIATION_TO_USER:
"""Model doesn't contain any data directly corresponding to a user."""
return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER
@staticmethod
def get_lowest_supported_role() -> str:
"""The lowest supported role for feedback report tickets will be
moderator.
"""
return feconf.ROLE_ID_MODERATOR
class AppFeedbackReportStatsModel(base_models.BaseModel):
"""Model for storing aggregate report stats on the tickets created.
Instances of this model contain statistics for different report types based
    on the ticket they are assigned to and the date the aggregation covers.
The id of each model instance is calculated by concatenating the platform,
ticket ID, and the date (in isoformat) this entity is tracking stats for.
"""
# The unique ticket ID that this entity is aggregating for.
ticket_id = datastore_services.StringProperty(required=True, indexed=True)
# The platform that these statistics are for.
platform = datastore_services.StringProperty(
required=True, indexed=True,
choices=PLATFORM_CHOICES)
# The date in UTC that this entity is tracking on -- this should correspond
# to the creation date of the reports aggregated in this model.
stats_tracking_date = datastore_services.DateProperty(
required=True, indexed=True)
# The total number of reports submitted on this date.
total_reports_submitted = datastore_services.IntegerProperty(
required=True, indexed=True)
# JSON struct that maps the daily statistics for this ticket on the date
# specified in stats_tracking_date. The JSON will map each param_name
# (defined by a domain const ALLOWED_STATS_PARAM_NAMES) to a dictionary of
# all the possible param_values for that parameter and the number of reports
# submitted on that day that satisfy that param value, similar to e.g.:
#
    # { param_name1 : { param_value1 : report_count1,
    #                   param_value2 : report_count2,
    #                   param_value3 : report_count3 },
    #   param_name2 : { param_value1 : report_count1,
    #                   param_value2 : report_count2,
    #                   param_value3 : report_count3 } }.
daily_param_stats = datastore_services.JsonProperty(
required=True, indexed=False)
# The schema version for parameter statistics in this entity.
daily_param_stats_schema_version = datastore_services.IntegerProperty(
required=True, indexed=True)
@classmethod
def create(
cls,
entity_id: str,
platform: str,
ticket_id: str,
stats_tracking_date: datetime.date,
total_reports_submitted: int,
daily_param_stats: Dict[str, Dict[str, int]]
) -> str:
"""Creates a new AppFeedbackReportStatsModel instance and returns its
ID.
Args:
entity_id: str. The ID used for this entity.
ticket_id: str. The ID for the ticket these stats aggregate on.
platform: str. The platform the stats are aggregating for.
stats_tracking_date: datetime.date. The date in UTC that this entity
is tracking stats for.
total_reports_submitted: int. The total number of reports submitted
on this date.
            daily_param_stats: dict. The daily stats for this entity, keyed
                by parameter name, with each value mapping a parameter value
                to the number of reports that satisfy that parameter value.
Returns:
            str. The ID of the newly created AppFeedbackReportStatsModel
            instance.
"""
stats_entity = cls(
id=entity_id, ticket_id=ticket_id, platform=platform,
stats_tracking_date=stats_tracking_date,
total_reports_submitted=total_reports_submitted,
daily_param_stats=daily_param_stats,
daily_param_stats_schema_version=(
feconf.CURRENT_FEEDBACK_REPORT_STATS_SCHEMA_VERSION))
stats_entity.update_timestamps()
stats_entity.put()
return entity_id
@classmethod
def calculate_id(
cls,
platform: str,
ticket_id: Optional[str],
stats_tracking_date: datetime.date
) -> str:
"""Generates key for the instance of AppFeedbackReportStatsModel
class in the required format with the arguments provided.
Args:
platform: str. The platform this entity is aggregating on.
            ticket_id: str|None. The ID for the ticket these stats aggregate
                on, or None for unticketed reports.
stats_tracking_date: date. The date these stats are tracking on.
Returns:
str. The ID for this entity of the form
'[platform]:[ticket_id]:[stats_date in YYYY-MM-DD]'.
"""
if ticket_id is None:
ticket_id = UNTICKETED_ANDROID_REPORTS_STATS_TICKET_ID
return '%s:%s:%s' % (
platform, ticket_id, stats_tracking_date.isoformat())
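    # Illustrative example: calculate_id('android', None,
    # datetime.date(2021, 3, 19)) returns
    # 'android:unticketed_android_reports_stats_ticket_id:2021-03-19'.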
@classmethod
def get_stats_for_ticket(
cls, ticket_id: str
) -> Sequence[AppFeedbackReportStatsModel]:
"""Fetches the stats for a single ticket.
Args:
ticket_id: str. The ID of the ticket to get stats for.
Returns:
            list(AppFeedbackReportStatsModel). A list of the stats model
            entities that record stats on the ticket.
"""
return cls.query(cls.ticket_id == ticket_id).fetch()
@staticmethod
def get_deletion_policy() -> base_models.DELETION_POLICY:
"""Model doesn't contain any information directly corresponding to a
user.
"""
return base_models.DELETION_POLICY.NOT_APPLICABLE
@classmethod
def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:
"""Model doesn't contain any data directly corresponding to a user."""
return dict(super(cls, cls).get_export_policy(), **{
'ticket_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'platform': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'stats_tracking_date': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'total_reports_submitted': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'daily_param_stats_schema_version':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'daily_param_stats': base_models.EXPORT_POLICY.NOT_APPLICABLE
})
@staticmethod
def get_model_association_to_user(
) -> base_models.MODEL_ASSOCIATION_TO_USER:
"""Model doesn't contain any data directly corresponding to a user."""
return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER
@staticmethod
def get_lowest_supported_role() -> str:
"""The lowest supported role for feedback reports stats will be
moderator.
"""
return feconf.ROLE_ID_MODERATOR
|
|
'''
Dts_Mesh.py
Copyright (c) 2003 - 2007 James Urquhart(j_urquhart@btinternet.com)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import bpy
from .Dts_Stream import *
from .Torque_Util import *
from . import Dts_Stripper
import math
import copy
#############################
# Torque Game Engine
# -------------------------------
# Dts Mesh Class(es) for Python
#############################
'''
- Reads and Writes DTS Meshes
'''
# The Primitive Class
class Primitive:
# types
Triangles = 0x00000000
Strip = 0x40000000
Fan = 0x80000000 # may not be supported in the engine?
TypeMask = 0xC0000000
Indexed = 0x20000000 # All primitives must be indexed
NoMaterial = 0x10000000 # Set if face has no material assigned, else things may screw up
MaterialMask = 0x0FFFFFFF
def __init__(self, fe=0, ne=0, ty=0):
self.firstElement = fe # First index used by primitive
self.numElements = ne # Number of elements in primitive (> 3 usually is a STRIP / FAN)
self.matindex = ty # |'d Value of types + index of material used
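    # Illustrative example: an indexed triangle strip using material 2 would
    # carry matindex = Strip | Indexed | 2, i.e. 0x60000002.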
# The Cluster Class for clustered meshes
class Cluster:
def __init__(self, sp=0, ep=0, nrm=None, kn=0.0, fc=0, bc=0):
self.startPrimitive = sp # Start primitive index
self.endPrimitive = ep # End primitive index
self.normal = nrm # Normal of cluster
self.k = kn # The "D" in the plane equation
self.frontCluster = fc # Front cluster index
self.backCluster = bc # Back cluster index
# The Main Mesh Class
class DtsMesh:
# Class Constants
smUseEncodedNormals = False # Read in encoded normals for standard meshes. We just ignore this
smUseTriangles = False
smUseOneStrip = False
smMaxStripSize = 7
# Mesh types
T_Standard = 0 # Standard meshes can be moved by bones, but not be deformed by them
T_Skin = 1 # Skin meshes can be deformed by bones
T_Decal = 2 # Decal meshes are a bit obsolete. They were used to plonk bullet holes, etc on shapes
T_Sorted = 3 # Sorted meshes are extended Standard meshes which allow transparent surfaces to be drawn correctly
T_Null = 4 # No mesh. Useful to write dummy meshes in objects.
# Mesh Flags
Billboard = 0x80000000 # Mesh always faces player
HasDetail = 0x40000000 # Mesh has other versions in other detail levels
BillboardZ = 0x20000000 # Mesh always faces player on the Z axis (up)
EncodedNormals = 0x10000000 # Mesh has encoded normals
def __init__(self, t=4):
self.radius = float(0.0) # Radius of mesh
self.numFrames = 1 # Number of frames in mesh
self.matFrames = 1 # Number of IFL material frames in mesh
self.vertsPerFrame = 0 # Vertexes per frame (vertex animation)
self.parent = -1 # Parent mesh (used to share data)
self.flags = 0 # Mesh Flags
self.mtype = t # Type of mesh
self.alwaysWriteDepth = False # Always write depth?
self.verts = [] # Vertexes
self.tverts = [] # Texture verts
self.normals = [] # Normals
self.enormals = array('B') # Encoded normals
self.primitives = [] # Primitives (makes faces from indices)
self.indices = array('H') # Indices for primitives
        self.mindices = array('H')  # Merge indices
self.vindex = array('i') # Vertex indexes for bone influences
self.bindex = array('i') # Bone indexes for influences
self.vweight = array('f') # Vertex weights
self.nodeIndex = array('i') # Node indexes for node transforms (skin mesh)
self.nodeTransforms = [] # Node transforms
self.texgenS = [] # TexGen (U) (decals)
self.texgenT = [] # TexGen (V) (decals)
self.materialIndex = 0 # Material index (decals)
self.clusters = [] # Clusters (bsp for sorted meshes)
self.startCluster = array('i') # Starting cluster
self.startPrimitive = [] # Start primitive in cluster
self.firstVerts = array('i') # First vert index of cluster
self.numVerts = array('i') # Number of verts in cluster
self.firstTVerts = array('i') # First texture verts in cluster
self.bounds = Box() # Bounds of shape
self.center = Vector() # Center of shape
# Morphs
self.morphIndex = array('i')
self.mindex = array('i')
self.mvindex = array('i')
self.mverts = []
def __del__(self):
del self.verts
del self.tverts
del self.normals
del self.enormals
del self.primitives
del self.indices
del self.mindices
del self.vindex
del self.bindex
del self.vweight
del self.nodeIndex
del self.nodeTransforms
del self.texgenS
del self.texgenT
del self.clusters
del self.startCluster
del self.startPrimitive
del self.firstVerts
del self.numVerts
del self.firstTVerts
del self.bounds
del self.center
del self.morphIndex
del self.mindex
del self.mvindex
del self.mverts
def getType(self):
return self.mtype
def setType(self, t):
self.mtype = t
def setFlag(self, f):
self.flags |= f
def getPolyCount(self):
count = 0
for p in range(len(self.primitives)):
if (self.primitives[p].matindex & self.primitives[p].Strip):
count += self.primitives[p].numElements - 2
else:
                count += self.primitives[p].numElements // 3  # integer division
return (count)
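    # Illustrative example: a strip primitive with numElements=5 contributes
    # 5 - 2 = 3 triangles, while a Triangles primitive with numElements=6
    # contributes 6 // 3 = 2.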
def getRadius(self):
return (self.radius)
def getRadiusFrom(self, trans, rot, center):
radius = float(0.0)
for vert in self.verts:
tv = rot.apply(vert) + trans
distance = (tv - center).length()
if distance > radius:
radius = distance
return radius
def getTubeRadiusFrom(self, trans, rot, center):
radius = float(0.0)
for vert in self.verts:
tv = rot.apply(vert) + trans
distance = (tv - center)
distance2 = Vector2(distance[0], distance[1]).length()
if distance2 > radius:
radius = distance2
return radius
def getCenter(self):
return (self.center)
def getBounds(self, trans, rot):
# Compute the bounding box using the given transform
bounds2 = Box()
bounds2.max = Vector(-10e30, -10e30, -10e30)
bounds2.min = Vector(10e30, 10e30, 10e30)
for vert in self.verts:
tv = rot.apply(vert) + trans
if tv.members[0] < bounds2.min.members[0]:
bounds2.min.members[0] = tv.members[0]
if tv.members[1] < bounds2.min.members[1]:
bounds2.min.members[1] = tv.members[1]
if tv.members[2] < bounds2.min.members[2]:
bounds2.min.members[2] = tv.members[2]
if tv.members[0] > bounds2.max.members[0]:
bounds2.max.members[0] = tv.members[0]
if tv.members[1] > bounds2.max.members[1]:
bounds2.max.members[1] = tv.members[1]
if tv.members[2] > bounds2.max.members[2]:
bounds2.max.members[2] = tv.members[2]
return (bounds2)
def setMaterial(self, n):
        for p in self.primitives:
            p.matindex = (p.matindex & ~Primitive.MaterialMask) | (n & Primitive.MaterialMask)
def getNodeIndexCount(self):
return (len(self.nodeIndex))
def getNodeIndex(self, node):
if (node >= 0) and (node < len(self.nodeIndex)):
return self.nodeIndex[node]
return None
def setNodeTransform(self, node, t, q):
# Build inverse transform, the mesh wants to be able to
# transform the vertices into node space.
t = q.inverse().apply(-t)
row = [t.x(), t.y(), t.z(), 1]
# point * translation * transform (+ a bit of weights) = position
# The toMatrix builds a transposed transform from what we
# want, so we need to pass the original quaternion to get
# the inverse.
self.nodeTransforms[node] = q.toMatrix()
self.nodeTransforms[node].setCol(3, row)
def translate(self, tra):
for v in range(0, len(self.verts)):
self.verts[v] += tra
self.calculateBounds()
self.calculateCenter()
self.calculateRadius()
def rotate(self, rot):
for v in range(0, len(self.verts)):
self.verts[v] = rot.apply(self.verts[v])
self.normals[v] = rot.apply(self.normals[v])
self.calculateBounds()
self.calculateCenter()
self.calculateRadius()
def setCenter(self, c):
self.center = c
def setBounds(self, b):
self.bounds = b
def setRadius(self, r):
self.radius = r
def setFrames(self, n):
self.numFrames = n
        self.vertsPerFrame = len(self.verts) // n  # integer division
def setParent(self, n):
self.parent = n
def calculateBounds(self):
self.bounds.max = Vector(-10e30, -10e30, -10e30)
self.bounds.min = Vector(10e30, 10e30, 10e30)
for vertex in self.verts:
if vertex.members[0] < self.bounds.min.members[0]:
self.bounds.min.members[0] = vertex.members[0]
if vertex.members[1] < self.bounds.min.members[1]:
self.bounds.min.members[1] = vertex.members[1]
if vertex.members[2] < self.bounds.min.members[2]:
self.bounds.min.members[2] = vertex.members[2]
if vertex.members[0] > self.bounds.max.members[0]:
self.bounds.max.members[0] = vertex.members[0]
if vertex.members[1] > self.bounds.max.members[1]:
self.bounds.max.members[1] = vertex.members[1]
if vertex.members[2] > self.bounds.max.members[2]:
self.bounds.max.members[2] = vertex.members[2]
def calculateCenter(self):
self.center[0] = ((self.bounds.min[0] - self.bounds.max[0]) / 2) + self.bounds.max[0]
self.center[1] = ((self.bounds.min[1] - self.bounds.max[1]) / 2) + self.bounds.max[1]
self.center[2] = ((self.bounds.min[2] - self.bounds.max[2]) / 2) + self.bounds.max[2]
def calculateRadius(self):
self.radius = float(0.0)
for vertex in self.verts:
tV = vertex - self.center
distance = math.sqrt(
(tV.members[0] * tV.members[0]) +
(tV.members[1] * tV.members[1]) +
(tV.members[2] * tV.members[2]))
if distance > self.radius:
self.radius = distance
def getVertexBone(self, node):
# Finds the bone index in the table, or adds it if it's
# not there. The vertex bone & nodeIndex list are here to
# track which bones are used by this mesh.
b = 0
while b < len(self.nodeIndex):
if self.nodeIndex[b] == node:
return b
b += 1
self.nodeIndex.append(node)
self.nodeTransforms.append(MatrixF().identity())
return b
    def encodeNormal(self, p):
        return 0  # Disabled; the normal-table lookup below is never reached
global normalTable
bestIndex = 0
x, y, z = p
bestDot = -10E30
for i in range(0, 256):
dot = x * normalTable[i][0] + y * normalTable[i][1] + z * normalTable[i][2]
if dot > bestDot:
bestIndex = i
bestDot = dot
return bestIndex
def read(self, dstream, shape):
# Header and Bounds
        # Only T_Null short-circuits here; unknown mesh types are rejected at
        # the end of this method.
        if self.mtype == self.T_Null:
            return None  # Null mesh, no data!
# Decal Meshes are an exception; They do not use regular mesh assemble!
if self.mtype != self.T_Decal:
dstream.readCheck()
self.numFrames = dstream.reads32() # S32
self.matFrames = dstream.reads32() # S32
self.parent = dstream.reads32() # S32
self.bounds = dstream.readBox() # Box
self.center = dstream.readPoint3F() # Vector
self.radius = dstream.readf32() # Float (32bit)
# If we have a parent, don't read some of this stuff in
# Vertexes
# (Should be 0 if skin mesh)
if self.parent < 0:
n = dstream.reads32()
for cnt in range(0, n):
self.verts.append(dstream.readPoint3F())
else:
dstream.reads32()
self.verts = shape.meshes[self.parent].verts
# Texture Coordinates
if self.parent < 0:
for cnt in range(0, dstream.reads32()):
self.tverts.append(dstream.readPoint2F())
else:
dstream.reads32()
self.tverts = shape.meshes[self.parent].tverts
# Normals
        # Read in normals and enormals regardless of whether we have them or not
if self.parent < 0:
for cnt in range(0, len(self.verts)):
self.normals.append(dstream.readPoint3F())
for cnt in range(0, len(self.verts)):
dstream.readu8() # dummy read of enormals
else:
self.normals = shape.meshes[self.parent].normals
self.enormals = shape.meshes[self.parent].enormals
# Primitives and other stuff
for cnt in range(0, dstream.reads32()):
self.primitives.append(dstream.readPrimitive())
for cnt in range(0, dstream.reads32()):
self.indices.append(dstream.readu16()) # U16
for cnt in range(0, dstream.reads32()):
self.mindices.append(dstream.readu16()) # U16
self.vertsPerFrame = dstream.reads32()
self.flags = dstream.readu32()
dstream.readCheck()
# Morph data
if dstream.DTSVersion > 24:
for cnt in range(0, dstream.reads32()):
self.morphIndex.append(dstream.reads32())
for cnt in range(0, dstream.reads32()):
self.mindex.append(dstream.reads32())
for cnt in range(0, dstream.reads32()):
self.mvindex.append(dstream.reads32())
for cnt in range(0, dstream.reads32()):
self.mverts.append(dstream.readPoint3F())
dstream.readCheck()
# Woohoo!! Done Reading Mesh Bit...
self.calculateBounds()
# End Standard Mesh Read
# Now for other mesh types
if self.mtype == self.T_Skin:
# Skin Mesh
# store off the true number of verts now...
numVerts = len(self.verts)
# If we have a parent, don't read some of this stuff in
if self.parent < 0:
# Read Initial Verts... (plonked into verts array really)
n = dstream.reads32()
# Meh, if we didn't get the correct number of
# verts before, set it now; otherwise we crash
# kinda hacky.
if n != numVerts: numVerts = n
for cnt in range(0, n):
x = dstream.readPoint3F()
self.verts.append(x)
else:
dstream.reads32()
# Following already done before!
# self.verts = shape.meshes[self.parent].verts
# Normals...
# (note : encoded normals not read)
if self.parent < 0:
                # Advance past the encoded normals; we don't use them
for n in range(0, numVerts):
dstream.read8() # Skip
# Read in normals
for n in range(0, numVerts):
self.normals.append(dstream.readPoint3F())
else:
self.normals, self.enormals = shape.meshes[self.parent].normals, shape.meshes[self.parent].enormals
if self.parent < 0:
n = dstream.reads32()
# Read Initial Transforms...
for cnt in range(0, n):
self.nodeTransforms.append(dstream.readMatrixF())
sz = dstream.reads32()
# Read Vertex Indexes...
for cnt in range(0, sz):
self.vindex.append(dstream.reads32())
# Read Bone Indexes...
for cnt in range(0, sz):
self.bindex.append(dstream.reads32())
# Read Vertex Weights...
for cnt in range(0, sz):
self.vweight.append(dstream.readf32())
n = dstream.reads32()
# Read Node Indexes...
for cnt in range(0, n):
self.nodeIndex.append(dstream.reads32())
else:
for i in range(0, 3):
dstream.reads32() # read in sizes
self.nodeTransforms = shape.meshes[self.parent].nodeTransforms
self.vindex = shape.meshes[self.parent].vindex
self.bindex = shape.meshes[self.parent].bindex
self.vweight = shape.meshes[self.parent].vweight
self.nodeIndex = shape.meshes[self.parent].nodeIndex
# And finally, checkpoint =)
dstream.readCheck()
elif self.mtype == self.T_Decal:
# Decal Mesh
# Read Primitives...
nprims = dstream.reads32()
for cnt in range(0, nprims):
self.primitives.append(dstream.readPrimitive())
# Read Indicies...
ninds = dstream.reads32()
for cnt in range(0, ninds):
self.indices.append(dstream.readu16()) # U16
nsps = dstream.reads32()
# Read Start Primitives...
for cnt in range(0, nsps):
self.startPrimitive.append(dstream.reads32()) # S32
# Read in TexGen's
for cnt in range(0, nsps):
self.texgenS.append(dstream.readPoint4F())
for cnt in range(0, nsps):
self.texgenT.append(dstream.readPoint4F())
# Material Index
self.materialIndex = dstream.reads32()
dstream.readCheck()
elif self.mtype == self.T_Sorted:
# Sorted Mesh
# Read Clusters (e.g helmet visor is cluster)
sz = dstream.reads32()
for cnt in range(0, sz):
self.clusters.append(dstream.readCluster())
# Read Start Cluster
sz = dstream.reads32()
for cnt in range(0, sz):
self.startCluster.append(dstream.reads32())
# Read first Verts
nfv = dstream.reads32()
            for cnt in range(0, nfv):
self.firstVerts.append(dstream.reads32())
# Read num Verts
sz = dstream.reads32()
for cnt in range(0, sz):
self.numVerts.append(dstream.reads32())
sz = dstream.reads32()
for cnt in range(0, sz):
self.firstTVerts.append(dstream.reads32())
self.alwaysWriteDepth = dstream.readu32()
dstream.readCheck()
else:
# Null or Standard or Unknown Mesh
if self.mtype != self.T_Standard:
Torque_Util.dump_writeErr("Error : Cannot read mesh type %d" % (self.mtype))
return True # We are ok
# Write!!
def write(self, dstream):
if self.mtype == self.T_Null:
return None
# Decal Meshes are an exception; They do not use regular mesh assemble!
if self.mtype == self.T_Decal:
# Write Primitives...
dstream.writes32(len(self.primitives))
for cnt in self.primitives:
dstream.writePrimitive(cnt)
# Write Indicies...
dstream.writes32(len(self.indices))
for cnt in self.indices:
dstream.writeu16(cnt) # U16
# Write Start Primitives...
dstream.writes32(len(self.startPrimitive))
for cnt in self.startPrimitive:
dstream.writes32(cnt) # S32
            # Write TexGen's
for cnt in self.texgenS:
dstream.writePoint4F(cnt)
for cnt in self.texgenT:
dstream.writePoint4F(cnt)
# Material Index
dstream.writes32(self.materialIndex)
dstream.storeCheck()
else:
# Note : we only need to write
# some things IF we don't have a parent
dstream.storeCheck()
dstream.writes32(int(self.numFrames)) # S32
dstream.writes32(int(self.matFrames)) # S32
dstream.writes32(int(self.parent)) # S32
dstream.writeBox(self.bounds) # Box
dstream.writePoint3F(self.center) # Vector
dstream.writef32(self.radius) # Float (32bit)
# Vertexes
            # Some things should definitely not be written if we are a skin mesh
# (e.g verts should only be written once in the SkinMesh iverts)
if self.mtype == self.T_Skin:
dstream.writes32(0) # 0 verts, but many iverts
else:
dstream.writes32(len(self.verts))
if self.parent < 0:
for v in self.verts:
dstream.writePoint3F(v)
# Texture Coordinates
dstream.writes32(len(self.tverts))
if self.parent < 0:
for v in self.tverts:
dstream.writePoint2F(v)
# Normals
# Write normals and enormals regardless of if we have them or not
# Forget it if we are a skin mesh. (since stored elsewhere)
if self.parent < 0:
if self.mtype != self.T_Skin:
for n in self.normals:
dstream.writePoint3F(n)
for n in self.normals: # enormals dummy write
dstream.writeu8(0)
# Primitives and other stuff
dstream.writes32(len(self.primitives))
for p in self.primitives:
dstream.writePrimitive(p)
dstream.writes32(len(self.indices))
for p in self.indices:
dstream.writeu16(p) # U16
dstream.writes32(len(self.mindices))
for p in self.mindices:
dstream.writeu16(p) # U16
dstream.writes32(self.vertsPerFrame)
dstream.writeu32(self.flags)
dstream.storeCheck()
if dstream.DTSVersion > 24:
# Morph data
dstream.writes32(len(self.morphIndex))
for p in self.morphIndex:
dstream.writes32(p)
dstream.writes32(len(self.mindex))
for p in self.mindex:
dstream.writes32(p)
dstream.writes32(len(self.mvindex))
for p in self.mvindex:
dstream.writes32(p)
dstream.writes32(len(self.mverts))
for vert in self.mverts:
                    dstream.writePoint3F(vert)
dstream.storeCheck()
# Now write Other mesh type data
if self.mtype == self.T_Skin:
dstream.writes32(len(self.verts))
if self.parent < 0:
for vert in self.verts:
dstream.writePoint3F(vert)
# Write normals and encoded normals
# NOTE: removed encoded normals write
if self.parent < 0:
for u in self.normals:
dstream.write8(0) # Skip enormals
for n in self.normals:
dstream.writePoint3F(n)
# Write Initial Transforms...
dstream.writes32(len(self.nodeTransforms))
if self.parent < 0:
for vert in self.nodeTransforms:
dstream.writeMatrixF(vert)
# Vertex Indexes...
dstream.writes32(len(self.vindex))
if self.parent < 0:
for vert in self.vindex:
dstream.writes32(vert)
# Bone Indexes...
for vert in self.bindex:
dstream.writes32(vert)
# Vertex Weights...
for vert in self.vweight:
dstream.writef32(vert)
# Node Indexes...
dstream.writes32(len(self.nodeIndex))
if self.parent < 0:
for vert in self.nodeIndex:
dstream.writes32(vert)
dstream.storeCheck()
elif self.mtype == self.T_Sorted:
# Clusters...
dstream.writes32(len(self.clusters))
for c in self.clusters:
dstream.writeCluster(c)
# Start Cluster...
dstream.writes32(len(self.startCluster))
for c in self.startCluster:
dstream.writes32(c)
# First Verts
dstream.writes32(len(self.firstVerts))
for c in self.firstVerts:
dstream.writes32(c)
# Num Verts
dstream.writes32(len(self.numVerts))
for c in self.numVerts:
dstream.writes32(c)
# First Tex Verts
dstream.writes32(len(self.firstTVerts))
for c in self.firstTVerts:
dstream.writes32(c)
dstream.writeu32(self.alwaysWriteDepth)
dstream.storeCheck()
def convertToTris(self, quads=False):
# Converts stuff to regular triangles primitives
# NOTE: Face order *seems* to be slightly off
newinds = array('H')
newprims = []
numStrips = 0
Torque_Util.dump_writeln("Converting Triangle Strip -> Triangles")
for p in self.primitives:
if p.matindex & p.Strip:
if quads and p.numElements == 4:
# Same as lone triangle, but extra vertex
newprims.append(Primitive(len(newinds), 4, p.matindex))
newinds.append(self.indices[p.firstElement])
newinds.append(self.indices[p.firstElement + 1])
newinds.append(self.indices[p.firstElement + 2])
newinds.append(self.indices[p.firstElement + 3])
elif p.numElements > 3:
numStrips += 1
if quads:
addinds, addprims = self.unwindQuadStrip(p, len(newinds))
else:
addinds, addprims = self.unwindStrip(p, len(newinds))
for inds in addinds: newinds.append(inds)
for prim in addprims: newprims.append(prim)
del addinds
del addprims
else: # Its a lone triangle
newprims.append(Primitive(len(newinds), 3, p.matindex))
newinds.append(self.indices[p.firstElement])
newinds.append(self.indices[p.firstElement + 1])
newinds.append(self.indices[p.firstElement + 2])
# else TODO: Support Fan, etc
Torque_Util.dump_writeln("Converted %d strips" % numStrips)
# Finally we have our new primitives
self.indices, self.primitives = newinds, newprims
def unwindQuadStrip(self, strip, offset):
# First, unwind to tris
newinds, newprims = self.unwindStrip(strip, offset)
# TODO
quadinds = newinds
quadprims = newprims
        return quadinds, quadprims
def unwindStrip(self, strip, offset):
        # Converts the given triangle strip into individual triangle primitives
newinds = array('H') # New Indices, adds on top of everything else
newprims = [] # New primitives
front = True
# 01234 -> 012 321 234
ind = strip.firstElement + 2
while ind < (strip.firstElement + strip.numElements):
newprims.append(Primitive(len(newinds) + offset, 3, strip.matindex))
if front:
newinds.append(self.indices[ind - 2])
newinds.append(self.indices[ind - 1])
newinds.append(self.indices[ind])
else:
newinds.append(self.indices[ind])
newinds.append(self.indices[ind - 1])
newinds.append(self.indices[ind - 2])
front = not front
ind += 1
return newinds, newprims
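    # Worked example (per the comment above): a strip over indices [0,1,2,3,4]
    # unwinds to triangles (0,1,2), (3,2,1), (2,3,4), flipping the winding on
    # every other triangle to keep face orientation consistent.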
'''
Triangle Strip Code
'''
def windStrip(self, max_stripsize):
Dts_Stripper.Stripper.maxStripSize = max_stripsize
stripper = Dts_Stripper.chooseStripper()
if not stripper:
Torque_Util.dump_writeln(" Stripping Mesh : Disabled (No Stripper Found)")
return
else:
Torque_Util.dump_writeln(" Stripping Mesh :")
stripper.verts = self.verts
        # Convert primitives in per-cluster batches if we are a sorted mesh, else do the whole mesh at once
if self.mtype == self.T_Sorted:
newPrimitives = []
newIndices = []
            for c in self.clusters:
                # Update the cluster's primitive offsets as we strip (there
                # will be fewer primitives afterwards), slicing the old range
                # before overwriting it.
                oldStart, oldEnd = c.startPrimitive, c.endPrimitive
                c.startPrimitive = len(newPrimitives)
                for p in self.primitives[oldStart:oldEnd]:
                    stripper.faces.append([self.indices[p.firstElement:p.firstElement + p.numElements], p.matindex])
                # Ready, Steady, Strip!
                stripper.strip()
                for strip in stripper.strips:
                    newPrimitives.append(Primitive(len(newIndices), len(strip[0]), strip[1]))
                    for ind in strip[0]:
                        newIndices.append(ind)
                c.endPrimitive = len(newPrimitives)
                stripper.clear()
self.indices = newIndices
self.primitives = newPrimitives
else:
# All we need to do is convert the whole set of primitives
for p in self.primitives:
if p.numElements > 3:
# we should be dealing with a triangle list
                    if (p.numElements % 3) != 0:
                        raise ValueError("Wrong number of verts in Triangles primitive!")
for i in range(p.firstElement, p.firstElement + p.numElements, 3):
stripper.faces.append([self.indices[i:i + 3], p.matindex])
else:
# we're dealing a list or strip containing only one triangle
stripper.faces.append([self.indices[p.firstElement:p.firstElement + p.numElements], p.matindex])
stripper.strip()
self.indices = []
self.primitives = []
for strip in stripper.strips:
strip[1] = (strip[1] & Primitive.MaterialMask) | (
Primitive.NoMaterial & strip[1]) | Primitive.Strip | Primitive.Indexed
# strip[1] = (strip[1] & Primitive.MaterialMask) | (Primitive.NoMaterial & strip[1]) | Primitive.Triangles | Primitive.Indexed
self.primitives.append(Primitive(len(self.indices), len(strip[0]), strip[1]))
# print "STRIP:",strip[0]
for ind in strip[0]:
self.indices.append(ind)
del stripper
    def passMatrix(self, matrix):
        # Applies a matrix to all the verts in the mesh; assign back by index
        # since rebinding the loop variable would not modify the list.
        for i in range(len(self.verts)):
            self.verts[i] = matrix.passPoint(self.verts[i])
# Duplicates mesh
def duplicate(self):
# Arrays and class objects are the major concern
d = DtsMesh()
d.mtype = self.mtype
d.numFrames = self.numFrames
d.matFrames = self.matFrames
d.parent = self.parent
for v in self.verts:
d.verts.append(Vector(v[0], v[1], v[2]))
for t in self.tverts:
d.tverts.append(Vector2(t[0], t[1]))
for n in self.normals:
d.normals.append(Vector(n[0], n[1], n[2]))
for e in self.enormals:
d.enormals.append(e)
for p in self.primitives:
d.primitives.append(Primitive(p.firstElement, p.numElements, p.matindex))
for i in self.indices:
d.indices.append(i)
for m in self.mindices:
d.mindices.append(m)
d.bounds = Box(Vector(self.bounds.min[0], self.bounds.min[1], self.bounds.min[2]),
Vector(self.bounds.max[0], self.bounds.max[1], self.bounds.max[2]))
d.center = Vector(self.center[0], self.center[1], self.center[2])
d.radius = self.radius
d.vertsPerFrame = self.vertsPerFrame
d.flags = self.flags
for i in self.vindex:
d.vindex.append(i)
for b in self.bindex:
d.bindex.append(b)
for v in self.vweight:
d.vweight.append(v)
for n in self.nodeIndex:
d.nodeIndex.append(n)
for n in self.nodeTransforms:
d.nodeTransforms.append(n.copy())
for p in self.startPrimitive:
d.startPrimitive.append(p)
for ts in self.texgenS:
d.texgenS.append(Vector4(ts[0], ts[1], ts[2], ts[3]))
for tt in self.texgenT:
d.texgenT.append(Vector4(tt[0], tt[1], tt[2], tt[3]))
d.materialIndex = self.materialIndex
for c in self.clusters:
d.clusters.append(Cluster(c.startPrimitive, c.endPrimitive, c.normal, c.k, c.frontCluster, c.backCluster))
for c in self.startCluster:
d.startCluster.append(c)
for v in self.firstVerts:
d.firstVerts.append(v)
for v in self.numVerts:
d.numVerts.append(v)
for t in self.firstTVerts:
d.firstTVerts.append(t)
return d
'''
Sorted Mesh Routines
'''
def generateClusters(self, numBigFaces, maxDepth, zLayerUp, zLayerDown):
'''
on entry, mesh is organized like a standard mesh...
        numFrames, matFrames, & vertsPerFrame describe what
is held in verts, norms, and tverts arrays
primitives and indices vectors describe the faces (same
faces on each frame/matFrame)
we want to convert this over to the structure that will be
used by TSSortedMesh...we also want to sort the faces, of course...
'''
meshFaces = []
meshIndices = []
meshVerts = []
meshNorms = []
meshTVerts = []
meshClusters = []
for i in range(0, self.numFrames):
for j in range(0, self.matFrames):
faces = self.primitives
indices = self.indices
clusters = []
verts = copy.deepcopy(self.verts)
norms = copy.deepcopy(self.normals)
tverts = copy.deepcopy(self.tverts)
sort = Dts_TranslucentSort.TranslucentSort(faces, indices, verts, norms, tverts, numBigFaces, maxDepth,
zLayerUp, zLayerDown)
sort.sort()
newFaces = []
newIndices = []
sort.generateClusters(clusters, newFaces, newIndices)
k = 0
while k < len(clusters):
if (clusters[k].startPrimitive == clusters[k].endPrimitive and clusters[k].frontCluster == clusters[
k].backCluster):
# this cluster serves no purpose...get rid of it
for l in range(0, len(clusters)):
if l == k:
continue
if clusters[l].frontCluster == k:
clusters[l].frontCluster = clusters[k].frontCluster
if clusters[l].frontCluster > k:
clusters[l].frontCluster -= 1
if clusters[l].backCluster == k:
clusters[l].backCluster = clusters[k].backCluster
if clusters[l].backCluster > k:
clusters[l].backCluster -= 1
del clusters[k]
k = -1 # start over, our parent may now be useless...
k += 1
if j == 0:
self.startCluster.append(len(meshClusters))
self.firstVerts.append(len(meshVerts))
self.numVerts.append(len(verts))
# TODO: if tverts same as some previous frame, use that frame number
# o.w.
self.firstTVerts.append(len(meshTVerts))
# adjust startPrimitive, endPrimitive, frontCluster, & backCluster on list of clusters just generated
for k in range(0, len(clusters)):
cluster = clusters[k]
cluster.startPrimitive += len(meshFaces)
cluster.endPrimitive += len(meshFaces)
cluster.frontCluster += len(meshClusters)
cluster.backCluster += len(meshClusters)
# now merge in just computed verts, tverts, indices, primitives, and clusters...
meshVerts += verts
if self.firstTVerts[-1] == len(meshTVerts):
meshTVerts += tverts
meshNorms += norms
meshIndices += newIndices
meshFaces += newFaces
meshClusters += clusters
self.clusters = meshClusters
self.primitives = meshFaces
self.indices = meshIndices
self.verts = meshVerts
self.normals = meshNorms
self.tverts = meshTVerts
def sortMesh(self, alwaysWriteDepth=False, maxDepth=2, numBigFaces=0, zLayerUp=True, zLayerDown=True):
'''
All we need to do is turn control over to generateClusters.
generateClusters() will construct the BSP tree, and roll any
new primitives / clusters / vertexes into the mesh data
'''
self.alwaysWriteDepth = alwaysWriteDepth
print(" Sorting : WD(%d) NB(%d) MD(%d) ZU(%d), ZD(%d)" % (
alwaysWriteDepth, numBigFaces, maxDepth, zLayerUp, zLayerDown))
self.generateClusters(numBigFaces, maxDepth, zLayerUp, zLayerDown)
print(" Sorting : Done, Generated %d clusters" % len(self.clusters))
from . import Dts_TranslucentSort
# End of file
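# A minimal usage sketch of the sorted-mesh entry point above (illustrative only; the
# `mesh` argument is a hypothetical, already-populated instance of this mesh class with
# primitives, indices, verts, normals and tverts filled in by the exporter).
def _example_sort_translucent_mesh(mesh):
    # Build the BSP clusters with typical settings: always write depth, limit the
    # tree to two levels, force no "big" faces, and keep both Z layers.
    mesh.sortMesh(alwaysWriteDepth=True, maxDepth=2, numBigFaces=0,
                  zLayerUp=True, zLayerDown=True)
    # After sorting, mesh.clusters / mesh.primitives / mesh.indices / mesh.startCluster
    # hold the per-frame data consumed by TSSortedMesh.
    return mesh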
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import sys
from collections import OrderedDict
import numpy as np
from .base import IORegistryError, _UnifiedIORegistryBase
__all__ = ['UnifiedIORegistry', 'UnifiedInputRegistry', 'UnifiedOutputRegistry']
PATH_TYPES = (str, os.PathLike) # TODO! include bytes
# -----------------------------------------------------------------------------
class UnifiedInputRegistry(_UnifiedIORegistryBase):
"""Read-only Unified Registry.
.. versionadded:: 5.0
Examples
--------
First let's start by creating a read-only registry.
.. code-block:: python
>>> from astropy.io.registry import UnifiedInputRegistry
>>> read_reg = UnifiedInputRegistry()
There is nothing in this registry. Let's make a reader for the
:class:`~astropy.table.Table` class::
from astropy.table import Table
def my_table_reader(filename, some_option=1):
# Read in the table by any means necessary
return table # should be an instance of Table
Such a function can then be registered with the I/O registry::
read_reg.register_reader('my-table-format', Table, my_table_reader)
Note that we CANNOT then read in a table with::
d = Table.read('my_table_file.mtf', format='my-table-format')
Why? Because ``Table.read`` uses Astropy's default global registry and this
is a separate registry.
Instead, we can read via the ``read`` method on this registry::
d = read_reg.read(Table, 'my_table_file.mtf', format='my-table-format')
"""
def __init__(self):
super().__init__() # set _identifiers
self._readers = OrderedDict()
self._registries["read"] = dict(attr="_readers", column="Read")
self._registries_order = ("read", "identify")
# =========================================================================
# Read methods
def register_reader(self, data_format, data_class, function, force=False,
priority=0):
"""
Register a reader function.
Parameters
----------
data_format : str
The data format identifier. This is the string that will be used to
specify the data type when reading.
data_class : class
The class of the object that the reader produces.
function : function
The function to read in a data object.
force : bool, optional
Whether to override any existing function if already present.
Default is ``False``.
priority : int, optional
The priority of the reader, used to compare possible formats when
trying to determine the best reader to use. Higher priorities are
preferred over lower priorities, with the default priority being 0
(negative numbers are allowed though).
"""
if not (data_format, data_class) in self._readers or force:
self._readers[(data_format, data_class)] = function, priority
else:
raise IORegistryError("Reader for format '{}' and class '{}' is "
'already defined'
''.format(data_format, data_class.__name__))
if data_class not in self._delayed_docs_classes:
self._update__doc__(data_class, 'read')
def unregister_reader(self, data_format, data_class):
"""
Unregister a reader function
Parameters
----------
data_format : str
The data format identifier.
data_class : class
The class of the object that the reader produces.
"""
if (data_format, data_class) in self._readers:
self._readers.pop((data_format, data_class))
else:
raise IORegistryError("No reader defined for format '{}' and class '{}'"
''.format(data_format, data_class.__name__))
if data_class not in self._delayed_docs_classes:
self._update__doc__(data_class, 'read')
def get_reader(self, data_format, data_class):
"""Get reader for ``data_format``.
Parameters
----------
data_format : str
The data format identifier. This is the string that is used to
specify the data type when reading/writing.
data_class : class
The class of the object that can be written.
Returns
-------
reader : callable
The registered reader function for this format and class.
"""
readers = [(fmt, cls) for fmt, cls in self._readers if fmt == data_format]
for reader_format, reader_class in readers:
if self._is_best_match(data_class, reader_class, readers):
return self._readers[(reader_format, reader_class)][0]
else:
format_table_str = self._get_format_table_str(data_class, 'Read')
raise IORegistryError(
"No reader defined for format '{}' and class '{}'.\n\nThe "
"available formats are:\n\n{}".format(
data_format, data_class.__name__, format_table_str))
def read(self, cls, *args, format=None, cache=False, **kwargs):
"""
Read in data.
Parameters
----------
cls : class
*args
The arguments passed to this method depend on the format.
format : str or None
cache : bool
Whether to cache the results of reading in the data.
**kwargs
The arguments passed to this method depend on the format.
Returns
-------
object or None
The output of the registered reader.
"""
ctx = None
try:
if format is None:
path = None
fileobj = None
if len(args):
if isinstance(args[0], PATH_TYPES) and not os.path.isdir(args[0]):
from astropy.utils.data import get_readable_fileobj
# path might be a os.PathLike object
if isinstance(args[0], os.PathLike):
args = (os.fspath(args[0]),) + args[1:]
path = args[0]
try:
ctx = get_readable_fileobj(args[0], encoding='binary', cache=cache)
fileobj = ctx.__enter__()
except OSError:
raise
except Exception:
fileobj = None
else:
args = [fileobj] + list(args[1:])
elif hasattr(args[0], 'read'):
path = None
fileobj = args[0]
format = self._get_valid_format(
'read', cls, path, fileobj, args, kwargs)
reader = self.get_reader(format, cls)
data = reader(*args, **kwargs)
if not isinstance(data, cls):
# User has read with a subclass where only the parent class is
# registered. This returns the parent class, so try coercing
# to desired subclass.
try:
data = cls(data)
except Exception:
raise TypeError('could not convert reader output to {} '
'class.'.format(cls.__name__))
finally:
if ctx is not None:
ctx.__exit__(*sys.exc_info())
return data
# -----------------------------------------------------------------------------
class UnifiedOutputRegistry(_UnifiedIORegistryBase):
"""Write-only Registry.
.. versionadded:: 5.0
"""
def __init__(self):
super().__init__()
self._writers = OrderedDict()
self._registries["write"] = dict(attr="_writers", column="Write")
self._registries_order = ("write", "identify", )
# =========================================================================
# Write Methods
def register_writer(self, data_format, data_class, function, force=False, priority=0):
"""
Register a table writer function.
Parameters
----------
data_format : str
The data format identifier. This is the string that will be used to
specify the data type when writing.
data_class : class
The class of the object that can be written.
function : function
The function to write out a data object.
force : bool, optional
Whether to override any existing function if already present.
Default is ``False``.
priority : int, optional
The priority of the writer, used to compare possible formats when trying
to determine the best writer to use. Higher priorities are preferred
over lower priorities, with the default priority being 0 (negative
numbers are allowed though).
"""
if not (data_format, data_class) in self._writers or force:
self._writers[(data_format, data_class)] = function, priority
else:
raise IORegistryError("Writer for format '{}' and class '{}' is "
'already defined'
''.format(data_format, data_class.__name__))
if data_class not in self._delayed_docs_classes:
self._update__doc__(data_class, 'write')
def unregister_writer(self, data_format, data_class):
"""
Unregister a writer function
Parameters
----------
data_format : str
The data format identifier.
data_class : class
The class of the object that can be written.
"""
if (data_format, data_class) in self._writers:
self._writers.pop((data_format, data_class))
else:
raise IORegistryError("No writer defined for format '{}' and class '{}'"
''.format(data_format, data_class.__name__))
if data_class not in self._delayed_docs_classes:
self._update__doc__(data_class, 'write')
def get_writer(self, data_format, data_class):
"""Get writer for ``data_format``.
Parameters
----------
data_format : str
The data format identifier. This is the string that is used to
specify the data type when reading/writing.
data_class : class
The class of the object that can be written.
Returns
-------
writer : callable
The registered writer function for this format and class.
"""
writers = [(fmt, cls) for fmt, cls in self._writers if fmt == data_format]
for writer_format, writer_class in writers:
if self._is_best_match(data_class, writer_class, writers):
return self._writers[(writer_format, writer_class)][0]
else:
format_table_str = self._get_format_table_str(data_class, 'Write')
raise IORegistryError(
"No writer defined for format '{}' and class '{}'.\n\nThe "
"available formats are:\n\n{}".format(
data_format, data_class.__name__, format_table_str))
def write(self, data, *args, format=None, **kwargs):
"""
Write out data.
Parameters
----------
data : object
The data to write.
*args
The arguments passed to this method depend on the format.
format : str or None
**kwargs
The arguments passed to this method depend on the format.
Returns
-------
object or None
The output of the registered writer. Most often `None`.
.. versionadded:: 4.3
"""
if format is None:
path = None
fileobj = None
if len(args):
if isinstance(args[0], PATH_TYPES):
# path might be a os.PathLike object
if isinstance(args[0], os.PathLike):
args = (os.fspath(args[0]),) + args[1:]
path = args[0]
fileobj = None
elif hasattr(args[0], 'read'):
path = None
fileobj = args[0]
format = self._get_valid_format(
'write', data.__class__, path, fileobj, args, kwargs)
writer = self.get_writer(format, data.__class__)
return writer(data, *args, **kwargs)
# -----------------------------------------------------------------------------
class UnifiedIORegistry(UnifiedInputRegistry, UnifiedOutputRegistry):
"""Unified I/O Registry.
.. versionadded:: 5.0
"""
def __init__(self):
super().__init__()
self._registries_order = ("read", "write", "identify")
def get_formats(self, data_class=None, readwrite=None):
"""
Get the list of registered I/O formats as a `~astropy.table.Table`.
Parameters
----------
data_class : class, optional
Filter readers/writer to match data class (default = all classes).
readwrite : str or None, optional
Search only for readers (``"Read"``) or writers (``"Write"``).
If None search for both. Default is None.
.. versionadded:: 1.3
Returns
-------
format_table : :class:`~astropy.table.Table`
Table of available I/O formats.
"""
return super().get_formats(data_class, readwrite)
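# A minimal, self-contained usage sketch of the combined registry (illustrative only:
# the ``Container`` class, the ``'echo'`` format name and the reader/writer functions
# below are hypothetical; only the registry API itself comes from this module).
def _example_round_trip(path):
    class Container(list):
        """Toy data class used only for this sketch."""

    def echo_reader(filename):
        with open(filename) as fh:
            return Container(line.rstrip('\n') for line in fh)

    def echo_writer(data, filename):
        with open(filename, 'w') as fh:
            fh.write('\n'.join(data) + '\n')

    registry = UnifiedIORegistry()
    registry.register_reader('echo', Container, echo_reader)
    registry.register_writer('echo', Container, echo_writer)
    registry.write(Container(['a', 'b']), path, format='echo')
    return registry.read(Container, path, format='echo')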
|
|
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import rdflib
from rdflib.namespace import RDF, FOAF, RDFS, OWL, DC, DCTERMS, SKOS
from rdflib import URIRef, Literal, Namespace, XSD
import json
from mu.lib_unicode import UnicodeReader, UnicodeWriter
from mu.lib_dbpedia import DbpediaApi
import mu.mutil
from lib_ext import *
import re
import os
import hashlib
import json
import datetime
import urllib
import unicodedata
SWRC = Namespace('http://swrc.ontoware.org/ontology#')
SWC = Namespace('http://data.semanticweb.org/ns/swc/ontology#')
BIBO = Namespace('http://purl.org/ontology/bibo/')
ICAL = Namespace('http://www.w3.org/2002/12/cal/ical#')
DCTYPE = Namespace('http://purl.org/dc/dcmitype/')
VERSION_INFO = "iswc metadata 2001-2014 (2013-10-02)"
def expand_entry(entry):
map_data={}
if 'uri_me' in entry:
map_data["[ME]"]=entry['uri_me']
map_data["[WORKSHOP]"] ='http://data.semanticweb.org/workshop'
for key, value in entry.items():
for k, v in map_data.items():
temp = entry[key].replace(k, v)
if temp != entry[key]:
#print "\n{}\n-->\n{}".format(entry[key], temp)
entry[key] = temp
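# Illustrative only: a toy entry showing how expand_entry() rewrites placeholder values
# in place. The field names below are hypothetical; "[ME]" and "[WORKSHOP]" are the two
# placeholders handled above.
def _example_expand_entry():
    entry = {
        "uri_me": "http://data.semanticweb.org/conference/iswc/2013",
        "proceedings_uri": "[ME]/proceedings",
        "event_uri": "[WORKSHOP]/cold2013",
    }
    expand_entry(entry)
    # entry["proceedings_uri"] ->
    #   "http://data.semanticweb.org/conference/iswc/2013/proceedings"
    # entry["event_uri"] -> "http://data.semanticweb.org/workshop/cold2013"
    return entry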
class DataIswc(object):
def __init__(self, local_config, global_config, dbpedia_api={}):
self.graph = rdflib.Graph()
self.graph.bind("foaf", FOAF)
self.graph.bind("dc", DC)
self.graph.bind("owl", OWL)
self.graph.bind("swrc", SWRC)
self.graph.bind("swc", SWC)
self.graph.bind("skos", SKOS)
self.graph.bind("bibo", BIBO)
self.graph.bind("dcterms", DCTERMS)
self.graph.bind("ical", ICAL)
self.graph.bind("dctype", DCTYPE)
self.local_config = local_config
self.global_config = global_config
self.map_name_res = {}
#self.map_name_name = {}
self.dbpedia_api = dbpedia_api
self.list_name_untyped = set()
@staticmethod
def dbpedia_api_load(dir_data):
dbpedia_api = {}
if os.path.exists(dir_data):
namespace = DataIswc.get_namespace(DataIswc.PREFIX_ORG)
dbpedia_api[namespace] = DbpediaApi(dir_data, DbpediaApi.ENTITY_TYPE_ORGANIZATION)
print "[{}] {} name mappings loaded".format(
namespace,
len(dbpedia_api[namespace].map_name))
# namespace = DataIswc.get_namespace(DataIswc.PREFIX_PERSON)
# dbpedia_api[namespace] = DbpediaApi(dir_data, DbpediaApi.ENTITY_TYPE_PERSON)
# print "[{}] {} name mappings loaded".format(
# namespace,
# len(dbpedia_api[namespace].map_name))
return dbpedia_api
@staticmethod
def dbpedia_api_write(dbpedia_api):
#save new entities
for api in dbpedia_api.values():
print "[]{} name mappings ".format(len(api.map_name))
api.write_new_data()
# def load_metadata(self):
# filename_source= "{0}/data/entity/organisation.csv".format(self.global_config["home"])
# if os.path.exists(filename_source):
# with open (filename_source) as f:
# csvreader = UnicodeReader(f)
# headers = csvreader.next()
# for row in csvreader:
# entry = dict(zip(headers, row))
# self.map_name_name[entry["altLabel"]] = {
# "prefLabel":entry["title"],
# "dbpediaUri":entry["uri"]}
# print "{0} name mappings loaded".format(len(self.map_name_name))
def run(self):
# self.load_metadata()
self.init_map_person_name()
self.process_organization()
self.process_person()
self.process_proceedings()
self.process_paper()
self.process_event()
self.process_misc()
filename_output = "{0}/data/www/{1}-complete.ttl".format(
self.global_config["home"],
self.local_config["id"])
with open(filename_output, "w") as f:
content = self.graph.serialize(format='turtle')
f.write(content)
print "{} name mappings without type".format(len(self.list_name_untyped))
def run_paper_x(self):
self.process_paper()
self.process_proceedings()
#self.process_misc()
filename_output = "{0}/data/www/{1}-conf-paper.ttl".format(
self.global_config["home"],
self.local_config["id"])
with open(filename_output, "w") as f:
content = self.graph.serialize(format='turtle')
f.write(content)
NS_ROOT = "http://data.semanticweb.org/"
PREFIX_ORG = "organization"
PREFIX_PERSON = "person"
PROP2URI = {
#datatype property
"label": {"p": [RDFS.label], "xsd": XSD.string},
"hasAcronym": {"p": [SWC.hasAcronym], "xsd": XSD.string},
"acronym": {"p": [SWC.hasAcronym], "xsd": XSD.string},
"name": {"p": [RDFS.label, FOAF.name], "xsd": XSD.string},
"title": {"p": [RDFS.label, DC.title, DCTERMS.title], "xsd": XSD.string},
"abstract": {"p": [SWRC.abstract], "xsd": XSD.string},
"hasAbstract": {"p": [SWRC.abstract], "xsd": XSD.string},
"year": {"p": [SWRC.year], "xsd": XSD.string},
"pages": {"p": [SWRC.pages], "xsd": XSD.string},
"keywords": {"p": [SWRC.listKeyword], "xsd": XSD.string, "delimiter": ","},
"publisher": {"p": [SWRC.publisher], "xsd": XSD.string},
"series": {"p": [SWRC.series], "xsd": XSD.string},
"volume": {"p": [SWRC.volume], "xsd": XSD.string},
"subtitle": {"p": [SWRC.subtitle], "xsd": XSD.string},
"alt-name": {"p": [SKOS.altLabel], "xsd": XSD.string, "delimiter": ","},
"other_names": {"p": [SKOS.altLabel], "xsd": XSD.string, "delimiter": ","},
"dtStart": {"p": [ICAL.dtstart], "xsd": XSD.dateTime},
"start": {"p": [ICAL.dtstart], "xsd": XSD.dateTime},
"dtEnd": {"p": [ICAL.dtend], "xsd": XSD.dateTime},
"end": {"p": [ICAL.dtend], "xsd": XSD.dateTime},
"tzid": {"p": [ICAL.tzid], "xsd": XSD.string},
"locationRoom": {"p": [SWC.hasLocation, SWC.room], "xsd": XSD.string},
"room": {"p": [SWC.hasLocation, SWC.room], "xsd": XSD.string},
"locationAddress": {"p": [SWC.hasLocation, SWC.address], "xsd": XSD.string},
"address": {"p": [SWC.hasLocation, SWC.address], "xsd": XSD.string},
"orderInSuperEvent": {"p": [SWC.orderInSession, SWC.order_in_super_event], "xsd": XSD.integer},
"order_in_super_event": {"p": [SWC.orderInSession, SWC.order_in_super_event], "xsd": XSD.integer},
"category": {"p": [SWRC.category], "xsd": XSD.string},
#object property
"link_open_access": {"p": [SWRC.url, SWRC.link_open_access]},
"link_open_access": {"p": [SWRC.url, SWRC.link_open_access]},
"link_publisher": {"p": [SWRC.url, SWRC.link_publisher]},
"link_publisher": {"p": [SWRC.url, SWRC.link_publisher]},
"linkDocument": {"p": [SWRC.url, SWRC.link_document]},
"link_document": {"p": [SWRC.url, SWRC.link_document]},
"depiction": {"p": [FOAF.depiction]},
"logo": {"p": [FOAF.logo]},
"homepage": {"p": [FOAF.homepage]}
}
@staticmethod
def get_namespace(prefix):
if DataIswc.PREFIX_ORG == prefix:
return "{0}{1}/".format(DataIswc.NS_ROOT, prefix)
elif DataIswc.PREFIX_PERSON == prefix:
return "{0}{1}/".format(DataIswc.NS_ROOT, prefix)
else:
return DataIswc.NS_ROOT
def expand_uri(self, uri):
for key in self.local_config["prefix_ns_map"]:
uri = uri.replace(key, self.local_config["prefix_ns_map"][key])
return uri
def cache_map_name_res(self, name, res):
#remove extra white space around
name = name.strip()
name = re.sub("\s+", " ", name)
localname = create_ascii_localname(name)
self.map_name_res[localname] = res
def create_list_named_entity(self, namespace, name):
real_name = None
if name in self.map_name_info:
if "real_name" in self.map_name_info[name]:
real_name=self.map_name_info[name]["real_name"]
#remove extra white space around
name = name.strip()
name = re.sub("\s+", " ", name)
ret = {}
#use canonical name
bool_processed = False
map_name_entry= {}
if namespace in self.dbpedia_api:
api = self.dbpedia_api[namespace]
map_name_entry = api.process_names(name)
for name_new in map_name_entry:
entry = map_name_entry[name_new]
if DbpediaApi.is_entry_auto(entry):
#print entry
print "new entry [{}]=>[{}]".format(name_new, entry["title"])
elif DbpediaApi.is_entry_skip(entry):
print "skip entry [{}]=>[{}]".format(name_new, entry["title"])
else:
#print entry
bool_processed = True
else:
map_name_entry[name] = None
for name_new in map_name_entry:
entry = map_name_entry[name_new]
if not bool_processed:
self.list_name_untyped.add(name_new)
localname = create_ascii_localname(name_new)
if localname in self.map_name_res:
ret[name_new] = self.map_name_res[localname]
else:
uri = "{0}{1}".format(namespace, localname)
res_entity = URIRef(uri)
if real_name:
self.create_triple_simple(res_entity, "name", real_name)
else:
self.create_triple_simple(res_entity, "name", name_new)
self.map_name_res[localname] = res_entity
if entry and 'uri' in entry and entry['uri']:
self.graph.add((res_entity, OWL.sameAs, URIRef(entry['uri'])))
if namespace == DataIswc.get_namespace(DataIswc.PREFIX_PERSON):
self.graph.add((res_entity, RDF.type, FOAF.Person))
elif namespace == DataIswc.get_namespace(DataIswc.PREFIX_ORG):
self.graph.add((res_entity, RDF.type, FOAF.Organization))
ret[name_new] = res_entity
return ret
def create_role_to_event(self, uri_event, role_type, role_label, res_entity):
if len(uri_event) == 0:
return
if len(role_type) == 0:
return
if len(role_label) == 0:
return
uri_event = self.expand_uri(uri_event)
res_event = URIRef(uri_event)
res_role_type = URIRef(self.expand_uri(role_type))
uri_role = "%s/%s" % (uri_event, create_ascii_localname(role_label) )
res_role = URIRef(uri_role)
self.graph.add((res_role, RDF.type, res_role_type))
self.graph.add((res_role, RDFS.label, Literal(role_label)))
self.graph.add((res_role, SWC.isRoleAt, res_event))
self.graph.add((res_role, SWC.heldBy, res_entity))
self.graph.add((res_event, SWC.hasRole, res_role ))
self.graph.add((res_entity, SWC.holdsRole, res_role))
def create_triple_complex(self, res_subject, list_field, entry):
for field in list_field:
if field in entry:
self.create_triple_simple(res_subject, field, entry[field])
def create_triple_simple(self, res_subject, field, value):
if len(value) == 0:
return
for p in DataIswc.PROP2URI[field]["p"]:
if "xsd" in DataIswc.PROP2URI[field]:
if XSD.string == DataIswc.PROP2URI[field]["xsd"]:
self.graph.add((res_subject, p, Literal(value)))
else:
self.graph.add((res_subject, p, Literal(value, datatype=DataIswc.PROP2URI[field]["xsd"])))
else:
self.graph.add((res_subject, p, URIRef(value)))
def process_misc(self):
res_me = URIRef(self.expand_uri("[ME]"))
res_data = URIRef(self.expand_uri("[ME]/complete"))
self.graph.add((res_me, SWC.completeGraph, res_data ))
self.graph.add((res_data, RDF.type, DCTYPE.Dataset ))
self.graph.add((res_data, DCTERMS.hasVersion, Literal(VERSION_INFO)))
self.graph.add((res_data, RDFS.comment, Literal(
"This dataset is created by Li Ding http://liding.org. To learn more about this dataset, go to https://github.com/lidingpku/open-conference-data/tree/master/data/iswc ")))
self.graph.add(
(res_data, DCTERMS.modified, Literal(datetime.datetime.now().isoformat(), datatype=XSD.datetime)))
self.graph.add((res_data, DCTERMS.creator, Literal("Li Ding")))
def process_organization(self):
filename = "{0}/data/source/{1}-organization.csv".format(
self.global_config["home"],
self.local_config["id"])
with open(filename) as f:
csvreader = UnicodeReader(f)
headers = csvreader.next()
for row in csvreader:
if len(row) < len(headers):
#print "skipping row %s" % row
continue
entry = dict(zip(headers, row))
if len(entry["name"]) == 0:
#print "skipping empty name row %s" % entry
continue
for res_organization in self.create_list_named_entity(DataIswc.get_namespace(DataIswc.PREFIX_ORG), entry["name"]).values():
#object properties
self.create_triple_complex(res_organization, ["homepage", "logo"], entry)
#role
self.create_role_to_event(
entry["role_event"],
entry["role_type"],
entry["role_label"],
res_organization)
def init_map_person_name(self):
if hasattr(self, "map_name"):
return
# load global entity name mappings
filename = "{0}/data/entity/person.csv".format(
self.global_config["home"],
self.local_config["id"])
map_name = {} #othername -> name
map_name_info = {} #name -> (real name, list of other name)
with open(filename) as f:
csvreader = UnicodeReader(f)
headers = csvreader.next()
for row in csvreader:
if len(row) != len(headers):
#print "skipping mismatch row %s" % row
continue
entry = dict(zip(headers, row))
if entry["name"]:
name = entry["name"].strip()
if ["other_names"]:
#real_name = entry["name"]
#if "real_name" in entry:
# real_name = entry["real_name"]
map_name_info[name] = {"other_names": [x.strip() for x in entry["other_names"].split(";")]}
for other_name in map_name_info[name]["other_names"]:
map_name[other_name] = name
self.map_name = map_name
self.map_name_info = map_name_info
def get_final_name(self,name):
self.init_map_person_name()
name = name.strip()
if name in self.map_name:
return self.map_name[name]
else:
return name
def process_person(self):
#load person
filename = "{0}/data/source/{1}-person.csv".format(
self.global_config["home"],
self.local_config["id"])
with open(filename) as f:
csvreader = UnicodeReader(f)
headers = csvreader.next()
for row in csvreader:
if len(row) != len(headers):
#print "skipping mismatch row %s" % row
continue
entry = dict(zip(headers, row))
if len(entry["name"]) == 0:
#print "skipping empty name row %s" % entry
continue
name = entry["name"].strip()
name = self.get_final_name(name)
for res_person in self.create_list_named_entity(DataIswc.get_namespace(DataIswc.PREFIX_PERSON), name).values():
#map other names
for other_name in entry["other_names"].split(","):
self.cache_map_name_res(other_name, res_person)
if name in self.map_name_info:
for other_name in self.map_name_info[name]["other_names"]:
self.cache_map_name_res(other_name, res_person)
#object properties
self.create_triple_complex(res_person, ["homepage"], entry)
#role
self.create_role_to_event(
entry["role_event"],
entry["role_type"],
entry["role_label"],
res_person)
#organization
if "organization" in entry:
for org in entry["organization"].split(";"):
if len(org) == 0:
continue
for res_organization in self.create_list_named_entity(DataIswc.get_namespace(DataIswc.PREFIX_ORG), org).values():
self.graph.add((res_organization, FOAF.member, res_person))
#inverse property
self.graph.add((res_person, SWRC.affiliation, res_organization))
#alt-name
self.create_triple_complex(res_person, ["other_names"], entry)
#email
if len(entry["email"]) > 0:
if not entry["email"].startswith("mailto:"):
mbox = "mailto:%s" % entry["email"]
else:
mbox = entry["email"]
mbox_sha1sum = hashlib.sha1(mbox).hexdigest()
#self.graph.add( (res_person, FOAF.mbox, URIRef(mbox)) )
self.graph.add((res_person, FOAF.mbox_sha1sum, Literal(mbox_sha1sum)))
def process_event(self):
filename = "{0}/data/source/{1}-event.csv".format(
self.global_config["home"],
self.local_config["id"])
counter_event = MyCounter()
with open(filename) as f:
csvreader = UnicodeReader(f)
headers = csvreader.next()
for row in csvreader:
if len(row) != len(headers):
#print "skipping mismatch row %s" % row
continue
entry = dict(zip(headers, row))
if len(entry["label"].strip()) == 0:
#print "skipping empty label row %s" % entry
continue
if len(entry["event_type"].strip()) == 0:
#print "skipping empty event_type row %s" % entry
continue
if entry["event_uri"].startswith("#"):
#print "skipping empty commented row %s" % entry
continue
#set default super event
if len(entry["super_event_uri"]) == 0:
entry["super_event_uri"] = "[ME]"
expand_entry(entry)
uri_super_event = self.expand_uri(entry["super_event_uri"])
res_super_event = URIRef(uri_super_event)
if len(entry["event_uri"]) == 0:
counter_event.inc(uri_super_event)
entry["event_uri"] = "%s/event-%02d" % (
uri_super_event,
counter_event.data[uri_super_event])
uri_event = self.expand_uri(entry["event_uri"])
res_event = URIRef(uri_event)
#event type
self.graph.add((res_event, RDF.type, SWC[entry["event_type"]]))
#super event
self.graph.add((res_event, SWC.isSubEventOf, res_super_event))
self.graph.add((res_super_event, SWC.isSuperEventOf, res_event))
#simple properties
self.create_triple_complex(
res_event,
["label", "acronym", "abstract",
"order_in_super_event",
"start", "end", "tzid",
"room", "address",
"homepage", "link_document", "logo"],
entry)
#linking paper event
if "TalkEvent" == entry["event_type"]:
if entry["label"] in self.map_name_res:
res_paper = self.map_name_res[entry["label"]]
self.graph.add(( res_event, SWC.hasRelatedDocument, res_paper))
self.graph.add(( res_paper, SWC.relatedToEvent, res_event))
else:
print "missing paper link [{}]".format(entry["label"])
#print json.dumps(self.map_name_res, indent=4, sort_keys=True)
sys.exit(0)
#role -chair
for role in ["Chair", "Presenter"]:
role_lower = role.lower()
if len(entry[role_lower + "_person"]) > 0:
person_data = DataIswc.parse_person_list(entry[role_lower + "_person"])
for name in person_data["list"]:
if len(name) == 0:
continue
name = self.get_final_name(name)
for res_person in self.create_list_named_entity(DataIswc.get_namespace(DataIswc.PREFIX_PERSON),name).values():
role_label_x = entry[role_lower + "_label"]
event_type_x = entry["event_type"].split("#")[-1].replace("Event", "")
if event_type_x in ["Workshop", "Tutorial"]:
role_label_x = u"{} {}".format(event_type_x, role_label_x)
assert (len(role.strip())>0)
self.create_role_to_event(
uri_event,
"swc:" + role,
role_label_x,
res_person)
def create_container(self, elements, contType, uri_subject=None):
'''http://dev.w3.org/2004/PythonLib-IH/NewRDFLib/rdflib/Graph.py'''
if None == uri_subject:
container = BNode()
else:
container = URIRef(uri_subject)
self.graph.add((container, RDF.type, contType))
for i in range(0, len(elements)):
uri_pred = "%s_%d" % (RDF, i + 1)
pred = URIRef(uri_pred)
self.graph.add((container, pred, elements[i]))
return container
@staticmethod
def parse_person_list(text):
author_x = text
author_x = re.sub("[,\s]+and[,\s]+", ",", author_x)
author_x = re.sub("\s+", " ", author_x)
list_author_x = [x.strip() for x in author_x.split(",")]
if "" in list_author_x:
#print "....."
list_author_x.remove("")
if len(list_author_x) > 1:
author_x_and = "{} and {}".format(",".join(list_author_x[0:-1]), list_author_x[-1])
else:
author_x_and = list_author_x[0]
ret = {}
ret["text"] = author_x_and
ret["list"] = list_author_x
return ret
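# Illustrative trace of parse_person_list() above (names are hypothetical):
#   DataIswc.parse_person_list("Alice Smith, Bob Jones and Carol Lee")
# returns
#   {"text": "Alice Smith,Bob Jones and Carol Lee",
#    "list": ["Alice Smith", "Bob Jones", "Carol Lee"]}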
def process_paper(self):
filename = "{0}/data/source/iswc-all-papers.csv".format(
self.global_config["home"])
if self.local_config["id"] in ["iswc-2013","iswc-2014"]:
filename = "{}/data/source/{}-paper.csv".format(
self.global_config["home"],
self.local_config["id"])
counter_paper = MyCounter()
with open(filename) as f:
csvreader = UnicodeReader(f)
headers = csvreader.next()
for row in csvreader:
if len(row) != len(headers):
#print "skipping mismatch row %s" % row
continue
entry = dict(zip(headers, row))
if entry["year"] != self.local_config["year"]:
#skip mismatched year
continue
if len(entry["title"]) == 0:
print "skipping empty title row %s" % entry
continue
if len(entry["proceedings_uri"]) == 0:
print "skipping empty proceedings row %s" % entry
continue
expand_entry(entry)
counter_paper.inc(entry["proceedings_uri"])
id_paper = counter_paper.data[entry["proceedings_uri"]]
uri_paper = "%s/paper-%02d" % (entry["proceedings_uri"], id_paper)
uri_paper_author_list = "%s/paper-%02d/author_list" % (entry["proceedings_uri"], id_paper)
#print json.dumps(entry, indent=4)
#print uri_paper
res_proceedings = URIRef(entry["proceedings_uri"])
res_paper = URIRef(uri_paper)
self.graph.add((res_paper, RDF.type, SWRC.InProceedings ))
#part-of proceedings
self.graph.add((res_paper, SWC.isPartOf, res_proceedings))
self.graph.add((res_proceedings, SWC.hasPart, res_paper))
#author
author_data = DataIswc.parse_person_list(entry["author"])
# if author_x_and != entry["author"]:
# print "--------------"
# print entry["author"]
# print author_x_and
# author_x_and_y = re.sub("\s+"," ",author_x_and)
# if author_x_and != author_x_and_y:
# print "????"
# print author_x_and
# print author_x_and_y
self.graph.add((res_paper, SWRC.listAuthor, Literal(author_data["text"])))
list_res_author = []
for author in author_data["list"]:
author = self.get_final_name(author)
for res_author in self.create_list_named_entity(DataIswc.get_namespace(DataIswc.PREFIX_PERSON), author).values():
self.graph.add((res_author, RDF.type, FOAF.Person))
list_res_author.append(res_author)
self.graph.add((res_paper, SWRC.author, res_author))
self.graph.add((res_paper, FOAF.maker, res_author))
self.graph.add((res_author, FOAF.made, res_paper))
res_paper_author_list = self.create_container(list_res_author, RDF.Seq, uri_paper_author_list)
self.graph.add((res_paper, BIBO.authorList, res_paper_author_list))
#simple properties
self.create_triple_complex(
res_paper,
["abstract", "keywords", "year", "pages", "title", "category",
"link_open_access", "link_publisher"],
entry)
#cache
self.map_name_res[entry["title"]] = res_paper
def process_proceedings(self):
filename = "{0}/data/source/iswc-all-proceedings.csv".format(
self.global_config["home"])
counter_paper = MyCounter()
with open(filename) as f:
csvreader = UnicodeReader(f)
headers = csvreader.next()
for row in csvreader:
if len(row) != len(headers):
print "skipping mismatch row %s" % row
continue
entry = dict(zip(headers, row))
if entry["year"] != self.local_config["year"]:
#skip mismatched year
continue
if len(entry["title"]) == 0:
print "skipping empty title row %s" % entry
continue
if len(entry["proceedings_uri"]) == 0:
print "skipping empty proceedings_uri row %s" % entry
continue
expand_entry(entry)
uri_proceedings = self.expand_uri(entry["proceedings_uri"])
uri_proceedings_editor_list = "%s/editor_list" % (uri_proceedings)
uri_event = self.expand_uri(entry["event_uri"])
#print json.dumps(entry, indent=4)
#print uri_proceedings
res_proceedings = URIRef(uri_proceedings)
res_event = URIRef(uri_event)
self.graph.add((res_proceedings, RDF.type, SWRC.Proceedings ))
#relation to event
self.graph.add((res_proceedings, SWC.relatedToEvent, res_event))
self.graph.add((res_event, SWRC.hasRelatedDocument, res_proceedings))
#editor
if len(entry["editor"]) > 0:
self.graph.add((res_proceedings, SWRC.listEditor, Literal(entry["editor"])))
list_res_editor = []
for editor in entry["editor"].split(","):
editor = self.get_final_name(editor)
for res_editor in self.create_list_named_entity(DataIswc.get_namespace(DataIswc.PREFIX_PERSON), editor).values():
list_res_editor.append(res_editor)
self.graph.add((res_proceedings, SWRC.editor, res_editor))
self.graph.add((res_proceedings, FOAF.maker, res_editor))
self.graph.add((res_editor, FOAF.made, res_proceedings))
res_proceedings_editor_list = self.create_container(list_res_editor, RDF.Seq,
uri_proceedings_editor_list)
self.graph.add((res_proceedings, SWC.editorList, res_proceedings_editor_list))
#simple properties
self.create_triple_complex(
res_proceedings,
["title", "subtitle", "abstract", "keywords", "year", "pages", "publisher", "series", "volume",
"link_open_access", "link_publisher", "depiction"],
entry)
def main():
# load config file
#with open("config.json") as f:
# global_config = json.load( f)
global_config = mu.mutil.config_load(file_home=__file__)
print global_config
dir_data_entity = os.path.join(global_config["home"], "data/entity/")
dbpedia_api = DataIswc.dbpedia_api_load(dir_data_entity)
try:
for year in range(2001, 2015):
#if year != 2014:
# continue
local_config = {
"year": "{}".format(year),
"id-swsa": "ISWC{}".format(year),
"id-dogfood": "iswc-{}".format(year),
"id": "iswc-{}".format(year),
"prefix_ns_map": {
"[ISWC]": "{}conference/iswc".format(DataIswc.NS_ROOT),
"[WORKSHOP]": "{}workshop".format(DataIswc.NS_ROOT),
"[ME]": "{}conference/iswc/{}".format(DataIswc.NS_ROOT, year),
"swc:": "http://data.semanticweb.org/ns/swc/ontology#"
}
}
print "processing {}".format(local_config["id"])
if year == 2007:
local_config["id-dogfood"] = "iswc-aswc-2007"
local_config["prefix_ns_map"]["[ME]"] = "{}conference/iswc-aswc/{}".format(
DataIswc.NS_ROOT, year)
# elif year==2001:
# local_config["id-dogfood"]="swws-2001"
# local_config["prefix_ns_map"]["[ME]"] ="{}conference/iswc/{}".format(
# DataIswc.NS_ROOT,
# local_config["id-dogfood"])
data = DataIswc(local_config, global_config, dbpedia_api)
data.run_paper_x()
if not year in range(2006, 2012):
data = DataIswc(local_config, global_config, dbpedia_api)
data.run()
DataIswc.dbpedia_api_write(dbpedia_api)
except:
DataIswc.dbpedia_api_write(dbpedia_api)
import traceback
traceback.print_exc()
raise
print "All done"
if __name__ == "__main__":
main()
|
|
# -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.parameters
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains methods related to `Section 4`_ of the OAuth 2 RFC.
.. _`Section 4`: https://tools.ietf.org/html/rfc6749#section-4
"""
from __future__ import absolute_import, unicode_literals
import json
import os
import time
from oauthlib.common import add_params_to_qs, add_params_to_uri, unicode_type
from oauthlib.signals import scope_changed
from .errors import (InsecureTransportError, MismatchingStateError,
MissingCodeError, MissingTokenError,
MissingTokenTypeError, raise_from_error)
from .tokens import OAuth2Token
from .utils import is_secure_transport, list_to_scope, scope_to_list
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
def prepare_grant_uri(uri, client_id, response_type, redirect_uri=None,
scope=None, state=None, **kwargs):
"""Prepare the authorization grant request URI.
The client constructs the request URI by adding the following
parameters to the query component of the authorization endpoint URI
using the ``application/x-www-form-urlencoded`` format as defined by
[`W3C.REC-html401-19991224`_]:
:param response_type: To indicate which OAuth 2 grant/flow is required,
"code" and "token".
:param client_id: The client identifier as described in `Section 2.2`_.
:param redirect_uri: The client provided URI to redirect back to after
authorization as described in `Section 3.1.2`_.
:param scope: The scope of the access request as described by
`Section 3.3`_.
:param state: An opaque value used by the client to maintain
state between the request and callback. The authorization
server includes this value when redirecting the user-agent
back to the client. The parameter SHOULD be used for
preventing cross-site request forgery as described in
`Section 10.12`_.
:param kwargs: Extra arguments to embed in the grant/authorization URL.
An example of an authorization code grant authorization URL:
.. code-block:: http
GET /authorize?response_type=code&client_id=s6BhdRkqt3&state=xyz
&redirect_uri=https%3A%2F%2Fclient%2Eexample%2Ecom%2Fcb HTTP/1.1
Host: server.example.com
.. _`W3C.REC-html401-19991224`: https://tools.ietf.org/html/rfc6749#ref-W3C.REC-html401-19991224
.. _`Section 2.2`: https://tools.ietf.org/html/rfc6749#section-2.2
.. _`Section 3.1.2`: https://tools.ietf.org/html/rfc6749#section-3.1.2
.. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
.. _`section 10.12`: https://tools.ietf.org/html/rfc6749#section-10.12
"""
if not is_secure_transport(uri):
raise InsecureTransportError()
params = [(('response_type', response_type)),
(('client_id', client_id))]
if redirect_uri:
params.append(('redirect_uri', redirect_uri))
if scope:
params.append(('scope', list_to_scope(scope)))
if state:
params.append(('state', state))
for k in kwargs:
if kwargs[k]:
params.append((unicode_type(k), kwargs[k]))
return add_params_to_uri(uri, params)
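# Illustrative usage of prepare_grant_uri (all values are hypothetical). Note that
# is_secure_transport() is enforced above, so the authorization endpoint must be HTTPS.
def _example_grant_uri():
    return prepare_grant_uri(
        'https://server.example.com/authorize',
        client_id='s6BhdRkqt3',
        response_type='code',
        redirect_uri='https://client.example.com/cb',
        scope=['profile'],
        state='xyz',
    )
    # -> 'https://server.example.com/authorize?response_type=code&client_id=...&state=xyz'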
def prepare_token_request(grant_type, body='', **kwargs):
"""Prepare the access token request.
The client makes a request to the token endpoint by adding the
following parameters using the ``application/x-www-form-urlencoded``
format in the HTTP request entity-body:
:param grant_type: To indicate grant type being used, i.e. "password",
"authorization_code" or "client_credentials".
:param body: Existing request body to embed parameters in.
:param code: If using authorization code grant, pass the previously
obtained authorization code as the ``code`` argument.
:param redirect_uri: If the "redirect_uri" parameter was included in the
authorization request as described in
`Section 4.1.1`_, it must be supplied here and the values MUST be identical.
:param kwargs: Extra arguments to embed in the request body.
An example of an authorization code token request body:
.. code-block:: http
grant_type=authorization_code&code=SplxlOBeZQQYbYS6WxSbIA
&redirect_uri=https%3A%2F%2Fclient%2Eexample%2Ecom%2Fcb
.. _`Section 4.1.1`: https://tools.ietf.org/html/rfc6749#section-4.1.1
"""
params = [('grant_type', grant_type)]
if 'scope' in kwargs:
kwargs['scope'] = list_to_scope(kwargs['scope'])
for k in kwargs:
if kwargs[k]:
params.append((unicode_type(k), kwargs[k]))
return add_params_to_qs(body, params)
def prepare_token_revocation_request(url, token, token_type_hint="access_token",
callback=None, body='', **kwargs):
"""Prepare a token revocation request.
The client constructs the request by including the following parameters
using the "application/x-www-form-urlencoded" format in the HTTP request
entity-body:
token REQUIRED. The token that the client wants to get revoked.
token_type_hint OPTIONAL. A hint about the type of the token submitted
for revocation. Clients MAY pass this parameter in order to help the
authorization server to optimize the token lookup. If the server is unable
to locate the token using the given hint, it MUST extend its search across
all of its supported token types. An authorization server MAY ignore this
parameter, particularly if it is able to detect the token type
automatically. This specification defines two such values:
* access_token: An access token as defined in [RFC6749],
`Section 1.4`_
* refresh_token: A refresh token as defined in [RFC6749],
`Section 1.5`_
Specific implementations, profiles, and extensions of this
specification MAY define other values for this parameter using the
registry defined in `Section 4.1.2`_.
.. _`Section 1.4`: https://tools.ietf.org/html/rfc6749#section-1.4
.. _`Section 1.5`: https://tools.ietf.org/html/rfc6749#section-1.5
.. _`Section 4.1.2`: https://tools.ietf.org/html/rfc7009#section-4.1.2
"""
if not is_secure_transport(url):
raise InsecureTransportError()
params = [('token', token)]
if token_type_hint:
params.append(('token_type_hint', token_type_hint))
for k in kwargs:
if kwargs[k]:
params.append((unicode_type(k), kwargs[k]))
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
if callback:
params.append(('callback', callback))
return add_params_to_uri(url, params), headers, body
else:
return url, headers, add_params_to_qs(body, params)
def parse_authorization_code_response(uri, state=None):
"""Parse authorization grant response URI into a dict.
If the resource owner grants the access request, the authorization
server issues an authorization code and delivers it to the client by
adding the following parameters to the query component of the
redirection URI using the ``application/x-www-form-urlencoded`` format:
**code**
REQUIRED. The authorization code generated by the
authorization server. The authorization code MUST expire
shortly after it is issued to mitigate the risk of leaks. A
maximum authorization code lifetime of 10 minutes is
RECOMMENDED. The client MUST NOT use the authorization code
more than once. If an authorization code is used more than
once, the authorization server MUST deny the request and SHOULD
revoke (when possible) all tokens previously issued based on
that authorization code. The authorization code is bound to
the client identifier and redirection URI.
**state**
REQUIRED if the "state" parameter was present in the client
authorization request. The exact value received from the
client.
:param uri: The full redirect URL back to the client.
:param state: The state parameter from the authorization request.
For example, the authorization server redirects the user-agent by
sending the following HTTP response:
.. code-block:: http
HTTP/1.1 302 Found
Location: https://client.example.com/cb?code=SplxlOBeZQQYbYS6WxSbIA
&state=xyz
"""
if not is_secure_transport(uri):
raise InsecureTransportError()
query = urlparse.urlparse(uri).query
params = dict(urlparse.parse_qsl(query))
if not 'code' in params:
raise MissingCodeError("Missing code parameter in response.")
if state and params.get('state', None) != state:
raise MismatchingStateError()
return params
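# Illustrative only: parsing the redirect shown in the docstring above yields a plain
# dict containing the authorization code and the echoed state.
def _example_parse_code_response():
    redirect = ('https://client.example.com/cb'
                '?code=SplxlOBeZQQYbYS6WxSbIA&state=xyz')
    return parse_authorization_code_response(redirect, state='xyz')
    # -> {'code': 'SplxlOBeZQQYbYS6WxSbIA', 'state': 'xyz'}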
def parse_implicit_response(uri, state=None, scope=None):
"""Parse the implicit token response URI into a dict.
If the resource owner grants the access request, the authorization
server issues an access token and delivers it to the client by adding
the following parameters to the fragment component of the redirection
URI using the ``application/x-www-form-urlencoded`` format:
**access_token**
REQUIRED. The access token issued by the authorization server.
**token_type**
REQUIRED. The type of the token issued as described in
Section 7.1. Value is case insensitive.
**expires_in**
RECOMMENDED. The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
**scope**
OPTIONAL, if identical to the scope requested by the client,
otherwise REQUIRED. The scope of the access token as described
by Section 3.3.
**state**
REQUIRED if the "state" parameter was present in the client
authorization request. The exact value received from the
client.
Similar to the authorization code response, but with a full token provided
in the URL fragment:
.. code-block:: http
HTTP/1.1 302 Found
Location: http://example.com/cb#access_token=2YotnFZFEjr1zCsicMWpAA
&state=xyz&token_type=example&expires_in=3600
"""
if not is_secure_transport(uri):
raise InsecureTransportError()
fragment = urlparse.urlparse(uri).fragment
params = dict(urlparse.parse_qsl(fragment, keep_blank_values=True))
if 'scope' in params:
params['scope'] = scope_to_list(params['scope'])
if 'expires_in' in params:
params['expires_at'] = time.time() + int(params['expires_in'])
if state and params.get('state', None) != state:
raise ValueError("Mismatching or missing state in params.")
params = OAuth2Token(params, old_scope=scope)
validate_token_parameters(params)
return params
def parse_token_response(body, scope=None):
"""Parse the JSON token response body into a dict.
The authorization server issues an access token and optional refresh
token, and constructs the response by adding the following parameters
to the entity body of the HTTP response with a 200 (OK) status code:
access_token
REQUIRED. The access token issued by the authorization server.
token_type
REQUIRED. The type of the token issued as described in
`Section 7.1`_. Value is case insensitive.
expires_in
RECOMMENDED. The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
refresh_token
OPTIONAL. The refresh token which can be used to obtain new
access tokens using the same authorization grant as described
in `Section 6`_.
scope
OPTIONAL, if identical to the scope requested by the client,
otherwise REQUIRED. The scope of the access token as described
by `Section 3.3`_.
The parameters are included in the entity body of the HTTP response
using the "application/json" media type as defined by [`RFC4627`_]. The
parameters are serialized into a JSON structure by adding each
parameter at the highest structure level. Parameter names and string
values are included as JSON strings. Numerical values are included
as JSON numbers. The order of parameters does not matter and can
vary.
:param body: The full json encoded response body.
:param scope: The scope requested during authorization.
For example:
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
Cache-Control: no-store
Pragma: no-cache
{
"access_token":"2YotnFZFEjr1zCsicMWpAA",
"token_type":"example",
"expires_in":3600,
"refresh_token":"tGzv3JOkF0XG5Qx2TlKWIA",
"example_parameter":"example_value"
}
.. _`Section 7.1`: https://tools.ietf.org/html/rfc6749#section-7.1
.. _`Section 6`: https://tools.ietf.org/html/rfc6749#section-6
.. _`Section 3.3`: https://tools.ietf.org/html/rfc6749#section-3.3
.. _`RFC4627`: https://tools.ietf.org/html/rfc4627
"""
try:
params = json.loads(body)
except ValueError:
# Fall back to URL-encoded string, to support old implementations,
# including (at time of writing) Facebook. See:
# https://github.com/oauthlib/oauthlib/issues/267
params = dict(urlparse.parse_qsl(body))
for key in ('expires_in', 'expires'):
if key in params: # cast a couple things to int
params[key] = int(params[key])
if 'scope' in params:
params['scope'] = scope_to_list(params['scope'])
if 'expires' in params:
params['expires_in'] = params.pop('expires')
if 'expires_in' in params:
params['expires_at'] = time.time() + int(params['expires_in'])
params = OAuth2Token(params, old_scope=scope)
validate_token_parameters(params)
return params
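# Illustrative usage of parse_token_response, using the JSON body from the example in
# the docstring above. validate_token_parameters() runs on the result, so the body must
# carry at least access_token and token_type.
def _example_parse_token_response():
    body = ('{"access_token": "2YotnFZFEjr1zCsicMWpAA",'
            ' "token_type": "example",'
            ' "expires_in": 3600,'
            ' "refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA"}')
    token = parse_token_response(body)
    # token is an OAuth2Token (a dict subclass) with an added 'expires_at' timestamp.
    return token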
def validate_token_parameters(params):
"""Ensures token precence, token type, expiration and scope in params."""
if 'error' in params:
raise_from_error(params.get('error'), params)
if not 'access_token' in params:
raise MissingTokenError(description="Missing access token parameter.")
if not 'token_type' in params:
if os.environ.get('OAUTHLIB_STRICT_TOKEN_TYPE'):
raise MissingTokenTypeError()
# If the issued access token scope is different from the one requested by
# the client, the authorization server MUST include the "scope" response
# parameter to inform the client of the actual scope granted.
# https://tools.ietf.org/html/rfc6749#section-3.3
if params.scope_changed:
message = 'Scope has changed from "{old}" to "{new}".'.format(
old=params.old_scope, new=params.scope,
)
scope_changed.send(message=message, old=params.old_scopes, new=params.scopes)
if not os.environ.get('OAUTHLIB_RELAX_TOKEN_SCOPE', None):
w = Warning(message)
w.token = params
w.old_scope = params.old_scopes
w.new_scope = params.scopes
raise w
|
|
# Copyright 2015-2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import logging
import platform
import time
from typing import Iterable
from prometheus_client.core import (
REGISTRY,
CounterMetricFamily,
Gauge,
GaugeMetricFamily,
Histogram,
Metric,
)
from twisted.internet import task
"""Prometheus metrics for garbage collection"""
logger = logging.getLogger(__name__)
# The minimum time in seconds between GCs for each generation, regardless of the current GC
# thresholds and counts.
MIN_TIME_BETWEEN_GCS = (1.0, 10.0, 30.0)
running_on_pypy = platform.python_implementation() == "PyPy"
#
# Python GC metrics
#
gc_unreachable = Gauge("python_gc_unreachable_total", "Unreachable GC objects", ["gen"])
gc_time = Histogram(
"python_gc_time",
"Time taken to GC (sec)",
["gen"],
buckets=[
0.0025,
0.005,
0.01,
0.025,
0.05,
0.10,
0.25,
0.50,
1.00,
2.50,
5.00,
7.50,
15.00,
30.00,
45.00,
60.00,
],
)
class GCCounts:
def collect(self) -> Iterable[Metric]:
cm = GaugeMetricFamily("python_gc_counts", "GC object counts", labels=["gen"])
for n, m in enumerate(gc.get_count()):
cm.add_metric([str(n)], m)
yield cm
def install_gc_manager() -> None:
"""Disable automatic GC, and replace it with a task that runs every 100ms
This means that (a) we can limit how often GC runs; (b) we can get some metrics
about GC activity.
It does nothing on PyPy.
"""
if running_on_pypy:
return
REGISTRY.register(GCCounts())
gc.disable()
# The time (in seconds since the epoch) of the last time we did a GC for each generation.
_last_gc = [0.0, 0.0, 0.0]
def _maybe_gc() -> None:
# Check if we need to do a manual GC (since it's been disabled), and do
# one if necessary. Note we go in reverse order as e.g. a gen 1 GC may
# promote an object into gen 2, and we don't want to handle the same
# object multiple times.
threshold = gc.get_threshold()
counts = gc.get_count()
end = time.time()
for i in (2, 1, 0):
# We check if we need to do one based on a straightforward
# comparison between the threshold and count. We also do an extra
# check to make sure that we don't do a GC too often.
if threshold[i] < counts[i] and MIN_TIME_BETWEEN_GCS[i] < end - _last_gc[i]:
if i == 0:
logger.debug("Collecting gc %d", i)
else:
logger.info("Collecting gc %d", i)
start = time.time()
unreachable = gc.collect(i)
end = time.time()
_last_gc[i] = end
gc_time.labels(i).observe(end - start)
gc_unreachable.labels(i).set(unreachable)
gc_task = task.LoopingCall(_maybe_gc)
gc_task.start(0.1)
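# A minimal usage sketch (not part of Synapse's real startup code): call
# install_gc_manager() once, early in process startup, and let the Twisted reactor
# drive the LoopingCall scheduled above. On PyPy the call is a no-op.
def _example_startup() -> None:
    from twisted.internet import reactor

    install_gc_manager()  # disables automatic GC and schedules _maybe_gc every 100ms
    reactor.run()  # repeated _maybe_gc calls are driven by the running reactor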
#
# PyPy GC / memory metrics
#
class PyPyGCStats:
def collect(self) -> Iterable[Metric]:
# @stats is a pretty-printer object with __str__() returning a nice table,
# plus some fields that contain data from that table.
# Unfortunately, fields are pretty-printed themselves (i.e. '4.5MB').
stats = gc.get_stats(memory_pressure=False) # type: ignore
# @s contains same fields as @stats, but as actual integers.
s = stats._s # type: ignore
# also note that field naming is completely braindead
# and only vaguely correlates with the pretty-printed table.
# >>>> gc.get_stats(False)
# Total memory consumed:
# GC used: 8.7MB (peak: 39.0MB) # s.total_gc_memory, s.peak_memory
# in arenas: 3.0MB # s.total_arena_memory
# rawmalloced: 1.7MB # s.total_rawmalloced_memory
# nursery: 4.0MB # s.nursery_size
# raw assembler used: 31.0kB # s.jit_backend_used
# -----------------------------
# Total: 8.8MB # stats.memory_used_sum
#
# Total memory allocated:
# GC allocated: 38.7MB (peak: 41.1MB) # s.total_allocated_memory, s.peak_allocated_memory
# in arenas: 30.9MB # s.peak_arena_memory
# rawmalloced: 4.1MB # s.peak_rawmalloced_memory
# nursery: 4.0MB # s.nursery_size
# raw assembler allocated: 1.0MB # s.jit_backend_allocated
# -----------------------------
# Total: 39.7MB # stats.memory_allocated_sum
#
# Total time spent in GC: 0.073 # s.total_gc_time
pypy_gc_time = CounterMetricFamily(
"pypy_gc_time_seconds_total",
"Total time spent in PyPy GC",
labels=[],
)
pypy_gc_time.add_metric([], s.total_gc_time / 1000)
yield pypy_gc_time
pypy_mem = GaugeMetricFamily(
"pypy_memory_bytes",
"Memory tracked by PyPy allocator",
labels=["state", "class", "kind"],
)
# memory used by JIT assembler
pypy_mem.add_metric(["used", "", "jit"], s.jit_backend_used)
pypy_mem.add_metric(["allocated", "", "jit"], s.jit_backend_allocated)
# memory used by GCed objects
pypy_mem.add_metric(["used", "", "arenas"], s.total_arena_memory)
pypy_mem.add_metric(["allocated", "", "arenas"], s.peak_arena_memory)
pypy_mem.add_metric(["used", "", "rawmalloced"], s.total_rawmalloced_memory)
pypy_mem.add_metric(["allocated", "", "rawmalloced"], s.peak_rawmalloced_memory)
pypy_mem.add_metric(["used", "", "nursery"], s.nursery_size)
pypy_mem.add_metric(["allocated", "", "nursery"], s.nursery_size)
# totals
pypy_mem.add_metric(["used", "totals", "gc"], s.total_gc_memory)
pypy_mem.add_metric(["allocated", "totals", "gc"], s.total_allocated_memory)
pypy_mem.add_metric(["used", "totals", "gc_peak"], s.peak_memory)
pypy_mem.add_metric(["allocated", "totals", "gc_peak"], s.peak_allocated_memory)
yield pypy_mem
if running_on_pypy:
REGISTRY.register(PyPyGCStats())
|
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
__license__ = 'Public Domain'
import codecs
import io
import os
import random
import sys
from .options import (
parseOpts,
)
from .compat import (
compat_getpass,
compat_shlex_split,
workaround_optparse_bug9161,
)
from .utils import (
DateRange,
decodeOption,
DEFAULT_OUTTMPL,
DownloadError,
expand_path,
match_filter_func,
MaxDownloadsReached,
preferredencoding,
read_batch_urls,
SameFileError,
setproctitle,
std_headers,
write_string,
render_table,
)
from .update import update_self
from .downloader import (
FileDownloader,
)
from .extractor import gen_extractors, list_extractors
from .extractor.adobepass import MSO_INFO
from .YoutubeDL import YoutubeDL
def _real_main(argv=None):
# Compatibility fixes for Windows
if sys.platform == 'win32':
# https://github.com/rg3/youtube-dl/issues/820
codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)
workaround_optparse_bug9161()
setproctitle('youtube-dl')
parser, opts, args = parseOpts(argv)
# Set user agent
if opts.user_agent is not None:
std_headers['User-Agent'] = opts.user_agent
# Set referer
if opts.referer is not None:
std_headers['Referer'] = opts.referer
# Custom HTTP headers
if opts.headers is not None:
for h in opts.headers:
if ':' not in h:
parser.error('wrong header formatting, it should be key:value, not "%s"' % h)
key, value = h.split(':', 1)
if opts.verbose:
write_string('[debug] Adding header from command line option %s:%s\n' % (key, value))
std_headers[key] = value
# Dump user agent
if opts.dump_user_agent:
write_string(std_headers['User-Agent'] + '\n', out=sys.stdout)
sys.exit(0)
# Batch file verification
batch_urls = []
if opts.batchfile is not None:
try:
if opts.batchfile == '-':
batchfd = sys.stdin
else:
batchfd = io.open(
expand_path(opts.batchfile),
'r', encoding='utf-8', errors='ignore')
batch_urls = read_batch_urls(batchfd)
if opts.verbose:
write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n')
except IOError:
sys.exit('ERROR: batch file could not be read')
all_urls = batch_urls + [url.strip() for url in args]  # batch_urls are already stripped in read_batch_urls
_enc = preferredencoding()
all_urls = [url.decode(_enc, 'ignore') if isinstance(url, bytes) else url for url in all_urls]
if opts.list_extractors:
for ie in list_extractors(opts.age_limit):
write_string(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else '') + '\n', out=sys.stdout)
matchedUrls = [url for url in all_urls if ie.suitable(url)]
for mu in matchedUrls:
write_string(' ' + mu + '\n', out=sys.stdout)
sys.exit(0)
if opts.list_extractor_descriptions:
for ie in list_extractors(opts.age_limit):
if not ie._WORKING:
continue
desc = getattr(ie, 'IE_DESC', ie.IE_NAME)
if desc is False:
continue
if hasattr(ie, 'SEARCH_KEY'):
_SEARCHES = ('cute kittens', 'slithering pythons', 'falling cat', 'angry poodle', 'purple fish', 'running tortoise', 'sleeping bunny', 'burping cow')
_COUNTS = ('', '5', '10', 'all')
desc += ' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
write_string(desc + '\n', out=sys.stdout)
sys.exit(0)
if opts.ap_list_mso:
table = [[mso_id, mso_info['name']] for mso_id, mso_info in MSO_INFO.items()]
write_string('Supported TV Providers:\n' + render_table(['mso', 'mso name'], table) + '\n', out=sys.stdout)
sys.exit(0)
# Conflicting, missing and erroneous options
if opts.usenetrc and (opts.username is not None or opts.password is not None):
parser.error('using .netrc conflicts with giving username/password')
if opts.password is not None and opts.username is None:
parser.error('account username missing\n')
if opts.ap_password is not None and opts.ap_username is None:
parser.error('TV Provider account username missing\n')
if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
parser.error('using output template conflicts with using title, video ID or auto number')
if opts.autonumber_size is not None:
if opts.autonumber_size <= 0:
parser.error('auto number size must be positive')
if opts.autonumber_start is not None:
if opts.autonumber_start < 0:
parser.error('auto number start must be positive or 0')
if opts.usetitle and opts.useid:
parser.error('using title conflicts with using video ID')
if opts.username is not None and opts.password is None:
opts.password = compat_getpass('Type account password and press [Return]: ')
if opts.ap_username is not None and opts.ap_password is None:
opts.ap_password = compat_getpass('Type TV provider account password and press [Return]: ')
if opts.ratelimit is not None:
numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
if numeric_limit is None:
parser.error('invalid rate limit specified')
opts.ratelimit = numeric_limit
if opts.min_filesize is not None:
numeric_limit = FileDownloader.parse_bytes(opts.min_filesize)
if numeric_limit is None:
parser.error('invalid min_filesize specified')
opts.min_filesize = numeric_limit
if opts.max_filesize is not None:
numeric_limit = FileDownloader.parse_bytes(opts.max_filesize)
if numeric_limit is None:
parser.error('invalid max_filesize specified')
opts.max_filesize = numeric_limit
if opts.sleep_interval is not None:
if opts.sleep_interval < 0:
parser.error('sleep interval must be positive or 0')
if opts.max_sleep_interval is not None:
if opts.max_sleep_interval < 0:
parser.error('max sleep interval must be positive or 0')
if opts.max_sleep_interval < opts.sleep_interval:
parser.error('max sleep interval must be greater than or equal to min sleep interval')
else:
opts.max_sleep_interval = opts.sleep_interval
if opts.ap_mso and opts.ap_mso not in MSO_INFO:
parser.error('Unsupported TV Provider, use --ap-list-mso to get a list of supported TV Providers')
def parse_retries(retries):
if retries in ('inf', 'infinite'):
parsed_retries = float('inf')
else:
try:
parsed_retries = int(retries)
except (TypeError, ValueError):
parser.error('invalid retry count specified')
return parsed_retries
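# Illustrative behaviour of parse_retries (values chosen for illustration):
#     parse_retries('inf') -> float('inf')
#     parse_retries('3')   -> 3
#     parse_retries('abc') -> parser.error('invalid retry count specified')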
if opts.retries is not None:
opts.retries = parse_retries(opts.retries)
if opts.fragment_retries is not None:
opts.fragment_retries = parse_retries(opts.fragment_retries)
if opts.buffersize is not None:
numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
if numeric_buffersize is None:
parser.error('invalid buffer size specified')
opts.buffersize = numeric_buffersize
if opts.playliststart <= 0:
raise ValueError('Playlist start must be positive')
if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart:
raise ValueError('Playlist end must be greater than playlist start')
if opts.extractaudio:
if opts.audioformat not in ['best', 'aac', 'flac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
parser.error('invalid audio format specified')
if opts.audioquality:
opts.audioquality = opts.audioquality.strip('k').strip('K')
if not opts.audioquality.isdigit():
parser.error('invalid audio quality specified')
if opts.recodevideo is not None:
if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv', 'avi']:
parser.error('invalid video recode format specified')
if opts.convertsubtitles is not None:
if opts.convertsubtitles not in ['srt', 'vtt', 'ass']:
parser.error('invalid subtitle format specified')
if opts.date is not None:
date = DateRange.day(opts.date)
else:
date = DateRange(opts.dateafter, opts.datebefore)
# Do not download videos when there are audio-only formats
if opts.extractaudio and not opts.keepvideo and opts.format is None:
opts.format = 'bestaudio/best'
# --all-sub automatically sets --write-sub if --write-auto-sub is not given
# this was the old behaviour if only --all-sub was given.
if opts.allsubtitles and not opts.writeautomaticsub:
opts.writesubtitles = True
outtmpl = ((opts.outtmpl is not None and opts.outtmpl) or
(opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s') or
(opts.format == '-1' and '%(id)s-%(format)s.%(ext)s') or
(opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s') or
(opts.usetitle and '%(title)s-%(id)s.%(ext)s') or
(opts.useid and '%(id)s.%(ext)s') or
(opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s') or
DEFAULT_OUTTMPL)
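# The cascade above leans on the `(condition and value) or ...` idiom:
# Python returns the first truthy operand, so it reads like a chain of
# if/elif defaults. A minimal illustration of the same idiom:
#     tmpl = (False and 'a.%(ext)s') or (True and 'b.%(ext)s') or DEFAULT_OUTTMPL  # -> 'b.%(ext)s'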
if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
parser.error('Cannot download a video and extract audio into the same'
' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
' template'.format(outtmpl))
any_getting = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json
any_printing = opts.print_json
download_archive_fn = expand_path(opts.download_archive) if opts.download_archive is not None else opts.download_archive
# PostProcessors
postprocessors = []
if opts.metafromtitle:
postprocessors.append({
'key': 'MetadataFromTitle',
'titleformat': opts.metafromtitle
})
if opts.extractaudio:
postprocessors.append({
'key': 'FFmpegExtractAudio',
'preferredcodec': opts.audioformat,
'preferredquality': opts.audioquality,
'nopostoverwrites': opts.nopostoverwrites,
})
if opts.recodevideo:
postprocessors.append({
'key': 'FFmpegVideoConvertor',
'preferedformat': opts.recodevideo,
})
# FFmpegMetadataPP should be run after FFmpegVideoConvertorPP and
# FFmpegExtractAudioPP as containers before conversion may not support
# metadata (3gp, webm, etc.)
# And this post-processor should be placed before other metadata
# manipulating post-processors (FFmpegEmbedSubtitle) to prevent loss of
# extra metadata. By default ffmpeg preserves metadata applicable for both
# source and target containers. From this point the container won't change,
# so metadata can be added here.
if opts.addmetadata:
postprocessors.append({'key': 'FFmpegMetadata'})
if opts.convertsubtitles:
postprocessors.append({
'key': 'FFmpegSubtitlesConvertor',
'format': opts.convertsubtitles,
})
if opts.embedsubtitles:
postprocessors.append({
'key': 'FFmpegEmbedSubtitle',
})
if opts.embedthumbnail:
already_have_thumbnail = opts.writethumbnail or opts.write_all_thumbnails
postprocessors.append({
'key': 'EmbedThumbnail',
'already_have_thumbnail': already_have_thumbnail
})
if not already_have_thumbnail:
opts.writethumbnail = True
# XAttrMetadataPP should be run after post-processors that may change file
# contents
if opts.xattrs:
postprocessors.append({'key': 'XAttrMetadata'})
# Please keep ExecAfterDownload towards the bottom as it allows the user to
# modify the final file in any way. If the user could remove or alter the file
# before a later postprocessor ran, it might cause problems.
if opts.exec_cmd:
postprocessors.append({
'key': 'ExecAfterDownload',
'exec_cmd': opts.exec_cmd,
})
external_downloader_args = None
if opts.external_downloader_args:
external_downloader_args = compat_shlex_split(opts.external_downloader_args)
postprocessor_args = None
if opts.postprocessor_args:
postprocessor_args = compat_shlex_split(opts.postprocessor_args)
match_filter = (
None if opts.match_filter is None
else match_filter_func(opts.match_filter))
ydl_opts = {
'usenetrc': opts.usenetrc,
'username': opts.username,
'password': opts.password,
'twofactor': opts.twofactor,
'videopassword': opts.videopassword,
'ap_mso': opts.ap_mso,
'ap_username': opts.ap_username,
'ap_password': opts.ap_password,
'quiet': (opts.quiet or any_getting or any_printing),
'no_warnings': opts.no_warnings,
'forceurl': opts.geturl,
'forcetitle': opts.gettitle,
'forceid': opts.getid,
'forcethumbnail': opts.getthumbnail,
'forcedescription': opts.getdescription,
'forceduration': opts.getduration,
'forcefilename': opts.getfilename,
'forceformat': opts.getformat,
'forcejson': opts.dumpjson or opts.print_json,
'dump_single_json': opts.dump_single_json,
'simulate': opts.simulate or any_getting,
'skip_download': opts.skip_download,
'format': opts.format,
'listformats': opts.listformats,
'outtmpl': outtmpl,
'autonumber_size': opts.autonumber_size,
'autonumber_start': opts.autonumber_start,
'restrictfilenames': opts.restrictfilenames,
'ignoreerrors': opts.ignoreerrors,
'force_generic_extractor': opts.force_generic_extractor,
'ratelimit': opts.ratelimit,
'nooverwrites': opts.nooverwrites,
'retries': opts.retries,
'fragment_retries': opts.fragment_retries,
'skip_unavailable_fragments': opts.skip_unavailable_fragments,
'buffersize': opts.buffersize,
'noresizebuffer': opts.noresizebuffer,
'continuedl': opts.continue_dl,
'noprogress': opts.noprogress,
'progress_with_newline': opts.progress_with_newline,
'playliststart': opts.playliststart,
'playlistend': opts.playlistend,
'playlistreverse': opts.playlist_reverse,
'playlistrandom': opts.playlist_random,
'noplaylist': opts.noplaylist,
'logtostderr': opts.outtmpl == '-',
'consoletitle': opts.consoletitle,
'nopart': opts.nopart,
'updatetime': opts.updatetime,
'writedescription': opts.writedescription,
'writeannotations': opts.writeannotations,
'writeinfojson': opts.writeinfojson,
'writethumbnail': opts.writethumbnail,
'write_all_thumbnails': opts.write_all_thumbnails,
'writesubtitles': opts.writesubtitles,
'writeautomaticsub': opts.writeautomaticsub,
'allsubtitles': opts.allsubtitles,
'listsubtitles': opts.listsubtitles,
'subtitlesformat': opts.subtitlesformat,
'subtitleslangs': opts.subtitleslangs,
'matchtitle': decodeOption(opts.matchtitle),
'rejecttitle': decodeOption(opts.rejecttitle),
'max_downloads': opts.max_downloads,
'prefer_free_formats': opts.prefer_free_formats,
'verbose': opts.verbose,
'dump_intermediate_pages': opts.dump_intermediate_pages,
'write_pages': opts.write_pages,
'test': opts.test,
'keepvideo': opts.keepvideo,
'min_filesize': opts.min_filesize,
'max_filesize': opts.max_filesize,
'min_views': opts.min_views,
'max_views': opts.max_views,
'daterange': date,
'cachedir': opts.cachedir,
'youtube_print_sig_code': opts.youtube_print_sig_code,
'age_limit': opts.age_limit,
'download_archive': download_archive_fn,
'cookiefile': opts.cookiefile,
'nocheckcertificate': opts.no_check_certificate,
'prefer_insecure': opts.prefer_insecure,
'proxy': opts.proxy,
'socket_timeout': opts.socket_timeout,
'bidi_workaround': opts.bidi_workaround,
'debug_printtraffic': opts.debug_printtraffic,
'prefer_ffmpeg': opts.prefer_ffmpeg,
'include_ads': opts.include_ads,
'default_search': opts.default_search,
'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
'encoding': opts.encoding,
'extract_flat': opts.extract_flat,
'mark_watched': opts.mark_watched,
'merge_output_format': opts.merge_output_format,
'postprocessors': postprocessors,
'fixup': opts.fixup,
'source_address': opts.source_address,
'call_home': opts.call_home,
'sleep_interval': opts.sleep_interval,
'max_sleep_interval': opts.max_sleep_interval,
'external_downloader': opts.external_downloader,
'list_thumbnails': opts.list_thumbnails,
'playlist_items': opts.playlist_items,
'xattr_set_filesize': opts.xattr_set_filesize,
'match_filter': match_filter,
'no_color': opts.no_color,
'ffmpeg_location': opts.ffmpeg_location,
'hls_prefer_native': opts.hls_prefer_native,
'hls_use_mpegts': opts.hls_use_mpegts,
'external_downloader_args': external_downloader_args,
'postprocessor_args': postprocessor_args,
'cn_verification_proxy': opts.cn_verification_proxy,
'geo_verification_proxy': opts.geo_verification_proxy,
'config_location': opts.config_location,
'geo_bypass': opts.geo_bypass,
'geo_bypass_country': opts.geo_bypass_country,
# just for deprecation check
'autonumber': opts.autonumber if opts.autonumber is True else None,
'usetitle': opts.usetitle if opts.usetitle is True else None,
}
with YoutubeDL(ydl_opts) as ydl:
# Update version
if opts.update_self:
update_self(ydl.to_screen, opts.verbose, ydl._opener)
# Remove cache dir
if opts.rm_cachedir:
ydl.cache.remove()
# Maybe do nothing
if (len(all_urls) < 1) and (opts.load_info_filename is None):
if opts.update_self or opts.rm_cachedir:
sys.exit()
ydl.warn_if_short_id(sys.argv[1:] if argv is None else argv)
parser.error(
'You must provide at least one URL.\n'
'Type youtube-dl --help to see a list of all options.')
try:
if opts.load_info_filename is not None:
retcode = ydl.download_with_info_file(expand_path(opts.load_info_filename))
else:
retcode = ydl.download(all_urls)
except MaxDownloadsReached:
ydl.to_screen('--max-download limit reached, aborting.')
retcode = 101
sys.exit(retcode)
def main(argv=None):
try:
_real_main(argv)
except DownloadError:
sys.exit(1)
except SameFileError:
sys.exit('ERROR: fixed output name but more than one file to download')
except KeyboardInterrupt:
sys.exit('\nERROR: Interrupted by user')
__all__ = ['main', 'YoutubeDL', 'gen_extractors', 'list_extractors']
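# A minimal invocation sketch (assuming this module is importable as
# youtube_dl; the URL is illustrative):
#
#     from youtube_dl import main
#     main(['--simulate', 'https://example.com/watch?v=abc'])
#
# Note that main() terminates the process via sys.exit() rather than
# returning, so it is meant for command-line entry points.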
|
|
#!/usr/bin/python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple traceroute to the Gcloud VM's using gcloud SDK python library.
This script will lookup VMs within a gcloud project that matches a regexp
and run traceroute to each of them. This simplifies obtaining this kind
of debug information.
Typical Usage:
python traceroute.py "[regexp]"
Run "python traceroute.py -h" for more options
Requirements:
- Python 2.7
- Requires gcloud tool installed with a default project set up.
This will be the project that the script will use to look for VMs.
- Requires the Google API Client for Python
(https://cloud.google.com/compute/docs/tutorials/python-guide)
- Requires traceroute and dig (optional) to be installed and in PATH
"""
from __future__ import absolute_import
from __future__ import print_function
import argparse
import re
import subprocess
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
###############################################
# Argument parsing
###############################################
def parse_args():
"""Parse the command line arguments.
Returns:
The parsed argparse instance
"""
parser = argparse.ArgumentParser(description="Utility to easily debug VMs")
parser.add_argument(
"-p",
"--print",
action="store_true",
help="List all the instances, instead of traceroute.")
parser.add_argument(
"--project",
help="""Project in which to run the tracerouter.
If none is specified, the default project will be used.""")
parser.add_argument(
"-d",
"--dig",
action="store_true",
help="Include dig (DNS lookup) information.")
parser.add_argument(
"-r",
"--reverse_traceroute",
action="store_true",
help="Include reverse traceroute (from VM to host).")
parser.add_argument(
"match_pattern",
help="""Pattern to match against VM names within the project.
Can use regexp expressions to match.""")
return parser.parse_args()
##############################################
# gcloud account / helpers
##############################################
def get_gcloud_api():
"""Obtains the google SDK api instance.
Returns:
The authenticated gcloud api instance
"""
credentials = GoogleCredentials.get_application_default()
compute_api = discovery.build("compute", "v1", credentials=credentials)
return compute_api
def obtain_self_ip():
"""Query open DNS to obtain the public IP of the current host.
Returns:
The public ip string
"""
ip = subprocess.check_output(
["dig", "myip.opendns.com", "@resolver1.opendns.com", "+short"])
return ip.strip()
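# Illustrative result (address drawn from the documentation range; the real
# value depends on the caller's network): obtain_self_ip() -> "198.51.100.7"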
def obtain_default_project():
"""Obtains the default project from the gcloud configuration.
Returns:
The name of the default project currently defined in gcloud's config
"""
default_project = subprocess.check_output(
["gcloud", "config", "get-value", "project"], stderr=subprocess.PIPE)
default_project = default_project.strip()
return default_project
def list_instances(compute_api, project, zone):
"""List the instances for a project/zone.
Args:
compute_api: The gcloud api instance.
project: The project name.
zone: The zone name
Returns:
A list of instance objects.
"""
result = compute_api.instances().list(project=project, zone=zone).execute()
if "items" in result:
return result["items"]
else:
return []
def get_zone_names(compute_api, project):
"""Obtains a list of zone names for a given project.
Args:
compute_api: The gcloud api instance.
project: The project name.
Returns:
A list of names of zones whose status is UP
"""
result = compute_api.zones().list(project=project).execute()
filtered_list = [
i for i in result["items"]
if (i["status"] == "UP") and (i["kind"] == "compute#zone")
]
return [i["name"] for i in filtered_list]
##############################################
# DNS Lookup
##############################################
def print_dig():
"""Performs a dig call and prints the result."""
print("Running \"dig -t txt o-o.myaddr.l.google.com @ns1.google.com\"")
dig = subprocess.Popen(
["dig", "-t", "txt", "o-o.myaddr.l.google.com", "@ns1.google.com"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
for line in iter(dig.stdout.readline, ""):
print(line, end="")
print("")
##############################################
# Obtaining instances
##############################################
def obtain_instances(compute_api, project, match_pattern):
"""Obtains all the VM instances that match a certain pattern.
Args:
compute_api: The gcloud api instance.
project: The project name.
match_pattern: The regex pattern to match.
Returns:
A dictionary of instances with the zone names as keys
i.e.
{
"us-central-1": [... list of instances ...],
"us-east-1: [... list of instances ...],
...
}
"""
zone_names = get_zone_names(compute_api, project)
regex = re.compile(match_pattern)
zone_instances = {}
for zone_name in zone_names:
zone_instances[zone_name] = []
instances = list_instances(compute_api, project, zone_name)
f = [
x for x in instances
if (x["status"] == "RUNNING") and (x["kind"] == "compute#instance") and
regex.match(x["name"])
]
zone_instances[zone_name] = f
return zone_instances
##############################################
# Action to be taken
##############################################
def print_subprocess(proc_name, proc_args):
"""Runs a subprocess and prints out the output.
Has special handling of exceptions when running a remote command
(i.e. running an ssh command through gcloud).
Args:
proc_name: The name of the subprocess to call.
Mainly used for error printing.
proc_args: A list with all the arguments for a subprocess call.
Must be able to be passed to a subprocess.Popen call.
"""
error_str = ""
proc_args_str = " ".join(proc_args)
try:
proc = subprocess.Popen(
proc_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for l in iter(proc.stdout.readline, ""):
print(l, end="")
proc.wait()
if proc.returncode != 0:
e_str = "[FROM VM]: {0}".format(proc.stderr.read().strip())
# Recycling exception type
raise OSError(e_str)
except OSError as e:
error_str = str(e)
print("Error running {0}: {1}\nCALL: {2}\n".format(proc_name, error_str,
proc_args_str))
def traceroute_instance(instance,
tr_project_name,
tr_zone_name,
reverse_traceroute=False):
"""Runs a traceroute to a certain instance in a project/zone.
Args:
instance: Instance name.
tr_project_name: The project to which the instance belongs.
tr_zone_name: The zone to which the instance belongs.
reverse_traceroute: By default, there will only be a traceroute from
the host to the VM. Enabling this flag will also output a traceroute
from the VM to the host. This requires obtaining the public IP of the
host via an external DNS service (OpenDNS).
"""
name = instance["name"]
external_ip = instance["networkInterfaces"][0]["accessConfigs"][0]["natIP"]
self_ip = obtain_self_ip()
print(">>> Traceroute TO {0}: {1} -> {2}".format(name, self_ip, external_ip))
print_subprocess("Traceroute", ["traceroute", external_ip])
print(">>>")
if reverse_traceroute:
print("<<< Traceroute FROM {0}: {1} -> {2}".format(name, external_ip, self_ip))
print_subprocess("Reverse Traceroute", [
"gcloud", "compute", "ssh", name, "--project", tr_project_name,
"--zone", tr_zone_name, "--command", "traceroute {0}".format(self_ip)
])
print("<<<")
def print_instance(instance):
"""Prints the instance name/external ip."""
name = instance["name"]
external_ip = instance["networkInterfaces"][0]["accessConfigs"][0]["natIP"]
print("{0}: {1}".format(name, external_ip))
def main():
args = parse_args()
if args.dig:
print_dig()
if args.project is None:
current_project = obtain_default_project()
else:
current_project = args.project
print("Project is: {0}".format(current_project))
print("#################################")
print("Obtaining instances...")
compute_api = get_gcloud_api()
zone_instances = obtain_instances(compute_api, current_project,
args.match_pattern)
for zone_name in zone_instances:
instances = zone_instances[zone_name]
if not len(instances):
continue
print("")
print("Instances in {0}".format(zone_name))
print("----------------------------------------")
for inst in zone_instances[zone_name]:
if args.print:
print_instance(inst)
else:
traceroute_instance(inst, current_project, zone_name,
args.reverse_traceroute)
if __name__ == "__main__":
main()
|
|
"""
Interpolation methods.
Originally, the base code for `interpolate`, `mix` and `steps` was ported from the
https://colorjs.io project. Since that time, there have been significant modifications
that add additional features, etc. The base logic, though, is attributed to the original
authors.
In general, the logic mimics in many ways the `color-mix` function as outlined in the Level 5
color draft (Oct 2020), but the initial approach was modeled directly off of the work done in
color.js.
---
Original Authors: Lea Verou, Chris Lilley
License: MIT (As noted in https://github.com/LeaVerou/color.js/blob/master/package.json)
"""
import math
from abc import ABCMeta, abstractmethod
from collections.abc import Sequence, Mapping, Callable
from collections import namedtuple
from .. import util
from ..spaces import Cylindrical, Angle
class Lerp:
"""Linear interpolation."""
def __init__(self, progress):
"""Initialize."""
self.progress = progress
def __call__(self, a, b, t):
"""Interpolate with period."""
return a + (b - a) * (t if not isinstance(self.progress, Callable) else self.progress(t))
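# A quick illustration of Lerp (values chosen for illustration):
#
#     Lerp(None)(0.0, 10.0, 0.5)              # -> 5.0 (plain linear blend)
#     Lerp(lambda t: t * t)(0.0, 10.0, 0.5)   # -> 2.5 (t remapped by easing)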
class Piecewise(namedtuple('Piecewise', ['color', 'stop', 'progress', 'hue', 'premultiplied'])):
"""Piecewise interpolation input."""
__slots__ = ()
def __new__(cls, color, stop=None, progress=None, hue=util.DEF_HUE_ADJ, premultiplied=False):
"""Initialize."""
return super().__new__(cls, color, stop, progress, hue, premultiplied)
class Interpolator(metaclass=ABCMeta):
"""Interpolator."""
@abstractmethod
def __init__(self):
"""Initialize."""
@abstractmethod
def __call__(self, p):
"""Call the interpolator."""
@abstractmethod
def get_delta(self):
"""Initialize."""
def steps(self, steps=2, max_steps=1000, max_delta_e=0):
"""Steps."""
return color_steps(self, steps, max_steps, max_delta_e)
class InterpolateSingle(Interpolator):
"""Interpolate a single range of two colors."""
def __init__(self, channels1, channels2, names, create, progress, space, outspace, premultiplied):
"""Initialize."""
self.names = names
self.channels1 = channels1
self.channels2 = channels2
self.create = create
self.progress = progress
self.space = space
self.outspace = outspace
self.premultiplied = premultiplied
def get_delta(self):
"""Get the delta."""
return self.create(self.space, self.channels1).delta_e(self.create(self.space, self.channels2))
def __call__(self, p):
"""Run through the coordinates and run the interpolation on them."""
channels = []
for i, c1 in enumerate(self.channels1):
name = self.names[i]
c2 = self.channels2[i]
if util.is_nan(c1) and util.is_nan(c2):
value = 0.0
elif util.is_nan(c1):
value = c2
elif util.is_nan(c2):
value = c1
else:
progress = None
if isinstance(self.progress, Mapping):
progress = self.progress.get(name, self.progress.get('all'))
else:
progress = self.progress
lerp = progress if isinstance(progress, Lerp) else Lerp(progress)
value = lerp(c1, c2, p)
channels.append(value)
color = self.create(self.space, channels[:-1], channels[-1])
if self.premultiplied:
postdivide(color)
return color.convert(self.outspace, in_place=True) if self.outspace != color.space() else color
class InterpolatePiecewise(Interpolator):
"""Interpolate multiple ranges of colors."""
def __init__(self, stops, interpolators):
"""Initialize."""
self.start = stops[0]
self.end = stops[len(stops) - 1]
self.stops = stops
self.interpolators = interpolators
def get_delta(self):
"""Get the delta total."""
return [i.get_delta() for i in self.interpolators]
def __call__(self, p):
"""Interpolate."""
percent = p
if percent > self.end:
# Beyond range, just interpolate the last colors
return self.interpolators[-1](1 + abs(p - self.end) if p > 1 else 1)
elif percent < self.start:
# Beyond range, just interpolate the first colors
return self.interpolators[0](0 - abs(self.start - p) if p < 0 else 0)
else:
last = self.start
for i, interpolator in enumerate(self.interpolators, 1):
stop = self.stops[i]
if percent <= stop:
r = stop - last
p2 = (percent - last) / r if r else 1
return interpolator(p2)
last = stop
def calc_stops(stops, count):
"""Calculate stops."""
# Ensure the first stop is set to zero if not explicitly set
if 0 not in stops:
stops[0] = 0
last = stops[0] * 100
highest = last
empty = None
final = {}
# Build up normalized stops
for i in range(count):
value = stops.get(i)
if value is not None:
value *= 100
# Found an empty hole, track the start
if value is None and empty is None:
empty = i - 1
continue
elif value is None:
continue
# We can't have a stop decrease in progression
if value < last:
value = last
# Track the largest explicit value set
if value > highest:
highest = value
# Fill in hole if one exists.
# Holes will be evenly spaced between the
# current and last stop.
if empty is not None:
r = i - empty
increment = (value - last) / r
for j in range(empty + 1, i):
last += increment
final[j] = last / 100
empty = None
# Set the stop and track it as the last
last = value
final[i] = last / 100
# If there is a hole at the end, fill in the hole,
# equally spacing the stops from the last to 100%.
# If the last is greater than 100%, then all will
# be equal to the last.
if empty is not None:
r = (count - 1) - empty
if highest > 100:
increment = 0
else:
increment = (100 - last) / r
for j in range(empty + 1, count):
last += increment
final[j] = last / 100
return final
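# Worked example (illustrative): calc_stops({2: 0.8}, 4) pins the implicit
# first stop at 0, evenly fills the hole before the explicit 0.8 stop, and
# spreads the trailing hole toward 100%, returning
# {0: 0.0, 1: 0.4, 2: 0.8, 3: 1.0}.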
def postdivide(color):
"""Premultiply the given transparent color."""
if color.alpha >= 1.0:
return
channels = color.coords()
gamut = color._space.RANGE
alpha = color.alpha
coords = []
for i, value in enumerate(channels):
a = gamut[i][0]
# Wrap the angle
if isinstance(a, Angle):
coords.append(value)
continue
coords.append(value / alpha if alpha != 0 else value)
color._space._coords = coords
def premultiply(color):
"""Premultiply the given transparent color."""
if color.alpha >= 1.0:
return
channels = color.coords()
gamut = color._space.RANGE
alpha = color.alpha
coords = []
for i, value in enumerate(channels):
a = gamut[i][0]
# Wrap the angle
if isinstance(a, Angle):
coords.append(value)
continue
coords.append(value * alpha)
color._space._coords = coords
def adjust_hues(color1, color2, hue):
"""Adjust hues."""
hue = hue.lower()
if hue == "specified":
return
name = color1._space.hue_name()
c1 = color1.get(name)
c2 = color2.get(name)
c1 = c1 % 360
c2 = c2 % 360
if util.is_nan(c1) or util.is_nan(c2):
color1.set(name, c1)
color2.set(name, c2)
return
if hue == "shorter":
if c2 - c1 > 180:
c1 += 360
elif c2 - c1 < -180:
c2 += 360
elif hue == "longer":
if 0 < (c2 - c1) < 180:
c1 += 360
elif -180 < (c2 - c1) < 0:
c2 += 360
elif hue == "increasing":
if c2 < c1:
c2 += 360
elif hue == "decreasing":
if c1 < c2:
c1 += 360
else:
raise ValueError("Unknown hue adjuster '{}'".format(hue))
color1.set(name, c1)
color2.set(name, c2)
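# Worked example (numbers chosen for illustration): with hues 350 and 10,
#     "shorter"    -> (350, 370): the 20 degree arc crossing 0/360
#     "longer"     -> (350, 10):  left unchanged, the long 340 degree arc
#     "increasing" -> (350, 370): c2 bumped so the hue only increases
#     "decreasing" -> (350, 10):  left unchanged, the hue only decreases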
def color_steps(interpolator, steps=2, max_steps=1000, max_delta_e=0):
"""Color steps."""
if max_delta_e <= 0:
actual_steps = steps
else:
actual_steps = 0
deltas = interpolator.get_delta()
if not isinstance(deltas, Sequence):
deltas = [deltas]
actual_steps = sum([d / max_delta_e for d in deltas])
actual_steps = max(steps, math.ceil(actual_steps) + 1)
if max_steps is not None:
actual_steps = min(actual_steps, max_steps)
ret = []
if actual_steps == 1:
ret = [{"p": 0.5, "color": interpolator(0.5)}]
else:
step = 1 / (actual_steps - 1)
for i in range(actual_steps):
p = i * step
ret.append({'p': p, 'color': interpolator(p)})
# Iterate over all the stops, inserting new stops in between whenever any
# two adjacent colors have a delta E greater than what was requested.
# We inject between every stop to ensure the midpoint does not shift.
if max_delta_e > 0:
# Initial check to see if we need to insert more stops
m_delta = 0
for i, entry in enumerate(ret):
if i == 0:
continue
m_delta = max(m_delta, entry['color'].delta_e(ret[i - 1]['color']))
while m_delta > max_delta_e:
# Inject stops while measuring again to see if it was sufficient
m_delta = 0
i = 1
while i < len(ret) and len(ret) < max_steps:
prev = ret[i - 1]
cur = ret[i]
p = (cur['p'] + prev['p']) / 2
color = interpolator(p)
m_delta = max(m_delta, color.delta_e(prev['color']), color.delta_e(cur['color']))
ret.insert(i, {'p': p, 'color': color})
i += 2
return [i['color'] for i in ret]
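# Worked sizing example (numbers for illustration): if the interpolation's
# total delta E is 30 and max_delta_e is 10, the initial estimate is
# max(steps, ceil(30 / 10) + 1) = 4 colors; midpoints are then injected
# between adjacent pairs until every gap is within 10 (or max_steps is hit).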
def color_piecewise_lerp(pw, space, out_space, progress, hue, premultiplied):
"""Piecewise Interpolation."""
# Ensure we have something we can interpolate with
count = len(pw)
if count == 1:
pw = [pw[0], pw[0]]
count += 1
# Calculate stops
stops = {}
for i, x in enumerate(pw, 0):
if not isinstance(x, Piecewise):
pw[i] = Piecewise(x)
elif x.stop is not None:
stops[i] = x.stop
stops = calc_stops(stops, count)
# Construct piecewise interpolation object
color_map = []
current = pw[0].color
for i in range(1, count):
p = pw[i]
color = current._handle_color_input(p.color)
color_map.append(
current.interpolate(
color,
space=space,
out_space=out_space,
progress=p.progress if p.progress is not None else progress,
hue=p.hue if p.hue is not None else hue,
premultiplied=p.premultiplied if p.premultiplied is not None else premultiplied
)
)
current = color
return InterpolatePiecewise(stops, color_map)
def color_lerp(color1, color2, space, out_space, progress, hue, premultiplied):
"""Color interpolation."""
# Convert to the color space and ensure the color fits inside
color1 = color1.convert(space, fit=True)
color2 = color1._handle_color_input(color2).convert(space, fit=True)
# Adjust hues if we have two valid hues
if isinstance(color1._space, Cylindrical):
adjust_hues(color1, color2, hue)
if premultiplied:
premultiply(color1)
premultiply(color2)
channels1 = color1.coords()
channels2 = color2.coords()
# Include alpha
channels1.append(color1.alpha)
channels2.append(color2.alpha)
return InterpolateSingle(
names=color1._space.CHANNEL_NAMES,
channels1=channels1,
channels2=channels2,
create=type(color1),
progress=progress,
space=space,
outspace=out_space,
premultiplied=premultiplied
)
class Interpolate:
"""Interpolate between colors."""
def mask(self, channel, *, invert=False, in_place=False):
"""Mask color channels."""
this = self if in_place else self.clone()
masks = set([channel] if isinstance(channel, str) else channel)
for name in self._space.CHANNEL_NAMES:
if (not invert and name in masks) or (invert and name not in masks):
this.set(name, util.NaN)
return this
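# A hedged sketch of mask() (assumes a concrete Color class that mixes this
# in; channel names are illustrative):
#
#     Color('red').mask('green')                         # NaN only 'green'
#     Color('red').mask(['red', 'alpha'], invert=True)   # NaN everything else
#
# NaN channels act as "undefined" during interpolation: when one side of a
# channel is NaN, the other side's value is carried through unchanged.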
def steps(self, color, *, steps=2, max_steps=1000, max_delta_e=0, **interpolate_args):
"""
Discrete steps.
This is built upon the interpolate function, and will return a list of
colors containing a minimum of colors equal to `steps` or steps as specified
derived from the `max_delta_e` parameter (whichever is greatest).
Number of colors can be capped with `max_steps`.
Default delta E method used is delta E 76.
"""
return self.interpolate(color, **interpolate_args).steps(steps, max_steps, max_delta_e)
def mix(self, color, percent=util.DEF_MIX, *, in_place=False, **interpolate_args):
"""
Mix colors using interpolation.
This uses the interpolate method to find the center point between the two colors.
The basic mixing logic is outlined in the CSS level 5 draft.
"""
if not self._is_color(color) and not isinstance(color, (str, Piecewise)):
raise TypeError("Unexpected type '{}'".format(type(color)))
color = self.interpolate(color, **interpolate_args)(percent)
return self.mutate(color) if in_place else color
def interpolate(
self, color, *, space="lab", out_space=None, stop=0, progress=None, hue=util.DEF_HUE_ADJ, premultiplied=False
):
"""
Return an interpolation function.
The function will return an interpolation function that accepts a value (which should
be in the range of [0..1] and will return a color based on that value.
While we use NaNs to mask off channels when doing the interpolation, we do not allow
arbitrary specification of NaNs by the user, they must specify channels via `adjust`
if they which to target specific channels for mixing. Null hues become NaNs before
mixing occurs.
"""
space = space.lower()
out_space = self.space() if out_space is None else out_space.lower()
# A piecewise object was provided, so treat it as such,
# or we've changed the stop of the base color, so run it through piecewise.
if (
isinstance(color, Piecewise) or
(stop != 0 and (isinstance(color, str) or self._is_color(color)))
):
color = [color]
if not isinstance(color, str) and isinstance(color, Sequence):
# We have a sequence, so use piecewise interpolation
return color_piecewise_lerp(
[Piecewise(self, stop=stop)] + list(color),
space,
out_space,
progress,
hue,
premultiplied
)
else:
# Plain interpolation between two colors
return color_lerp(
self,
color,
space,
out_space,
progress,
hue,
premultiplied
)
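# A hedged end-to-end sketch (assumes the package's Color class wires in this
# mix-in; colors and spaces are illustrative):
#
#     it = Color('blue').interpolate('orange', space='lab')
#     it(0.5)                                  # the midpoint, same as mix()
#     Color('blue').mix('orange', 0.25)        # 25% of the way toward orange
#     Color('blue').steps('orange', steps=5)   # 5 evenly spaced colors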
|
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import json
import datetime
from cms import api
from cms.utils.urlutils import admin_reverse
from djangocms_text_ckeditor.cms_plugins import TextPlugin
from djangocms_text_ckeditor.models import Text
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from django.contrib.admin.sites import site, AdminSite
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission, AnonymousUser
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.http import (Http404, HttpResponseBadRequest, HttpResponseForbidden, HttpResponse,
QueryDict, HttpResponseNotFound)
from django.utils.datastructures import MultiValueDictKeyError
from django.utils.encoding import force_text, smart_str
from django.utils import timezone
from django.utils.six.moves.urllib.parse import urlparse
from cms.admin.change_list import CMSChangeList
from cms.admin.forms import PageForm, AdvancedSettingsForm
from cms.admin.pageadmin import PageAdmin
from cms.admin.permissionadmin import PagePermissionInlineAdmin
from cms.api import create_page, create_title, add_plugin, assign_user_to_page, publish_page
from cms.constants import PLUGIN_MOVE_ACTION
from cms.models import UserSettings, StaticPlaceholder
from cms.models.pagemodel import Page
from cms.models.permissionmodels import GlobalPagePermission, PagePermission
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.models.titlemodels import Title
from cms.test_utils import testcases as base
from cms.test_utils.testcases import CMSTestCase, URL_CMS_PAGE_DELETE, URL_CMS_PAGE, URL_CMS_TRANSLATION_DELETE
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.utils import get_cms_setting
from cms.utils.compat import DJANGO_1_6, DJANGO_1_7
class AdminTestsBase(CMSTestCase):
@property
def admin_class(self):
return site._registry[Page]
def _get_guys(self, admin_only=False, use_global_permissions=True):
admin_user = self.get_superuser()
if admin_only:
return admin_user
USERNAME = 'test'
if get_user_model().USERNAME_FIELD == 'email':
normal_guy = get_user_model().objects.create_user(USERNAME, 'test@test.com', 'test@test.com')
else:
normal_guy = get_user_model().objects.create_user(USERNAME, 'test@test.com', USERNAME)
normal_guy.is_staff = True
normal_guy.is_active = True
normal_guy.save()
normal_guy.user_permissions = Permission.objects.filter(
codename__in=['change_page', 'change_title', 'add_page', 'add_title', 'delete_page', 'delete_title']
)
if use_global_permissions:
gpp = GlobalPagePermission.objects.create(
user=normal_guy,
can_change=True,
can_delete=True,
can_change_advanced_settings=False,
can_publish=True,
can_change_permissions=False,
can_move_page=True,
)
gpp.sites = Site.objects.all()
return admin_user, normal_guy
class AdminTestCase(AdminTestsBase):
def test_extension_not_in_admin(self):
admin_user, staff = self._get_guys()
with self.login_user_context(admin_user):
request = self.get_request('/admin/cms/page/1/', 'en',)
response = site.index(request)
self.assertNotContains(response, '/mytitleextension/')
self.assertNotContains(response, '/mypageextension/')
def test_permissioned_page_list(self):
"""
Makes sure that a user with restricted page permissions can view
the page list.
"""
admin_user, normal_guy = self._get_guys(use_global_permissions=False)
current_site = Site.objects.get(pk=1)
page = create_page("Test page", "nav_playground.html", "en",
site=current_site, created_by=admin_user)
PagePermission.objects.create(page=page, user=normal_guy)
with self.login_user_context(normal_guy):
resp = self.client.get(URL_CMS_PAGE)
self.assertEqual(resp.status_code, 200)
def test_edit_does_not_reset_page_adv_fields(self):
"""
Makes sure that if a non-superuser with no rights to edit advanced page
fields edits a page, those advanced fields are not touched.
"""
OLD_PAGE_NAME = 'Test Page'
NEW_PAGE_NAME = 'Test page 2'
REVERSE_ID = 'Test'
OVERRIDE_URL = 'my/override/url'
admin_user, normal_guy = self._get_guys()
current_site = Site.objects.get(pk=1)
# The admin creates the page
page = create_page(OLD_PAGE_NAME, "nav_playground.html", "en",
site=current_site, created_by=admin_user)
page.reverse_id = REVERSE_ID
page.save()
title = page.get_title_obj()
title.has_url_overwrite = True
title.path = OVERRIDE_URL
title.save()
self.assertEqual(page.get_title(), OLD_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
self.assertEqual(title.overwrite_url, OVERRIDE_URL)
# The user edits the page (change the page name for ex.)
page_data = {
'title': NEW_PAGE_NAME,
'slug': page.get_slug(),
'language': title.language,
'site': page.site.pk,
'template': page.template,
'pagepermission_set-TOTAL_FORMS': 0,
'pagepermission_set-INITIAL_FORMS': 0,
'pagepermission_set-MAX_NUM_FORMS': 0,
'pagepermission_set-2-TOTAL_FORMS': 0,
'pagepermission_set-2-INITIAL_FORMS': 0,
'pagepermission_set-2-MAX_NUM_FORMS': 0
}
# required only if the user has can_change_permission
with self.login_user_context(normal_guy):
resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_title(), NEW_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
title = page.get_title_obj()
self.assertEqual(title.overwrite_url, OVERRIDE_URL)
# The admin edits the page (change the page name for ex.)
page_data = {
'title': OLD_PAGE_NAME,
'slug': page.get_slug(),
'language': title.language,
'site': page.site.pk,
'template': page.template,
'reverse_id': page.reverse_id,
'pagepermission_set-TOTAL_FORMS': 0, # required only if the user has can_change_permission
'pagepermission_set-INITIAL_FORMS': 0,
'pagepermission_set-MAX_NUM_FORMS': 0,
'pagepermission_set-2-TOTAL_FORMS': 0,
'pagepermission_set-2-INITIAL_FORMS': 0,
'pagepermission_set-2-MAX_NUM_FORMS': 0
}
with self.login_user_context(admin_user):
resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_title(), OLD_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
title = page.get_title_obj()
self.assertEqual(title.overwrite_url, OVERRIDE_URL)
def test_edit_does_not_reset_apphook(self):
"""
Makes sure that if a non-superuser with no rights to edit advanced page
fields edits a page, those advanced fields are not touched.
"""
OLD_PAGE_NAME = 'Test Page'
NEW_PAGE_NAME = 'Test page 2'
REVERSE_ID = 'Test'
APPLICATION_URLS = 'project.sampleapp.urls'
admin_user, normal_guy = self._get_guys()
current_site = Site.objects.get(pk=1)
# The admin creates the page
page = create_page(OLD_PAGE_NAME, "nav_playground.html", "en",
site=current_site, created_by=admin_user)
page.reverse_id = REVERSE_ID
page.save()
title = page.get_title_obj()
title.has_url_overwrite = True
title.save()
page.application_urls = APPLICATION_URLS
page.save()
self.assertEqual(page.get_title(), OLD_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
self.assertEqual(page.application_urls, APPLICATION_URLS)
# The user edits the page (change the page name for ex.)
page_data = {
'title': NEW_PAGE_NAME,
'slug': page.get_slug(),
'language': title.language,
'site': page.site.pk,
'template': page.template,
'pagepermission_set-TOTAL_FORMS': 0,
'pagepermission_set-INITIAL_FORMS': 0,
'pagepermission_set-MAX_NUM_FORMS': 0,
'pagepermission_set-2-TOTAL_FORMS': 0,
'pagepermission_set-2-INITIAL_FORMS': 0,
'pagepermission_set-2-MAX_NUM_FORMS': 0,
}
with self.login_user_context(normal_guy):
resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_title(), NEW_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
self.assertEqual(page.application_urls, APPLICATION_URLS)
title = page.get_title_obj()
# The admin edits the page (change the page name for ex.)
page_data = {
'title': OLD_PAGE_NAME,
'slug': page.get_slug(),
'language': title.language,
'site': page.site.pk,
'template': page.template,
'reverse_id': page.reverse_id,
}
with self.login_user_context(admin_user):
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_title(), OLD_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
self.assertEqual(page.application_urls, '')
def test_2apphooks_with_same_namespace(self):
PAGE1 = 'Test Page'
PAGE2 = 'Test page 2'
APPLICATION_URLS = 'project.sampleapp.urls'
admin_user, normal_guy = self._get_guys()
current_site = Site.objects.get(pk=1)
# The admin creates the page
page = create_page(PAGE1, "nav_playground.html", "en",
site=current_site, created_by=admin_user)
page2 = create_page(PAGE2, "nav_playground.html", "en",
site=current_site, created_by=admin_user)
page.application_urls = APPLICATION_URLS
page.application_namespace = "space1"
page.save()
page2.application_urls = APPLICATION_URLS
page2.save()
# The admin edits the page (change the page name for ex.)
page_data = {
'title': PAGE2,
'slug': page2.get_slug(),
'language': 'en',
'site': page.site.pk,
'template': page2.template,
'application_urls': 'SampleApp',
'application_namespace': 'space1',
}
with self.login_user_context(admin_user):
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page.pk, page_data)
self.assertEqual(resp.status_code, 302)
self.assertEqual(Page.objects.filter(application_namespace="space1").count(), 1)
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page2.pk, page_data)
self.assertEqual(resp.status_code, 200)
page_data['application_namespace'] = 'space2'
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page2.pk, page_data)
self.assertEqual(resp.status_code, 302)
def test_delete(self):
admin_user = self.get_superuser()
create_page("home", "nav_playground.html", "en",
created_by=admin_user, published=True)
page = create_page("delete-page", "nav_playground.html", "en",
created_by=admin_user, published=True)
create_page('child-page', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=page)
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
page.publish('en')
with self.login_user_context(admin_user):
data = {'post': 'yes'}
with self.assertNumQueries(FuzzyInt(300, 407)):
response = self.client.post(URL_CMS_PAGE_DELETE % page.pk, data)
self.assertRedirects(response, URL_CMS_PAGE)
def test_delete_diff_language(self):
admin_user = self.get_superuser()
create_page("home", "nav_playground.html", "en",
created_by=admin_user, published=True)
page = create_page("delete-page", "nav_playground.html", "en",
created_by=admin_user, published=True)
create_page('child-page', "nav_playground.html", "de",
created_by=admin_user, published=True, parent=page)
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
page.publish('en')
with self.login_user_context(admin_user):
data = {'post': 'yes'}
with self.assertNumQueries(FuzzyInt(300, 394)):
response = self.client.post(URL_CMS_PAGE_DELETE % page.pk, data)
self.assertRedirects(response, URL_CMS_PAGE)
def test_search_fields(self):
superuser = self.get_superuser()
from django.contrib.admin import site
with self.login_user_context(superuser):
for model, admin_instance in site._registry.items():
if model._meta.app_label != 'cms':
continue
if not admin_instance.search_fields:
continue
url = admin_reverse('cms_%s_changelist' % model._meta.model_name)
response = self.client.get('%s?q=1' % url)
errmsg = response.content
self.assertEqual(response.status_code, 200, errmsg)
def test_pagetree_filtered(self):
superuser = self.get_superuser()
create_page("root-page", "nav_playground.html", "en",
created_by=superuser, published=True)
with self.login_user_context(superuser):
url = admin_reverse('cms_page_changelist')
response = self.client.get('%s?template__exact=nav_playground.html' % url)
errmsg = response.content
self.assertEqual(response.status_code, 200, errmsg)
def test_delete_translation(self):
admin_user = self.get_superuser()
page = create_page("delete-page-translation", "nav_playground.html", "en",
created_by=admin_user, published=True)
create_title("de", "delete-page-translation-2", page, slug="delete-page-translation-2")
create_title("es-mx", "delete-page-translation-es", page, slug="delete-page-translation-es")
with self.login_user_context(admin_user):
response = self.client.get(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'de'})
self.assertEqual(response.status_code, 200)
response = self.client.post(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'de'})
self.assertRedirects(response, URL_CMS_PAGE)
response = self.client.get(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'es-mx'})
self.assertEqual(response.status_code, 200)
response = self.client.post(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'es-mx'})
self.assertRedirects(response, URL_CMS_PAGE)
def test_change_dates(self):
admin_user, staff = self._get_guys()
page = create_page('test-page', 'nav_playground.html', 'en')
page.publish('en')
draft = page.get_draft_object()
with self.settings(USE_TZ=False):
original_date = draft.publication_date
original_end_date = draft.publication_end_date
new_date = timezone.now() - datetime.timedelta(days=1)
new_end_date = timezone.now() + datetime.timedelta(days=1)
url = admin_reverse('cms_page_dates', args=(draft.pk,))
with self.login_user_context(admin_user):
response = self.client.post(url, {
'language': 'en',
'site': draft.site.pk,
'publication_date_0': new_date.date(),
'publication_date_1': new_date.strftime("%H:%M:%S"),
'publication_end_date_0': new_end_date.date(),
'publication_end_date_1': new_end_date.strftime("%H:%M:%S"),
})
self.assertEqual(response.status_code, 302)
draft = Page.objects.get(pk=draft.pk)
self.assertNotEqual(draft.publication_date.timetuple(), original_date.timetuple())
self.assertEqual(draft.publication_date.timetuple(), new_date.timetuple())
self.assertEqual(draft.publication_end_date.timetuple(), new_end_date.timetuple())
if original_end_date:
self.assertNotEqual(draft.publication_end_date.timetuple(), original_end_date.timetuple())
with self.settings(USE_TZ=True):
original_date = draft.publication_date
original_end_date = draft.publication_end_date
new_date = timezone.localtime(timezone.now()) - datetime.timedelta(days=1)
new_end_date = timezone.localtime(timezone.now()) + datetime.timedelta(days=1)
url = admin_reverse('cms_page_dates', args=(draft.pk,))
with self.login_user_context(admin_user):
response = self.client.post(url, {
'language': 'en',
'site': draft.site.pk,
'publication_date_0': new_date.date(),
'publication_date_1': new_date.strftime("%H:%M:%S"),
'publication_end_date_0': new_end_date.date(),
'publication_end_date_1': new_end_date.strftime("%H:%M:%S"),
})
self.assertEqual(response.status_code, 302)
draft = Page.objects.get(pk=draft.pk)
self.assertNotEqual(draft.publication_date.timetuple(), original_date.timetuple())
self.assertEqual(timezone.localtime(draft.publication_date).timetuple(), new_date.timetuple())
self.assertEqual(timezone.localtime(draft.publication_end_date).timetuple(), new_end_date.timetuple())
if original_end_date:
self.assertNotEqual(draft.publication_end_date.timetuple(), original_end_date.timetuple())
def test_change_template(self):
admin_user, staff = self._get_guys()
request = self.get_request('/admin/cms/page/1/', 'en')
request.method = "POST"
pageadmin = site._registry[Page]
with self.login_user_context(staff):
self.assertRaises(Http404, pageadmin.change_template, request, 1)
page = create_page('test-page', 'nav_playground.html', 'en')
response = pageadmin.change_template(request, page.pk)
self.assertEqual(response.status_code, 403)
url = admin_reverse('cms_page_change_template', args=(page.pk,))
with self.login_user_context(admin_user):
response = self.client.post(url, {'template': 'doesntexist'})
self.assertEqual(response.status_code, 400)
response = self.client.post(url, {'template': get_cms_setting('TEMPLATES')[0][0]})
self.assertEqual(response.status_code, 200)
def test_get_permissions(self):
page = create_page('test-page', 'nav_playground.html', 'en')
url = admin_reverse('cms_page_get_permissions', args=(page.pk,))
response = self.client.get(url)
if DJANGO_1_6:
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'admin/login.html')
else:
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, '/en/admin/login/?next=/en/admin/cms/page/%s/permissions/' % page.pk)
admin_user = self.get_superuser()
with self.login_user_context(admin_user):
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateNotUsed(response, 'admin/login.html')
def test_changelist_items(self):
admin_user = self.get_superuser()
first_level_page = create_page('level1', 'nav_playground.html', 'en')
second_level_page_top = create_page('level21', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=first_level_page)
second_level_page_bottom = create_page('level22', "nav_playground.html", "en",
created_by=admin_user, published=True,
parent=self.reload(first_level_page))
third_level_page = create_page('level3', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=second_level_page_top)
self.assertEqual(Page.objects.all().count(), 4)
url = admin_reverse('cms_%s_changelist' % Page._meta.model_name)
request = self.get_request(url)
request.session = {}
request.user = admin_user
page_admin = site._registry[Page]
cl_params = [request, page_admin.model, page_admin.list_display,
page_admin.list_display_links, page_admin.list_filter,
page_admin.date_hierarchy, page_admin.search_fields,
page_admin.list_select_related, page_admin.list_per_page]
if hasattr(page_admin, 'list_max_show_all'): # django 1.4
cl_params.append(page_admin.list_max_show_all)
cl_params.extend([page_admin.list_editable, page_admin])
cl = CMSChangeList(*tuple(cl_params))
cl.set_items(request)
root_page = cl.get_items()[0]
self.assertEqual(root_page, first_level_page)
self.assertEqual(root_page.get_children()[0], second_level_page_top)
self.assertEqual(root_page.get_children()[1], second_level_page_bottom)
self.assertEqual(root_page.get_children()[0].get_children()[0], third_level_page)
def test_changelist_get_results(self):
admin_user = self.get_superuser()
first_level_page = create_page('level1', 'nav_playground.html', 'en', published=True)
second_level_page_top = create_page('level21', "nav_playground.html", "en",
created_by=admin_user, published=True,
parent=first_level_page)
second_level_page_bottom = create_page('level22', "nav_playground.html", "en", # nopyflakes
created_by=admin_user, published=True,
parent=self.reload(first_level_page))
third_level_page = create_page('level3', "nav_playground.html", "en", # nopyflakes
created_by=admin_user, published=True,
parent=second_level_page_top)
fourth_level_page = create_page('level23', "nav_playground.html", "en", # nopyflakes
created_by=admin_user,
parent=self.reload(first_level_page))
self.assertEqual(Page.objects.all().count(), 9)
url = admin_reverse('cms_%s_changelist' % Page._meta.model_name)
request = self.get_request(url)
request.session = {}
request.user = admin_user
page_admin = site._registry[Page]
# full blown page list. only draft pages are taken into account
cl_params = [request, page_admin.model, page_admin.list_display,
page_admin.list_display_links, page_admin.list_filter,
page_admin.date_hierarchy, page_admin.search_fields,
page_admin.list_select_related, page_admin.list_per_page]
if hasattr(page_admin, 'list_max_show_all'): # django 1.4
cl_params.append(page_admin.list_max_show_all)
cl_params.extend([page_admin.list_editable, page_admin])
cl = CMSChangeList(*tuple(cl_params))
cl.get_results(request)
self.assertEqual(cl.full_result_count, 5)
self.assertEqual(cl.result_count, 5)
# only one unpublished page is returned
request = self.get_request(url+'?q=level23')
request.session = {}
request.user = admin_user
cl_params[0] = request
cl = CMSChangeList(*tuple(cl_params))
cl.get_results(request)
self.assertEqual(cl.full_result_count, 5)
self.assertEqual(cl.result_count, 1)
# a number of pages matches the query
request = self.get_request(url+'?q=level2')
request.session = {}
request.user = admin_user
cl_params[0] = request
cl = CMSChangeList(*tuple(cl_params))
cl.get_results(request)
self.assertEqual(cl.full_result_count, 5)
self.assertEqual(cl.result_count, 3)
def test_changelist_tree(self):
""" This test checks for proper jstree cookie unquoting.
It should be converted to a selenium test to actually test the jstree behaviour.
Cookie set below is just a forged example (from live session)
"""
admin_user = self.get_superuser()
first_level_page = create_page('level1', 'nav_playground.html', 'en')
second_level_page_top = create_page('level21', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=first_level_page)
second_level_page_bottom = create_page('level22', "nav_playground.html", "en",
created_by=admin_user, published=True,
parent=self.reload(first_level_page))
third_level_page = create_page('level3', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=second_level_page_top)
url = admin_reverse('cms_%s_changelist' % Page._meta.model_name)
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='admin@django-cms.org', password='admin@django-cms.org')
else:
self.client.login(username='admin', password='admin')
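        # 'djangocms_nodes_open' stores the open tree nodes URL-encoded:
        # 'page_1%2Cpage_2' decodes to 'page_1,page_2'.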
self.client.cookies['djangocms_nodes_open'] = 'page_1%2Cpage_2'
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["open_menu_trees"], [1, 2])
# tests descendants method for the lazy load ajax call
url = "%s%d/en/descendants/" % (url, first_level_page.pk)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# should include both direct descendant pages
self.assertContains(response, 'id="page_%s"' % second_level_page_top.pk)
self.assertContains(response, 'id="page_%s"' % second_level_page_bottom.pk)
# but not any further down the tree
self.assertNotContains(response, 'id="page_%s"' % third_level_page.pk)
self.assertNotContains(response, 'None')
def test_unihandecode_doesnt_break_404_in_admin(self):
self.get_superuser()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='admin@django-cms.org', password='admin@django-cms.org')
else:
self.client.login(username='admin', password='admin')
response = self.client.get('/en/admin/cms/page/1/?language=en')
self.assertEqual(response.status_code, 404)
def test_tree_displays_in_correct_language(self):
'''
Test to prove and protect that the page titles in the tree are
displayed in the currently set language.
'''
admin_guy, normal_guy = self._get_guys(use_global_permissions=False)
site = Site.objects.get(pk=1)
en_title = "EN Page"
es_title = "ES Pagina"
# Create a page in en
        page = create_page(en_title, "nav_playground.html", "en", site=site, created_by=admin_guy)
# Add a es-mx translation for this page
create_title("es-mx", es_title, page, slug="es_pagina")
url = admin_reverse('cms_%s_changelist' % Page._meta.model_name)
url_pat = '<a href="{0}/{1}/preview/"[^>]*>{2}</a>'
with self.login_user_context(admin_guy):
# Check the EN version of the tree...
response = self.client.get(url, {'language': 'en'})
self.assertRegexpMatches(str(response.content), url_pat.format(page.pk, 'en', en_title, ))
# Check the ES version of the tree...
response = self.client.get(url, {'language': 'es-mx'})
self.assertRegexpMatches(str(response.content), url_pat.format(page.pk, 'es-mx', es_title, ))
def test_empty_placeholder_in_correct_language(self):
"""
Test that Cleaning a placeholder only affect current language contents
"""
# create some objects
page_en = create_page("EmptyPlaceholderTestPage (EN)", "nav_playground.html", "en")
ph = page_en.placeholders.get(slot="body")
# add the text plugin to the en version of the page
add_plugin(ph, "TextPlugin", "en", body="Hello World EN 1")
add_plugin(ph, "TextPlugin", "en", body="Hello World EN 2")
# creating a de title of the page and adding plugins to it
create_title("de", page_en.get_title(), page_en, slug=page_en.get_slug())
add_plugin(ph, "TextPlugin", "de", body="Hello World DE")
add_plugin(ph, "TextPlugin", "de", body="Hello World DE 2")
add_plugin(ph, "TextPlugin", "de", body="Hello World DE 3")
# before cleaning the de placeholder
self.assertEqual(ph.get_plugins('en').count(), 2)
self.assertEqual(ph.get_plugins('de').count(), 3)
admin_user, staff = self._get_guys()
with self.login_user_context(admin_user):
url = '%s?language=de' % admin_reverse('cms_page_clear_placeholder', args=[ph.pk])
response = self.client.post(url, {'test': 0})
self.assertEqual(response.status_code, 302)
# After cleaning the de placeholder, en placeholder must still have all the plugins
self.assertEqual(ph.get_plugins('en').count(), 2)
self.assertEqual(ph.get_plugins('de').count(), 0)
class AdminTests(AdminTestsBase):
# TODO: needs tests for actual permissions, not only superuser/normaluser
def setUp(self):
self.page = create_page("testpage", "nav_playground.html", "en")
def get_admin(self):
User = get_user_model()
fields = dict(email="admin@django-cms.org", is_staff=True, is_superuser=True)
if (User.USERNAME_FIELD != 'email'):
fields[User.USERNAME_FIELD] = "admin"
usr = User(**fields)
usr.set_password(getattr(usr, User.USERNAME_FIELD))
usr.save()
return usr
def get_permless(self):
User = get_user_model()
fields = dict(email="permless@django-cms.org", is_staff=True)
if (User.USERNAME_FIELD != 'email'):
fields[User.USERNAME_FIELD] = "permless"
usr = User(**fields)
usr.set_password(getattr(usr, User.USERNAME_FIELD))
usr.save()
return usr
def get_page(self):
return self.page
def test_change_publish_unpublish(self):
page = self.get_page()
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
response = self.admin_class.publish_page(request, page.pk, "en")
self.assertEqual(response.status_code, 405)
page = self.reload(page)
self.assertFalse(page.is_published('en'))
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.publish_page(request, page.pk, "en")
self.assertEqual(response.status_code, 403)
page = self.reload(page)
self.assertFalse(page.is_published('en'))
admin_user = self.get_admin()
with self.login_user_context(admin_user):
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.publish_page(request, page.pk, "en")
self.assertEqual(response.status_code, 302)
page = self.reload(page)
self.assertTrue(page.is_published('en'))
response = self.admin_class.unpublish(request, page.pk, "en")
self.assertEqual(response.status_code, 302)
page = self.reload(page)
self.assertFalse(page.is_published('en'))
def test_change_status_adds_log_entry(self):
page = self.get_page()
admin_user = self.get_admin()
with self.login_user_context(admin_user):
request = self.get_request(post_data={'no': 'data'})
self.assertFalse(LogEntry.objects.count())
response = self.admin_class.publish_page(request, page.pk, "en")
self.assertEqual(response.status_code, 302)
self.assertEqual(1, LogEntry.objects.count())
self.assertEqual(page.pk, int(LogEntry.objects.all()[0].object_id))
def test_change_innavigation(self):
page = self.get_page()
permless = self.get_permless()
admin_user = self.get_admin()
with self.login_user_context(permless):
request = self.get_request()
response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 405)
with self.login_user_context(permless):
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 403)
with self.login_user_context(permless):
request = self.get_request(post_data={'no': 'data'})
self.assertRaises(Http404, self.admin_class.change_innavigation,
request, page.pk + 100)
with self.login_user_context(permless):
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 403)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'no': 'data'})
old = page.in_navigation
response = self.admin_class.change_innavigation(request, page.pk)
# These asserts are for #3589
self.assertContains(response, 'lang="en"')
self.assertContains(response, './%s/en/preview/' % page.pk)
self.assertEqual(response.status_code, 200)
page = self.reload(page)
self.assertEqual(old, not page.in_navigation)
def test_publish_page_requires_perms(self):
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
request.method = "POST"
response = self.admin_class.publish_page(request, Page.objects.all()[0].pk, "en")
self.assertEqual(response.status_code, 403)
def test_revert_page(self):
self.page.publish('en')
title = self.page.title_set.get(language='en')
title.title = 'new'
title.save()
self.assertEqual(Title.objects.all().count(), 2)
self.assertEqual(Page.objects.all().count(), 2)
with self.login_user_context(self.get_superuser()):
request = self.get_request()
request.method = "POST"
response = self.admin_class.revert_page(request, Page.objects.all()[0].pk, "en")
self.assertEqual(response.status_code, 302)
self.assertEqual(Title.objects.all().count(), 2)
self.assertEqual(Page.objects.all().count(), 2)
new_title = Title.objects.get(pk=title.pk)
self.assertNotEqual(title.title, new_title.title)
self.assertTrue(title.publisher_is_draft)
self.assertTrue(new_title.publisher_is_draft)
def test_revert_page_requires_perms(self):
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
request.method = "POST"
response = self.admin_class.revert_page(request, Page.objects.all()[0].pk, 'en')
self.assertEqual(response.status_code, 403)
def test_revert_page_redirects(self):
admin_user = self.get_admin()
self.page.publish("en") # Ensure public copy exists before reverting
with self.login_user_context(admin_user):
response = self.client.post(admin_reverse('cms_page_revert_page', args=(self.page.pk, 'en')))
self.assertEqual(response.status_code, 302)
url = response['Location']
self.assertTrue(url.endswith('?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF')))
def test_remove_plugin_requires_post(self):
ph = Placeholder.objects.create(slot='test')
plugin = add_plugin(ph, 'TextPlugin', 'en', body='test')
admin_user = self.get_admin()
with self.login_user_context(admin_user):
request = self.get_request()
response = self.admin_class.delete_plugin(request, plugin.pk)
self.assertEqual(response.status_code, 200)
def test_move_plugin(self):
ph = Placeholder.objects.create(slot='test')
plugin = add_plugin(ph, 'TextPlugin', 'en', body='test')
page = self.get_page()
source, target = list(page.placeholders.all())[:2]
pageplugin = add_plugin(source, 'TextPlugin', 'en', body='test')
plugin_class = pageplugin.get_plugin_class_instance()
expected = {'reload': plugin_class.requires_reload(PLUGIN_MOVE_ACTION)}
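        # The move endpoint answers with JSON telling the frontend whether a
        # page reload is required for this plugin type.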
placeholder = Placeholder.objects.all()[0]
permless = self.get_permless()
admin_user = self.get_admin()
with self.login_user_context(permless):
request = self.get_request()
response = self.admin_class.move_plugin(request)
self.assertEqual(response.status_code, 405)
request = self.get_request(post_data={'not_usable': '1'})
self.assertRaises(MultiValueDictKeyError, self.admin_class.move_plugin, request)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'ids': plugin.pk})
self.assertRaises(MultiValueDictKeyError, self.admin_class.move_plugin, request)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': 'invalid-placeholder', 'plugin_language': 'en'})
self.assertRaises(ValueError, self.admin_class.move_plugin, request)
with self.login_user_context(permless):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': placeholder.pk, 'plugin_parent': '', 'plugin_language': 'en'})
self.assertEqual(self.admin_class.move_plugin(request).status_code, HttpResponseForbidden.status_code)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': placeholder.pk, 'plugin_parent': '', 'plugin_language': 'en'})
response = self.admin_class.move_plugin(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content.decode('utf8')), expected)
with self.login_user_context(permless):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': placeholder.id, 'plugin_parent': '', 'plugin_language': 'en'})
self.assertEqual(self.admin_class.move_plugin(request).status_code, HttpResponseForbidden.status_code)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': placeholder.id, 'plugin_parent': '', 'plugin_language': 'en'})
response = self.admin_class.move_plugin(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content.decode('utf8')), expected)
def test_move_language(self):
page = self.get_page()
source, target = list(page.placeholders.all())[:2]
col = add_plugin(source, 'MultiColumnPlugin', 'en')
sub_col = add_plugin(source, 'ColumnPlugin', 'en', target=col)
col2 = add_plugin(source, 'MultiColumnPlugin', 'de')
admin_user = self.get_admin()
with self.login_user_context(admin_user):
request = self.get_request(post_data={'plugin_id': sub_col.pk,
'placeholder_id': source.id, 'plugin_parent': col2.pk, 'plugin_language': 'de'})
response = self.admin_class.move_plugin(request)
self.assertEqual(response.status_code, 200)
sub_col = CMSPlugin.objects.get(pk=sub_col.pk)
self.assertEqual(sub_col.language, "de")
self.assertEqual(sub_col.parent_id, col2.pk)
def test_preview_page(self):
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
self.assertRaises(Http404, self.admin_class.preview_page, request, 404, "en")
page = self.get_page()
page.publish("en")
base_url = page.get_absolute_url()
with self.login_user_context(permless):
request = self.get_request('/?public=true')
response = self.admin_class.preview_page(request, page.pk, 'en')
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], '%s?%s&language=en' % (base_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
request = self.get_request()
response = self.admin_class.preview_page(request, page.pk, 'en')
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], '%s?%s&language=en' % (base_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
current_site = Site.objects.create(domain='django-cms.org', name='django-cms')
page.site = current_site
page.save()
page.publish("en")
self.assertTrue(page.is_home)
response = self.admin_class.preview_page(request, page.pk, 'en')
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'],
'http://django-cms.org%s?%s&language=en' % (base_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
def test_too_many_plugins_global(self):
conf = {
'body': {
'limits': {
'global': 1,
},
},
}
admin_user = self.get_admin()
url = admin_reverse('cms_page_add_plugin')
with self.settings(CMS_PERMISSION=False, CMS_PLACEHOLDER_CONF=conf):
page = create_page('somepage', 'nav_playground.html', 'en')
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
with self.login_user_context(admin_user):
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': body.pk,
'plugin_language': 'en',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseBadRequest.status_code)
def test_too_many_plugins_type(self):
conf = {
'body': {
'limits': {
'TextPlugin': 1,
},
},
}
admin_user = self.get_admin()
url = admin_reverse('cms_page_add_plugin')
with self.settings(CMS_PERMISSION=False, CMS_PLACEHOLDER_CONF=conf):
page = create_page('somepage', 'nav_playground.html', 'en')
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
with self.login_user_context(admin_user):
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': body.pk,
'plugin_language': 'en',
'plugin_parent': '',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseBadRequest.status_code)
def test_edit_title_dirty_bit(self):
language = "en"
admin_user = self.get_admin()
page = create_page('A', 'nav_playground.html', language)
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
page.publish("en")
draft_page = page.get_draft_object()
admin_url = reverse("admin:cms_page_edit_title_fields", args=(
draft_page.pk, language
))
post_data = {
'title': "A Title"
}
with self.login_user_context(admin_user):
self.client.post(admin_url, post_data)
draft_page = Page.objects.get(pk=page.pk).get_draft_object()
self.assertTrue(draft_page.is_dirty('en'))
def test_edit_title_languages(self):
language = "en"
admin_user = self.get_admin()
page = create_page('A', 'nav_playground.html', language)
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
page.publish("en")
draft_page = page.get_draft_object()
admin_url = reverse("admin:cms_page_edit_title_fields", args=(
draft_page.pk, language
))
post_data = {
'title': "A Title"
}
with self.login_user_context(admin_user):
self.client.post(admin_url, post_data)
draft_page = Page.objects.get(pk=page.pk).get_draft_object()
self.assertTrue(draft_page.is_dirty('en'))
def test_page_form_leak(self):
language = "en"
admin_user = self.get_admin()
request = self.get_request('/', 'en')
request.user = admin_user
page = create_page('A', 'nav_playground.html', language, menu_title='menu title')
page_admin = PageAdmin(Page, site)
page_admin._current_page = page
edit_form = page_admin.get_form(request, page)
add_form = page_admin.get_form(request, None)
self.assertEqual(edit_form.base_fields['menu_title'].initial, 'menu title')
self.assertEqual(add_form.base_fields['menu_title'].initial, None)
class NoDBAdminTests(CMSTestCase):
@property
def admin_class(self):
return site._registry[Page]
def test_lookup_allowed_site__exact(self):
self.assertTrue(self.admin_class.lookup_allowed('site__exact', '1'))
def test_lookup_allowed_published(self):
self.assertTrue(self.admin_class.lookup_allowed('published', value='1'))
class PluginPermissionTests(AdminTestsBase):
def setUp(self):
self._page = create_page('test page', 'nav_playground.html', 'en')
self._placeholder = self._page.placeholders.all()[0]
def _get_admin(self):
User = get_user_model()
fields = dict(email="admin@django-cms.org", is_staff=True, is_active=True)
if (User.USERNAME_FIELD != 'email'):
fields[User.USERNAME_FIELD] = "admin"
admin_user = User(**fields)
admin_user.set_password('admin')
admin_user.save()
return admin_user
def _get_page_admin(self):
return admin.site._registry[Page]
def _give_permission(self, user, model, permission_type, save=True):
codename = '%s_%s' % (permission_type, model._meta.object_name.lower())
user.user_permissions.add(Permission.objects.get(codename=codename))
def _give_page_permission_rights(self, user):
self._give_permission(user, PagePermission, 'add')
self._give_permission(user, PagePermission, 'change')
self._give_permission(user, PagePermission, 'delete')
def _get_change_page_request(self, user, page):
return type('Request', (object,), {
'user': user,
'path': base.URL_CMS_PAGE_CHANGE % page.pk
})
def _give_cms_permissions(self, user, save=True):
for perm_type in ['add', 'change', 'delete']:
for model in [Page, Title]:
self._give_permission(user, model, perm_type, False)
gpp = GlobalPagePermission.objects.create(
user=user,
can_change=True,
can_delete=True,
can_change_advanced_settings=False,
can_publish=True,
can_change_permissions=False,
can_move_page=True,
)
gpp.sites = Site.objects.all()
if save:
user.save()
def _create_plugin(self):
plugin = add_plugin(self._placeholder, 'TextPlugin', 'en')
return plugin
def test_plugin_add_requires_permissions(self):
"""User tries to add a plugin but has no permissions. He can add the plugin after he got the permissions"""
admin = self._get_admin()
self._give_cms_permissions(admin)
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='admin@django-cms.org', password='admin')
else:
self.client.login(username='admin', password='admin')
url = admin_reverse('cms_page_add_plugin')
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': self._placeholder.pk,
'plugin_language': 'en',
'plugin_parent': '',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
self._give_permission(admin, Text, 'add')
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugin_edit_requires_permissions(self):
"""User tries to edit a plugin but has no permissions. He can edit the plugin after he got the permissions"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='test@test.com', password='test@test.com')
else:
self.client.login(username='test', password='test')
url = admin_reverse('cms_page_edit_plugin', args=[plugin.id])
response = self.client.post(url, dict())
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
# After he got the permissions, he can edit the plugin
self._give_permission(normal_guy, Text, 'change')
response = self.client.post(url, dict())
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugin_edit_wrong_url(self):
"""User tries to edit a plugin using a random url. 404 response returned"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='test@test.com', password='test@test.com')
else:
self.client.login(username='test', password='test')
self._give_permission(normal_guy, Text, 'change')
url = '%s/edit-plugin/%s/' % (admin_reverse('cms_page_edit_plugin', args=[plugin.id]), plugin.id)
response = self.client.post(url, dict())
self.assertEqual(response.status_code, HttpResponseNotFound.status_code)
self.assertTrue("Plugin not found" in force_text(response.content))
def test_plugin_remove_requires_permissions(self):
"""User tries to remove a plugin but has no permissions. He can remove the plugin after he got the permissions"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='test@test.com', password='test@test.com')
else:
self.client.login(username='test', password='test')
url = admin_reverse('cms_page_delete_plugin', args=[plugin.pk])
data = dict(plugin_id=plugin.id)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
# After he got the permissions, he can edit the plugin
self._give_permission(normal_guy, Text, 'delete')
response = self.client.post(url, data)
self.assertEqual(response.status_code, 302)
def test_plugin_move_requires_permissions(self):
"""User tries to move a plugin but has no permissions. He can move the plugin after he got the permissions"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='test@test.com', password='test@test.com')
else:
self.client.login(username='test', password='test')
url = admin_reverse('cms_page_move_plugin')
data = dict(plugin_id=plugin.id,
placeholder_id=self._placeholder.pk,
plugin_parent='',
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
# After he got the permissions, he can edit the plugin
self._give_permission(normal_guy, Text, 'change')
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugins_copy_requires_permissions(self):
"""User tries to copy plugin but has no permissions. He can copy plugins after he got the permissions"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='test@test.com', password='test@test.com')
else:
self.client.login(username='test', password='test')
url = admin_reverse('cms_page_copy_plugins')
data = dict(source_plugin_id=plugin.id,
source_placeholder_id=self._placeholder.pk,
source_language='en',
target_language='fr',
target_placeholder_id=self._placeholder.pk,
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
# After he got the permissions, he can edit the plugin
self._give_permission(normal_guy, Text, 'add')
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugins_copy_placeholder_ref(self):
"""User copies a placeholder into a clipboard. A PlaceholderReferencePlugin is created. Afterwards he copies this
into a placeholder and the PlaceholderReferencePlugin unpacks its content. After that he clear the clipboard"""
self.assertEqual(Placeholder.objects.count(), 2)
self._create_plugin()
self._create_plugin()
admin_user = self.get_superuser()
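        # The clipboard is itself a Placeholder attached to the user's
        # settings; copying into it wraps the copied plugins in a reference plugin.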
clipboard = Placeholder()
clipboard.save()
self.assertEqual(CMSPlugin.objects.count(), 2)
settings = UserSettings(language="fr", clipboard=clipboard, user=admin_user)
settings.save()
self.assertEqual(Placeholder.objects.count(), 3)
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='admin@django-cms.org', password='admin@django-cms.org')
else:
self.client.login(username='admin', password='admin')
url = admin_reverse('cms_page_copy_plugins')
data = dict(source_plugin_id='',
source_placeholder_id=self._placeholder.pk,
source_language='en',
target_language='en',
target_placeholder_id=clipboard.pk,
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
clipboard_plugins = clipboard.get_plugins()
self.assertEqual(CMSPlugin.objects.count(), 5)
self.assertEqual(clipboard_plugins.count(), 1)
self.assertEqual(clipboard_plugins[0].plugin_type, "PlaceholderPlugin")
placeholder_plugin, _ = clipboard_plugins[0].get_plugin_instance()
ref_placeholder = placeholder_plugin.placeholder_ref
copied_plugins = ref_placeholder.get_plugins()
self.assertEqual(copied_plugins.count(), 2)
data = dict(source_plugin_id=placeholder_plugin.pk,
source_placeholder_id=clipboard.pk,
source_language='en',
target_language='fr',
target_placeholder_id=self._placeholder.pk,
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
plugins = self._placeholder.get_plugins()
self.assertEqual(plugins.count(), 4)
self.assertEqual(CMSPlugin.objects.count(), 7)
self.assertEqual(Placeholder.objects.count(), 4)
url = admin_reverse('cms_page_clear_placeholder', args=[clipboard.pk])
with self.assertNumQueries(FuzzyInt(70, 80)):
response = self.client.post(url, {'test': 0})
self.assertEqual(response.status_code, 302)
self.assertEqual(CMSPlugin.objects.count(), 4)
self.assertEqual(Placeholder.objects.count(), 3)
def test_plugins_copy_language(self):
"""User tries to copy plugin but has no permissions. He can copy plugins after he got the permissions"""
self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD != 'email':
self.client.login(username='test', password='test')
else:
self.client.login(username='test@test.com', password='test@test.com')
self.assertEqual(1, CMSPlugin.objects.all().count())
url = admin_reverse('cms_page_copy_language', args=[self._page.pk])
data = dict(
source_language='en',
target_language='fr',
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
# After he got the permissions, he can edit the plugin
self._give_permission(normal_guy, Text, 'add')
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
self.assertEqual(2, CMSPlugin.objects.all().count())
def test_page_permission_inline_visibility(self):
User = get_user_model()
fields = dict(email='user@domain.com', password='user', is_staff=True)
if get_user_model().USERNAME_FIELD != 'email':
fields[get_user_model().USERNAME_FIELD] = 'user'
user = User(**fields)
user.save()
self._give_page_permission_rights(user)
page = create_page('A', 'nav_playground.html', 'en')
page_permission = PagePermission.objects.create(
can_change_permissions=True, user=user, page=page)
request = self._get_change_page_request(user, page)
page_admin = PageAdmin(Page, AdminSite())
page_admin._current_page = page
# user has can_change_permission
# => must see the PagePermissionInline
self.assertTrue(
any(type(inline) is PagePermissionInlineAdmin
for inline in page_admin.get_inline_instances(request, page)))
page = Page.objects.get(pk=page.pk)
# remove can_change_permission
page_permission.can_change_permissions = False
page_permission.save()
request = self._get_change_page_request(user, page)
page_admin = PageAdmin(Page, AdminSite())
page_admin._current_page = page
# => PagePermissionInline is no longer visible
self.assertFalse(
any(type(inline) is PagePermissionInlineAdmin
for inline in page_admin.get_inline_instances(request, page)))
def test_edit_title_is_allowed_for_staff_user(self):
"""
We check here both the permission on a single page, and the global permissions
"""
user = self._create_user('user', is_staff=True)
another_user = self._create_user('another_user', is_staff=True)
page = create_page('A', 'nav_playground.html', 'en')
admin_url = reverse("admin:cms_page_edit_title_fields", args=(
page.pk, 'en'
))
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
username = getattr(user, get_user_model().USERNAME_FIELD)
self.client.login(username=username, password=username)
response = self.client.get(admin_url)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
assign_user_to_page(page, user, grant_all=True)
username = getattr(user, get_user_model().USERNAME_FIELD)
self.client.login(username=username, password=username)
response = self.client.get(admin_url)
self.assertEqual(response.status_code, HttpResponse.status_code)
self._give_cms_permissions(another_user)
username = getattr(another_user, get_user_model().USERNAME_FIELD)
self.client.login(username=username, password=username)
response = self.client.get(admin_url)
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugin_add_returns_valid_pk_for_plugin(self):
admin_user = self._get_admin()
self._give_cms_permissions(admin_user)
self._give_permission(admin_user, Text, 'add')
username = getattr(admin_user, get_user_model().USERNAME_FIELD)
self.client.login(username=username, password='admin')
url = admin_reverse('cms_page_add_plugin')
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': self._placeholder.pk,
'plugin_language': 'en',
'plugin_parent': '',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
self.assertEqual(response['content-type'], 'application/json')
pk = response.content.decode('utf8').split("edit-plugin/")[1].split("/")[0]
self.assertTrue(CMSPlugin.objects.filter(pk=int(pk)).exists())
class AdminFormsTests(AdminTestsBase):
def test_clean_overwrite_url(self):
user = AnonymousUser()
user.is_superuser = True
user.pk = 1
request = type('Request', (object,), {'user': user})
with self.settings():
data = {
'title': 'TestPage',
'slug': 'test-page',
'language': 'en',
'overwrite_url': '/overwrite/url/',
'site': Site.objects.get_current().pk,
'template': get_cms_setting('TEMPLATES')[0][0],
'published': True
}
form = PageForm(data)
self.assertTrue(form.is_valid(), form.errors.as_text())
instance = form.save()
instance.permission_user_cache = user
instance.permission_advanced_settings_cache = True
Title.objects.set_or_create(request, instance, form, 'en')
form = PageForm(data, instance=instance)
self.assertTrue(form.is_valid(), form.errors.as_text())
    def test_mismatching_site_parent_dotsite(self):
site0 = Site.objects.create(domain='foo.com', name='foo.com')
site1 = Site.objects.create(domain='foo.com', name='foo.com')
parent_page = Page.objects.create(
template='nav_playground.html',
site=site0)
new_page_data = {
'title': 'Title',
'slug': 'slug',
'language': 'en',
'site': site1.pk,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent': parent_page.pk,
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
self.assertIn(u"Site doesn't match the parent's page site",
form.errors['__all__'])
def test_form_errors(self):
new_page_data = {
'title': 'Title',
'slug': 'home',
'language': 'en',
'site': 10,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
site0 = Site.objects.create(domain='foo.com', name='foo.com')
page1 = api.create_page("test", get_cms_setting('TEMPLATES')[0][0], "fr", site=site0)
new_page_data = {
'title': 'Title',
'slug': 'home',
'language': 'en',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent': page1.pk,
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
new_page_data = {
'title': 'Title',
'slug': '#',
'language': 'en',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
new_page_data = {
'title': 'Title',
'slug': 'home',
'language': 'pp',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent':'',
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
page2 = api.create_page("test", get_cms_setting('TEMPLATES')[0][0], "en")
new_page_data = {
'title': 'Title',
'slug': 'test',
'language': 'en',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent':'',
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
page3 = api.create_page("test", get_cms_setting('TEMPLATES')[0][0], "en", parent=page2)
page3.title_set.update(path="hello/")
page3 = page3.reload()
new_page_data = {
'title': 'Title',
'slug': 'test',
'language': 'en',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent':'',
}
form = PageForm(data=new_page_data, files=None, instance=page3)
self.assertFalse(form.is_valid())
def test_reverse_id_error_location(self):
        ''' Test moving the reverse_id validation error to a field-specific one '''
        # This is the reverse ID we'll re-use to break things.
        dupe_id = 'p1'
        current_site = Site.objects.get_current()
create_page('Page 1', 'nav_playground.html', 'en', reverse_id=dupe_id)
page2 = create_page('Page 2', 'nav_playground.html', 'en')
# Assemble a bunch of data to test the page form
page2_data = {
'language': 'en',
            'site': current_site.pk,
'reverse_id': dupe_id,
'template': 'col_two.html',
}
form = AdvancedSettingsForm(data=page2_data, files=None)
self.assertFalse(form.is_valid())
            # reverse_id is the only item that ends up in __all__, as every
            # other field has its own clean method. Moving it to a field error
            # means __all__ is no longer present.
            self.assertNotIn('__all__', form.errors)
            # Now that it has its own field, it should be in form.errors, and
            # the values contained therein should match these.
self.assertIn('reverse_id', form.errors)
self.assertEqual(1, len(form.errors['reverse_id']))
self.assertEqual([u'A page with this reverse URL id exists already.'],
form.errors['reverse_id'])
page2_data['reverse_id'] = ""
form = AdvancedSettingsForm(data=page2_data, files=None)
self.assertTrue(form.is_valid())
admin_user = self._get_guys(admin_only=True)
        # Re-fetch page2 and attach it to the current site.
page2 = page2.reload()
        page2.site = current_site
page2.save()
with self.login_user_context(admin_user):
            # Reset page2_data for the admin change form instance.
page2_data['reverse_id'] = dupe_id
            page2_data['site'] = current_site.pk
# post to the admin change form for page 2, and test that the
# reverse_id form row has an errors class. Django's admin avoids
# collapsing these, so that the error is visible.
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page2.pk, page2_data)
self.assertContains(resp, '<div class="form-row errors reverse_id">')
def test_create_page_type(self):
page = create_page('Test', 'static.html', 'en', published=True, reverse_id="home")
for placeholder in Placeholder.objects.all():
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
page.publish('en')
self.assertEqual(Page.objects.count(), 2)
self.assertEqual(CMSPlugin.objects.count(), 4)
superuser = self.get_superuser()
with self.login_user_context(superuser):
response = self.client.get(
"%s?copy_target=%s&language=%s" % (admin_reverse("cms_page_add_page_type"), page.pk, 'en'))
self.assertEqual(response.status_code, 302)
self.assertEqual(Page.objects.count(), 3)
self.assertEqual(Page.objects.filter(reverse_id="page_types").count(), 1)
page_types = Page.objects.get(reverse_id='page_types')
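            # Support both response.url and the 'Location' header, depending
            # on the Django version.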
url = response.url if hasattr(response, 'url') else response['Location']
expected_url_params = QueryDict(
'target=%s&position=first-child&add_page_type=1©_target=%s&language=en' % (page_types.pk, page.pk))
response_url_params = QueryDict(urlparse(url).query)
self.assertDictEqual(expected_url_params, response_url_params)
response = self.client.get("%s?copy_target=%s&language=%s" % (
admin_reverse("cms_page_add_page_type"), page.pk, 'en'), follow=True)
self.assertEqual(response.status_code, 200)
            # no page-type selector should be offered while no page types exist
response = self.client.get(admin_reverse('cms_page_add'))
self.assertNotContains(response, "page_type")
            # create our first page type
page_data = {
'title': 'type1', 'slug': 'type1', '_save': 1, 'template': 'static.html', 'site': 1,
'language': 'en'
}
response = self.client.post(
"/en/admin/cms/page/add/?target=%s&position=first-child&add_page_type=1©_target=%s&language=en" % (
page_types.pk, page.pk), data=page_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Page.objects.count(), 4)
self.assertEqual(CMSPlugin.objects.count(), 6)
response = self.client.get(admin_reverse('cms_page_add'))
self.assertContains(response, "page_type")
# no page types available if you use the copy_target
response = self.client.get("%s?copy_target=%s&language=en" % (admin_reverse('cms_page_add'), page.pk))
self.assertNotContains(response, "page_type")
def test_render_edit_mode(self):
from django.core.cache import cache
cache.clear()
create_page('Test', 'static.html', 'en', published=True)
for placeholder in Placeholder.objects.all():
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
user = self.get_superuser()
self.assertEqual(Placeholder.objects.all().count(), 4)
with self.login_user_context(user):
with self.assertNumQueries(FuzzyInt(40, 66)):
output = force_text(self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')).content)
self.assertIn('<b>Test</b>', output)
self.assertEqual(Placeholder.objects.all().count(), 9)
self.assertEqual(StaticPlaceholder.objects.count(), 2)
for placeholder in Placeholder.objects.all():
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
with self.assertNumQueries(FuzzyInt(40, 72)):
output = force_text(self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')).content)
self.assertIn('<b>Test</b>', output)
with self.assertNumQueries(FuzzyInt(18, 45)):
force_text(self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')).content)
with self.assertNumQueries(FuzzyInt(11, 29)):
force_text(self.client.get('/en/').content)
def test_tree_view_queries(self):
from django.core.cache import cache
cache.clear()
for i in range(10):
create_page('Test%s' % i, 'col_two.html', 'en', published=True)
for placeholder in Placeholder.objects.all():
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
user = self.get_superuser()
with self.login_user_context(user):
with self.assertNumQueries(FuzzyInt(18, 33)):
force_text(self.client.get('/en/admin/cms/page/'))
def test_smart_link_published_pages(self):
admin, staff_guy = self._get_guys()
page_url = '/en/admin/cms/page/published-pages/' # Not sure how to achieve this with reverse...
with self.login_user_context(staff_guy):
multi_title_page = create_page('main_title', 'col_two.html', 'en', published=True,
overwrite_url='overwritten_url',
menu_title='menu_title')
title = multi_title_page.get_title_obj()
title.page_title = 'page_title'
title.save()
multi_title_page.save()
publish_page(multi_title_page, admin, 'en')
            # A non-AJAX call should return 403, since this page should only be accessed via AJAX.
self.assertEqual(403, self.client.get(page_url).status_code)
self.assertEqual(200,
self.client.get(page_url, HTTP_X_REQUESTED_WITH='XMLHttpRequest').status_code
)
# Test that the query param is working as expected.
self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'main_title'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'menu_title'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'overwritten_url'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'page_title'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
class AdminPageEditContentSizeTests(AdminTestsBase):
"""
System user count influences the size of the page edit page,
but the users are only 2 times present on the page
The test relates to extra=0
at PagePermissionInlineAdminForm and ViewRestrictionInlineAdmin
"""
def test_editpage_contentsize(self):
"""
Expected a username only 2 times in the content, but a relationship
between usercount and pagesize
"""
with self.settings(CMS_PERMISSION=True):
admin_user = self.get_superuser()
PAGE_NAME = 'TestPage'
USER_NAME = 'test_size_user_0'
current_site = Site.objects.get(pk=1)
page = create_page(PAGE_NAME, "nav_playground.html", "en", site=current_site, created_by=admin_user)
page.save()
self._page = page
with self.login_user_context(admin_user):
url = base.URL_CMS_PAGE_PERMISSION_CHANGE % self._page.pk
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
old_response_size = len(response.content)
old_user_count = get_user_model().objects.count()
                # create an additional user and reload the page
get_user_model().objects.create_user(username=USER_NAME, email=USER_NAME + '@django-cms.org',
password=USER_NAME)
user_count = get_user_model().objects.count()
more_users_in_db = old_user_count < user_count
                # we now have more users in the database
                self.assertTrue(more_users_in_db, "New user was not created")
response = self.client.get(url)
new_response_size = len(response.content)
page_size_grown = old_response_size < new_response_size
                # expect the page size to grow with the number of users in the system
self.assertTrue(page_size_grown, "Page size has not grown after user creation")
                # usernames should appear only twice in the content
if DJANGO_1_7:
charset = response._charset
else:
charset = response.charset
text = smart_str(response.content, charset)
foundcount = text.count(USER_NAME)
# 2 forms contain usernames as options
self.assertEqual(foundcount, 2,
"Username %s appeared %s times in response.content, expected 2 times" % (
USER_NAME, foundcount))
"""The kraken integration."""
from __future__ import annotations
import logging
from homeassistant.components.sensor import SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import device_registry
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from . import KrakenData
from .const import (
CONF_TRACKED_ASSET_PAIRS,
DISPATCH_CONFIG_UPDATED,
DOMAIN,
SENSOR_TYPES,
SensorType,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Add kraken entities from a config_entry."""
@callback
def async_update_sensors(hass: HomeAssistant, config_entry: ConfigEntry) -> None:
dev_reg = device_registry.async_get(hass)
existing_devices = {
device.name: device.id
for device in device_registry.async_entries_for_config_entry(
dev_reg, config_entry.entry_id
)
}
sensors = []
for tracked_asset_pair in config_entry.options[CONF_TRACKED_ASSET_PAIRS]:
# Only create new devices
if (
device_name := create_device_name(tracked_asset_pair)
) in existing_devices:
existing_devices.pop(device_name)
else:
for sensor_type in SENSOR_TYPES:
sensors.append(
KrakenSensor(
hass.data[DOMAIN],
tracked_asset_pair,
sensor_type,
)
)
async_add_entities(sensors, True)
# Remove devices for asset pairs which are no longer tracked
for device_id in existing_devices.values():
dev_reg.async_remove_device(device_id)
async_update_sensors(hass, config_entry)
config_entry.async_on_unload(
async_dispatcher_connect(
hass,
DISPATCH_CONFIG_UPDATED,
async_update_sensors,
)
)
class KrakenSensor(CoordinatorEntity, SensorEntity):
"""Define a Kraken sensor."""
def __init__(
self,
kraken_data: KrakenData,
tracked_asset_pair: str,
sensor_type: SensorType,
) -> None:
"""Initialize."""
assert kraken_data.coordinator is not None
super().__init__(kraken_data.coordinator)
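        # Kraken's websocket name for the asset pair; it is also the key into
        # the coordinator's ticker data.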
self.tracked_asset_pair_wsname = kraken_data.tradable_asset_pairs[
tracked_asset_pair
]
self._source_asset = tracked_asset_pair.split("/")[0]
self._target_asset = tracked_asset_pair.split("/")[1]
self._sensor_type = sensor_type["name"]
self._enabled_by_default = sensor_type["enabled_by_default"]
self._unit_of_measurement = self._target_asset
self._device_name = f"{self._source_asset} {self._target_asset}"
self._name = "_".join(
[
tracked_asset_pair.split("/")[0],
tracked_asset_pair.split("/")[1],
sensor_type["name"],
]
)
self._received_data_at_least_once = False
self._available = True
self._state = None
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return self._enabled_by_default
@property
def name(self) -> str:
"""Return the name."""
return self._name
@property
def unique_id(self) -> str:
"""Set unique_id for sensor."""
return self._name.lower()
@property
def native_value(self) -> StateType:
"""Return the state."""
return self._state
async def async_added_to_hass(self) -> None:
"""Handle entity which will be added."""
await super().async_added_to_hass()
self._update_internal_state()
def _handle_coordinator_update(self) -> None:
self._update_internal_state()
super()._handle_coordinator_update()
    def _update_internal_state(self) -> None:
        # Map each sensor type onto the key in the coordinator's ticker data
        # and the index within that entry (None for scalar values).
        sensor_data_map = {
            "last_trade_closed": ("last_trade_closed", 0),
            "ask": ("ask", 0),
            "ask_volume": ("ask", 1),
            "bid": ("bid", 0),
            "bid_volume": ("bid", 1),
            "volume_today": ("volume", 0),
            "volume_last_24h": ("volume", 1),
            "volume_weighted_average_today": ("volume_weighted_average", 0),
            "volume_weighted_average_last_24h": ("volume_weighted_average", 1),
            "number_of_trades_today": ("number_of_trades", 0),
            "number_of_trades_last_24h": ("number_of_trades", 1),
            "low_today": ("low", 0),
            "low_last_24h": ("low", 1),
            "high_today": ("high", 0),
            "high_last_24h": ("high", 1),
            "opening_price_today": ("opening_price", None),
        }
        try:
            key, index = sensor_data_map[self._sensor_type]
            entry = self.coordinator.data[self.tracked_asset_pair_wsname][key]
            self._state = entry if index is None else entry[index]
            self._received_data_at_least_once = True
        except TypeError:
            # coordinator.data carries no entry when the pair has disappeared.
            if self._received_data_at_least_once:
                if self._available:
                    _LOGGER.warning(
                        "Asset Pair %s is no longer available",
                        self._device_name,
                    )
                self._available = False
@property
def icon(self) -> str:
"""Return the icon."""
if self._target_asset == "EUR":
return "mdi:currency-eur"
if self._target_asset == "GBP":
return "mdi:currency-gbp"
if self._target_asset == "USD":
return "mdi:currency-usd"
if self._target_asset == "JPY":
return "mdi:currency-jpy"
if self._target_asset == "XBT":
return "mdi:currency-btc"
return "mdi:cash"
@property
def native_unit_of_measurement(self) -> str | None:
"""Return the unit the value is expressed in."""
if "number_of" not in self._sensor_type:
return self._unit_of_measurement
return None
@property
def available(self) -> bool:
"""Could the api be accessed during the last update call."""
return self._available and self.coordinator.last_update_success
@property
def device_info(self) -> DeviceInfo:
"""Return a device description for device registry."""
return {
"identifiers": {(DOMAIN, f"{self._source_asset}_{self._target_asset}")},
"name": self._device_name,
"manufacturer": "Kraken.com",
"entry_type": "service",
}
def create_device_name(tracked_asset_pair: str) -> str:
"""Create the device name for a given tracked asset pair."""
return f"{tracked_asset_pair.split('/')[0]} {tracked_asset_pair.split('/')[1]}"
#!/usr/bin/env python
#
# NED Policy Manager (a.k.a. the online workflow manager)
#
# Dependencies: pip install json2xml dicttoxml termcolor
#
# Example usage: ./online_workflow_manager.py child child 195.235.93.146 130.192.1.102 testCoop
import os
import json
import base64
import inspect
import requests
import dicttoxml
import upr_client
from sys import argv
from time import sleep
from xml.dom import minidom
from termcolor import colored
from lxml import etree, objectify
from lxml.etree import XMLSyntaxError
from requests.auth import HTTPBasicAuth
import xml.etree.ElementTree as ET
import logging
import datetime as dt
# TODO: check whether any sessions are open and, if so, close them
class MyFormatter(logging.Formatter):
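    # Using datetime instead of time.struct_time lets datefmt include
    # microseconds via %f, which the stock Formatter does not support.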
converter=dt.datetime.fromtimestamp
def formatTime(self, record, datefmt=None):
ct = self.converter(record.created)
if datefmt:
s = ct.strftime(datefmt)
else:
t = ct.strftime("%Y-%m-%d %H:%M:%S")
s = "%s,%03d" % (t, record.msecs)
return s
class AnalysisWorkflowManager(object):
DEBUG = True
DEBUG_MSG = ""
def dbprint(self, string):
        if self.DEBUG:
self.logger.info(string)
    @staticmethod
    def sanitize(string):
        # Strip escaped whitespace and escaped-slash sequences from a JSON string.
        s = string.replace("\\n", "")
        s = s.replace("\\r", "")
        s = s.replace("\\t", "")
        s = s.replace("\\/", "")
        return s
def spm_mifa(self, spm_url, mifa_input):
        #mifa_input = self.sanitize(mifa_input)
        self.dbprint("Preparing MIFA input")
mifa_input = mifa_input.replace("\\n", "")
mifa_input = mifa_input.replace("\\r", "")
mifa_input = mifa_input.replace("\\t", "")
mifa_input = mifa_input.replace("\\/", "")
mifa_input = mifa_input.replace("\\\"", "\"")
rec_svc = ":8181/restconf/operations/reconciliation:muca"
url = "http://" + str(spm_url) + str(rec_svc)
self.dbprint("Contacting the SPM mifa service")
r = ""
headers = {'content-type': 'application/json'}
counter = 0
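        # Retry the POST up to three times before giving up and raising.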
while counter < 3:
try:
                if self.DEBUG:
                    self.dbprint(str(url))
r = requests.post(url, auth=('admin', 'admin'),
headers=headers, data=mifa_input, timeout=None)
return r
except Exception:
counter = counter + 1
if counter < 3:
continue
# Give up
self.dbprint("ERROR: Could not reach SPM at " + str(url))
        raise Exception('Could not reach SPM.')
def spm_sfa(self, spm_url, sfa_input):
print "cmon"
#sfa_input = sanitize(sfa_input)
sfa_input = sfa_input.replace("\\n", "")
sfa_input = sfa_input.replace("\\t", "")
sfa_input = sfa_input.replace("\\r", "")
sfa_input = sfa_input.replace("\\/", "")
print str(sfa_input)
print "done"
rec_svc = ":8181/restconf/operations/reconciliation:sucas"
url = "http://" + str(spm_url) + str(rec_svc)
print "Contacting the SPM SFA service"
r = ""
headers = {'content-type': 'application/json'}
counter = 0
print "sfa..."
while counter < 3:
try:
                if self.DEBUG:
                    self.dbprint(str(url))
r = requests.post(url, auth=('admin', 'admin'),
headers=headers, data=sfa_input, timeout=None)
return r
except Exception:
counter = counter + 1
if counter < 3:
continue
# Give up
self.dbprint("ERROR: Could not reach SPM at " + str(url))
self.DEBUG_MSG = "ERROR: Could not reach SPM at " + str(url)
        raise Exception('Could not reach SPM.')
def spm_ifa(self, spm_url, ifa_input):
ifa_svc = ":8181/restconf/operations/reconciliation:sucad"
url = "http://" + str(spm_url) + str(ifa_svc)
self.dbprint("Contacting the SPM IFA service")
r = ""
headers = {'content-type': 'application/json'}
counter = 0
while counter < 3:
try:
                if self.DEBUG:
                    self.dbprint(str(url))
r = requests.post(url, auth=('admin', 'admin'),
headers=headers, data=ifa_input, timeout=None)
return r
except Exception:
counter = counter + 1
if counter < 3:
continue
# Give up
self.dbprint("ERROR: Could not reach SPM at " + str(url))
        raise Exception('Could not reach SPM.')
def get_capability_from_mspl(self, mspl):
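        # Walk two levels below the MSPL root and concatenate the text of
        # every element found under a *capability* node.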
capabilities = ""
        root = ET.fromstring(mspl)
for child in root:
for a in child:
if (str(a.tag).endswith("capability")):
for b in a:
self.dbprint("Found a capability: " + str(b.text))
capabilities = capabilities + str(b.text)
return capabilities
def get_creator(self, user, upr):
r = upr.get_user_creator(user)
if (r.status_code != 200):
self.dbprint( "ERROR getting creator of user during reconciliation" \
+ ". Status code = " + str(r.status_code))
raise Exception('Error getting creator of ' + str(user) \
+ ". Error is " + str(r.status_code) \
+ '. Does ' + str(user) + ' exist?')
data = r.json()
return data['creator']
def get_PolicyStack(self, user, upr, cooperative, coop, uncoop):
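        # Walk up the chain of creators starting at `user`; at each layer
        # collect its MSPLs and file the layer's application graph into the
        # cooperative or uncooperative stack.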
mspls = []
coopList = []
uncoopList = []
creator = user
# get all AG of user
r = upr.get_user_ag(user)
if r.status_code != 200:
self.dbprint( "ERROR getting AG of user + " + str(user) + " with error code " + str(r.status_code))
applicationGraphs = r.json()
while creator != None:
self.dbprint("")
self.dbprint("Computing policy stack of layer : " + user+ " " +creator)
#get all MSPLs of current stack layer
r = upr.get_mspl(is_reconciled='false', target=str(user), editor=str(creator))
if r.status_code != 200:
raise Exception('Could not get policies for user!')
mspl_list_json = r.json()
for mspl in mspl_list_json:
self.dbprint("Getting MSPL...")
# add MSPL to msplList
mspls.append(mspl['mspl'])
r = upr.get_user_list(user_id=creator)
            if r.status_code != 200:
                self.dbprint("ERROR finding out cooperative status, code "
                             + str(r.status_code))
                raise Exception("Can't tell if cooperative: "
                                + str(r.status_code))
data = r.json()
creator_is_cooperative = data['is_cooperative']
if cooperative and creator_is_cooperative:
try:
data = {}
data['id'] = str(coop)
                    data['ag'] = filter(lambda ag: ag['editor'].upper() == str(creator).upper(), applicationGraphs)[0]['ag']
data['creator'] = creator
coopList.append(data)
coop += 1
self.dbprint("Adding to user's cooperative stack")
except IndexError:
self.dbprint( colored("ERROR: USER " + str(creator) + " DOES NOT HAVE AN AG", 'red') )
self.DEBUG_MSG = "ERROR: USER " + str(creator) + " DOES NOT HAVE AN AG"
else:
cooperative = False
try:
data = {}
data['id'] = str(uncoop)
                    data['ag'] = filter(lambda ag: ag['editor'].upper() == str(creator).upper(), applicationGraphs)[0]['ag']
data['creator'] = creator
uncoopList.append(data)
uncoop += 1
self.dbprint("Adding to uncooperative stack")
except IndexError:
self.dbprint( colored("ERROR: USER " + str(creator) + " DOES NOT HAV AN AG", 'red') )
creator = self.get_creator(creator, upr)
data = {}
data['mspls'] = mspls
data['coopList'] = coopList
data['uncoopList'] = uncoopList
data['cooperative'] = cooperative
data['coop'] = coop
data['uncoop'] = uncoop
print "Finished policy stack"
self.dbprint("Finished policy stack")
return data
def mifa(self, user, password, upr_url, spm_url):
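        # MIFA: send the user's full cooperative policy stack to the SPM for
        # reconciliation and persist the returned report in the UPR.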
self.dbprint("Truth and MIFA...")
upr = upr_client.UPRClient(str(upr_url))
mspls = []
coopList = []
uncoopList = []
data = self.get_PolicyStack(user, upr, True, 1, 1)
mspls.extend(data['mspls'])
coopList.extend(data['coopList'])
coopList.reverse()
# Prepare input
data = {}
data['coop'] = coopList
data['MSPL'] = mspls
parent_json = {}
parent_json['input'] = data
mifa_input = json.dumps(parent_json, sort_keys=True, indent=4)
self.dbprint(mifa_input)
self.dbprint("Got to here")
r = self.spm_mifa(spm_url, mifa_input)
print str(r.status_code)
if r.status_code != 200:
self.dbprint( "ERROR: SPM returned " + str(r.status_code) )
self.DEBUG_MSG = "ERROR: SPM returned " + str(r.status_code)
raise Exception ("SPM returned " + str(r.status_code))
data = r.json()
try:
report = data['output']['report']
print "RECONCILIATION!"
try:
r = upr.post_reconciliation_report(user, user, report)
except Exception as ex:
print "Some exception" + str(ex)
print "RESULT"
if r.status_code != 201:
self.dbprint (colored('ERROR saving report: ' + str(r.status_code), 'red'))
else:
self.dbprint( colored('MIFA report created', 'green'))
print "DONE: "
report = base64.b64decode(r.json()['reconciliation_report'])
return report
except KeyError:
print "KEYERROR"
self.DEBUG_MSG = "No MIFA report"
self.dbprint( colored('No MIFA report', 'red'))
return None
def ifa(self, user, password, upr_url, spm_url):
self.dbprint("Truth and IFA...")
upr = upr_client.UPRClient(str(upr_url))
mspls = []
r = upr.get_mspl(is_reconciled='false', target=str(user), editor=str(user))
if r.status_code != 200:
raise Exception('Could not get policies for user!')
else:
mspl_list_json = r.json()
i = 1
for mspl in mspl_list_json:
self.dbprint("Getting MSPL...")
# add MSPL to msplList
mspls.append({'id':str(i), 'mspl':mspl['mspl']})
i += 1
data = {}
data['MSPL'] = mspls
parent_json = {}
parent_json['input'] = data
ifa_input = json.dumps(parent_json, sort_keys=True, indent=4)
self.dbprint(ifa_input)
r = self.spm_ifa(spm_url, ifa_input)
if r.status_code != 200:
self.dbprint( "ERROR: SPM returned " + str(r.status_code) )
self.DEBUG_MSG = "ERROR: SPM returned " + str(r.status_code)
raise Exception ("SPM returned " + str(r.status_code))
data = r.json()
try:
report = data['output']['report']
upr.post_reconciliation_report(user, user, report)
except KeyError:
self.dbprint( colored('No IFA report', 'red'))
self.DEBUG_MSG = "No IFA report"
def sfa(self, user, password, mspl, upr_url, spm_url):
print "executing sfa"
self.dbprint("Truth and SFA...")
upr = upr_client.UPRClient(str(upr_url))
data = {}
mspl = mspl.replace("\\n", "")
mspl = mspl.replace("\\r", "")
mspl = mspl.replace("\\t", "")
mspl = mspl.replace("\\/", "")
mspl = mspl.replace("\\\"", "\"")
mspl = base64.b64encode(mspl)
data['MSPL'] = mspl
parent_json = {}
parent_json['input'] = data
sfa_input = json.dumps(parent_json, sort_keys=True, indent=4)
print "Sending to SPM"
r = self.spm_sfa(spm_url, sfa_input)
print "SPM returned " + str(r.status_code)
if r.status_code != 200:
self.dbprint( "ERROR: SPM returned " + str(r.status_code) )
self.DEBUG_MSG = "SPM returned " + str(r.status_code)
raise Exception ("SPM returned " + str(r.status_code))
data = r.json()
print "REPORT"
try:
report = base64.b64decode(data['output']['report'])
#r = upr.post_reconciliation_report(user, user, report)
return report
except KeyError:
self.dbprint( colored('No SFA report', 'red'))
self.DEBUG_MSG = "No SFA report"
def __init__(self, debug=False):
self.init_log()
# Record whether debug output is enabled
self.DEBUG = bool(debug)
def init_log(self):
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.DEBUG)
fh = logging.FileHandler("WFM.log")
fh.setLevel(logging.DEBUG)
console = logging.StreamHandler()
formatter = MyFormatter(fmt='%(asctime)s %(message)s',datefmt='%Y-%m-%d,%H:%M:%S.%f')
fh.setFormatter(formatter)
console.setFormatter(formatter)
self.logger.addHandler(console)
self.logger.addHandler(fh)
#logging.basicConfig(filename=conf.LOG_FILE,level=logging.DEBUG,format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
self.logger.info("--------")
self.logger.info("WFM running")
self.logger.info("--------")
def main():
args = argv
set_debug = None
for a in args:
if a == "--debug":
set_debug = True
args.remove('--debug')
if len(args) != 7:
print("workflow_manager_analysis.py {sfa, ifa, mifa} <username> <password>" + \
"<upr_address> <spm_address> <MSPL_ID>")
else:
script, analysis_type, user, password, upr_url, spm_url, mspl_id = argv
wfm = AnalysisWorkflowManager(debug=set_debug)
if analysis_type == "SFA":
r = wfm.sfa(user, password, mspl_id, upr_url, spm_url)
if analysis_type == "IFA":
r = wfm.ifa(user, password, upr_url, spm_url)
if analysis_type == "MIFA":
r = wfm.mifa(user, password, upr_url, spm_url)
if __name__ == '__main__':
main()
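# Hedged example invocation (credentials, URLs and the MSPL id are
# placeholders, not real endpoints); the argument order follows main():
#
#   python workflow_manager_analysis.py MIFA alice secret \
#       http://upr.example:8080 http://spm.example:8181 42 --debug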
|
|
#!/usr/bin/env python
"""
Copyright 2010-2019 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Creates a map GoF plot for a list of periods
"""
from __future__ import division, print_function
# Import Python modules
import os
import sys
import matplotlib as mpl
if (mpl.get_backend() != 'agg'):
mpl.use('Agg') # Disables use of Tk/X11
import matplotlib.colors as mcolors
import matplotlib.cm as cm
from matplotlib.ticker import FormatStrFormatter
import pylab
# Import Broadband modules
from install_cfg import InstallCfg
import plot_utils
import PlotMap
import fault_utils
import plot_config
# Constants
MIN_Y_AXIS = -1.75
MAX_Y_AXIS = 1.75
COMP_EXT_RD50 = 'rotd50'
COMP_TITLE_RD50 = 'RotD50'
DIST_PERIODS = [0.01, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0]
def read_resid(resid_file, period, summary_output):
"""
Reads the residual file resid_file and returns all data for the
requested period
"""
# Start empty
sta_x_data = []
sta_y_data = []
sta_resid_data = []
# Read residuals file and get information we need
input_file = open(resid_file, 'r')
# Parse the header and figure out which column contains the period
# we need to plot
header = input_file.readline()
header = header.strip()
items = header.split()
index = -1
for idx, item in enumerate(items):
try:
val = float(item)
if val == period:
# Found period, save index
index = idx
break
except ValueError:
pass
if index < 0:
# If we don't have this period, nothing to do
print ("Residuals file %s does not have data for period %f" %
(resid_file, period))
# Close input file
input_file.close()
# Return empty sets
return sta_x_data, sta_y_data, sta_resid_data
# Index #3 has lon, #4 has lat
# Index #12 has component
# Indexes #10 and #11 have period range for valid data
# Read the rest of the file
for line in input_file:
items = line.split()
comp = items[12]
lon = items[3]
lat = items[4]
tmin = items[10]
tmax = items[11]
value = items[index]
# Skip components we don't know
if comp != COMP_EXT_RD50:
continue
if period >= float(tmin) and period <= float(tmax):
# Data within range, take it
sta_x_data.append(float(lon))
sta_y_data.append(float(lat))
sta_resid_data.append(float(value))
# Done reading the file
input_file.close()
# Write summary output for later processing
output_file = open(summary_output, 'w')
for lon, lat, val in zip(sta_x_data, sta_y_data, sta_resid_data):
output_file.write("%f %f %f\n" % (lon, lat, val))
output_file.close()
# Return the data we found
return sta_x_data, sta_y_data, sta_resid_data
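# Hedged usage sketch (file names are placeholders): read_resid returns
# three parallel lists -- station longitudes, latitudes and residuals for
# the requested period -- and writes a "lon lat value" summary file.
#
#   xs, ys, resids = read_resid("nr-100.resid.txt", 1.0,
#                               "nr-100-resid-map-1.000-rotd50.txt")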
def plot_map_gof(r_srcfile, r_stations, resid_file, comp_label, sim_id):
"""
Reads data from resid_file and plots a map gof plot with a number
of periods
"""
# Make sure we have a src or srf file
if (r_srcfile is None or r_srcfile == "" or
(not r_srcfile.endswith(".srf") and
not r_srcfile.endswith(".src"))):
# We need a SRC or SRF file to get the fault geometry
return
# Get directory names
install = InstallCfg.getInstance()
a_indir = os.path.join(install.A_IN_DATA_DIR, str(sim_id))
a_outdir = os.path.join(install.A_OUT_DATA_DIR, str(sim_id))
a_input_file = os.path.join(a_indir, r_srcfile)
a_station_file = os.path.join(a_indir, r_stations)
# Define boundaries to plot using the stations in the station file
(north, south,
east, west) = plot_utils.set_boundaries_from_stations(a_station_file,
a_input_file)
trace_file = "%s.trace" % (a_input_file)
simple_station_file = "%s.simple" % (a_station_file)
if r_srcfile.endswith(".srf"):
plot_utils.write_fault_trace(a_input_file, trace_file)
else:
plot_utils.write_simple_trace(a_input_file, trace_file)
plot_utils.write_simple_stations(a_station_file, simple_station_file)
# Get hypo_lon, hypo_lat from src/srf file
hypo_lon, hypo_lat = fault_utils.calculate_epicenter(a_input_file)
plotregion = [west, east, south, north]
topo = os.path.join(install.A_PLOT_DATA_DIR, 'calTopo18.bf')
coastal = os.path.join(install.A_PLOT_DATA_DIR, 'gshhs_h.txt')
border = os.path.join(install.A_PLOT_DATA_DIR, 'wdb_borders_h.txt')
# Collect all the data from the residuals file
all_sta_x_data = []
all_sta_y_data = []
all_sta_resid_data = []
for period in DIST_PERIODS:
summary_output = os.path.join(a_outdir, "%s-%d-resid-map-%.3f-%s.txt" %
(comp_label, sim_id,
period, COMP_EXT_RD50))
sta_x_data, sta_y_data, sta_resid_data = read_resid(resid_file,
period,
summary_output)
all_sta_x_data.append(sta_x_data)
all_sta_y_data.append(sta_y_data)
all_sta_resid_data.append(sta_resid_data)
# Now create the map GOF
outfile = os.path.join(a_outdir, "gof-map-%s-%d-rotd50.png" %
(comp_label, sim_id))
create_map_gof(all_sta_x_data, all_sta_y_data, all_sta_resid_data,
plotregion, topo, coastal, border, trace_file,
comp_label, sim_id, outfile, hypo_lat=hypo_lat,
hypo_lon=hypo_lon)
def create_map_gof(all_sta_x_data, all_sta_y_data, all_sta_resid_data,
plotregion, topo, coastal, border, fault, comp_label,
sim_id, outfile, hypo_lat=None, hypo_lon=None):
"""
Creates a map GoF plot for all the data and periods
provided
"""
plottitle = ("GOF Comparison between %s and simulation %d" %
(comp_label, sim_id))
# Read in topo data
topo_points = PlotMap.read_topo(topo, plotregion)
# Read in fault data
fault_x, fault_y = PlotMap.read_fault(fault)
# Read coastlines
coast_x, coast_y = PlotMap.read_coastal(coastal, plotregion)
# Read borders
bord_x, bord_y = PlotMap.read_coastal(border, plotregion)
# Create figure
num_plots = len(DIST_PERIODS)
if len(DIST_PERIODS) % 2:
num_plots = num_plots + 1
num_columns = num_plots // 2
fig, axs = pylab.plt.subplots(2, num_columns)
fig.set_size_inches(12, 6.5)
#fig.autofmt_xdate()
# Setup color scale
cmap = cm.gist_gray
norm = mcolors.Normalize(vmin=-2000.0, vmax=3000.0)
# Convert to list
subfigs = []
for y_subplot in range(0, 2):
for x_subplot in range(0, num_columns):
subfigs.append(axs[y_subplot, x_subplot])
# Fixed vmin and vmax for all plots
vmin = -1.5
vmax = 1.5
# # Find vmin and vmax for all plots
# vmin = 0.0
# vmax = 0.0
# for sta_resid_data in all_sta_resid_data:
# if len(sta_resid_data):
# vmin = min(vmin, min(sta_resid_data))
# vmax = max(vmax, max(sta_resid_data))
# # But make it symmetrical
# if abs(vmax) > abs(vmin):
# vmin = -vmax
# else:
# vmax = -vmin
# Good, now walk through each subfig
for (subfig, sta_x_data, sta_y_data,
sta_resid_data, period) in zip(subfigs, all_sta_x_data, all_sta_y_data,
all_sta_resid_data, DIST_PERIODS):
# Plot basemap
subfig.imshow(topo_points, cmap=cmap, norm=norm,
extent=plotregion, interpolation='nearest')
# Freeze the axis extents
subfig.set_autoscale_on(False)
# Plot coast lines
for idx in xrange(0, len(coast_x)):
subfig.plot(coast_x[idx], coast_y[idx], linestyle='-', color='0.75')
# Plot borders
for idx in xrange(0, len(bord_x)):
subfig.plot(bord_x[idx], bord_y[idx], linestyle='-', color='0.75')
# Plot fault trace
subfig.plot(fault_x, fault_y, linestyle='-', color='k')
# If we don't have at least 1 station for this period, create
# a fake station outside of the map area so that we can still
# create the empty plot with the colorbar to the right
if not len(sta_x_data) or not len(sta_y_data):
sta_x_data = [1000.0]
sta_y_data = [1000.0]
sta_resid_data = [0.0]
# Plot hypocenter
if hypo_lat is not None and hypo_lon is not None:
hypo_lat = [hypo_lat]
hypo_lon = [hypo_lon]
subfig.scatter(hypo_lon, hypo_lat, marker=(5, 1, 0),
color='y', s=50)
# Plot the stations
im = subfig.scatter(sta_x_data, sta_y_data, s=20, c=sta_resid_data,
cmap=cm.jet_r, vmin=vmin, vmax=vmax,
marker='o')
# Adding colorbars to the right of each row
# if DIST_PERIODS.index(period) % num_columns == num_columns - 1:
# subfig.figure.colorbar(im, ax=subfig)
# Set degree formatting of tick values
major_formatter = FormatStrFormatter(u'%.1f\u00b0')
subfig.xaxis.set_major_formatter(major_formatter)
subfig.yaxis.set_major_formatter(major_formatter)
# # Turn on ticks for both sides of axis
# for tick in subfig.xaxis.get_major_ticks():
# tick.label1On = True
# tick.label2On = True
# for tick in subfig.yaxis.get_major_ticks():
# tick.label1On = True
# tick.label2On = True
# Set font size
for tick in subfig.get_xticklabels():
tick.set_fontsize(6)
tick.set_ha("right")
tick.set_rotation(30)
for tick in subfig.get_yticklabels():
tick.set_fontsize(6)
subfig.set_title("Period = %.3f s" % (period), size=8)
fig.subplots_adjust(left=0.05, right=0.91, hspace=0.0,
top=0.95, bottom=0.05)
colorbar_ax = fig.add_axes([0.93, 0.20, 0.02, 0.6])
fig.colorbar(im, cax=colorbar_ax)
fig.suptitle('%s' % (plottitle), size=12)
print("==> Created Map GoF plot: %s" % (outfile))
fig.savefig(outfile, format="png", transparent=False, dpi=plot_config.dpi)
pylab.close()
def usage():
"""
Prints usage information
"""
print("usage: %s <src_file> <stations> <resid_file> <label> <sim_id>" %
(sys.argv[0]))
return
if __name__ == '__main__':
if len(sys.argv) < 6:
usage()
sys.exit(1)
plot_map_gof(sys.argv[1], sys.argv[2], sys.argv[3],
sys.argv[4], int(sys.argv[5]))
sys.exit(0)
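# Hedged example invocation (script name and arguments are placeholders);
# see usage() above for the expected order:
#
#   python map_gof_plot.py event.src stations.stl nr-100.resid.txt NR 100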
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from builtins import next
from builtins import zip
from tempfile import NamedTemporaryFile
from airflow.utils.file import TemporaryDirectory
import gzip
import bz2
import tempfile
import os
from airflow.exceptions import AirflowException
from airflow.hooks.S3_hook import S3Hook
from airflow.hooks.hive_hooks import HiveCliHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.utils.compression import uncompress_file
class S3ToHiveTransfer(BaseOperator):
"""
Moves data from S3 to Hive. The operator downloads a file from S3,
stores the file locally before loading it into a Hive table.
If the ``create`` or ``recreate`` arguments are set to ``True``,
``CREATE TABLE`` and ``DROP TABLE`` statements are generated.
Hive data types are inferred from the cursor's metadata.
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param s3_key: The key to be retrieved from S3. (templated)
:type s3_key: str
:param field_dict: A dictionary of the fields name in the file
as keys and their Hive types as values
:type field_dict: dict
:param hive_table: target Hive table, use dot notation to target a
specific database. (templated)
:type hive_table: str
:param create: whether to create the table if it doesn't exist
:type create: bool
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param partition: target partition as a dict of partition columns
and values. (templated)
:type partition: dict
:param headers: whether the file contains column names on the first
line
:type headers: bool
:param check_headers: whether the column names on the first line should be
checked against the keys of field_dict
:type check_headers: bool
:param wildcard_match: whether the s3_key should be interpreted as a Unix
wildcard pattern
:type wildcard_match: bool
:param delimiter: field delimiter in the file
:type delimiter: str
:param aws_conn_id: source s3 connection
:type aws_conn_id: str
:param verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- False: do not validate SSL certificates. SSL will still be used
(unless use_ssl is False), but SSL certificates will not be
verified.
- path/to/cert/bundle.pem: A filename of the CA cert bundle to use.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:type verify: bool or str
:param hive_cli_conn_id: destination hive connection
:type hive_cli_conn_id: str
:param input_compressed: Boolean to determine if file decompression is
required to process headers
:type input_compressed: bool
:param tblproperties: TBLPROPERTIES of the hive table being created
:type tblproperties: dict
:param select_expression: S3 Select expression
:type select_expression: str
"""
template_fields = ('s3_key', 'partition', 'hive_table')
template_ext = ()
ui_color = '#a0e08c'
@apply_defaults
def __init__(
self,
s3_key,
field_dict,
hive_table,
delimiter=',',
create=True,
recreate=False,
partition=None,
headers=False,
check_headers=False,
wildcard_match=False,
aws_conn_id='aws_default',
verify=None,
hive_cli_conn_id='hive_cli_default',
input_compressed=False,
tblproperties=None,
select_expression=None,
*args, **kwargs):
super(S3ToHiveTransfer, self).__init__(*args, **kwargs)
self.s3_key = s3_key
self.field_dict = field_dict
self.hive_table = hive_table
self.delimiter = delimiter
self.create = create
self.recreate = recreate
self.partition = partition
self.headers = headers
self.check_headers = check_headers
self.wildcard_match = wildcard_match
self.hive_cli_conn_id = hive_cli_conn_id
self.aws_conn_id = aws_conn_id
self.verify = verify
self.input_compressed = input_compressed
self.tblproperties = tblproperties
self.select_expression = select_expression
if (self.check_headers and
not (self.field_dict is not None and self.headers)):
raise AirflowException("To check_headers provide " +
"field_dict and headers")
def execute(self, context):
# Downloading file from S3
self.s3 = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
self.hive = HiveCliHook(hive_cli_conn_id=self.hive_cli_conn_id)
self.log.info("Downloading S3 file")
if self.wildcard_match:
if not self.s3.check_for_wildcard_key(self.s3_key):
raise AirflowException("No key matches {0}"
.format(self.s3_key))
s3_key_object = self.s3.get_wildcard_key(self.s3_key)
else:
if not self.s3.check_for_key(self.s3_key):
raise AirflowException(
"The key {0} does not exists".format(self.s3_key))
s3_key_object = self.s3.get_key(self.s3_key)
root, file_ext = os.path.splitext(s3_key_object.key)
if (self.select_expression and self.input_compressed and
file_ext.lower() != '.gz'):
raise AirflowException("GZIP is the only compression " +
"format Amazon S3 Select supports")
with TemporaryDirectory(prefix='tmps32hive_') as tmp_dir,\
NamedTemporaryFile(mode="wb",
dir=tmp_dir,
suffix=file_ext) as f:
self.log.info("Dumping S3 key {0} contents to local file {1}"
.format(s3_key_object.key, f.name))
if self.select_expression:
option = {}
if self.headers:
option['FileHeaderInfo'] = 'USE'
if self.delimiter:
option['FieldDelimiter'] = self.delimiter
input_serialization = {'CSV': option}
if self.input_compressed:
input_serialization['CompressionType'] = 'GZIP'
content = self.s3.select_key(
bucket_name=s3_key_object.bucket_name,
key=s3_key_object.key,
expression=self.select_expression,
input_serialization=input_serialization
)
f.write(content.encode("utf-8"))
else:
s3_key_object.download_fileobj(f)
f.flush()
if self.select_expression or not self.headers:
self.log.info("Loading file %s into Hive", f.name)
self.hive.load_file(
f.name,
self.hive_table,
field_dict=self.field_dict,
create=self.create,
partition=self.partition,
delimiter=self.delimiter,
recreate=self.recreate,
tblproperties=self.tblproperties)
else:
# Decompressing file
if self.input_compressed:
self.log.info("Uncompressing file %s", f.name)
fn_uncompressed = uncompress_file(f.name,
file_ext,
tmp_dir)
self.log.info("Uncompressed to %s", fn_uncompressed)
# uncompressed file available now so deleting
# compressed file to save disk space
f.close()
else:
fn_uncompressed = f.name
# Testing if header matches field_dict
if self.check_headers:
self.log.info("Matching file header against field_dict")
header_list = self._get_top_row_as_list(fn_uncompressed)
if not self._match_headers(header_list):
raise AirflowException("Header check failed")
# Deleting top header row
self.log.info("Removing header from file %s", fn_uncompressed)
headless_file = (
self._delete_top_row_and_compress(fn_uncompressed,
file_ext,
tmp_dir))
self.log.info("Headless file %s", headless_file)
self.log.info("Loading file %s into Hive", headless_file)
self.hive.load_file(headless_file,
self.hive_table,
field_dict=self.field_dict,
create=self.create,
partition=self.partition,
delimiter=self.delimiter,
recreate=self.recreate,
tblproperties=self.tblproperties)
def _get_top_row_as_list(self, file_name):
with open(file_name, 'rt') as f:
header_line = f.readline().strip()
header_list = header_line.split(self.delimiter)
return header_list
def _match_headers(self, header_list):
if not header_list:
raise AirflowException("Unable to retrieve header row from file")
field_names = self.field_dict.keys()
if len(field_names) != len(header_list):
self.log.warning("Headers count mismatch"
"File headers:\n {header_list}\n"
"Field names: \n {field_names}\n"
.format(**locals()))
return False
test_field_match = [h1.lower() == h2.lower()
for h1, h2 in zip(header_list, field_names)]
if not all(test_field_match):
self.log.warning("Headers do not match field names"
"File headers:\n {header_list}\n"
"Field names: \n {field_names}\n"
.format(**locals()))
return False
else:
return True
@staticmethod
def _delete_top_row_and_compress(
input_file_name,
output_file_ext,
dest_dir):
# When output_file_ext is not defined, file is not compressed
open_fn = open
if output_file_ext.lower() == '.gz':
open_fn = gzip.GzipFile
elif output_file_ext.lower() == '.bz2':
open_fn = bz2.BZ2File
os_fh_output, fn_output = \
tempfile.mkstemp(suffix=output_file_ext, dir=dest_dir)
with open(input_file_name, 'rb') as f_in, \
open_fn(fn_output, 'wb') as f_out:
f_in.seek(0)
next(f_in)
for line in f_in:
f_out.write(line)
return fn_output
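# Hedged usage sketch (not part of the original module): wiring the
# operator into a DAG. The DAG id, S3 key, Hive table, field_dict and
# schedule below are invented placeholders.
#
# from datetime import datetime
# from airflow import DAG
#
# with DAG('s3_to_hive_example', start_date=datetime(2019, 1, 1),
#          schedule_interval=None) as dag:
#     S3ToHiveTransfer(
#         task_id='load_events',
#         s3_key='s3://my-bucket/events/{{ ds }}.csv',
#         field_dict={'id': 'BIGINT', 'name': 'STRING'},
#         hive_table='staging.events',
#         headers=True,
#         check_headers=True,
#     )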
|
|
#Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
#Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
#
#THE BSD LICENSE
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions
#are met:
#
#1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
#IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
#OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
#IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
#INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
#NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
#THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#from ctypes import *
#from ctypes.util import find_library
from numpy import (float32, float64, uint8, int32, require)
#import ctypes
#import numpy as np
from ctypes import (Structure, c_char_p, c_int, c_float, c_uint, c_long,
c_void_p, cdll, POINTER)
from numpy.ctypeslib import ndpointer
import os
import sys
STRING = c_char_p
class CustomStructure(Structure):
"""
This class extends the functionality of the ctypes Structure
class by adding custom default values to the fields and a way of
translating field types.
"""
_defaults_ = {}
_translation_ = {}
def __init__(self):
Structure.__init__(self)
self.__field_names = [ f for (f, t) in self._fields_]
self.update(self._defaults_)
def update(self, mapping):
for k, v in mapping.items():
if k in self.__field_names:
setattr(self, k, self.__translate(k, v))
else:
raise KeyError('No such member: ' + k)
def __getitem__(self, k):
if k in self.__field_names:
return self.__translate_back(k, getattr(self, k))
raise KeyError('No such member: ' + k)
def __setitem__(self, k, v):
if k in self.__field_names:
setattr(self, k, self.__translate(k, v))
else:
raise KeyError('No such member: ' + k)
def keys(self):
return self.__field_names
def __translate(self, k, v):
if k in self._translation_:
if v in self._translation_[k]:
return self._translation_[k][v]
return v
def __translate_back(self, k, v):
if k in self._translation_:
for tk, tv in self._translation_[k].items():
if tv == v:
return tk
return v
class FLANNParameters(CustomStructure):
_fields_ = [
('algorithm', c_int),
('checks', c_int),
('eps', c_float),
('sorted', c_int),
('max_neighbors', c_int),
('cores', c_int),
('trees', c_int),
('leaf_max_size', c_int),
('branching', c_int),
('iterations', c_int),
('centers_init', c_int),
('cb_index', c_float),
('target_precision', c_float),
('build_weight', c_float),
('memory_weight', c_float),
('sample_fraction', c_float),
('table_number_', c_uint),
('key_size_', c_uint),
('multi_probe_level_', c_uint),
('log_level', c_int),
('random_seed', c_long),
]
_defaults_ = {
'algorithm' : 'kdtree',
'checks' : 32,
'eps' : 0.0,
'sorted' : 1,
'max_neighbors' : -1,
'cores' : 0,
'trees' : 1,
'leaf_max_size' : 4,
'branching' : 32,
'iterations' : 5,
'centers_init' : 'random',
'cb_index' : 0.5,
'target_precision' : 0.9,
'build_weight' : 0.01,
'memory_weight' : 0.0,
'sample_fraction' : 0.1,
'table_number_': 12,
'key_size_': 20,
'multi_probe_level_': 2,
'log_level' : 'warning',
'random_seed' : -1
}
_translation_ = {
'algorithm' : {'linear' : 0, 'kdtree' : 1, 'kmeans' : 2, 'composite' : 3, 'kdtree_single' : 4, 'hierarchical': 5, 'lsh': 6, 'saved': 254, 'autotuned' : 255, 'default' : 1},
'centers_init' : {'random' : 0, 'gonzales' : 1, 'kmeanspp' : 2, 'default' : 0},
'log_level' : {'none' : 0, 'fatal' : 1, 'error' : 2, 'warning' : 3, 'info' : 4, 'default' : 2}
}
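# Hedged illustration (not from the original source): the _translation_
# table above lets callers assign symbolic names to c_int fields, e.g.
#
#   p = FLANNParameters()
#   p.update({'algorithm': 'kmeans', 'log_level': 'info'})
#   # stores algorithm=2 and log_level=4 in the underlying struct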
default_flags = ['C_CONTIGUOUS', 'ALIGNED']
allowed_types = [ float32, float64, uint8, int32]
FLANN_INDEX = c_void_p
def load_flann_library():
root_dir = os.path.abspath(os.path.dirname(__file__))
libnames = ['libflann.so']
libdir = 'lib'
if sys.platform == 'win32':
libnames = ['flann.dll', 'libflann.dll']
elif sys.platform == 'darwin':
libnames = ['libflann.dylib']
while root_dir is not None:
for libname in libnames:
try:
#print 'Trying ',os.path.join(root_dir,'lib',libname)
flannlib = cdll[os.path.join(root_dir, libdir, libname)]
return flannlib
except Exception:
pass
try:
flannlib = cdll[os.path.join(root_dir, 'build', libdir, libname)]
return flannlib
except Exception:
pass
tmp = os.path.dirname(root_dir)
if tmp == root_dir:
root_dir = None
else:
root_dir = tmp
# if we didn't find the library so far, try loading without
# a full path as a last resort
for libname in libnames:
try:
#print 'Trying',libname
flannlib = cdll[libname]
return flannlib
except Exception:
pass
return None
flannlib = load_flann_library()
if flannlib is None:
raise ImportError('Cannot load dynamic library. Did you compile FLANN?')
class FlannLib(object):
pass
flann = FlannLib()
flannlib.flann_log_verbosity.restype = None
flannlib.flann_log_verbosity.argtypes = [
c_int # level
]
flannlib.flann_set_distance_type.restype = None
flannlib.flann_set_distance_type.argtypes = [
c_int,
c_int,
]
type_mappings = ( ('float', 'float32'),
('double', 'float64'),
('byte', 'uint8'),
('int', 'int32') )
def define_functions(template):
for type in type_mappings:
eval(compile(template % {'C': type[0], 'numpy': type[1]}, '<string>', 'exec'))
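# Hedged note (not from the original source): each define_functions call
# below exec's its template once per (C type, numpy dtype) pair, so the
# build_index template, for example, wires up flann_build_index_float,
# _double, _byte and _int, keyed by numpy dtype in flann.build_index.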
flann.build_index = {}
define_functions(r"""
flannlib.flann_build_index_%(C)s.restype = FLANN_INDEX
flannlib.flann_build_index_%(C)s.argtypes = [
ndpointer(%(numpy)s, ndim=2, flags='aligned, c_contiguous'), # dataset
c_int, # rows
c_int, # cols
POINTER(c_float), # speedup
POINTER(FLANNParameters) # flann_params
]
flann.build_index[%(numpy)s] = flannlib.flann_build_index_%(C)s
""")
flann.save_index = {}
define_functions(r"""
flannlib.flann_save_index_%(C)s.restype = None
flannlib.flann_save_index_%(C)s.argtypes = [
FLANN_INDEX, # index_id
c_char_p #filename
]
flann.save_index[%(numpy)s] = flannlib.flann_save_index_%(C)s
""")
flann.load_index = {}
define_functions(r"""
flannlib.flann_load_index_%(C)s.restype = FLANN_INDEX
flannlib.flann_load_index_%(C)s.argtypes = [
c_char_p, #filename
ndpointer(%(numpy)s, ndim=2, flags='aligned, c_contiguous'), # dataset
c_int, # rows
c_int, # cols
]
flann.load_index[%(numpy)s] = flannlib.flann_load_index_%(C)s
""")
flann.find_nearest_neighbors = {}
define_functions(r"""
flannlib.flann_find_nearest_neighbors_%(C)s.restype = c_int
flannlib.flann_find_nearest_neighbors_%(C)s.argtypes = [
ndpointer(%(numpy)s, ndim=2, flags='aligned, c_contiguous'), # dataset
c_int, # rows
c_int, # cols
ndpointer(%(numpy)s, ndim=2, flags='aligned, c_contiguous'), # testset
c_int, # tcount
ndpointer(int32, ndim=2, flags='aligned, c_contiguous, writeable'), # result
ndpointer(float32, ndim=2, flags='aligned, c_contiguous, writeable'), # dists
c_int, # nn
POINTER(FLANNParameters) # flann_params
]
flann.find_nearest_neighbors[%(numpy)s] = flannlib.flann_find_nearest_neighbors_%(C)s
""")
# fix definition for the 'double' case
flannlib.flann_find_nearest_neighbors_double.restype = c_int
flannlib.flann_find_nearest_neighbors_double.argtypes = [
ndpointer(float64, ndim=2, flags='aligned, c_contiguous'), # dataset
c_int, # rows
c_int, # cols
ndpointer(float64, ndim=2, flags='aligned, c_contiguous'), # testset
c_int, # tcount
ndpointer(int32, ndim=2, flags='aligned, c_contiguous, writeable'), # result
ndpointer(float64, ndim=2, flags='aligned, c_contiguous, writeable'), # dists
c_int, # nn
POINTER(FLANNParameters) # flann_params
]
flann.find_nearest_neighbors[float64] = flannlib.flann_find_nearest_neighbors_double
flann.find_nearest_neighbors_index = {}
define_functions(r"""
flannlib.flann_find_nearest_neighbors_index_%(C)s.restype = c_int
flannlib.flann_find_nearest_neighbors_index_%(C)s.argtypes = [
FLANN_INDEX, # index_id
ndpointer(%(numpy)s, ndim=2, flags='aligned, c_contiguous'), # testset
c_int, # tcount
ndpointer(int32, ndim=2, flags='aligned, c_contiguous, writeable'), # result
ndpointer(float32, ndim=2, flags='aligned, c_contiguous, writeable'), # dists
c_int, # nn
POINTER(FLANNParameters) # flann_params
]
flann.find_nearest_neighbors_index[%(numpy)s] = flannlib.flann_find_nearest_neighbors_index_%(C)s
""")
flannlib.flann_find_nearest_neighbors_index_double.restype = c_int
flannlib.flann_find_nearest_neighbors_index_double.argtypes = [
FLANN_INDEX, # index_id
ndpointer(float64, ndim=2, flags='aligned, c_contiguous'), # testset
c_int, # tcount
ndpointer(int32, ndim=2, flags='aligned, c_contiguous, writeable'), # result
ndpointer(float64, ndim=2, flags='aligned, c_contiguous, writeable'), # dists
c_int, # nn
POINTER(FLANNParameters) # flann_params
]
flann.find_nearest_neighbors_index[float64] = flannlib.flann_find_nearest_neighbors_index_double
flann.radius_search = {}
define_functions(r"""
flannlib.flann_radius_search_%(C)s.restype = c_int
flannlib.flann_radius_search_%(C)s.argtypes = [
FLANN_INDEX, # index_id
ndpointer(%(numpy)s, ndim=1, flags='aligned, c_contiguous'), # query
ndpointer(int32, ndim=1, flags='aligned, c_contiguous, writeable'), # indices
ndpointer(float32, ndim=1, flags='aligned, c_contiguous, writeable'), # dists
c_int, # max_nn
c_float, # radius
POINTER(FLANNParameters) # flann_params
]
flann.radius_search[%(numpy)s] = flannlib.flann_radius_search_%(C)s
""")
flannlib.flann_radius_search_double.restype = c_int
flannlib.flann_radius_search_double.argtypes = [
FLANN_INDEX, # index_id
ndpointer(float64, ndim=1, flags='aligned, c_contiguous'), # query
ndpointer(int32, ndim=1, flags='aligned, c_contiguous, writeable'), # indices
ndpointer(float64, ndim=1, flags='aligned, c_contiguous, writeable'), # dists
c_int, # max_nn
c_float, # radius
POINTER(FLANNParameters) # flann_params
]
flann.radius_search[float64] = flannlib.flann_radius_search_double
flann.compute_cluster_centers = {}
define_functions(r"""
flannlib.flann_compute_cluster_centers_%(C)s.restype = c_int
flannlib.flann_compute_cluster_centers_%(C)s.argtypes = [
ndpointer(%(numpy)s, ndim=2, flags='aligned, c_contiguous'), # dataset
c_int, # rows
c_int, # cols
c_int, # clusters
ndpointer(float32, flags='aligned, c_contiguous, writeable'), # result
POINTER(FLANNParameters) # flann_params
]
flann.compute_cluster_centers[%(numpy)s] = flannlib.flann_compute_cluster_centers_%(C)s
""")
# double is an exception
flannlib.flann_compute_cluster_centers_double.restype = c_int
flannlib.flann_compute_cluster_centers_double.argtypes = [
ndpointer(float64, ndim=2, flags='aligned, c_contiguous'), # dataset
c_int, # rows
c_int, # cols
c_int, # clusters
ndpointer(float64, flags='aligned, c_contiguous, writeable'), # result
POINTER(FLANNParameters) # flann_params
]
flann.compute_cluster_centers[float64] = flannlib.flann_compute_cluster_centers_double
flann.free_index = {}
define_functions(r"""
flannlib.flann_free_index_%(C)s.restype = None
flannlib.flann_free_index_%(C)s.argtypes = [
FLANN_INDEX, # index_id
POINTER(FLANNParameters) # flann_params
]
flann.free_index[%(numpy)s] = flannlib.flann_free_index_%(C)s
""")
def ensure_2d_array(arr, flags, **kwargs):
arr = require(arr, requirements=flags, **kwargs)
if len(arr.shape) == 1:
arr = arr.reshape(-1, arr.size)
return arr
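# Hedged smoke-test sketch (not part of the original bindings): a direct
# nearest-neighbour query through the raw ctypes entry points. The random
# dataset is a placeholder, and libflann must be loadable for this to run.
if __name__ == '__main__':
    import numpy as np
    from ctypes import byref
    dataset = ensure_2d_array(np.random.rand(100, 4).astype(float32),
                              default_flags)
    testset = ensure_2d_array(np.random.rand(5, 4).astype(float32),
                              default_flags)
    result = np.empty((5, 3), dtype=int32)
    dists = np.empty((5, 3), dtype=float32)
    params = FLANNParameters()
    flann.find_nearest_neighbors[float32](
        dataset, dataset.shape[0], dataset.shape[1],
        testset, testset.shape[0], result, dists, 3, byref(params))
    print(result)  # indices of the 3 nearest dataset rows per test row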
|
|
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import re
import pytest
from indico.modules.events.abstracts.models.abstracts import Abstract, AbstractState
from indico.modules.events.abstracts.notifications import send_abstract_notifications
from indico.modules.events.abstracts.util import build_default_email_template
from indico.modules.events.contributions.models.types import ContributionType
from indico.modules.events.tracks.models.tracks import Track
from indico.util.date_time import now_utc
pytest_plugins = 'indico.modules.events.abstracts.testing.fixtures'
def assert_text_equal(v1, v2):
"""Compare two strings, ignoring white space and line breaks."""
assert re.sub(r'\s', '', v1) == re.sub(r'\s', '', v2)
@pytest.fixture
def create_dummy_track(db):
def _create(event):
track = Track(title='Dummy Track', event=event)
db.session.add(track)
db.session.flush()
return track
return _create
@pytest.fixture
def create_dummy_contrib_type(db):
def _create(event, name='Poster'):
contrib_type = ContributionType(name=name, event=event)
db.session.add(contrib_type)
db.session.flush()
return contrib_type
return _create
@pytest.fixture
def create_email_template(db):
def _create(event, position, tpl_type, title, rules, stop_on_match):
tpl = build_default_email_template(event, tpl_type)
tpl.position = position
tpl.title = title
tpl.rules = rules
tpl.stop_on_match = stop_on_match
return tpl
return _create
@pytest.fixture
def abstract_objects(dummy_abstract, create_dummy_contrib_type, create_dummy_track):
event = dummy_abstract.event
return event, dummy_abstract, create_dummy_track(event), create_dummy_contrib_type(event)
@pytest.mark.usefixtures('request_context')
def test_abstract_notification(mocker, abstract_objects, create_email_template, dummy_user):
send_email = mocker.patch('indico.modules.events.abstracts.notifications.send_email')
event, abstract, track, contrib_type = abstract_objects
event.abstract_email_templates.append(
create_email_template(event, 0, 'accept', 'accepted', [{'state': [AbstractState.accepted.value]}], True))
send_abstract_notifications(abstract)
assert send_email.call_count == 0
abstract.accepted_contrib_type = contrib_type
abstract.state = AbstractState.accepted
abstract.judge = dummy_user
abstract.judgment_dt = now_utc(False)
send_abstract_notifications(abstract)
assert send_email.call_count == 1
@pytest.mark.usefixtures('request_context')
def test_notification_rules(mocker, abstract_objects, create_email_template, dummy_user, dummy_event):
send_email = mocker.patch('indico.modules.events.abstracts.notifications.send_email')
event, abstract, track, contrib_type = abstract_objects
event.abstract_email_templates.append(
create_email_template(event, 0, 'merge', 'merged poster for track', [
{'state': [AbstractState.merged.value], 'track': [track.id]}
], True))
send_abstract_notifications(abstract)
assert send_email.call_count == 0
abstract.state = AbstractState.accepted
abstract.judge = dummy_user
abstract.judgment_dt = now_utc(False)
abstract.accepted_track = track
send_abstract_notifications(abstract)
assert send_email.call_count == 0
abstract.state = AbstractState.merged
abstract.merged_into = Abstract(title='test', submitter=dummy_user, event=dummy_event)
abstract.accepted_track = None
abstract.submitted_for_tracks = {track}
send_abstract_notifications(abstract)
assert send_email.call_count == 1
@pytest.mark.usefixtures('request_context')
def test_notification_several_conditions(db, mocker, abstract_objects, create_email_template, create_dummy_track,
create_dummy_contrib_type, dummy_user):
event, abstract, track, contrib_type = abstract_objects
event.abstract_email_templates = [
create_email_template(event, 0, 'accept', 'accepted', [
{'state': [AbstractState.accepted.value], 'track': [track.id], 'contribution_type': [contrib_type.id]},
{'state': [AbstractState.accepted.value], 'contribution_type': []}
], True)
]
send_email = mocker.patch('indico.modules.events.abstracts.notifications.send_email')
abstract.state = AbstractState.accepted
abstract.judge = dummy_user
abstract.judgment_dt = now_utc(False)
abstract.accepted_track = track
send_abstract_notifications(abstract)
assert send_email.call_count == 1
send_email.reset_mock()
abstract.accepted_contrib_type = contrib_type
send_abstract_notifications(abstract)
assert send_email.call_count == 1
send_email.reset_mock()
abstract.accepted_track = create_dummy_track(event)
abstract.accepted_contrib_type = create_dummy_contrib_type(event, name='Presentation')
db.session.flush()
send_abstract_notifications(abstract)
assert send_email.call_count == 0
@pytest.mark.usefixtures('request_context')
def test_notification_any_conditions(mocker, abstract_objects, create_email_template, dummy_user):
event, abstract, track, contrib_type = abstract_objects
event.abstract_email_templates = [
create_email_template(event, 0, 'accept', 'accepted', [
{'state': [AbstractState.accepted.value]}
], True)
]
send_email = mocker.patch('indico.modules.events.abstracts.notifications.send_email')
abstract.state = AbstractState.accepted
abstract.judge = dummy_user
abstract.judgment_dt = now_utc(False)
abstract.accepted_track = track
send_abstract_notifications(abstract)
assert send_email.call_count == 1
@pytest.mark.usefixtures('request_context')
def test_notification_stop_on_match(mocker, abstract_objects, create_email_template, dummy_user):
event, abstract, track, contrib_type = abstract_objects
event.abstract_email_templates = [
create_email_template(event, 0, 'accept', 'accepted poster', [
{'state': [AbstractState.accepted.value]}
], False),
create_email_template(event, 0, 'accept', 'accepted poster 2', [
{'state': [AbstractState.accepted.value]}
], True)
]
send_email = mocker.patch('indico.modules.events.abstracts.notifications.send_email')
abstract.state = AbstractState.accepted
abstract.judge = dummy_user
abstract.judgment_dt = now_utc(False)
send_abstract_notifications(abstract)
assert send_email.call_count == 2
send_email.reset_mock()
event.abstract_email_templates[0].stop_on_match = True
send_abstract_notifications(abstract)
assert send_email.call_count == 1
@pytest.mark.usefixtures('request_context')
def test_email_content(monkeypatch, abstract_objects, create_email_template, dummy_user):
def _mock_send_email(email, event, module, user):
assert event == ev
assert module == 'Abstracts'
assert email['subject'] == '[Indico] Abstract Acceptance notification (#314)'
assert_text_equal(email['body'], '''
Dear Guinea Pig,
We're pleased to announce that your abstract "Broken Symmetry and the Mass of Gauge Vector Mesons" with ID
#314 has been accepted in track "Dummy Track" (Poster).
See below a summary of your submitted abstract:
Conference: {event.title}
Submitted by: Guinea Pig
Title: Broken Symmetry and the Mass of Gauge Vector Mesons
Primary Authors: John Doe, Pocahontas Silva, John Smith
Co-authors:
Track classification: Dummy Track
Presentation type: Poster
For a more detailed summary please visit the page of your abstract:
http://localhost/event/-314/abstracts/1234/
Kind regards,
The organizers of {event.title}
--
Indico :: Call for Abstracts
http://localhost/event/{event.id}/
'''.format(event=ev))
ev, abstract, track, contrib_type = abstract_objects
monkeypatch.setattr('indico.modules.events.abstracts.notifications.send_email', _mock_send_email)
ev.abstract_email_templates.append(
create_email_template(ev, 0, 'accept', 'accept', [{'state': [AbstractState.accepted.value]}], True))
abstract.accepted_contrib_type = contrib_type
abstract.accepted_track = track
abstract.state = AbstractState.accepted
abstract.judge = dummy_user
abstract.judgment_dt = now_utc(False)
send_abstract_notifications(abstract)
|
|
from __future__ import absolute_import
from collections import defaultdict
import functools
import itertools
import logging
import os
from pip._vendor import pkg_resources
from pip._vendor import requests
from pip.download import (url_to_path, unpack_url)
from pip.exceptions import (InstallationError, BestVersionAlreadyInstalled,
DistributionNotFound, PreviousBuildDirError)
from pip.locations import (PIP_DELETE_MARKER_FILENAME, build_prefix)
from pip.req.req_install import InstallRequirement
from pip.utils import (display_path, rmtree, dist_in_usersite, normalize_path)
from pip.utils.logging import indent_log
from pip.vcs import vcs
logger = logging.getLogger(__name__)
class Requirements(object):
def __init__(self):
self._keys = []
self._dict = {}
def keys(self):
return self._keys
def values(self):
return [self._dict[key] for key in self._keys]
def __contains__(self, item):
return item in self._keys
def __setitem__(self, key, value):
if key not in self._keys:
self._keys.append(key)
self._dict[key] = value
def __getitem__(self, key):
return self._dict[key]
def __repr__(self):
values = ['%s: %s' % (repr(k), repr(self[k])) for k in self.keys()]
return 'Requirements({%s})' % ', '.join(values)
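# Hedged illustration (not in the original module): Requirements is an
# insertion-ordered mapping, so iteration follows the order in which
# requirements were added.
#
# >>> reqs = Requirements()
# >>> reqs['flask'] = 'Flask>=0.10'
# >>> reqs['jinja2'] = 'Jinja2'
# >>> reqs.keys()
# ['flask', 'jinja2']
# >>> repr(reqs)
# "Requirements({'flask': 'Flask>=0.10', 'jinja2': 'Jinja2'})"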
class DistAbstraction(object):
"""Abstracts out the wheel vs non-wheel prepare_files logic.
The requirements for anything installable are as follows:
- we must be able to determine the requirement name
(or we can't correctly handle the non-upgrade case).
- we must be able to generate a list of run-time dependencies
without installing any additional packages (or we would
have to either burn time by doing temporary isolated installs
or alternatively violate pip's 'don't start installing unless
all requirements are available' rule - neither of which are
desirable).
- for packages with setup requirements, we must also be able
to determine their requirements without installing additional
packages (for the same reason as run-time dependencies)
- we must be able to create a Distribution object exposing the
above metadata.
"""
def __init__(self, req_to_install):
self.req_to_install = req_to_install
def dist(self, finder):
"""Return a setuptools Dist object."""
raise NotImplementedError(self.dist)
def prep_for_dist(self):
"""Ensure that we can get a Dist for this requirement."""
raise NotImplementedError(self.prep_for_dist)
def make_abstract_dist(req_to_install):
"""Factory to make an abstract dist object.
Preconditions: Either an editable req with a source_dir, or satisfied_by or
a wheel link, or a non-editable req with a source_dir.
:return: A concrete DistAbstraction.
"""
if req_to_install.editable:
return IsSDist(req_to_install)
elif req_to_install.link and req_to_install.link.is_wheel:
return IsWheel(req_to_install)
else:
return IsSDist(req_to_install)
class IsWheel(DistAbstraction):
def dist(self, finder):
return list(pkg_resources.find_distributions(
self.req_to_install.source_dir))[0]
def prep_for_dist(self):
# FIXME:https://github.com/pypa/pip/issues/1112
pass
class IsSDist(DistAbstraction):
def dist(self, finder):
dist = self.req_to_install.get_dist()
# FIXME: shouldn't be globally added:
if dist.has_metadata('dependency_links.txt'):
finder.add_dependency_links(
dist.get_metadata_lines('dependency_links.txt')
)
return dist
def prep_for_dist(self):
self.req_to_install.run_egg_info()
self.req_to_install.assert_source_matches_version()
class Installed(DistAbstraction):
def dist(self, finder):
return self.req_to_install.satisfied_by
def prep_for_dist(self):
pass
class RequirementSet(object):
def __init__(self, build_dir, src_dir, download_dir, upgrade=False,
ignore_installed=False, as_egg=False, target_dir=None,
ignore_dependencies=False, force_reinstall=False,
use_user_site=False, session=None, pycompile=True,
isolated=False, wheel_download_dir=None):
if session is None:
raise TypeError(
"RequirementSet() missing 1 required keyword argument: "
"'session'"
)
self.build_dir = build_dir
self.src_dir = src_dir
# XXX: download_dir and wheel_download_dir overlap semantically and may
# be combinable.
self.download_dir = download_dir
self.upgrade = upgrade
self.ignore_installed = ignore_installed
self.force_reinstall = force_reinstall
self.requirements = Requirements()
# Mapping of alias: real_name
self.requirement_aliases = {}
self.unnamed_requirements = []
self.ignore_dependencies = ignore_dependencies
self.successfully_downloaded = []
self.successfully_installed = []
self.reqs_to_cleanup = []
self.as_egg = as_egg
self.use_user_site = use_user_site
self.target_dir = target_dir # set from --target option
self.session = session
self.pycompile = pycompile
self.isolated = isolated
if wheel_download_dir:
wheel_download_dir = normalize_path(wheel_download_dir)
self.wheel_download_dir = wheel_download_dir
# Maps from install_req -> dependencies_of_install_req
self._dependencies = defaultdict(list)
def __str__(self):
reqs = [req for req in self.requirements.values()
if not req.comes_from]
reqs.sort(key=lambda req: req.name.lower())
return ' '.join([str(req.req) for req in reqs])
def __repr__(self):
reqs = [req for req in self.requirements.values()]
reqs.sort(key=lambda req: req.name.lower())
reqs_str = ', '.join([str(req.req) for req in reqs])
return ('<%s object; %d requirement(s): %s>'
% (self.__class__.__name__, len(reqs), reqs_str))
def add_requirement(self, install_req, parent_req_name=None):
"""Add install_req as a requirement to install.
:param parent_req_name: The name of the requirement that needed this
added. The name is used because when multiple unnamed requirements
resolve to the same name, we could otherwise end up with dependency
links that point outside the Requirements set. parent_req must
already be added. Note that None implies that this is a user
supplied requirement, vs an inferred one.
:return: Additional requirements to scan. That is either [] if
the requirement is not applicable, or [install_req] if the
requirement is applicable and has just been added.
"""
name = install_req.name
if ((not name or not self.has_requirement(name)) and not
install_req.match_markers()):
# Only log if we haven't already got install_req from somewhere.
logger.debug("Ignore %s: markers %r don't match",
install_req.name, install_req.markers)
return []
install_req.as_egg = self.as_egg
install_req.use_user_site = self.use_user_site
install_req.target_dir = self.target_dir
install_req.pycompile = self.pycompile
if not name:
# url or path requirement w/o an egg fragment
self.unnamed_requirements.append(install_req)
return [install_req]
else:
if parent_req_name is None and self.has_requirement(name):
raise InstallationError(
'Double requirement given: %s (already in %s, name=%r)'
% (install_req, self.get_requirement(name), name))
if not self.has_requirement(name):
# Add requirement
self.requirements[name] = install_req
# FIXME: what about other normalizations? E.g., _ vs. -?
if name.lower() != name:
self.requirement_aliases[name.lower()] = name
result = [install_req]
else:
# Canonicalise to the already-added object
install_req = self.get_requirement(name)
# No need to scan, this is a duplicate requirement.
result = []
if parent_req_name:
parent_req = self.get_requirement(parent_req_name)
self._dependencies[parent_req].append(install_req)
return result
def has_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements or name in self.requirement_aliases:
return True
return False
@property
def has_requirements(self):
return list(self.requirements.values()) or self.unnamed_requirements
@property
def is_download(self):
if self.download_dir:
self.download_dir = os.path.expanduser(self.download_dir)
if os.path.exists(self.download_dir):
return True
else:
logger.critical('Could not find download directory')
raise InstallationError(
"Could not find or access download directory '%s'"
% display_path(self.download_dir))
return False
def get_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements:
return self.requirements[name]
if name in self.requirement_aliases:
return self.requirements[self.requirement_aliases[name]]
raise KeyError("No project with the name %r" % project_name)
def uninstall(self, auto_confirm=False):
for req in self.requirements.values():
req.uninstall(auto_confirm=auto_confirm)
req.commit_uninstall()
def _walk_req_to_install(self, handler):
"""Call handler for all pending reqs.
:param handler: Handle a single requirement. Should take a requirement
to install. Can optionally return an iterable of additional
InstallRequirements to cover.
"""
# The list() here is to avoid potential mutate-while-iterating bugs.
discovered_reqs = []
reqs = itertools.chain(
list(self.unnamed_requirements), list(self.requirements.values()),
discovered_reqs)
for req_to_install in reqs:
more_reqs = handler(req_to_install)
if more_reqs:
discovered_reqs.extend(more_reqs)
def locate_files(self):
"""Remove in 7.0: used by --no-download"""
self._walk_req_to_install(self._locate_file)
def _locate_file(self, req_to_install):
install_needed = True
if not self.ignore_installed and not req_to_install.editable:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site and
not dist_in_usersite(
req_to_install.satisfied_by
)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
install_needed = False
logger.info(
'Requirement already satisfied (use --upgrade to '
'upgrade): %s',
req_to_install,
)
if req_to_install.editable:
if req_to_install.source_dir is None:
req_to_install.source_dir = req_to_install.build_location(
self.src_dir
)
elif install_needed:
req_to_install.source_dir = req_to_install.build_location(
self.build_dir,
)
if (req_to_install.source_dir is not None and not
os.path.isdir(req_to_install.source_dir)):
raise InstallationError(
'Could not install requirement %s because source folder %s'
' does not exist (perhaps --no-download was used without '
'first running an equivalent install with --no-install?)' %
(req_to_install, req_to_install.source_dir)
)
def prepare_files(self, finder):
"""
Prepare process. Create temp directories, download and/or unpack files.
"""
self._walk_req_to_install(
functools.partial(self._prepare_file, finder))
def _check_skip_installed(self, req_to_install, finder):
"""Check if req_to_install should be skipped.
This will check if the req is installed, and whether we should upgrade
or reinstall it, taking into account all the relevant user options.
After calling this req_to_install will only have satisfied_by set to
None if the req_to_install is to be upgraded/reinstalled etc. Any
other value will be a dist recording the current thing installed that
satisfies the requirement.
Note that for vcs urls and the like we can't assess skipping in this
routine - we simply identify that we need to pull the thing down,
then later on it is pulled down and introspected to assess upgrade/
reinstalls etc.
:return: A text reason for why it was skipped, or None.
"""
# Check whether to upgrade/reinstall this req or not.
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
skip_reason = 'satisfied (use --upgrade to upgrade)'
if self.upgrade:
best_installed = False
# For link based requirements we have to pull the
# tree down and inspect to assess the version #, so
# it's handled way down.
if not (self.force_reinstall or req_to_install.link):
try:
finder.find_requirement(req_to_install, self.upgrade)
except BestVersionAlreadyInstalled:
skip_reason = 'up-to-date'
best_installed = True
except DistributionNotFound:
# No distribution found, so we squash the
# error - it will be raised later when we
# re-try later to do the install.
# Why don't we just raise here?
pass
if not best_installed:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site and not
dist_in_usersite(req_to_install.satisfied_by)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
return skip_reason
else:
return None
def _prepare_file(self, finder, req_to_install):
"""Prepare a single requirements files.
:return: A list of addition InstallRequirements to also install.
"""
# Tell user what we are doing for this requirement:
# obtain (editable), skipping, processing (local url), collecting
# (remote url or package name)
if req_to_install.editable:
logger.info('Obtaining %s', req_to_install)
else:
# satisfied_by is only evaluated by calling _check_skip_installed,
# so it must be None here.
assert req_to_install.satisfied_by is None
if not self.ignore_installed:
skip_reason = self._check_skip_installed(
req_to_install, finder)
if req_to_install.satisfied_by:
assert skip_reason is not None, (
'_check_skip_installed returned None but '
'req_to_install.satisfied_by is set to %r'
% (req_to_install.satisfied_by,))
logger.info(
'Requirement already %s: %s', skip_reason,
req_to_install)
else:
if (req_to_install.link and
req_to_install.link.scheme == 'file'):
path = url_to_path(req_to_install.link.url)
logger.info('Processing %s', display_path(path))
else:
logger.info('Collecting %s', req_to_install)
with indent_log():
# ################################ #
# # vcs update or unpack archive # #
# ################################ #
if req_to_install.editable:
req_to_install.ensure_has_source_dir(self.src_dir)
req_to_install.update_editable(not self.is_download)
abstract_dist = make_abstract_dist(req_to_install)
abstract_dist.prep_for_dist()
if self.is_download:
req_to_install.archive(self.download_dir)
elif req_to_install.satisfied_by:
abstract_dist = Installed(req_to_install)
else:
# @@ if filesystem packages are not marked
# editable in a req, a non deterministic error
# occurs when the script attempts to unpack the
# build directory
req_to_install.ensure_has_source_dir(self.build_dir)
# If a checkout exists, it's unwise to keep going. version
# inconsistencies are logged later, but do not fail the
# installation.
# FIXME: this won't upgrade when there's an existing
# package unpacked in `req_to_install.source_dir`
if os.path.exists(
os.path.join(req_to_install.source_dir, 'setup.py')):
raise PreviousBuildDirError(
"pip can't proceed with requirements '%s' due to a"
" pre-existing build directory (%s). This is "
"likely due to a previous installation that failed"
". pip is being responsible and not assuming it "
"can delete this. Please delete it and try again."
% (req_to_install, req_to_install.source_dir)
)
req_to_install.populate_link(finder, self.upgrade)
                # We can't hit this spot and have populate_link return None.
                # req_to_install.satisfied_by is None here (because of the
                # check above) and upgrade has no impact except when
                # satisfied_by is not None.
                # In that case find_requirement treats the existing version
                # as not applicable: if no new versions are found,
                # DistributionNotFound is raised, otherwise a result is
                # guaranteed.
assert req_to_install.link
try:
if req_to_install.link.is_wheel and \
self.wheel_download_dir:
                        # when doing `pip wheel`
download_dir = self.wheel_download_dir
do_download = True
else:
download_dir = self.download_dir
do_download = self.is_download
unpack_url(
req_to_install.link, req_to_install.source_dir,
download_dir, do_download, session=self.session,
)
except requests.HTTPError as exc:
logger.critical(
'Could not install requirement %s because '
'of error %s',
req_to_install,
exc,
)
raise InstallationError(
'Could not install requirement %s because '
'of HTTP error %s for URL %s' %
(req_to_install, exc, req_to_install.link)
)
abstract_dist = make_abstract_dist(req_to_install)
abstract_dist.prep_for_dist()
if self.is_download:
# Make a .zip of the source_dir we already created.
if req_to_install.link.scheme in vcs.all_schemes:
req_to_install.archive(self.download_dir)
                # req_to_install.req is only available after unpack for
                # URL packages, so repeat check_if_exists to support
                # uninstall-on-upgrade (#14)
if not self.ignore_installed:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade or self.ignore_installed:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site and not
dist_in_usersite(
req_to_install.satisfied_by)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
logger.info(
'Requirement already satisfied (use '
'--upgrade to upgrade): %s',
req_to_install,
)
# ###################### #
# # parse dependencies # #
# ###################### #
dist = abstract_dist.dist(finder)
more_reqs = []
def add_req(subreq):
sub_install_req = InstallRequirement(
str(subreq),
req_to_install,
isolated=self.isolated,
)
more_reqs.extend(self.add_requirement(
sub_install_req, req_to_install.name))
# We add req_to_install before its dependencies, so that we
# can refer to it when adding dependencies.
if not self.has_requirement(req_to_install.name):
# 'unnamed' requirements will get added here
self.add_requirement(req_to_install, None)
if not self.ignore_dependencies:
                if req_to_install.extras:
logger.debug(
"Installing extra requirements: %r",
','.join(req_to_install.extras),
)
missing_requested = sorted(
set(req_to_install.extras) - set(dist.extras)
)
for missing in missing_requested:
logger.warning(
'%s does not provide the extra \'%s\'',
dist, missing
)
available_requested = sorted(
set(dist.extras) & set(req_to_install.extras)
)
for subreq in dist.requires(available_requested):
add_req(subreq)
# cleanup tmp src
self.reqs_to_cleanup.append(req_to_install)
if not req_to_install.editable and not req_to_install.satisfied_by:
# XXX: --no-install leads this to report 'Successfully
# downloaded' for only non-editable reqs, even though we took
# action on them.
self.successfully_downloaded.append(req_to_install)
return more_reqs
def cleanup_files(self):
"""Clean up files, remove builds."""
logger.debug('Cleaning up...')
with indent_log():
for req in self.reqs_to_cleanup:
req.remove_temporary_source()
if self._pip_has_created_build_dir():
logger.debug('Removing temporary dir %s...', self.build_dir)
rmtree(self.build_dir)
def _pip_has_created_build_dir(self):
return (
self.build_dir == build_prefix and
os.path.exists(
os.path.join(self.build_dir, PIP_DELETE_MARKER_FILENAME)
)
)
def _to_install(self):
"""Create the installation order.
The installation order is topological - requirements are installed
before the requiring thing. We break cycles at an arbitrary point,
and make no other guarantees.
"""
# The current implementation, which we may change at any point
# installs the user specified things in the order given, except when
# dependencies must come earlier to achieve topological order.
order = []
ordered_reqs = set()
def schedule(req):
if req.satisfied_by or req in ordered_reqs:
return
ordered_reqs.add(req)
for dep in self._dependencies[req]:
schedule(dep)
order.append(req)
for install_req in self.requirements.values():
schedule(install_req)
return order
def install(self, install_options, global_options=(), *args, **kwargs):
"""
Install everything in this set (after having downloaded and unpacked
the packages)
"""
to_install = self._to_install()
# DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts)
        # move the distribute-0.7.X wrapper to the end because it does
        # not install a setuptools package. By moving it to the end, we
        # ensure its setuptools dependency is handled first, which will
        # provide the setuptools package
# TODO: take this out later
distribute_req = pkg_resources.Requirement.parse("distribute>=0.7")
        # iterate over a copy, since we mutate to_install in the loop
        for req in list(to_install):
if (req.name == 'distribute' and
req.installed_version is not None and
req.installed_version in distribute_req):
to_install.remove(req)
to_install.append(req)
if to_install:
logger.info(
'Installing collected packages: %s',
', '.join([req.name for req in to_install]),
)
with indent_log():
for requirement in to_install:
# DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts)
# when upgrading from distribute-0.6.X to the new merged
# setuptools in py2, we need to force setuptools to uninstall
# distribute. In py3, which is always using distribute, this
# conversion is already happening in distribute's
                # pkg_resources. It's ok *not* to check if setuptools>=0.7
                # because if someone were actually trying to upgrade from
                # distribute to setuptools 0.6.X, then all this could do is
                # actually help, although that upgrade path was certainly
                # never "supported"
# TODO: remove this later
if requirement.name == 'setuptools':
try:
# only uninstall distribute<0.7. For >=0.7, setuptools
# will also be present, and that's what we need to
# uninstall
distribute_requirement = \
pkg_resources.Requirement.parse("distribute<0.7")
existing_distribute = \
pkg_resources.get_distribution("distribute")
if existing_distribute in distribute_requirement:
requirement.conflicts_with = existing_distribute
except pkg_resources.DistributionNotFound:
# distribute wasn't installed, so nothing to do
pass
if requirement.conflicts_with:
logger.info(
'Found existing installation: %s',
requirement.conflicts_with,
)
with indent_log():
requirement.uninstall(auto_confirm=True)
try:
requirement.install(
install_options,
global_options,
*args,
**kwargs
)
except:
# if install did not succeed, rollback previous uninstall
if (requirement.conflicts_with and not
requirement.install_succeeded):
requirement.rollback_uninstall()
raise
else:
if (requirement.conflicts_with and
requirement.install_succeeded):
requirement.commit_uninstall()
requirement.remove_temporary_source()
self.successfully_installed = to_install
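# The ordering built by _to_install above is a depth-first post-order
# walk over the dependency graph. A minimal, self-contained sketch of
# the same idea (the names and the `deps` mapping are hypothetical,
# not part of pip's API):
def _toposort_example():
    # requirement name -> names it depends on
    deps = {
        'flask': ['werkzeug', 'jinja2'],
        'jinja2': ['markupsafe'],
        'werkzeug': [],
        'markupsafe': [],
    }
    order = []
    seen = set()
    def schedule(name):
        if name in seen:
            return  # already scheduled; also breaks cycles arbitrarily
        seen.add(name)
        for dep in deps[name]:
            schedule(dep)
        order.append(name)  # post-order: dependencies land first
    for name in deps:
        schedule(name)
    # dependencies always precede their dependents
    assert order.index('markupsafe') < order.index('jinja2')
    assert order.index('jinja2') < order.index('flask')
    return order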
|
|
# -*- coding: utf-8 -*-
"""
flaskbb.user.models
~~~~~~~~~~~~~~~~~~~~
This module provides the models for the user.
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from itsdangerous import SignatureExpired
from werkzeug.security import generate_password_hash, check_password_hash
from flask import current_app, url_for
from flask_login import UserMixin, AnonymousUserMixin
from flaskbb._compat import max_integer
from flaskbb.extensions import db, cache
from flaskbb.utils.settings import flaskbb_config
from flaskbb.forum.models import (Post, Topic, topictracker, TopicsRead,
ForumsRead)
groups_users = db.Table(
'groups_users',
db.Column('user_id', db.Integer(), db.ForeignKey('users.id')),
db.Column('group_id', db.Integer(), db.ForeignKey('groups.id')))
class Group(db.Model):
__tablename__ = "groups"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255), unique=True, nullable=False)
description = db.Column(db.Text)
# Group types
admin = db.Column(db.Boolean, default=False, nullable=False)
super_mod = db.Column(db.Boolean, default=False, nullable=False)
mod = db.Column(db.Boolean, default=False, nullable=False)
guest = db.Column(db.Boolean, default=False, nullable=False)
banned = db.Column(db.Boolean, default=False, nullable=False)
    # Moderator permissions (only available when the user is a moderator)
mod_edituser = db.Column(db.Boolean, default=False, nullable=False)
mod_banuser = db.Column(db.Boolean, default=False, nullable=False)
# User permissions
editpost = db.Column(db.Boolean, default=True, nullable=False)
deletepost = db.Column(db.Boolean, default=False, nullable=False)
deletetopic = db.Column(db.Boolean, default=False, nullable=False)
posttopic = db.Column(db.Boolean, default=True, nullable=False)
postreply = db.Column(db.Boolean, default=True, nullable=False)
# Methods
def __repr__(self):
"""Set to a unique key specific to the object in the database.
Required for cache.memoize() to work across requests.
"""
return "<{} {}>".format(self.__class__.__name__, self.id)
def save(self):
"""Saves a group"""
db.session.add(self)
db.session.commit()
return self
def delete(self):
"""Deletes a group"""
db.session.delete(self)
db.session.commit()
return self
class User(db.Model, UserMixin):
__tablename__ = "users"
__searchable__ = ['username', 'email']
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(200), unique=True, nullable=False)
email = db.Column(db.String(200), unique=True, nullable=False)
_password = db.Column('password', db.String(120), nullable=False)
    # Pass the callable, not its result, so the timestamp is taken at
    # row-insertion time instead of once at import time.
    date_joined = db.Column(db.DateTime, default=datetime.utcnow)
    lastseen = db.Column(db.DateTime, default=datetime.utcnow)
birthday = db.Column(db.DateTime)
gender = db.Column(db.String(10))
website = db.Column(db.String(200))
location = db.Column(db.String(100))
signature = db.Column(db.Text)
avatar = db.Column(db.String(200))
notes = db.Column(db.Text)
theme = db.Column(db.String(15))
posts = db.relationship("Post", backref="user", lazy="dynamic")
topics = db.relationship("Topic", backref="user", lazy="dynamic")
post_count = db.Column(db.Integer, default=0)
primary_group_id = db.Column(db.Integer, db.ForeignKey('groups.id'),
nullable=False)
primary_group = db.relationship('Group', lazy="joined",
backref="user_group", uselist=False,
foreign_keys=[primary_group_id])
secondary_groups = \
db.relationship('Group',
secondary=groups_users,
primaryjoin=(groups_users.c.user_id == id),
backref=db.backref('users', lazy='dynamic'),
lazy='dynamic')
tracked_topics = \
db.relationship("Topic", secondary=topictracker,
primaryjoin=(topictracker.c.user_id == id),
backref=db.backref("topicstracked", lazy="dynamic"),
lazy="dynamic")
# Properties
@property
def last_post(self):
"""Returns the latest post from the user"""
return Post.query.filter(Post.user_id == self.id).\
order_by(Post.date_created.desc()).first()
@property
def url(self):
"""Returns the url for the user"""
return url_for("user.profile", username=self.username)
@property
def permissions(self):
"""Returns the permissions for the user"""
return self.get_permissions()
@property
def days_registered(self):
"""Returns the amount of days the user is registered."""
days_registered = (datetime.utcnow() - self.date_joined).days
if not days_registered:
return 1
return days_registered
@property
def topic_count(self):
"""Returns the thread count"""
return Topic.query.filter(Topic.user_id == self.id).count()
@property
def posts_per_day(self):
"""Returns the posts per day count"""
return round((float(self.post_count) / float(self.days_registered)), 1)
@property
def topics_per_day(self):
"""Returns the topics per day count"""
return round((float(self.topic_count) / float(self.days_registered)), 1)
# Methods
def __repr__(self):
"""Set to a unique key specific to the object in the database.
Required for cache.memoize() to work across requests.
"""
return "<{} {}>".format(self.__class__.__name__, self.username)
def _get_password(self):
"""Returns the hashed password"""
return self._password
def _set_password(self, password):
"""Generates a password hash for the provided password"""
self._password = generate_password_hash(password)
# Hide password encryption by exposing password field only.
password = db.synonym('_password',
descriptor=property(_get_password,
_set_password))
def check_password(self, password):
"""Check passwords. If passwords match it returns true, else false"""
if self.password is None:
return False
return check_password_hash(self.password, password)
@classmethod
def authenticate(cls, login, password):
"""A classmethod for authenticating users
It returns true if the user exists and has entered a correct password
:param login: This can be either a username or a email address.
:param password: The password that is connected to username and email.
"""
user = cls.query.filter(db.or_(User.username == login,
User.email == login)).first()
if user:
authenticated = user.check_password(password)
else:
authenticated = False
return user, authenticated
def _make_token(self, data, timeout):
s = Serializer(current_app.config['SECRET_KEY'], timeout)
return s.dumps(data)
def _verify_token(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
data = None
expired, invalid = False, False
try:
data = s.loads(token)
except SignatureExpired:
expired = True
except Exception:
invalid = True
return expired, invalid, data
def make_reset_token(self, expiration=3600):
"""Creates a reset token. The duration can be configured through the
expiration parameter.
        :param expiration: The time in seconds for which the token is valid.
"""
return self._make_token({'id': self.id, 'op': 'reset'}, expiration)
def verify_reset_token(self, token):
"""Verifies a reset token. It returns three boolean values based on
the state of the token (expired, invalid, data)
:param token: The reset token that should be checked.
"""
expired, invalid, data = self._verify_token(token)
if data and data.get('id') == self.id and data.get('op') == 'reset':
data = True
else:
data = False
return expired, invalid, data
def all_topics(self, page):
"""Returns a paginated result with all topics the user has created."""
return Topic.query.filter(Topic.user_id == self.id).\
filter(Post.topic_id == Topic.id).\
order_by(Post.id.desc()).\
paginate(page, flaskbb_config['TOPICS_PER_PAGE'], False)
def all_posts(self, page):
"""Returns a paginated result with all posts the user has created."""
return Post.query.filter(Post.user_id == self.id).\
paginate(page, flaskbb_config['TOPICS_PER_PAGE'], False)
def track_topic(self, topic):
"""Tracks the specified topic
:param topic: The topic which should be added to the topic tracker.
"""
if not self.is_tracking_topic(topic):
self.tracked_topics.append(topic)
return self
def untrack_topic(self, topic):
"""Untracks the specified topic
:param topic: The topic which should be removed from the
topic tracker.
"""
if self.is_tracking_topic(topic):
self.tracked_topics.remove(topic)
return self
def is_tracking_topic(self, topic):
"""Checks if the user is already tracking this topic
:param topic: The topic which should be checked.
"""
return self.tracked_topics.filter(
topictracker.c.topic_id == topic.id).count() > 0
def add_to_group(self, group):
"""Adds the user to the `group` if he isn't in it.
:param group: The group which should be added to the user.
"""
if not self.in_group(group):
self.secondary_groups.append(group)
return self
def remove_from_group(self, group):
"""Removes the user from the `group` if he is in it.
:param group: The group which should be removed from the user.
"""
if self.in_group(group):
self.secondary_groups.remove(group)
return self
def in_group(self, group):
"""Returns True if the user is in the specified group
:param group: The group which should be checked.
"""
return self.secondary_groups.filter(
groups_users.c.group_id == group.id).count() > 0
@cache.memoize(timeout=max_integer)
def get_permissions(self, exclude=None):
"""Returns a dictionary with all the permissions the user has.
:param exclude: a list with excluded permissions. default is None.
"""
exclude = exclude or []
exclude.extend(['id', 'name', 'description'])
perms = {}
groups = self.secondary_groups.all()
groups.append(self.primary_group)
for group in groups:
for c in group.__table__.columns:
                # if the permission already exists in the dictionary
                # but is False, upgrade it to True when this group
                # grants it
try:
if not perms[c.name] and getattr(group, c.name):
perms[c.name] = True
# if the permission doesn't exist in the dictionary
# add it to the dictionary
except KeyError:
# if the permission is in the exclude list,
# skip to the next permission
if c.name in exclude:
continue
perms[c.name] = getattr(group, c.name)
return perms
def invalidate_cache(self):
"""Invalidates this objects cached metadata."""
cache.delete_memoized(self.get_permissions, self)
def ban(self):
"""Bans the user. Returns True upon success."""
if not self.get_permissions()['banned']:
banned_group = Group.query.filter(
Group.banned == True
).first()
self.primary_group_id = banned_group.id
self.save()
self.invalidate_cache()
return True
return False
def unban(self):
"""Unbans the user. Returns True upon success."""
if self.get_permissions()['banned']:
member_group = Group.query.filter(
Group.admin == False,
Group.super_mod == False,
Group.mod == False,
Group.guest == False,
Group.banned == False
).first()
self.primary_group_id = member_group.id
self.save()
self.invalidate_cache()
return True
return False
def save(self, groups=None):
"""Saves a user. If a list with groups is provided, it will add those
to the secondary groups from the user.
:param groups: A list with groups that should be added to the
secondary groups from user.
"""
if groups:
# TODO: Only remove/add groups that are selected
secondary_groups = self.secondary_groups.all()
for group in secondary_groups:
self.remove_from_group(group)
db.session.commit()
for group in groups:
# Do not add the primary group to the secondary groups
if group.id == self.primary_group_id:
continue
self.add_to_group(group)
self.invalidate_cache()
db.session.add(self)
db.session.commit()
return self
def delete(self):
"""Deletes the User."""
# This isn't done automatically...
PrivateMessage.query.filter_by(user_id=self.id).delete()
ForumsRead.query.filter_by(user_id=self.id).delete()
TopicsRead.query.filter_by(user_id=self.id).delete()
db.session.delete(self)
db.session.commit()
return self
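# A minimal sketch of the reset-token round trip that _make_token and
# _verify_token above implement (the secret key here is a made-up
# stand-in for current_app.config['SECRET_KEY']):
def _token_roundtrip_example():
    s = Serializer('not-the-real-secret-key', 3600)
    token = s.dumps({'id': 1, 'op': 'reset'})
    # loads() raises SignatureExpired once the 3600 seconds are up
    data = s.loads(token)
    assert data == {'id': 1, 'op': 'reset'}
    return token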
class Guest(AnonymousUserMixin):
@property
def permissions(self):
return self.get_permissions()
def get_permissions(self, exclude=None):
"""Returns a dictionary with all permissions the user has"""
exclude = exclude or []
exclude.extend(['id', 'name', 'description'])
perms = {}
# Get the Guest group
group = Group.query.filter_by(guest=True).first()
for c in group.__table__.columns:
if c.name in exclude:
continue
perms[c.name] = getattr(group, c.name)
return perms
class PrivateMessage(db.Model):
__tablename__ = "privatemessages"
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False)
from_user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
to_user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
subject = db.Column(db.String(255))
message = db.Column(db.Text)
    date_created = db.Column(db.DateTime, default=datetime.utcnow)
trash = db.Column(db.Boolean, nullable=False, default=False)
draft = db.Column(db.Boolean, nullable=False, default=False)
unread = db.Column(db.Boolean, nullable=False, default=True)
user = db.relationship("User", backref="pms", lazy="joined",
foreign_keys=[user_id])
from_user = db.relationship("User", lazy="joined",
foreign_keys=[from_user_id])
to_user = db.relationship("User", lazy="joined", foreign_keys=[to_user_id])
def save(self, from_user=None, to_user=None, user_id=None, draft=False):
"""Saves a private message.
:param from_user: The user who has sent the message
        :param to_user: The user who should receive the message
        :param user_id: The id of the user to whose message box this
            entry belongs.
:param draft: If the message is a draft. Defaults to ``False``.
"""
if self.id:
db.session.add(self)
db.session.commit()
return self
# Defaults to ``False``.
self.draft = draft
# Add the message to the user's pm box
self.user_id = user_id
self.from_user_id = from_user
self.to_user_id = to_user
self.date_created = datetime.utcnow()
db.session.add(self)
db.session.commit()
return self
def delete(self):
"""Deletes a private message"""
db.session.delete(self)
db.session.commit()
return self
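# The permission merge in User.get_permissions above boils down to a
# logical OR across all of a user's groups. A standalone sketch with
# plain dicts (the group contents here are hypothetical):
def _merge_permissions_example():
    primary = {'editpost': True, 'deletepost': False, 'banned': False}
    secondary = {'editpost': False, 'deletepost': True, 'banned': False}
    perms = {}
    for group in (primary, secondary):
        for name, granted in group.items():
            # a permission is granted if any group grants it
            perms[name] = perms.get(name, False) or granted
    assert perms == {'editpost': True, 'deletepost': True, 'banned': False}
    return perms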
|
|
from unittest import mock
import pytest
from opentrons import types
from opentrons import hardware_control as hc
from opentrons.config import robot_configs
from opentrons.hardware_control.types import (
Axis, CriticalPoint, OutOfBoundsMove, MotionChecks)
from opentrons.hardware_control.robot_calibration import (
RobotCalibration, DeckCalibration)
async def test_controller_home(loop, is_robot):
c = await hc.API.build_hardware_simulator(
loop=loop,
config=robot_configs.build_config({}, {}))
await c.home()
assert c._current_position == {Axis.X: 418,
Axis.Y: 353,
Axis.Z: 218,
Axis.A: 218,
Axis.B: 19,
Axis.C: 19}
c._config = c._config._replace(gantry_calibration=[[1, 0, 0, 10],
[0, 1, 0, 20],
[0, 0, 1, 30],
[0, 0, 0, 1]],
mount_offset=[0, 0, 10])
conf = c.config
assert conf.gantry_calibration == [[1, 0, 0, 10],
[0, 1, 0, 20],
[0, 0, 1, 30],
[0, 0, 0, 1]]
await c.home()
# Check that we correctly apply the inverse gantry calibration
assert c._current_position == {Axis.X: 408,
Axis.Y: 333,
Axis.Z: 188,
Axis.A: 188,
Axis.B: 19,
Axis.C: 19}
# Check that we subsequently apply mount offset
assert await c.current_position(types.Mount.RIGHT) == {Axis.X: 408,
Axis.Y: 333,
Axis.A: 188,
Axis.C: 19}
assert await c.current_position(types.Mount.LEFT) == {Axis.X: 408,
Axis.Y: 333,
Axis.Z: 198,
Axis.B: 19}
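# The inverse-calibration assertion above is plain homogeneous-coordinate
# math: with gantry_calibration M mapping deck to machine coordinates,
# the homed deck position is M^-1 @ machine. A hedged numpy sketch (not
# the production transform code):
def _inverse_gantry_cal_example():
    import numpy as np
    m = np.array([[1, 0, 0, 10],
                  [0, 1, 0, 20],
                  [0, 0, 1, 30],
                  [0, 0, 0, 1]], dtype=float)
    machine = np.array([418, 353, 218, 1], dtype=float)
    deck = np.linalg.inv(m) @ machine
    # matches the position asserted after the second home() above
    assert np.allclose(deck[:3], [408, 333, 188])
    return deck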
async def test_controller_musthome(hardware_api):
abs_position = types.Point(30, 20, 10)
mount = types.Mount.RIGHT
    home = mock.Mock()
    home.side_effect = hardware_api.home
    hardware_api.home = home  # wire the mock in so the call is recorded
    await hardware_api.move_to(mount, abs_position)
    home.assert_called_once()
async def test_home_specific_sim(hardware_api, monkeypatch, is_robot):
await hardware_api.home()
await hardware_api.move_to(types.Mount.RIGHT, types.Point(0, 10, 20))
    # Avoid the autoretract when moving two different instruments
hardware_api._last_moved_mount = None
await hardware_api.move_rel(types.Mount.LEFT, types.Point(0, 0, -20))
await hardware_api.home([Axis.Z, Axis.C])
assert hardware_api._current_position == {Axis.X: 0,
Axis.Y: 10,
Axis.Z: 218,
Axis.A: 20,
Axis.B: 19,
Axis.C: 19}
async def test_retract(hardware_api, toggle_new_calibration):
await hardware_api.home()
await hardware_api.move_to(types.Mount.RIGHT, types.Point(0, 10, 20))
await hardware_api.retract(types.Mount.RIGHT, 10)
assert hardware_api._current_position == {Axis.X: 0,
Axis.Y: 10,
Axis.Z: 218,
Axis.A: 218,
Axis.B: 19,
Axis.C: 19}
async def test_move(hardware_api, is_robot, toggle_new_calibration):
abs_position = types.Point(30, 20, 10)
mount = types.Mount.RIGHT
target_position1 = {Axis.X: 30,
Axis.Y: 20,
Axis.Z: 218,
Axis.A: 10,
Axis.B: 19,
Axis.C: 19}
await hardware_api.home()
await hardware_api.move_to(mount, abs_position)
assert hardware_api._current_position == target_position1
# This assert implicitly checks that the mount offset is not applied to
# relative moves; if you change this to move_to, the offset will be
# applied again
rel_position = types.Point(30, 20, -10)
mount2 = types.Mount.LEFT
target_position2 = {Axis.X: 60,
Axis.Y: 40,
Axis.Z: 208,
Axis.A: 218, # The other instrument is retracted
Axis.B: 19,
Axis.C: 19}
await hardware_api.move_rel(mount2, rel_position)
assert hardware_api._current_position == target_position2
async def test_move_extras_passed_through(hardware_api, monkeypatch):
mock_be_move = mock.Mock()
monkeypatch.setattr(hardware_api._backend, 'move', mock_be_move)
await hardware_api.home()
await hardware_api.move_to(types.Mount.RIGHT,
types.Point(0, 0, 0))
assert mock_be_move.call_args_list[0][1]['speed'] is None
assert mock_be_move.call_args_list[0][1]['axis_max_speeds'] == {}
mock_be_move.reset_mock()
await hardware_api.move_to(types.Mount.RIGHT,
types.Point(1, 1, 1),
speed=30,
max_speeds={Axis.X: 10})
assert mock_be_move.call_args_list[0][1]['speed'] == 30
assert mock_be_move.call_args_list[0][1]['axis_max_speeds'] == {'X': 10}
mock_be_move.reset_mock()
await hardware_api.move_rel(types.Mount.LEFT,
types.Point(1, 1, 1),
speed=40,
max_speeds={Axis.Y: 20})
assert mock_be_move.call_args_list[0][1]['speed'] == 40
assert mock_be_move.call_args_list[0][1]['axis_max_speeds'] == {'Y': 20}
async def test_mount_offset_applied(
hardware_api, is_robot, toggle_new_calibration):
await hardware_api.home()
abs_position = types.Point(30, 20, 10)
mount = types.Mount.LEFT
target_position = {Axis.X: 64,
Axis.Y: 20,
Axis.Z: 10,
Axis.A: 218,
Axis.B: 19,
Axis.C: 19}
await hardware_api.move_to(mount, abs_position)
assert hardware_api._current_position == target_position
async def test_critical_point_applied(
hardware_api, monkeypatch, is_robot, toggle_new_calibration):
await hardware_api.home()
hardware_api._backend._attached_instruments\
= {types.Mount.LEFT: {'model': None, 'id': None},
types.Mount.RIGHT: {'model': 'p10_single_v1', 'id': 'testyness'}}
await hardware_api.cache_instruments()
# Our critical point is now the tip of the nozzle
await hardware_api.move_to(types.Mount.RIGHT, types.Point(0, 0, 0))
target_no_offset = {Axis.X: 0,
Axis.Y: 0,
Axis.Z: 218,
Axis.A: 13, # from pipette-config.json model offset
Axis.B: 19,
Axis.C: 19}
assert hardware_api._current_position == target_no_offset
target = {Axis.X: 0,
Axis.Y: 0,
Axis.A: 0,
Axis.C: 19}
assert await hardware_api.current_position(types.Mount.RIGHT) == target
p10_tip_length = 33
    # Specifying the critical point override as MOUNT should not use the
    # model offset
await hardware_api.move_to(types.Mount.RIGHT, types.Point(0, 0, 0),
critical_point=CriticalPoint.MOUNT)
assert hardware_api._current_position == {Axis.X: 0.0, Axis.Y: 0.0,
Axis.Z: 218,
Axis.A: 0,
Axis.B: 19, Axis.C: 19}
assert await hardware_api.current_position(
types.Mount.RIGHT, critical_point=CriticalPoint.MOUNT)\
== {Axis.X: 0.0, Axis.Y: 0.0, Axis.A: 0, Axis.C: 19}
# Specifying the critical point as nozzle should have the same behavior
await hardware_api.move_to(types.Mount.RIGHT, types.Point(0, 0, 0),
critical_point=CriticalPoint.NOZZLE)
assert hardware_api._current_position == target_no_offset
await hardware_api.pick_up_tip(types.Mount.RIGHT, p10_tip_length)
# Now the current position (with offset applied) should change
# pos_after_pickup + model_offset + critical point
target[Axis.A] = 218 + (-13) + (-1 * p10_tip_length)
target_no_offset[Axis.C] = target[Axis.C] = 2
assert await hardware_api.current_position(types.Mount.RIGHT) == target
# This move should take the new critical point into account
await hardware_api.move_to(types.Mount.RIGHT, types.Point(0, 0, 0))
target_no_offset[Axis.A] = 46
assert hardware_api._current_position == target_no_offset
# But the position with offset should be back to the original
target[Axis.A] = 0
assert await hardware_api.current_position(types.Mount.RIGHT) == target
# And removing the tip should move us back to the original
await hardware_api.move_rel(types.Mount.RIGHT, types.Point(2.5, 0, 0))
await hardware_api.drop_tip(types.Mount.RIGHT)
await hardware_api.home_plunger(types.Mount.RIGHT)
target[Axis.A] = 33 + hc.DROP_TIP_RELEASE_DISTANCE
target_no_offset[Axis.X] = 2.5
target[Axis.X] = 2.5
assert await hardware_api.current_position(types.Mount.RIGHT) == target
await hardware_api.move_to(types.Mount.RIGHT, types.Point(0, 0, 0))
target[Axis.X] = 0
target_no_offset[Axis.X] = 0
target_no_offset[Axis.A] = 13
target[Axis.A] = 0
assert hardware_api._current_position == target_no_offset
assert await hardware_api.current_position(types.Mount.RIGHT) == target
async def test_deck_cal_applied(monkeypatch, loop):
new_gantry_cal = [[1, 0, 0, 10],
[0, 1, 0, 20],
[0, 0, 1, 30],
[0, 0, 0, 1]]
called_with = None
def mock_move(position, speed=None, home_flagged_axes=True,
axis_max_speeds=None):
nonlocal called_with
called_with = position
hardware_api = await hc.API.build_hardware_simulator(loop=loop)
monkeypatch.setattr(hardware_api._backend, 'move', mock_move)
old_config = hardware_api.config
hardware_api._config = old_config._replace(
gantry_calibration=new_gantry_cal)
await hardware_api.home()
await hardware_api.move_to(types.Mount.RIGHT, types.Point(0, 0, 0))
assert called_with['X'] == 10
assert called_with['Y'] == 20
assert called_with['A'] == 30
# Check that mount offset is also applied
await hardware_api.move_to(types.Mount.LEFT, types.Point(0, 0, 0))
assert called_with['X'] == 44
assert called_with['Y'] == 20
assert called_with['Z'] == 30
async def test_attitude_deck_cal_applied(
monkeypatch, loop, use_new_calibration):
new_gantry_cal = [
[1.0047, -0.0046, 0.0],
[0.0011, 1.0038, 0.0],
[0.0, 0.0, 1.0]]
called_with = None
def mock_move(position, speed=None, home_flagged_axes=True,
axis_max_speeds=None):
nonlocal called_with
called_with = position
hardware_api = await hc.API.build_hardware_simulator(loop=loop)
monkeypatch.setattr(hardware_api._backend, 'move', mock_move)
deck_cal = RobotCalibration(
deck_calibration=DeckCalibration(attitude=new_gantry_cal))
hardware_api.set_robot_calibration(deck_cal)
await hardware_api.home()
await hardware_api.move_to(types.Mount.RIGHT, types.Point(0, 0, 0))
assert called_with['X'] == 0.0
assert called_with['Y'] == 0.0
assert called_with['A'] == 0.0
# Check that mount offset is also applied
await hardware_api.move_to(types.Mount.LEFT, types.Point(0, 0, 0))
assert round(called_with['X'], 2) == 34.16
assert round(called_with['Y'], 2) == 0.04
assert round(called_with['Z'], 2) == 0.0
async def test_other_mount_retracted(
hardware_api, is_robot, toggle_new_calibration):
await hardware_api.home()
await hardware_api.move_to(types.Mount.RIGHT, types.Point(0, 0, 0))
assert await hardware_api.gantry_position(types.Mount.RIGHT)\
== types.Point(0, 0, 0)
await hardware_api.move_to(types.Mount.LEFT, types.Point(20, 20, 0))
assert await hardware_api.gantry_position(types.Mount.RIGHT) \
== types.Point(54, 20, 218)
async def test_shake_during_pick_up(
hardware_api, monkeypatch, toggle_new_calibration):
await hardware_api.home()
hardware_api._backend._attached_instruments\
= {types.Mount.LEFT: {'model': None, 'id': None},
types.Mount.RIGHT: {'model': 'p1000_single_v2.0',
'id': 'testyness'}}
await hardware_api.cache_instruments()
shake_tips_pick_up = mock.Mock(
side_effect=hardware_api._shake_off_tips_pick_up)
monkeypatch.setattr(hardware_api, '_shake_off_tips_pick_up',
shake_tips_pick_up)
    # Test the double shake after picking up tips
await hardware_api.pick_up_tip(types.Mount.RIGHT, 50)
shake_tip_calls = [mock.call(types.Mount.RIGHT),
mock.call(types.Mount.RIGHT)]
shake_tips_pick_up.assert_has_calls(shake_tip_calls)
move_rel = mock.Mock(side_effect=hardware_api.move_rel)
monkeypatch.setattr(hardware_api, 'move_rel', move_rel)
# Test shakes in X and Y direction with 0.3 mm shake tip distance
shake_tips_pick_up.reset_mock()
await shake_tips_pick_up(types.Mount.RIGHT)
move_rel_calls = [
mock.call(types.Mount.RIGHT, types.Point(-0.3, 0, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(0.6, 0, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(-0.3, 0, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(0, -0.3, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(0, 0.6, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(0, -0.3, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(0, 0, 20))]
move_rel.assert_has_calls(move_rel_calls)
async def test_shake_during_drop(
hardware_api, monkeypatch, toggle_new_calibration):
await hardware_api.home()
hardware_api._backend._attached_instruments\
= {types.Mount.LEFT: {'model': None, 'id': None},
types.Mount.RIGHT: {'model': 'p1000_single_v1.5',
'id': 'testyness'}}
await hardware_api.cache_instruments()
await hardware_api.add_tip(types.Mount.RIGHT, 50.0)
hardware_api.set_current_tiprack_diameter(types.Mount.RIGHT, 30.0)
shake_tips_drop = mock.Mock(
side_effect=hardware_api._shake_off_tips_drop)
monkeypatch.setattr(hardware_api, '_shake_off_tips_drop',
shake_tips_drop)
# Test single shake after drop tip
await hardware_api.drop_tip(types.Mount.RIGHT)
shake_tips_drop.assert_called_once_with(types.Mount.RIGHT, 30)
move_rel = mock.Mock(side_effect=hardware_api.move_rel)
monkeypatch.setattr(hardware_api, 'move_rel', move_rel)
# Test drop tip shake with 25% of tiprack well diameter
# between upper (2.25 mm) and lower limit (1.0 mm)
shake_tips_drop.reset_mock()
await shake_tips_drop(types.Mount.RIGHT, 2.0*4)
move_rel_calls = [
mock.call(types.Mount.RIGHT, types.Point(-2, 0, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(4, 0, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(-2, 0, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(0, 0, 20))]
move_rel.assert_has_calls(move_rel_calls)
# Test drop tip shake with 25% of tiprack well diameter
# over upper (2.25 mm) limit
shake_tips_drop.reset_mock()
await shake_tips_drop(types.Mount.RIGHT, 2.3*4)
move_rel_calls = [
mock.call(types.Mount.RIGHT, types.Point(-2.25, 0, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(4.5, 0, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(-2.25, 0, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(0, 0, 20))]
move_rel.assert_has_calls(move_rel_calls)
# Test drop tip shake with 25% of tiprack well diameter
# below lower (1.0 mm) limit
shake_tips_drop.reset_mock()
await shake_tips_drop(types.Mount.RIGHT, 0.9*4)
move_rel_calls = [
mock.call(types.Mount.RIGHT, types.Point(-1, 0, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(2, 0, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(-1, 0, 0), speed=50),
mock.call(types.Mount.RIGHT, types.Point(0, 0, 20))]
move_rel.assert_has_calls(move_rel_calls)
async def test_move_rel_bounds(
hardware_api, toggle_new_calibration):
with pytest.raises(OutOfBoundsMove):
await hardware_api.move_rel(
types.Mount.RIGHT, types.Point(0, 0, 2000),
check_bounds=MotionChecks.HIGH)
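# The three shake_tips_drop cases above all exercise one clamp rule:
# the shake distance is 25% of the tiprack well diameter, bounded to
# [1.0, 2.25] mm. A hedged sketch of that rule (the constants are
# inferred from the test expectations, not read from the source):
def _shake_distance_example(tiprack_diameter):
    shake = tiprack_diameter / 4        # 25% of the well diameter
    return max(1.0, min(2.25, shake))   # clamp to the allowed range
assert _shake_distance_example(9.2) == 2.25  # over the upper limit
assert _shake_distance_example(8.0) == 2.0   # within the limits
assert _shake_distance_example(3.6) == 1.0   # below the lower limit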
|
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# https://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.exceptions import ClientError
from boto3 import utils
from boto3.s3.transfer import (
ProgressCallbackInvoker,
S3Transfer,
TransferConfig,
create_transfer_manager,
)
def inject_s3_transfer_methods(class_attributes, **kwargs):
utils.inject_attribute(class_attributes, 'upload_file', upload_file)
utils.inject_attribute(class_attributes, 'download_file', download_file)
utils.inject_attribute(class_attributes, 'copy', copy)
utils.inject_attribute(class_attributes, 'upload_fileobj', upload_fileobj)
utils.inject_attribute(
class_attributes, 'download_fileobj', download_fileobj
)
def inject_bucket_methods(class_attributes, **kwargs):
utils.inject_attribute(class_attributes, 'load', bucket_load)
utils.inject_attribute(class_attributes, 'upload_file', bucket_upload_file)
utils.inject_attribute(
class_attributes, 'download_file', bucket_download_file
)
utils.inject_attribute(class_attributes, 'copy', bucket_copy)
utils.inject_attribute(
class_attributes, 'upload_fileobj', bucket_upload_fileobj
)
utils.inject_attribute(
class_attributes, 'download_fileobj', bucket_download_fileobj
)
def inject_object_methods(class_attributes, **kwargs):
utils.inject_attribute(class_attributes, 'upload_file', object_upload_file)
utils.inject_attribute(
class_attributes, 'download_file', object_download_file
)
utils.inject_attribute(class_attributes, 'copy', object_copy)
utils.inject_attribute(
class_attributes, 'upload_fileobj', object_upload_fileobj
)
utils.inject_attribute(
class_attributes, 'download_fileobj', object_download_fileobj
)
def inject_object_summary_methods(class_attributes, **kwargs):
utils.inject_attribute(class_attributes, 'load', object_summary_load)
def bucket_load(self, *args, **kwargs):
"""
Calls s3.Client.list_buckets() to update the attributes of the Bucket
resource.
"""
# The docstring above is phrased this way to match what the autogenerated
# docs produce.
    # We can't actually get the bucket's attributes from a HeadBucket,
    # so we need to use a ListBuckets and search for our bucket.
    # However, we may fail if we lack permissions to ListBuckets
    # or the bucket is in another account, in which case creation_date
    # will be None.
self.meta.data = {}
try:
response = self.meta.client.list_buckets()
for bucket_data in response['Buckets']:
if bucket_data['Name'] == self.name:
self.meta.data = bucket_data
break
except ClientError as e:
if not e.response.get('Error', {}).get('Code') == 'AccessDenied':
raise
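# A short usage sketch for the injected method above (the bucket name
# is hypothetical, and AWS credentials are required to actually run it):
def _bucket_load_example():
    import boto3
    bucket = boto3.resource('s3').Bucket('mybucket')
    bucket.load()  # populates bucket.meta.data via ListBuckets
    # None if ListBuckets was denied or the bucket was not found
    return bucket.creation_date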
def object_summary_load(self, *args, **kwargs):
"""
Calls s3.Client.head_object to update the attributes of the ObjectSummary
resource.
"""
response = self.meta.client.head_object(
Bucket=self.bucket_name, Key=self.key
)
if 'ContentLength' in response:
response['Size'] = response.pop('ContentLength')
self.meta.data = response
def upload_file(
self, Filename, Bucket, Key, ExtraArgs=None, Callback=None, Config=None
):
"""Upload a file to an S3 object.
Usage::
import boto3
s3 = boto3.resource('s3')
s3.meta.client.upload_file('/tmp/hello.txt', 'mybucket', 'hello.txt')
Similar behavior as S3Transfer's upload_file() method,
except that parameters are capitalized. Detailed examples can be found at
:ref:`S3Transfer's Usage <ref_s3transfer_usage>`.
:type Filename: str
:param Filename: The path to the file to upload.
:type Bucket: str
:param Bucket: The name of the bucket to upload to.
:type Key: str
:param Key: The name of the key to upload to.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation. For allowed upload arguments see
boto3.s3.transfer.S3Transfer.ALLOWED_UPLOAD_ARGS.
:type Callback: function
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the upload.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
transfer.
"""
with S3Transfer(self, Config) as transfer:
return transfer.upload_file(
filename=Filename,
bucket=Bucket,
key=Key,
extra_args=ExtraArgs,
callback=Callback,
)
def download_file(
self, Bucket, Key, Filename, ExtraArgs=None, Callback=None, Config=None
):
"""Download an S3 object to a file.
Usage::
import boto3
s3 = boto3.resource('s3')
s3.meta.client.download_file('mybucket', 'hello.txt', '/tmp/hello.txt')
Similar behavior as S3Transfer's download_file() method,
except that parameters are capitalized. Detailed examples can be found at
:ref:`S3Transfer's Usage <ref_s3transfer_usage>`.
:type Bucket: str
:param Bucket: The name of the bucket to download from.
:type Key: str
:param Key: The name of the key to download from.
:type Filename: str
:param Filename: The path to the file to download to.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation. For allowed download arguments see
boto3.s3.transfer.S3Transfer.ALLOWED_DOWNLOAD_ARGS.
:type Callback: function
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the download.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
transfer.
"""
with S3Transfer(self, Config) as transfer:
return transfer.download_file(
bucket=Bucket,
key=Key,
filename=Filename,
extra_args=ExtraArgs,
callback=Callback,
)
def bucket_upload_file(
self, Filename, Key, ExtraArgs=None, Callback=None, Config=None
):
"""Upload a file to an S3 object.
Usage::
import boto3
s3 = boto3.resource('s3')
s3.Bucket('mybucket').upload_file('/tmp/hello.txt', 'hello.txt')
Similar behavior as S3Transfer's upload_file() method,
except that parameters are capitalized. Detailed examples can be found at
:ref:`S3Transfer's Usage <ref_s3transfer_usage>`.
:type Filename: str
:param Filename: The path to the file to upload.
:type Key: str
:param Key: The name of the key to upload to.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation. For allowed upload arguments see
boto3.s3.transfer.S3Transfer.ALLOWED_UPLOAD_ARGS.
:type Callback: function
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the upload.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
transfer.
"""
return self.meta.client.upload_file(
Filename=Filename,
Bucket=self.name,
Key=Key,
ExtraArgs=ExtraArgs,
Callback=Callback,
Config=Config,
)
def bucket_download_file(
self, Key, Filename, ExtraArgs=None, Callback=None, Config=None
):
"""Download an S3 object to a file.
Usage::
import boto3
s3 = boto3.resource('s3')
s3.Bucket('mybucket').download_file('hello.txt', '/tmp/hello.txt')
Similar behavior as S3Transfer's download_file() method,
except that parameters are capitalized. Detailed examples can be found at
:ref:`S3Transfer's Usage <ref_s3transfer_usage>`.
:type Key: str
:param Key: The name of the key to download from.
:type Filename: str
:param Filename: The path to the file to download to.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation. For allowed download arguments see
boto3.s3.transfer.S3Transfer.ALLOWED_DOWNLOAD_ARGS.
:type Callback: function
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the download.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
transfer.
"""
return self.meta.client.download_file(
Bucket=self.name,
Key=Key,
Filename=Filename,
ExtraArgs=ExtraArgs,
Callback=Callback,
Config=Config,
)
def object_upload_file(
self, Filename, ExtraArgs=None, Callback=None, Config=None
):
"""Upload a file to an S3 object.
Usage::
import boto3
s3 = boto3.resource('s3')
s3.Object('mybucket', 'hello.txt').upload_file('/tmp/hello.txt')
Similar behavior as S3Transfer's upload_file() method,
except that parameters are capitalized. Detailed examples can be found at
:ref:`S3Transfer's Usage <ref_s3transfer_usage>`.
:type Filename: str
:param Filename: The path to the file to upload.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation. For allowed upload arguments see
boto3.s3.transfer.S3Transfer.ALLOWED_UPLOAD_ARGS.
:type Callback: function
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the upload.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
transfer.
"""
return self.meta.client.upload_file(
Filename=Filename,
Bucket=self.bucket_name,
Key=self.key,
ExtraArgs=ExtraArgs,
Callback=Callback,
Config=Config,
)
def object_download_file(
self, Filename, ExtraArgs=None, Callback=None, Config=None
):
"""Download an S3 object to a file.
Usage::
import boto3
s3 = boto3.resource('s3')
s3.Object('mybucket', 'hello.txt').download_file('/tmp/hello.txt')
Similar behavior as S3Transfer's download_file() method,
except that parameters are capitalized. Detailed examples can be found at
:ref:`S3Transfer's Usage <ref_s3transfer_usage>`.
:type Filename: str
:param Filename: The path to the file to download to.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation. For allowed download arguments see
boto3.s3.transfer.S3Transfer.ALLOWED_DOWNLOAD_ARGS.
:type Callback: function
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the download.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
transfer.
"""
return self.meta.client.download_file(
Bucket=self.bucket_name,
Key=self.key,
Filename=Filename,
ExtraArgs=ExtraArgs,
Callback=Callback,
Config=Config,
)
def copy(
self,
CopySource,
Bucket,
Key,
ExtraArgs=None,
Callback=None,
SourceClient=None,
Config=None,
):
"""Copy an object from one S3 location to another.
This is a managed transfer which will perform a multipart copy in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.resource('s3')
copy_source = {
'Bucket': 'mybucket',
'Key': 'mykey'
}
s3.meta.client.copy(copy_source, 'otherbucket', 'otherkey')
:type CopySource: dict
:param CopySource: The name of the source bucket, key name of the
source object, and optional version ID of the source object. The
dictionary format is:
``{'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}``. Note
that the ``VersionId`` key is optional and may be omitted.
:type Bucket: str
:param Bucket: The name of the bucket to copy to
:type Key: str
:param Key: The name of the key to copy to
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation. For allowed download arguments see
boto3.s3.transfer.S3Transfer.ALLOWED_DOWNLOAD_ARGS.
:type Callback: function
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the copy.
:type SourceClient: botocore or boto3 Client
    :param SourceClient: The client to be used for operations that
may happen at the source object. For example, this client is
used for the head_object that determines the size of the copy.
If no client is provided, the current client is used as the client
for the source object.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
copy.
"""
subscribers = None
if Callback is not None:
subscribers = [ProgressCallbackInvoker(Callback)]
config = Config
if config is None:
config = TransferConfig()
with create_transfer_manager(self, config) as manager:
future = manager.copy(
copy_source=CopySource,
bucket=Bucket,
key=Key,
extra_args=ExtraArgs,
subscribers=subscribers,
source_client=SourceClient,
)
return future.result()
def bucket_copy(
self,
CopySource,
Key,
ExtraArgs=None,
Callback=None,
SourceClient=None,
Config=None,
):
"""Copy an object from one S3 location to an object in this bucket.
This is a managed transfer which will perform a multipart copy in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.resource('s3')
copy_source = {
'Bucket': 'mybucket',
'Key': 'mykey'
}
bucket = s3.Bucket('otherbucket')
bucket.copy(copy_source, 'otherkey')
:type CopySource: dict
:param CopySource: The name of the source bucket, key name of the
source object, and optional version ID of the source object. The
dictionary format is:
``{'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}``. Note
that the ``VersionId`` key is optional and may be omitted.
:type Key: str
:param Key: The name of the key to copy to
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation. For allowed download arguments see
boto3.s3.transfer.S3Transfer.ALLOWED_DOWNLOAD_ARGS.
:type Callback: function
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the copy.
:type SourceClient: botocore or boto3 Client
    :param SourceClient: The client to be used for operations that
may happen at the source object. For example, this client is
used for the head_object that determines the size of the copy.
If no client is provided, the current client is used as the client
for the source object.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
copy.
"""
return self.meta.client.copy(
CopySource=CopySource,
Bucket=self.name,
Key=Key,
ExtraArgs=ExtraArgs,
Callback=Callback,
SourceClient=SourceClient,
Config=Config,
)
def object_copy(
self,
CopySource,
ExtraArgs=None,
Callback=None,
SourceClient=None,
Config=None,
):
"""Copy an object from one S3 location to this object.
This is a managed transfer which will perform a multipart copy in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.resource('s3')
copy_source = {
'Bucket': 'mybucket',
'Key': 'mykey'
}
bucket = s3.Bucket('otherbucket')
obj = bucket.Object('otherkey')
obj.copy(copy_source)
:type CopySource: dict
:param CopySource: The name of the source bucket, key name of the
source object, and optional version ID of the source object. The
dictionary format is:
``{'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}``. Note
that the ``VersionId`` key is optional and may be omitted.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation. For allowed download arguments see
boto3.s3.transfer.S3Transfer.ALLOWED_DOWNLOAD_ARGS.
:type Callback: function
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the copy.
:type SourceClient: botocore or boto3 Client
    :param SourceClient: The client to be used for operations that
may happen at the source object. For example, this client is
used for the head_object that determines the size of the copy.
If no client is provided, the current client is used as the client
for the source object.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
copy.
"""
return self.meta.client.copy(
CopySource=CopySource,
Bucket=self.bucket_name,
Key=self.key,
ExtraArgs=ExtraArgs,
Callback=Callback,
SourceClient=SourceClient,
Config=Config,
)
def upload_fileobj(
self, Fileobj, Bucket, Key, ExtraArgs=None, Callback=None, Config=None
):
"""Upload a file-like object to S3.
The file-like object must be in binary mode.
This is a managed transfer which will perform a multipart upload in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.client('s3')
with open('filename', 'rb') as data:
s3.upload_fileobj(data, 'mybucket', 'mykey')
:type Fileobj: a file-like object
:param Fileobj: A file-like object to upload. At a minimum, it must
implement the `read` method, and must return bytes.
:type Bucket: str
:param Bucket: The name of the bucket to upload to.
:type Key: str
:param Key: The name of the key to upload to.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation. For allowed upload arguments see
boto3.s3.transfer.S3Transfer.ALLOWED_UPLOAD_ARGS.
:type Callback: function
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the upload.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
upload.
"""
if not hasattr(Fileobj, 'read'):
raise ValueError('Fileobj must implement read')
subscribers = None
if Callback is not None:
subscribers = [ProgressCallbackInvoker(Callback)]
config = Config
if config is None:
config = TransferConfig()
with create_transfer_manager(self, config) as manager:
future = manager.upload(
fileobj=Fileobj,
bucket=Bucket,
key=Key,
extra_args=ExtraArgs,
subscribers=subscribers,
)
return future.result()
def bucket_upload_fileobj(
self, Fileobj, Key, ExtraArgs=None, Callback=None, Config=None
):
"""Upload a file-like object to this bucket.
The file-like object must be in binary mode.
This is a managed transfer which will perform a multipart upload in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.resource('s3')
bucket = s3.Bucket('mybucket')
with open('filename', 'rb') as data:
bucket.upload_fileobj(data, 'mykey')
:type Fileobj: a file-like object
:param Fileobj: A file-like object to upload. At a minimum, it must
implement the `read` method, and must return bytes.
:type Key: str
:param Key: The name of the key to upload to.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation. For allowed upload arguments see
boto3.s3.transfer.S3Transfer.ALLOWED_UPLOAD_ARGS.
:type Callback: function
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the upload.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
upload.
"""
return self.meta.client.upload_fileobj(
Fileobj=Fileobj,
Bucket=self.name,
Key=Key,
ExtraArgs=ExtraArgs,
Callback=Callback,
Config=Config,
)
def object_upload_fileobj(
self, Fileobj, ExtraArgs=None, Callback=None, Config=None
):
"""Upload a file-like object to this object.
The file-like object must be in binary mode.
This is a managed transfer which will perform a multipart upload in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.resource('s3')
bucket = s3.Bucket('mybucket')
obj = bucket.Object('mykey')
with open('filename', 'rb') as data:
obj.upload_fileobj(data)
:type Fileobj: a file-like object
:param Fileobj: A file-like object to upload. At a minimum, it must
implement the `read` method, and must return bytes.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation. For allowed upload arguments see
boto3.s3.transfer.S3Transfer.ALLOWED_UPLOAD_ARGS.
:type Callback: function
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the upload.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
upload.
"""
return self.meta.client.upload_fileobj(
Fileobj=Fileobj,
Bucket=self.bucket_name,
Key=self.key,
ExtraArgs=ExtraArgs,
Callback=Callback,
Config=Config,
)
def download_fileobj(
self, Bucket, Key, Fileobj, ExtraArgs=None, Callback=None, Config=None
):
"""Download an object from S3 to a file-like object.
The file-like object must be in binary mode.
This is a managed transfer which will perform a multipart download in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.client('s3')
with open('filename', 'wb') as data:
s3.download_fileobj('mybucket', 'mykey', data)
:type Bucket: str
:param Bucket: The name of the bucket to download from.
:type Key: str
:param Key: The name of the key to download from.
:type Fileobj: a file-like object
:param Fileobj: A file-like object to download into. At a minimum, it must
implement the `write` method and must accept bytes.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation. For allowed download arguments see
boto3.s3.transfer.S3Transfer.ALLOWED_DOWNLOAD_ARGS.
:type Callback: function
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the download.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
download.
"""
if not hasattr(Fileobj, 'write'):
raise ValueError('Fileobj must implement write')
subscribers = None
if Callback is not None:
subscribers = [ProgressCallbackInvoker(Callback)]
config = Config
if config is None:
config = TransferConfig()
with create_transfer_manager(self, config) as manager:
future = manager.download(
bucket=Bucket,
key=Key,
fileobj=Fileobj,
extra_args=ExtraArgs,
subscribers=subscribers,
)
return future.result()
def bucket_download_fileobj(
self, Key, Fileobj, ExtraArgs=None, Callback=None, Config=None
):
"""Download an object from this bucket to a file-like-object.
The file-like object must be in binary mode.
This is a managed transfer which will perform a multipart download in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.resource('s3')
bucket = s3.Bucket('mybucket')
with open('filename', 'wb') as data:
bucket.download_fileobj('mykey', data)
:type Fileobj: a file-like object
:param Fileobj: A file-like object to download into. At a minimum, it must
implement the `write` method and must accept bytes.
:type Key: str
:param Key: The name of the key to download from.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation. For allowed download arguments see
boto3.s3.transfer.S3Transfer.ALLOWED_DOWNLOAD_ARGS.
:type Callback: function
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the download.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
download.
"""
return self.meta.client.download_fileobj(
Bucket=self.name,
Key=Key,
Fileobj=Fileobj,
ExtraArgs=ExtraArgs,
Callback=Callback,
Config=Config,
)
def object_download_fileobj(
self, Fileobj, ExtraArgs=None, Callback=None, Config=None
):
"""Download this object from S3 to a file-like object.
The file-like object must be in binary mode.
This is a managed transfer which will perform a multipart download in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.resource('s3')
bucket = s3.Bucket('mybucket')
obj = bucket.Object('mykey')
with open('filename', 'wb') as data:
obj.download_fileobj(data)
:type Fileobj: a file-like object
    :param Fileobj: A file-like object to download into. At a minimum, it must
        implement the `write` method, which must accept bytes.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation. For allowed download arguments see
boto3.s3.transfer.S3Transfer.ALLOWED_DOWNLOAD_ARGS.
:type Callback: function
    :param Callback: A method which takes the number of bytes transferred,
        to be called periodically during the download.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
download.
"""
return self.meta.client.download_fileobj(
Bucket=self.bucket_name,
Key=self.key,
Fileobj=Fileobj,
ExtraArgs=ExtraArgs,
Callback=Callback,
Config=Config,
)
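# Minimal usage sketch (illustrative, not part of boto3 itself) for the
# Callback and Config hooks documented above. 'mybucket', 'mykey' and
# 'filename' are placeholders; TransferConfig and its multipart_threshold /
# max_concurrency parameters are real boto3 options.
if __name__ == '__main__':
    import boto3
    from boto3.s3.transfer import TransferConfig
    def progress(bytes_transferred):
        # Invoked periodically by the transfer manager with the number of
        # bytes moved since the previous call.
        print('transferred %d more bytes' % bytes_transferred)
    config = TransferConfig(multipart_threshold=8 * 1024 * 1024,
                            max_concurrency=4)
    s3 = boto3.resource('s3')
    with open('filename', 'rb') as data:
        s3.Bucket('mybucket').upload_fileobj(
            data, 'mykey', Callback=progress, Config=config)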
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for locally-connected layers."""
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import combinations
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
from tensorflow.python.keras.optimizer_v2 import rmsprop
from tensorflow.python.training.rmsprop import RMSPropOptimizer
# Every combination of implementation x data_format x padding exercised by
# the parameterized tests below.
_DATA_FORMAT_PADDING_IMPLEMENTATION = [
    {
        'data_format': data_format,
        'padding': padding,
        'implementation': implementation
    }
    for implementation in [1, 2, 3]
    for data_format in ['channels_first', 'channels_last']
    for padding in ['valid', 'same']
]
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class LocallyConnected1DLayersTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
def test_locallyconnected_1d(self, data_format, padding, implementation):
with self.cached_session():
num_samples = 2
num_steps = 8
input_dim = 5
filter_length = 3
filters = 4
for strides in [1]:
if padding == 'same' and strides != 1:
continue
kwargs = {
'filters': filters,
'kernel_size': filter_length,
'padding': padding,
'strides': strides,
'data_format': data_format,
'implementation': implementation
}
if padding == 'same' and implementation == 1:
self.assertRaises(ValueError, keras.layers.LocallyConnected1D,
**kwargs)
else:
testing_utils.layer_test(
keras.layers.LocallyConnected1D,
kwargs=kwargs,
input_shape=(num_samples, num_steps, input_dim))
@parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
def test_locallyconnected_1d_regularization(self, data_format, padding,
implementation):
num_samples = 2
num_steps = 8
input_dim = 5
filter_length = 3
filters = 4
kwargs = {
'filters': filters,
'kernel_size': filter_length,
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'data_format': data_format,
'implementation': implementation,
'padding': padding
}
if padding == 'same' and implementation == 1:
self.assertRaises(ValueError, keras.layers.LocallyConnected1D, **kwargs)
else:
with self.cached_session():
layer = keras.layers.LocallyConnected1D(**kwargs)
layer.build((num_samples, num_steps, input_dim))
self.assertEqual(len(layer.losses), 2)
layer(
keras.backend.variable(
np.ones((num_samples, num_steps, input_dim))))
self.assertEqual(len(layer.losses), 3)
k_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
kwargs = {
'filters': filters,
'kernel_size': filter_length,
'kernel_constraint': k_constraint,
'bias_constraint': b_constraint,
}
with self.cached_session():
layer = keras.layers.LocallyConnected1D(**kwargs)
layer.build((num_samples, num_steps, input_dim))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class LocallyConnected2DLayersTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
def test_locallyconnected_2d(self, data_format, padding, implementation):
with self.cached_session():
num_samples = 8
filters = 3
stack_size = 4
num_row = 6
num_col = 10
for strides in [(1, 1), (2, 2)]:
if padding == 'same' and strides != (1, 1):
continue
kwargs = {
'filters': filters,
'kernel_size': 3,
'padding': padding,
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'strides': strides,
'data_format': data_format,
'implementation': implementation
}
if padding == 'same' and implementation == 1:
self.assertRaises(ValueError, keras.layers.LocallyConnected2D,
**kwargs)
else:
testing_utils.layer_test(
keras.layers.LocallyConnected2D,
kwargs=kwargs,
input_shape=(num_samples, num_row, num_col, stack_size))
@parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
def test_locallyconnected_2d_channels_first(self, data_format, padding,
implementation):
with self.cached_session():
num_samples = 8
filters = 3
stack_size = 4
num_row = 6
num_col = 10
kwargs = {
'filters': filters,
'kernel_size': 3,
'data_format': data_format,
'implementation': implementation,
'padding': padding
}
if padding == 'same' and implementation == 1:
self.assertRaises(ValueError, keras.layers.LocallyConnected2D, **kwargs)
else:
testing_utils.layer_test(
keras.layers.LocallyConnected2D,
kwargs=kwargs,
input_shape=(num_samples, num_row, num_col, stack_size))
@parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
def test_locallyconnected_2d_regularization(self, data_format, padding,
implementation):
num_samples = 2
filters = 3
stack_size = 4
num_row = 6
num_col = 7
kwargs = {
'filters': filters,
'kernel_size': 3,
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'implementation': implementation,
'padding': padding,
'data_format': data_format
}
if padding == 'same' and implementation == 1:
self.assertRaises(ValueError, keras.layers.LocallyConnected2D, **kwargs)
else:
with self.cached_session():
layer = keras.layers.LocallyConnected2D(**kwargs)
layer.build((num_samples, num_row, num_col, stack_size))
self.assertEqual(len(layer.losses), 2)
layer(
keras.backend.variable(
np.ones((num_samples, num_row, num_col, stack_size))))
self.assertEqual(len(layer.losses), 3)
k_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
kwargs = {
'filters': filters,
'kernel_size': 3,
'kernel_constraint': k_constraint,
'bias_constraint': b_constraint,
}
with self.cached_session():
layer = keras.layers.LocallyConnected2D(**kwargs)
layer.build((num_samples, num_row, num_col, stack_size))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class LocallyConnectedImplementationModeTest(test.TestCase,
parameterized.TestCase):
@parameterized.parameters([
{'width': 1, 'data_format': 'channels_first'},
{'width': 1, 'data_format': 'channels_last'},
{'width': 6, 'data_format': 'channels_first'},
{'width': 6, 'data_format': 'channels_last'},
])
def test_locallyconnected_implementation(self, width, data_format):
with self.cached_session():
num_samples = 4
num_classes = 3
num_epochs = 2
np.random.seed(1)
tf_test_util.random_seed.set_seed(1)
targets = np.random.randint(0, num_classes, (num_samples,))
height = 7
filters = 2
inputs = get_inputs(data_format, filters, height, num_samples, width)
kernel_x = (3,)
kernel_y = () if width == 1 else (2,)
stride_x = (1,)
stride_y = () if width == 1 else (3,)
layers = 2
kwargs = {
'layers': layers,
'filters': filters,
'kernel_size': kernel_x + kernel_y,
'strides': stride_x + stride_y,
'data_format': data_format,
'num_classes': num_classes
}
model_1 = get_model(implementation=1, **kwargs)
model_2 = get_model(implementation=2, **kwargs)
model_3 = get_model(implementation=3, **kwargs)
# Build models.
model_1.train_on_batch(inputs, targets)
model_2.train_on_batch(inputs, targets)
model_3.train_on_batch(inputs, targets)
# Copy weights.
copy_model_weights(model_from=model_2, model_to=model_1)
copy_model_weights(model_from=model_2, model_to=model_3)
# Compare outputs at initialization.
out_1 = model_1(inputs)
out_2 = model_2(inputs)
out_3 = model_3(inputs)
self.assertAllCloseAccordingToType(
out_2, out_1, rtol=1e-5, atol=1e-5)
self.assertAllCloseAccordingToType(
out_2, out_3, rtol=1e-5, atol=1e-5)
self.assertAllCloseAccordingToType(
out_1, out_3, rtol=1e-5, atol=1e-5)
# Train.
model_1.fit(
x=inputs,
y=targets,
epochs=num_epochs,
batch_size=num_samples,
shuffle=False)
model_2.fit(
x=inputs,
y=targets,
epochs=num_epochs,
batch_size=num_samples,
shuffle=False)
model_3.fit(
x=inputs,
y=targets,
epochs=num_epochs,
batch_size=num_samples,
shuffle=False)
# Compare outputs after a few training steps.
out_1 = model_1(inputs)
out_2 = model_2(inputs)
out_3 = model_3(inputs)
self.assertAllCloseAccordingToType(
out_2, out_1, atol=2e-4)
self.assertAllCloseAccordingToType(
out_2, out_3, atol=2e-4)
self.assertAllCloseAccordingToType(
out_1, out_3, atol=2e-4)
@parameterized.parameters([
{
'width': 1,
'data_format': 'channels_first'
},
{
'width': 1,
'data_format': 'channels_last'
},
{
'width': 6,
'data_format': 'channels_first'
},
{
'width': 6,
'data_format': 'channels_last'
},
])
def test_locallyconnected_save(self, width, data_format):
with self.cached_session():
num_samples = 4
num_classes = 3
num_epochs = 2
np.random.seed(1)
tf_test_util.random_seed.set_seed(1)
targets = np.random.randint(0, num_classes, (num_samples,))
height = 7
filters = 2
inputs = get_inputs(data_format, filters, height, num_samples, width)
kernel_x = (3,)
kernel_y = () if width == 1 else (2,)
stride_x = (1,)
stride_y = () if width == 1 else (3,)
layers = 2
kwargs = {
'layers': layers,
'filters': filters,
'kernel_size': kernel_x + kernel_y,
'strides': stride_x + stride_y,
'data_format': data_format,
'num_classes': num_classes
}
model_1 = get_model_saveable(implementation=1, **kwargs)
model_2 = get_model_saveable(implementation=2, **kwargs)
model_3 = get_model_saveable(implementation=3, **kwargs)
# Train.
model_1.fit(
x=inputs,
y=targets,
epochs=num_epochs,
batch_size=num_samples,
shuffle=False)
model_2.fit(
x=inputs,
y=targets,
epochs=num_epochs,
batch_size=num_samples,
shuffle=False)
model_3.fit(
x=inputs,
y=targets,
epochs=num_epochs,
batch_size=num_samples,
shuffle=False)
out_1_before = model_1(inputs)
out_2_before = model_2(inputs)
out_3_before = model_3(inputs)
path_1 = os.path.join(self.get_temp_dir(), 'model_1_path')
model_1.save(path_1)
model_1 = keras.models.load_model(path_1, custom_objects={'xent': xent})
path_2 = os.path.join(self.get_temp_dir(), 'model_2_path')
model_2.save(path_2)
model_2 = keras.models.load_model(path_2, custom_objects={'xent': xent})
path_3 = os.path.join(self.get_temp_dir(), 'model_3_path')
model_3.save(path_3)
model_3 = keras.models.load_model(path_3, custom_objects={'xent': xent})
out_1_after = model_1(inputs)
out_2_after = model_2(inputs)
out_3_after = model_3(inputs)
self.assertAllCloseAccordingToType(out_1_before, out_1_after, atol=2e-4)
self.assertAllCloseAccordingToType(out_2_before, out_2_after, atol=2e-4)
self.assertAllCloseAccordingToType(out_3_before, out_3_after, atol=2e-4)
def test_make_2d(self):
input_shapes = [
(0,),
(0, 0),
(1,),
(2,),
(3,),
(1, 0),
(0, 3),
(1, 1),
(1, 2),
(3, 1),
(2, 2),
(3, 3),
(1, 0, 1),
(5, 2, 3),
(3, 5, 6, 7, 0),
(3, 2, 2, 4, 4),
(1, 2, 3, 4, 7, 2),
]
np.random.seed(1)
for input_shape in input_shapes:
inputs = np.random.normal(0, 1, input_shape)
inputs_tf = keras.backend.variable(inputs)
split_dim = np.random.randint(0, inputs.ndim + 1)
shape_2d = (int(np.prod(inputs.shape[:split_dim])),
int(np.prod(inputs.shape[split_dim:])))
inputs_2d = np.reshape(inputs, shape_2d)
inputs_2d_tf = keras.layers.local.make_2d(inputs_tf, split_dim)
inputs_2d_tf = keras.backend.get_value(inputs_2d_tf)
self.assertAllCloseAccordingToType(inputs_2d, inputs_2d_tf)
def get_inputs(data_format, filters, height, num_samples, width):
if data_format == 'channels_first':
if width == 1:
input_shape = (filters, height)
else:
input_shape = (filters, height, width)
elif data_format == 'channels_last':
if width == 1:
input_shape = (height, filters)
else:
input_shape = (height, width, filters)
else:
raise NotImplementedError(data_format)
inputs = np.random.normal(0, 1,
(num_samples,) + input_shape).astype(np.float32)
return inputs
def xent(y_true, y_pred):
y_true = keras.backend.cast(
keras.backend.reshape(y_true, (-1,)),
dtypes.int32)
return nn.sparse_softmax_cross_entropy_with_logits(
labels=y_true,
logits=y_pred)
def get_model(implementation,
filters,
kernel_size,
strides,
layers,
num_classes,
data_format):
model = keras.Sequential()
if len(kernel_size) == 1:
lc_layer = keras.layers.LocallyConnected1D
elif len(kernel_size) == 2:
lc_layer = keras.layers.LocallyConnected2D
else:
raise NotImplementedError(kernel_size)
for _ in range(layers):
model.add(lc_layer(
padding='valid',
kernel_initializer=keras.initializers.random_normal(),
bias_initializer=keras.initializers.random_normal(),
filters=filters,
strides=strides,
kernel_size=kernel_size,
activation=keras.activations.relu,
data_format=data_format,
implementation=implementation))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(num_classes))
model.compile(
optimizer=RMSPropOptimizer(0.01),
metrics=[keras.metrics.categorical_accuracy],
loss=xent
)
return model
def get_model_saveable(implementation, filters, kernel_size, strides, layers,
num_classes, data_format):
model = keras.Sequential()
if len(kernel_size) == 1:
lc_layer = keras.layers.LocallyConnected1D
elif len(kernel_size) == 2:
lc_layer = keras.layers.LocallyConnected2D
else:
raise NotImplementedError(kernel_size)
for _ in range(layers):
model.add(
lc_layer(
padding='valid',
kernel_initializer=keras.initializers.random_normal(),
bias_initializer=keras.initializers.random_normal(),
filters=filters,
strides=strides,
kernel_size=kernel_size,
activation=keras.activations.relu,
data_format=data_format,
implementation=implementation))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(num_classes))
model.compile(
optimizer=rmsprop.RMSProp(learning_rate=0.01),
metrics=[keras.metrics.categorical_accuracy],
loss=xent)
return model
def copy_lc_weights_2_to_1(lc_layer_2_from, lc_layer_1_to):
lc_2_kernel, lc_2_bias = lc_layer_2_from.weights
lc_2_kernel_masked = lc_2_kernel * lc_layer_2_from.kernel_mask
data_format = lc_layer_2_from.data_format
if data_format == 'channels_first':
if isinstance(lc_layer_2_from, keras.layers.LocallyConnected1D):
permutation = (3, 0, 1, 2)
elif isinstance(lc_layer_2_from, keras.layers.LocallyConnected2D):
permutation = (4, 5, 0, 1, 2, 3)
else:
raise NotImplementedError(lc_layer_2_from)
elif data_format == 'channels_last':
if isinstance(lc_layer_2_from, keras.layers.LocallyConnected1D):
permutation = (2, 0, 1, 3)
elif isinstance(lc_layer_2_from, keras.layers.LocallyConnected2D):
permutation = (3, 4, 0, 1, 2, 5)
else:
raise NotImplementedError(lc_layer_2_from)
else:
raise NotImplementedError(data_format)
lc_2_kernel_masked = keras.backend.permute_dimensions(
lc_2_kernel_masked, permutation)
lc_2_kernel_mask = math_ops.not_equal(
lc_2_kernel_masked, 0)
lc_2_kernel_flat = array_ops.boolean_mask(
lc_2_kernel_masked, lc_2_kernel_mask)
lc_2_kernel_reshaped = keras.backend.reshape(lc_2_kernel_flat,
lc_layer_1_to.kernel.shape)
lc_2_kernel_reshaped = keras.backend.get_value(lc_2_kernel_reshaped)
lc_2_bias = keras.backend.get_value(lc_2_bias)
lc_layer_1_to.set_weights([lc_2_kernel_reshaped, lc_2_bias])
def copy_lc_weights_2_to_3(lc_layer_2_from, lc_layer_3_to):
lc_2_kernel, lc_2_bias = lc_layer_2_from.weights
lc_2_kernel_masked = lc_2_kernel * lc_layer_2_from.kernel_mask
lc_2_kernel_masked = keras.layers.local.make_2d(
lc_2_kernel_masked, split_dim=keras.backend.ndim(lc_2_kernel_masked) // 2)
lc_2_kernel_masked = keras.backend.transpose(lc_2_kernel_masked)
lc_2_kernel_mask = math_ops.not_equal(lc_2_kernel_masked, 0)
lc_2_kernel_flat = array_ops.boolean_mask(
lc_2_kernel_masked, lc_2_kernel_mask)
lc_2_kernel_flat = keras.backend.get_value(lc_2_kernel_flat)
lc_2_bias = keras.backend.get_value(lc_2_bias)
lc_layer_3_to.set_weights([lc_2_kernel_flat, lc_2_bias])
def copy_model_weights(model_from, model_to):
for l in range(len(model_from.layers)):
layer_from = model_from.layers[l]
layer_to = model_to.layers[l]
if (isinstance(
layer_from,
(keras.layers.LocallyConnected2D, keras.layers.LocallyConnected1D)) and
isinstance(layer_to, (keras.layers.LocallyConnected2D,
keras.layers.LocallyConnected1D))):
if layer_from.implementation == 2:
if layer_to.implementation == 1:
copy_lc_weights_2_to_1(layer_from, layer_to)
elif layer_to.implementation == 3:
copy_lc_weights_2_to_3(layer_from, layer_to)
else:
raise NotImplementedError
else:
raise NotImplementedError
elif isinstance(layer_from, keras.layers.Dense):
weights_2, bias_2 = layer_from.weights
weights_2 = keras.backend.get_value(weights_2)
bias_2 = keras.backend.get_value(bias_2)
layer_to.set_weights([weights_2, bias_2])
else:
continue
if __name__ == '__main__':
test.main()
|
|
from ctrl_config import *
import time
def stop_actuator(actuators,stop_action):
for actuator in actuators:
actuator.stop(stop_action=stop_action)
#moving motors A,D
## TURNING
###############################################
def turn_18deg_step(actuator1,actuator2,speed_sp=SPEED_TURN,time_sp=TIME_TURN):
#Turn robot 1 step, i.e. 18 degrees
actuator1.run_timed(time_sp=time_sp,speed_sp=speed_sp,stop_action='brake')
actuator2.run_timed(time_sp=time_sp,speed_sp=-speed_sp,stop_action='brake')
def forward_1_step_time(actuator1,actuator2,speed_sp=SPEED_FWD,time_sp=TIME_FWD):
actuator1.run_timed(time_sp=time_sp,speed_sp=speed_sp,stop_action='brake')
actuator2.run_timed(time_sp=time_sp,speed_sp=speed_sp,stop_action='brake')
def turn_deg_position(actuator1, actuator2,position, speed_sp=SPEED_TURN,time_sp=TIME_TURN):
#4.2 degrees with position 25
actuator1.run_to_rel_pos(position_sp=position, speed_sp=speed_sp,stop_action='hold')
actuator2.run_to_rel_pos(position_sp=-position, speed_sp=speed_sp,stop_action='hold')
def turn_right_deg(actuator1, actuator2, degrees):
if LOG_ON==1:
print("MC: Rotate R for "+str(degrees) + " new position " + str((degrees)*TURN_RIGHT_TICKS_PER_DEG))
print("MC: position before "+ str(actuator1.position) + " " + str(actuator2.position))
turn_deg_position(actuator1, actuator2, (degrees*TURN_RIGHT_TICKS_PER_DEG))
if LOG_ON==1:
print("MC: position after "+ str(actuator1.position) + " " + str(actuator2.position))
def turn_left_deg(actuator1, actuator2, degrees):
turn_deg_position(actuator1, actuator2, -(degrees*TURN_LEFT_TICKS_PER_DEG))
## TRANSLATION
#############################################
def backward_1_step_time(actuator1,actuator2,speed_sp=SPEED_BWD,time_sp=TIME_BWD):
actuator1.run_timed(time_sp=time_sp,speed_sp=-speed_sp)
actuator2.run_timed(time_sp=time_sp,speed_sp=-speed_sp)
def forward_position(actuator1,actuator2,position,speed_sp=SPEED_FWD,time_sp=TIME_FWD):
# 1 step is 10cm with position=360
actuator1.polarity = 'normal'
actuator2.polarity = 'normal'
actuator1.run_to_rel_pos(position_sp=position,speed_sp=speed_sp,stop_action='hold')
actuator2.run_to_rel_pos(position_sp=position,speed_sp=speed_sp,stop_action='hold')
def backward_position(actuator1,actuator2,position,speed_sp=SPEED_BWD,time_sp=TIME_BWD):
# 1 step is 3cm with position=100
actuator1.polarity = 'inversed'
actuator2.polarity = 'inversed'
actuator1.run_to_rel_pos(position_sp=position,speed_sp=speed_sp,stop_action='hold')
actuator2.run_to_rel_pos(position_sp=position,speed_sp=speed_sp,stop_action='hold')
def forward_cm(actuator1, actuator2, cm):
if LOG_ON==1:
print("MC: Move for "+str(cm) + " new position " + str(cm*FORWARD_TICKS_PER_CM))
print("MC: position before "+ str(actuator1.position) + " " + str(actuator2.position))
forward_position(actuator1,actuator2, cm*FORWARD_TICKS_PER_CM)
time.sleep(cm*0.15)
if LOG_ON==1:
print("MC: position after "+ str(actuator1.position) + " " + str(actuator2.position))
def backward_cm(actuator1, actuator2, cm):
if LOG_ON==1:
print("MC: Move for "+str(cm) + " new position " + str(cm*BACKWARD_TICKS_PER_CM))
print("MC: position before "+ str(actuator1.position) + " " + str(actuator2.position))
backward_position(actuator1,actuator2, cm*BACKWARD_TICKS_PER_CM)
time.sleep(cm*0.15)
if LOG_ON==1:
print("MC: position after "+ str(actuator1.position) + " " + str(actuator2.position))
#manipulation motors C,D
# GRIPPER
################################################
# OPEN - CLOSE
################################################
def open_gripper_abs_position(actuator,position,speed_sp=SPEED_GRIP_OPEN,time_sp=None):
if LOG_ON==1:
print("MC: Gripper position before opening " + str(actuator.position))
actuator.polarity = 'normal'
actuator.stop_action = 'hold'
actuator.run_to_abs_pos(position_sp=GRIP_OPEN_POS,speed_sp=speed_sp, stop_action='hold')
time.sleep(1)
if LOG_ON==1:
print("... and gripper position after" + str(actuator.position))
def close_gripper_abs_position(actuator,position,speed_sp=SPEED_GRIP_CLOSE,time_sp=None):
if LOG_ON==1:
print("MC: Gripper position before closing " + str(actuator.position))
# actuator.polarity = 'inversed'
actuator.stop_action = 'hold'
actuator.run_to_abs_pos(position_sp=GRIP_CLOSE_POS,speed_sp=speed_sp, stop_action='hold')
# actuator.polarity = 'normal'
time.sleep(1)
if LOG_ON==1:
print("... and gripper position after " + str(actuator.position))
def open_gripper_position(actuator,position,speed_sp=SPEED_GRIP_OPEN,time_sp=None):
if LOG_ON==1:
print("MC: Gripper position before opening " + str(actuator.position))
actuator.polarity = 'normal'
actuator.stop_action = 'hold'
actuator.run_to_rel_pos(position_sp=position,speed_sp=speed_sp)
time.sleep(1)
if LOG_ON==1:
print("... and gripper position after" + str(actuator.position))
def close_gripper_position(actuator,position,speed_sp=SPEED_GRIP_CLOSE,time_sp=None):
if LOG_ON==1:
print("MC: Gripper position before closing " + str(actuator.position))
actuator.polarity = 'inversed'
actuator.stop_action = 'hold'
actuator.run_to_rel_pos(position_sp=position,speed_sp=speed_sp)
actuator.polarity = 'normal'
time.sleep(1)
if LOG_ON==1:
print("... and gripper position after " + str(actuator.position))
def open_gripper_full(actuator, position):
#for i in range(7):
#print(i)
open_gripper_position(actuator, position)
# time.sleep(0.2)
def close_gripper_full(actuator, position):
#for i in range(7):
#print(i)
close_gripper_position(actuator, position)
def open_gripper_time(actuator,speed_sp=SPEED_GRIP_OPEN,time_sp=TIME_GRIP_OPEN):
actuator.run_timed(time_sp=time_sp,speed_sp=speed_sp)
def close_gripper_time(actuator,speed_sp=SPEED_GRIP_CLOSE,time_sp=TIME_GRIP_CLOSE):
actuator.run_timed(time_sp=time_sp,speed_sp=speed_sp)
# UP - DOWN
###############################################
def lift_gripper_position(actuator,position,speed_sp=SPEED_GRIP_UP,time_sp=None):
if LOG_ON==1:
print("MC: Gripper position before lifting " + str(actuator.position))
actuator.polarity = 'normal'
actuator.stop_action = 'hold'
actuator.run_to_rel_pos(position_sp=position,speed_sp=speed_sp)
time.sleep(1)
if LOG_ON==1:
print("... and gripper position after lifting " + str(actuator.position))
def lower_gripper_position(actuator,position,speed_sp=SPEED_GRIP_DOWN,time_sp=None):
if LOG_ON==1:
print("MC: Gripper position before lowering " + str(actuator.position))
actuator.polarity = 'inversed'
actuator.stop_action = 'hold'
actuator.run_to_rel_pos(position_sp=position,speed_sp=speed_sp)
actuator.polarity = 'normal'
time.sleep(1)
if LOG_ON==1:
print("... and gripper position after lowering " + str(actuator.position))
def lift_gripper_abs_position(actuator,position,speed_sp=SPEED_GRIP_UP,time_sp=None):
if LOG_ON==1:
print("MC: Gripper position before lifting " + str(actuator.position))
actuator.polarity = 'normal'
actuator.stop_action = 'hold'
actuator.run_to_abs_pos(position_sp=GRIP_UPPER_POS,speed_sp=speed_sp)
time.sleep(1)
if LOG_ON==1:
print("... and gripper position after lifting " + str(actuator.position))
def lower_gripper_abs_position(actuator,position,speed_sp=SPEED_GRIP_DOWN,time_sp=None):
if LOG_ON==1:
print("MC: Gripper position before lowering " + str(actuator.position))
# actuator.polarity = 'inversed'
actuator.stop_action = 'hold'
actuator.run_to_abs_pos(position_sp=GRIP_LOWER_POS,speed_sp=speed_sp)
# actuator.polarity = 'normal'
time.sleep(1)
if LOG_ON==1:
print("... and gripper position after lowering " + str(actuator.position))
def lift_gripper_time(actuator,speed_sp=SPEED_GRIP_UP,time_sp=TIME_GRIP_UP):
actuator.run_timed(time_sp=time_sp,speed_sp=speed_sp,stop_action='hold')
def lower_gripper_time(actuator,speed_sp=SPEED_GRIP_DOWN,time_sp=TIME_GRIP_UP):
actuator.run_timed(time_sp=time_sp,speed_sp=speed_sp,stop_action='hold')
# BEHAVIORS - TODO: go to a different file
#############################################
def move_and_grab(actuator1, actuator2, actuator3, actuator4):
lower_gripper_position(actuator4, 60)
time.sleep(2)
forward_position(actuator1,actuator2, 600)
time.sleep(2)
lower_gripper_position(actuator4, 30)
time.sleep(2)
close_gripper_full(actuator3, 5*100)
time.sleep(2)
lift_gripper_position(actuator4, 100)
time.sleep(2)
forward_position(actuator1,actuator2, 300)
time.sleep(2)
open_gripper_full(actuator3, 3*100)
time.sleep(2)
backward_position(actuator1, actuator2, 300)
time.sleep(2)
def move_to_and_grab(actuator1, actuator2, actuator3, actuator4):
time.sleep(2)
forward_position(actuator1,actuator2, 300)
time.sleep(2)
lower_gripper_position(actuator4, 30)
time.sleep(2)
close_gripper_full(actuator3, 5*100)
time.sleep(2)
lift_gripper_position(actuator4, 100)
time.sleep(2)
#forward_1_step_position(actuator1,actuator2, 300)
#time.sleep(2)
backward_position(actuator1, actuator2, 300)
time.sleep(2)
turn_right_deg(actuator1, actuator2,90)
time.sleep(2)
open_gripper_full(actuator3, 3*100)
time.sleep(2)
def check_if_in_gripper(distance):
    return distance < DISTANCE_LIMIT_CM
def move_towards_object(actuator1, actuator2, actuator3, actuator4, distance, angle):
offset = 10
percentage = 1.0
turn_right_deg(actuator1, actuator2, angle)
time.sleep(2)
if check_if_in_gripper(mm_to_cm(distance)):
print("MC: In gripper distance")
#forward_cm(actuator1, actuator2, mm_to_cm(distance)+offset)
move_to_and_grab(actuator1, actuator2, actuator3, actuator4)
else:
print("MC: move towards object "+ str(percentage*mm_to_cm(distance)))
forward_cm(actuator1, actuator2, percentage*mm_to_cm(distance))
move_to_and_grab(actuator1, actuator2, actuator3, actuator4)
lower_gripper_position(actuator4, 80)
open_gripper_full(actuator3, 100)
def mm_to_cm(mm):
    return mm / 10.0  # float division so integer mm values never truncate
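# Minimal usage sketch, assuming an ev3dev-lang-python environment. The
# port assignments below are assumptions about this robot's wiring; speeds
# and tick constants come from ctrl_config, as in the functions above.
if __name__ == '__main__':
    from ev3dev.ev3 import LargeMotor, MediumMotor
    left = LargeMotor('outA')    # drive motors ("moving motors" above)
    right = LargeMotor('outD')
    grip = MediumMotor('outC')   # gripper open/close
    lift = MediumMotor('outB')   # gripper up/down
    forward_cm(left, right, 20)        # drive 20 cm forward
    turn_right_deg(left, right, 90)    # rotate roughly 90 degrees clockwise
    move_and_grab(left, right, grip, lift)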
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,line-too-long
"""Various type definitions to help instantiate CUTLASS kernels."""
import re
import enum
from enum import auto as enum_auto
class GeneratorTarget(enum.Enum):
Library = enum_auto()
class DataType(enum.Enum):
f16 = enum_auto()
f32 = enum_auto()
s8 = enum_auto()
u8 = enum_auto()
s32 = enum_auto()
ShortDataTypeNames = {
DataType.f16: "h",
DataType.f32: "s",
DataType.s32: "i",
}
DataTypeNames = {
DataType.f16: "f16",
DataType.f32: "f32",
DataType.s8: "s8",
DataType.u8: "u8",
DataType.s32: "s32",
}
DataTypeTag = {
DataType.f16: "cutlass::half_t",
DataType.f32: "float",
DataType.s8: "int8_t",
DataType.s32: "int32_t",
DataType.u8: "uint8_t",
}
DataTypeSize = {
DataType.f16: 16,
DataType.f32: 32,
DataType.u8: 8,
DataType.s8: 8,
DataType.s32: 32,
}
class MathOperation(enum.Enum):
multiply_add = enum_auto()
multiply_add_saturate = enum_auto()
multiply_add_fast_f32 = enum_auto()
MathOperationTag = {
MathOperation.multiply_add: "cutlass::arch::OpMultiplyAdd",
MathOperation.multiply_add_saturate: "cutlass::arch::OpMultiplyAddSaturate",
MathOperation.multiply_add_fast_f32: "cutlass::arch::OpMultiplyAddFastF32",
}
class LayoutType(enum.Enum):
ColumnMajor = enum_auto()
RowMajor = enum_auto()
TensorNHWC = enum_auto()
LayoutTag = {
LayoutType.ColumnMajor: "cutlass::layout::ColumnMajor",
LayoutType.RowMajor: "cutlass::layout::RowMajor",
LayoutType.TensorNHWC: "cutlass::layout::TensorNHWC",
}
TransposedLayout = {
LayoutType.ColumnMajor: LayoutType.RowMajor,
LayoutType.RowMajor: LayoutType.ColumnMajor,
LayoutType.TensorNHWC: LayoutType.TensorNHWC,
}
ShortLayoutTypeNames = {
LayoutType.ColumnMajor: "n",
LayoutType.RowMajor: "t",
LayoutType.TensorNHWC: "nhwc",
}
class OpcodeClass(enum.Enum):
Simt = enum_auto()
TensorOp = enum_auto()
WmmaTensorOp = enum_auto()
OpcodeClassNames = {
OpcodeClass.Simt: "simt",
OpcodeClass.TensorOp: "tensorop",
OpcodeClass.WmmaTensorOp: "wmma_tensorop",
}
OpcodeClassTag = {
OpcodeClass.Simt: "cutlass::arch::OpClassSimt",
OpcodeClass.TensorOp: "cutlass::arch::OpClassTensorOp",
OpcodeClass.WmmaTensorOp: "cutlass::arch::OpClassWmmaTensorOp",
}
class OperationKind(enum.Enum):
Gemm = enum_auto()
Conv2d = enum_auto()
OperationKindNames = {OperationKind.Gemm: "gemm", OperationKind.Conv2d: "conv2d"}
class Target(enum.Enum):
library = enum_auto()
def substitute_template(template, values):
"""Instantiate a kernel template using `values`."""
text = template
changed = True
while changed:
changed = False
for key, value in values.items():
regex = "\\$\\{%s\\}" % key
newtext = re.sub(regex, value, text)
if newtext != text:
changed = True
text = newtext
return text
class GemmKind(enum.Enum):
Gemm = enum_auto()
GemmKindNames = {
GemmKind.Gemm: "gemm",
}
class EpilogueFunctor(enum.Enum):
LinearCombination = enum_auto()
LinearCombinationRelu = enum_auto()
LinearCombinationBias = enum_auto()
LinearCombinationGelu = enum_auto()
LinearCombinationSigmoid = enum_auto()
LinearCombinationSilu = enum_auto()
LinearCombinationHardSwish = enum_auto()
LinearCombinationResidualBlock = enum_auto()
EpilogueFunctorTag = {
EpilogueFunctor.LinearCombination: "cutlass::epilogue::thread::LinearCombination",
EpilogueFunctor.LinearCombinationRelu: "cutlass::epilogue::thread::LinearCombinationRelu",
EpilogueFunctor.LinearCombinationBias: "cutlass::epilogue::thread::LinearCombination",
EpilogueFunctor.LinearCombinationGelu: "cutlass::epilogue::thread::LinearCombinationGELU",
EpilogueFunctor.LinearCombinationSigmoid: "cutlass::epilogue::thread::LinearCombinationSigmoid",
EpilogueFunctor.LinearCombinationSilu: "cutlass::epilogue::thread::LinearCombinationSilu",
EpilogueFunctor.LinearCombinationHardSwish: "cutlass::epilogue::thread::LinearCombinationHardSwish",
EpilogueFunctor.LinearCombinationResidualBlock: "cutlass::epilogue::thread::LinearCombinationResidualBlock",
}
class SwizzlingFunctor(enum.Enum):
Identity1 = enum_auto()
Identity2 = enum_auto()
Identity4 = enum_auto()
Identity8 = enum_auto()
Batched = enum_auto()
StridedDgradIdentity1 = enum_auto()
StridedDgradIdentity4 = enum_auto()
SwizzlingFunctorTag = {
SwizzlingFunctor.Identity1: "cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>",
SwizzlingFunctor.Identity2: "cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<2>",
SwizzlingFunctor.Identity4: "cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<4>",
SwizzlingFunctor.Identity8: "cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>",
SwizzlingFunctor.Batched: "cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle",
SwizzlingFunctor.StridedDgradIdentity1: "cutlass::conv::threadblock::StridedDgradIdentityThreadblockSwizzle<1>",
SwizzlingFunctor.StridedDgradIdentity4: "cutlass::conv::threadblock::StridedDgradIdentityThreadblockSwizzle<4>",
}
class ConvKind(enum.Enum):
Fprop = enum_auto()
Dgrad = enum_auto()
Wgrad = enum_auto()
ConvKindTag = {
ConvKind.Fprop: "cutlass::conv::Operator::kFprop",
ConvKind.Dgrad: "cutlass::conv::Operator::kDgrad",
ConvKind.Wgrad: "cutlass::conv::Operator::kWgrad",
}
ConvKindNames = {
ConvKind.Fprop: "fprop",
ConvKind.Dgrad: "dgrad",
ConvKind.Wgrad: "wgrad",
}
class StrideSupport(enum.Enum):
Strided = enum_auto()
Unity = enum_auto()
StrideSupportTag = {
StrideSupport.Strided: "cutlass::conv::StrideSupport::kStrided",
StrideSupport.Unity: "cutlass::conv::StrideSupport::kUnity",
}
StrideSupportNames = {
StrideSupport.Strided: "",
StrideSupport.Unity: "unity_stride",
}
class IteratorAlgorithm(enum.Enum):
Analytic = enum_auto()
Optimized = enum_auto()
IteratorAlgorithmTag = {
IteratorAlgorithm.Analytic: "cutlass::conv::IteratorAlgorithm::kAnalytic",
IteratorAlgorithm.Optimized: "cutlass::conv::IteratorAlgorithm::kOptimized",
}
IteratorAlgorithmNames = {
IteratorAlgorithm.Analytic: "analytic",
IteratorAlgorithm.Optimized: "optimized",
}
class MathInstruction:
"""Describe characteristics of a math instruction."""
def __init__(
self,
instruction_shape,
element_a,
element_b,
element_c,
element_accumulator,
opcode_class,
math_operation=MathOperation.multiply_add,
):
self.instruction_shape = instruction_shape
self.element_a = element_a
self.element_b = element_b
self.element_c = element_c
self.element_accumulator = element_accumulator
self.opcode_class = opcode_class
self.math_operation = math_operation
class TileDescription:
"""Describe characteristics of a GEMM tile."""
def __init__(
self, threadblock_shape, stages, warp_count, math_instruction, min_compute, max_compute
):
self.threadblock_shape = threadblock_shape
self.stages = stages
self.warp_count = warp_count
self.math_instruction = math_instruction
self.minimum_compute_capability = min_compute
self.maximum_compute_capability = max_compute
def procedural_name(self):
return "%dx%d_%dx%d" % (
self.threadblock_shape[0],
self.threadblock_shape[1],
self.threadblock_shape[2],
self.stages,
)
class TensorDescription:
def __init__(self, element, layout, alignment=1):
self.element = element
self.layout = layout
self.alignment = alignment
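# Illustrative only: how the descriptors above compose for one f16 TensorOp
# GEMM tile, plus the ${name} placeholder convention that substitute_template
# expands. The shapes are typical CUTLASS values, not taken from this file.
if __name__ == "__main__":
    math_inst = MathInstruction(
        [16, 8, 16],
        DataType.f16,
        DataType.f16,
        DataType.f16,
        DataType.f32,
        OpcodeClass.TensorOp,
    )
    tile = TileDescription([128, 128, 32], 2, [2, 2, 1], math_inst, 75, 1024)
    print(tile.procedural_name())  # -> 128x128_32x2
    print(substitute_template(
        "cutlass::gemm::device::Gemm<${element}, ${layout}>",
        {
            "element": DataTypeTag[DataType.f16],
            "layout": LayoutTag[LayoutType.RowMajor],
        }))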
|
|
"""
A TiddlyWeb plugin for providing unauthed access to private resources
using "unguessable" URIs.
A URI keyed by a uuid provides an id for a mapping to another URI,
internal to the tiddlyweb server, with the active user being "faked".
This works out okay because:
* only GET is supported
* there's no state that gets carried to the next request
Tiddlers in a bag called PRIVATEER are used to maintain the mappings.
The title of the tiddler is the uuid. The tiddler has two fields:
* uri: the mapped to uri
* user: the user to proxy the action as
An authenticated user can create a new mapping by making a POST
to /_ as either a JSON dictionary with a 'uri' key, or a CGI form
with a uri parameter.
URIs are not checked; you can store whatever you like and the system
will happily do the internal redirect to it. If junk is stored, a
404 will result.
An authenticated user can list their own mappings by doing a GET to
/_. A JSON dictionary of mappings to uris is returned. Only those
mappings which have a user that matches the currently active user
will be shown.
A user can delete a mapping by sending DELETE to the URI.
Copyright 2010 Chris Dent <cdent@peemore.com>
Licensed as TiddlyWeb, using the BSD License.
"""
__version__ = '0.7'
import simplejson
import urlparse
import uuid
from httpexceptor import HTTP404, HTTP400
from tiddlyweb.control import filter_tiddlers
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.tiddler import Tiddler
from tiddlyweb.model.user import User
from tiddlyweb.store import StoreError
from tiddlyweb.web.query import Query
from tiddlyweb.web.negotiate import figure_type
from tiddlyweb.web.util import server_base_url
from tiddlywebplugins.utils import require_any_user, ensure_bag
MAPPING_BAG = 'PRIVATEER'
POLICY = dict(read=['NONE'], write=['NONE'], create=['NONE'],
manage=['NONE'], accept=['NONE'])
def init(config):
"""
Install selector routes.
"""
if 'selector' in config:
config['selector'].add('/_/{identifier:segment}',
GET=map_to_private, DELETE=delete_mapping)
config['selector'].add('/_', GET=mapping_list,
POST=make_mapping)
@require_any_user()
def delete_mapping(environ, start_response):
"""
Delete an existing mapping if:
* the mapping exists
* the current user and the user in the mapping are the same
"""
identifier = environ['wsgiorg.routing_args'][1]['identifier']
current_user = environ['tiddlyweb.usersign']['name']
store = environ['tiddlyweb.store']
try:
tiddler = Tiddler(identifier, MAPPING_BAG)
tiddler = store.get(tiddler)
tiddler_user = tiddler.fields['user']
if current_user != tiddler_user:
raise HTTP404('resource unavailable') # obscure user mismatch
store.delete(tiddler)
except StoreError:
raise HTTP404('resource not found')
start_response('204 No Content', [])
return []
@require_any_user()
def make_mapping(environ, start_response):
"""
    Establish a mapping, storing the provided URI
    as a field on a tiddler in the PRIVATEER bag.
    Accepted data is either a JSON dictionary with a 'uri'
    key or a POST CGI form with a uri query parameter.
Respond with a location header containing the uri
of the mapping.
"""
uri = None
try:
content_type = environ['tiddlyweb.type']
except KeyError:
content_type = None
if content_type == 'application/json':
try:
length = environ['CONTENT_LENGTH']
content = environ['wsgi.input'].read(int(length))
data = simplejson.loads(content)
uri = data['uri']
except (KeyError, IOError, simplejson.JSONDecodeError), exc:
raise HTTP400('Unable to parse input: %s' % exc)
else:
try:
uri = environ['tiddlyweb.query']['uri'][0]
except (KeyError, IndexError), exc:
raise HTTP400('Unable to parse input: %s' % exc)
if uri:
title_uuid = _make_mapping_tiddler(environ, uri)
else:
raise HTTP400('No uri for mapping provided')
start_response('201 Created', [
('Location', _mapping_uri(environ, title_uuid))])
return []
@require_any_user()
def mapping_list(environ, start_response):
"""
List the mappings for the current user as a JSON
dictionary: mapping uri -> mapped uri.
Matching is done based on the user field of the tiddlers
in the PRIVATEER bag.
"""
current_user = environ['tiddlyweb.usersign']['name']
store = environ['tiddlyweb.store']
try:
bag = Bag(MAPPING_BAG)
tiddlers = filter_tiddlers(store.list_bag_tiddlers(bag),
'select=user:%s' % current_user, environ=environ)
results = {}
for tiddler in tiddlers:
tiddler = store.get(tiddler)
results[_mapping_uri(environ,
tiddler.title)] = tiddler.fields['uri']
except StoreError, exc:
raise HTTP404('Unable to list mappings: %s' % exc)
output = simplejson.dumps(results)
start_response('200 OK', [
('Content-Type', 'application/json')])
return [output]
def map_to_private(environ, start_response):
"""
Internally redirect a mapping uri to the mapped uri.
This is done by supplying the mapped uri to the selector
application for dispatch, but first redoing Query and Negotiate
handling so that query parameters and content negotiation apply
based on the mapped uri, not from the mapping uri.
"""
identifier = environ['wsgiorg.routing_args'][1]['identifier']
host, target_uri, user = _map_to_uri(environ, identifier)
environ['tiddlyweb.usersign'] = _proxy_user(environ, user)
try:
target_uri, query_string = target_uri.split('?', 1)
environ['QUERY_STRING'] = query_string.encode('utf-8')
except ValueError: # no ?
pass
if host:
environ['HTTP_HOST'] = host.encode('utf-8')
environ['PATH_INFO'] = target_uri.encode('utf-8')
environ['SCRIPT_NAME'] = ''
# reparse the query string into tiddlyweb.query and filters
Query(None).extract_query(environ)
figure_type(environ)
return environ['tiddlyweb.config']['selector'](environ, start_response)
def _make_mapping_tiddler(environ, uri):
"""
Create and store the tiddler that will persist the mapping.
"""
store = environ['tiddlyweb.store']
try:
mapping_bag = ensure_bag(MAPPING_BAG, store, policy_dict=POLICY)
title_uuid = '%s' % uuid.uuid4()
tiddler = Tiddler(title_uuid, mapping_bag.name)
tiddler.fields['uri'] = uri
tiddler.fields['user'] = environ['tiddlyweb.usersign']['name']
store.put(tiddler)
except StoreError, exc:
raise HTTP400('Unable to create mapping: %s' % exc)
return title_uuid
def _map_to_uri(environ, identifier):
"""
    Get host, path and user information out of the mapping tiddler.
Host is pulled out separately so that we can use virtualhosting
correctly, if necessary.
"""
store = environ['tiddlyweb.store']
try:
tiddler = Tiddler(identifier, MAPPING_BAG)
tiddler = store.get(tiddler)
uri = tiddler.fields['uri']
user = tiddler.fields['user']
_, netloc, path, params, query, fragment = urlparse.urlparse(uri)
host = netloc
path = urlparse.urlunparse(('', '', path, params, query, fragment))
return host, path, user
except (StoreError, KeyError), exc:
raise HTTP404('valid mapping not found: %s' % exc)
def _mapping_uri(environ, identifier):
"""
The full URI of a mapping uri.
"""
location = '%s/_/%s' % (server_base_url(environ), identifier)
return location
def _proxy_user(environ, username):
"""
Load up the correct user information for the user being proxied
in a mapping.
"""
store = environ['tiddlyweb.store']
try:
user = User(username)
user = store.get(user)
return {'name': user.usersign, 'roles': user.list_roles()}
except StoreError, exc:
raise HTTP400('invalid mapping: %s' % exc)
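if __name__ == '__main__':
    # Illustrative client for the API described in the module docstring,
    # aimed at a locally running tiddlyweb instance. The host and the auth
    # cookie value are assumptions; replace them with real ones.
    import urllib2
    host = 'http://0.0.0.0:8080'
    auth_cookie = 'tiddlyweb_user=USER_COOKIE'  # assumption: a valid cookie
    request = urllib2.Request('%s/_' % host,
            data=simplejson.dumps({'uri': '/bags/mybag/tiddlers.json'}),
            headers={'Content-Type': 'application/json',
                'Cookie': auth_cookie})
    response = urllib2.urlopen(request)
    # The 201 Created response carries the unguessable mapping URI.
    print response.info().getheader('Location')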
|
|
import os
from os.path import join as pjoin
import sys
from distutils.sysconfig import get_config_var
from numscons.core.utils import flatten
from numscons.core.misc import get_numscons_toolpaths, get_pythonlib_name, \
is_f77_gnu, get_vs_version, built_with_mstools, \
isfortran, isf2py, scons_get_paths, built_with_mingw, is_debug
from numscons.core.errors import InternalError
from numscons.numdist import msvc_runtime_library
def get_pythonlib_dir():
"""Returns a list of path to look for the python engine library
(pythonX.X.lib on win32, libpythonX.X.so on unix, etc...)."""
if os.name == 'nt':
return [pjoin(sys.exec_prefix, 'libs')]
else:
return [pjoin(sys.exec_prefix, 'lib')]
def is_bootstrapping(env):
return env['bootstrapping']
def is_bypassed(env):
return env['bypass']
def is_importing_environment(env):
return env['import_env']
def has_f77(env):
return len(env['f77_opt']) > 0
def customize_tools(env):
customize_pyext(env)
finalize_env(env)
apply_compilers_customization(env)
customize_link_flags(env)
def customize_pyext(env):
from SCons.Tool import Tool
from SCons.Tool.FortranCommon import CreateDialectActions, ShFortranEmitter
if sys.platform == 'win32' and is_debug(env):
env['PYEXTSUFFIX'] = "_d%s" % get_config_var('SO')
else:
env['PYEXTSUFFIX'] = get_config_var('SO')
t = Tool('pyext', toolpath = get_numscons_toolpaths(env))
t(env)
#-----------------------------------------------
# Extending pyext to handle fortran source code.
#-----------------------------------------------
# XXX: This is ugly: I don't see any way to do this cleanly.
pyext_obj = t._tool_module().createPythonObjectBuilder(env)
if env.has_key('F77') and env['F77']:
shcompaction = CreateDialectActions('F77')[2]
for suffix in env['F77FILESUFFIXES']:
pyext_obj.add_action(suffix, shcompaction)
pyext_obj.add_emitter(suffix, ShFortranEmitter)
# Customizing pyext to handle windows platform (msvc runtime, etc...)
# We don't do this in pyext because scons has no infrastructure to know
# whether we are using mingw or ms
if sys.platform == 'win32':
_customize_pyext_win32(env, t)
# XXX: Add numpy header path (will be used by NumpyExtension builder).
if is_bootstrapping(env):
env['NUMPYCPPPATH'] = scons_get_paths(env['include_bootstrap'])
else:
from numpy.distutils.misc_util import get_numpy_include_dirs
env['NUMPYCPPPATH'] = get_numpy_include_dirs()
def _customize_pyext_win32(env, pyext_tool):
from SCons.Action import Action
from SCons.Node.FS import default_fs
env.PrependUnique(LIBPATH = get_pythonlib_dir())
def dummy(target, source, env):
return target, source
def pyext_runtime(target = None, source = None, env = None):
# Action to handle msvc runtime problem with fortran/mingw: normally,
# when we link a python extension with mingw, we need to add the msvc
        # runtime. BUT, if the extension uses fortran, we should not.
# XXX: Is action the right way to do that ?
snodes = [default_fs.Entry(s) for s in source]
if isfortran(env, snodes) or isf2py(env, snodes):
env["PYEXTRUNTIME"] = [get_pythonlib_name()]
else:
env["PYEXTRUNTIME"] = [get_pythonlib_name(),
msvc_runtime_library()]
return 0
    # We override the default emitter here because the SHLIB emitter
    # does a lot of checks we don't care about, and which are wrong
    # anyway for python extensions.
env["BUILDERS"]["PythonExtension"].emitter = dummy
if built_with_mingw(env):
# We are overriding pyext coms here.
# XXX: This is ugly
pycc, pycxx, pylink = pyext_tool._tool_module().pyext_coms('posix')
env['PYEXTCCCOM'] = pycc
env['PYEXTCXXCOM'] = pycxx
env['PYEXTLINKCOM'] = pylink
pyext_runtime_action = Action(pyext_runtime, '')
old_action = env['BUILDERS']['PythonExtension'].action
env['BUILDERS']['PythonExtension'].action = \
Action([pyext_runtime_action, old_action])
def customize_link_flags(env):
    # We sometimes need to put link flags at the very end of the command
    # line, so we add a construction variable for it
env['LINKFLAGSEND'] = []
env['SHLINKFLAGSEND'] = ['$LINKFLAGSEND']
env['LDMODULEFLAGSEND'] = []
if built_with_mstools(env):
from SCons.Action import Action
# Sanity check: in case scons changes and we are not
# aware of it
if not isinstance(env["SHLINKCOM"], list):
msg = "Internal consistency check failed for MS compiler. This " \
"is bug, please contact the maintainer"
raise InternalError(msg)
if not isinstance(env["LDMODULECOM"], list):
msg = "Internal consistency check failed for MS compiler. This " \
"is bug, please contact the maintainer"
raise InternalError(msg)
# We replace the "real" shlib action of mslink by our
        # own, which only differs in the LINKFLAGSEND flags.
newshlibaction = Action('${TEMPFILE("$SHLINK $SHLINKFLAGS ' \
'$_SHLINK_TARGETS $( $_LIBDIRFLAGS $) ' \
'$_LIBFLAGS $SHLINKFLAGSEND $_PDB' \
'$_SHLINK_SOURCES")}')
env["SHLINKCOM"][0] = newshlibaction
env["LDMODULECOM"][0] = newshlibaction
newlibaction = '${TEMPFILE("$LINK $LINKFLAGS /OUT:$TARGET.windows ' \
'$( $_LIBDIRFLAGS $) $_LIBFLAGS $LINKFLAGSEND $_PDB ' \
'$SOURCES.windows")}'
env["LINKCOM"] = newlibaction
env['PYEXTLINKCOM'] = '%s $PYEXTLINKFLAGSEND' % env['PYEXTLINKCOM']
else:
env['LINKCOM'] = '%s $LINKFLAGSEND' % env['LINKCOM']
env['SHLINKCOM'] = '%s $SHLINKFLAGSEND' % env['SHLINKCOM']
env['LDMODULECOM'] = '%s $LDMODULEFLAGSEND' % env['LDMODULECOM']
env['PYEXTLINKCOM'] = '%s $PYEXTLINKFLAGSEND' % env['PYEXTLINKCOM']
def finalize_env(env):
"""Call this at the really end of the numpy environment initialization."""
    # This customizes a few things to cope with idiosyncrasies of some
    # tools; these tweaks are too specific to live in the tools themselves.
if built_with_mstools(env):
major = get_vs_version(env)[0]
# For VS 8 and above (VS 2005), use manifest for DLL
# XXX: this has nothing to do here, too
if major >= 8:
env['LINKCOM'] = [env['LINKCOM'],
'mt.exe -nologo -manifest ${TARGET}.manifest '\
'-outputresource:$TARGET;1']
env['SHLINKCOM'] = [env['SHLINKCOM'],
'mt.exe -nologo -manifest ${TARGET}.manifest '\
'-outputresource:$TARGET;2']
env['LDMODULECOM'] = [env['LDMODULECOM'],
'mt.exe -nologo -manifest ${TARGET}.manifest '\
'-outputresource:$TARGET;2']
if is_f77_gnu(env):
env.AppendUnique(F77FLAGS = ['-fno-second-underscore'])
if built_with_mingw(env):
env.AppendUnique(CFLAGS = '-mno-cygwin')
def apply_compilers_customization(env):
"""Apply customization to compilers' flags from the environment. Also take
into account user customization through shell variables."""
#------------------------------
# C compiler last customization
#------------------------------
# Apply optim and warn flags considering context
custom = env['NUMPY_CUSTOMIZATION']['C']
if 'CFLAGS' in os.environ:
env.Append(CFLAGS = "%s" % os.environ['CFLAGS'])
env.AppendUnique(CFLAGS = custom['extra'] + custom['thread'])
else:
env.AppendUnique(CFLAGS = flatten(custom.values()))
#--------------------------------
# F77 compiler last customization
#--------------------------------
if env.has_key('F77'):
custom = env['NUMPY_CUSTOMIZATION']['F77']
if 'FFLAGS' in os.environ:
env.Append(F77FLAGS = "%s" % os.environ['FFLAGS'])
env.AppendUnique(F77FLAGS = custom['extra'] + custom['thread'])
else:
env.AppendUnique(F77FLAGS = flatten(custom.values()))
#--------------------------------
# CXX compiler last customization
#--------------------------------
if env.has_key('CXX'):
custom = env['NUMPY_CUSTOMIZATION']['CXX']
if 'CXXFLAGS' in os.environ:
env.Append(CXXFLAGS = "%s" % os.environ['CXXFLAGS'])
env.AppendUnique(CXXFLAGS = custom['extra'] + custom['thread'])
else:
env.AppendUnique(CXXFLAGS = flatten(custom.values()))
if 'LDFLAGS' in os.environ:
env.Prepend(LINKFLAGS = os.environ['LDFLAGS'])
|
|
"""RoboMaker component for creating a simulation job."""
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Dict
from simulation_job.src.robomaker_simulation_job_spec import (
RoboMakerSimulationJobSpec,
RoboMakerSimulationJobInputs,
RoboMakerSimulationJobOutputs,
)
from common.sagemaker_component import (
SageMakerComponent,
ComponentMetadata,
SageMakerJobStatus,
)
from common.boto3_manager import Boto3Manager
from common.common_inputs import SageMakerComponentCommonInputs
@ComponentMetadata(
name="RoboMaker - Create Simulation Job",
description="Creates a simulation job.",
spec=RoboMakerSimulationJobSpec,
)
class RoboMakerSimulationJobComponent(SageMakerComponent):
"""RoboMaker component for creating a simulation job."""
def Do(self, spec: RoboMakerSimulationJobSpec):
super().Do(spec.inputs, spec.outputs, spec.output_paths)
def _get_job_status(self) -> SageMakerJobStatus:
response = self._rm_client.describe_simulation_job(job=self._arn)
status = response["status"]
if status in ["Completed"]:
return SageMakerJobStatus(
is_completed=True, has_error=False, raw_status=status
)
if status in ["Terminating", "Terminated", "Canceled"]:
if "failureCode" in response:
simulation_message = (
f"Simulation failed with code:{response['failureCode']}"
)
return SageMakerJobStatus(
is_completed=True,
has_error=True,
error_message=simulation_message,
raw_status=status,
)
else:
simulation_message = "Exited without error code.\n"
if "failureReason" in response:
simulation_message += (
f"Simulation exited with reason:{response['failureReason']}\n"
)
return SageMakerJobStatus(
is_completed=True,
has_error=False,
error_message=simulation_message,
raw_status=status,
)
if status in ["Failed", "RunningFailed"]:
failure_message = f"Simulation job is in status:{status}\n"
if "failureReason" in response:
failure_message += (
f"Simulation failed with reason:{response['failureReason']}"
)
if "failureCode" in response:
failure_message += (
f"Simulation failed with errorCode:{response['failureCode']}"
)
return SageMakerJobStatus(
is_completed=True,
has_error=True,
error_message=failure_message,
raw_status=status,
)
return SageMakerJobStatus(is_completed=False, raw_status=status)
def _configure_aws_clients(self, inputs: SageMakerComponentCommonInputs):
"""Configures the internal AWS clients for the component.
Args:
inputs: A populated list of user inputs.
"""
self._rm_client = Boto3Manager.get_robomaker_client(
self._get_component_version(),
inputs.region,
endpoint_url=inputs.endpoint_url,
assume_role_arn=inputs.assume_role,
)
self._cw_client = Boto3Manager.get_cloudwatch_client(
inputs.region, assume_role_arn=inputs.assume_role
)
def _after_job_complete(
self,
job: Dict,
request: Dict,
inputs: RoboMakerSimulationJobInputs,
outputs: RoboMakerSimulationJobOutputs,
):
outputs.output_artifacts = self._get_job_outputs()
logging.info(
"Simulation Job in RoboMaker: https://{}.console.aws.amazon.com/robomaker/home?region={}#/simulationJobs/{}".format(
inputs.region, inputs.region, self._job_id
)
)
def _on_job_terminated(self):
self._rm_client.cancel_simulation_job(application=self._arn)
def _create_job_request(
self,
inputs: RoboMakerSimulationJobInputs,
outputs: RoboMakerSimulationJobOutputs,
) -> Dict:
"""
        Documentation: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/robomaker.html#RoboMaker.Client.create_simulation_job
"""
# Need one of sim_app_arn or robot_app_arn to be provided
if not inputs.sim_app_arn and not inputs.robot_app_arn:
logging.error("Must specify a Simulation App ARN or a Robot App ARN.")
raise Exception("Could not create simulation job request")
request = self._get_request_template("robomaker.simulation.job")
# Set the required inputs
request["outputLocation"]["s3Bucket"] = inputs.output_bucket
request["outputLocation"]["s3Prefix"] = inputs.output_path
request["maxJobDurationInSeconds"] = inputs.max_run
request["iamRole"] = inputs.role
# Set networking inputs
if inputs.vpc_subnets:
request["vpcConfig"]["subnets"] = inputs.vpc_subnets
if inputs.vpc_security_group_ids:
request["vpcConfig"]["securityGroups"] = inputs.vpc_security_group_ids
if inputs.use_public_ip:
request["vpcConfig"]["assignPublicIp"] = inputs.use_public_ip
else:
request.pop("vpcConfig")
# Set simulation application inputs
if inputs.sim_app_arn:
if not inputs.sim_app_launch_config:
logging.error("Must specify a Launch Config for your Simulation App")
raise Exception("Could not create simulation job request")
sim_app = {
"application": inputs.sim_app_arn,
"launchConfig": inputs.sim_app_launch_config,
}
if inputs.sim_app_version:
sim_app["version"]: inputs.sim_app_version
if inputs.sim_app_world_config:
sim_app["worldConfigs"]: inputs.sim_app_world_config
request["simulationApplications"].append(sim_app)
else:
request.pop("simulationApplications")
# Set robot application inputs
if inputs.robot_app_arn:
if not inputs.robot_app_launch_config:
logging.error("Must specify a Launch Config for your Robot App")
raise Exception("Could not create simulation job request")
robot_app = {
"application": inputs.robot_app_arn,
"launchConfig": inputs.robot_app_launch_config,
}
if inputs.robot_app_version:
robot_app["version"]: inputs.robot_app_version
request["robotApplications"].append(robot_app)
else:
request.pop("robotApplications")
# Set optional inputs
if inputs.record_ros_topics:
request["loggingConfig"]["recordAllRosTopics"] = inputs.record_ros_topics
else:
request.pop("loggingConfig")
if inputs.failure_behavior:
request["failureBehavior"] = inputs.failure_behavior
else:
request.pop("failureBehavior")
if inputs.data_sources:
request["dataSources"] = inputs.data_sources
else:
request.pop("dataSources")
if inputs.sim_unit_limit:
request["compute"]["simulationUnitLimit"] = inputs.sim_unit_limit
self._enable_tag_support(request, inputs)
return request
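# Sketch of a minimal request produced above (hedged: key names follow the
# boto3 create_simulation_job API; the concrete values are placeholders and
# are not taken from the component's request template):
#   {
#       "outputLocation": {"s3Bucket": "my-bucket", "s3Prefix": "sim-outputs"},
#       "maxJobDurationInSeconds": 3600,
#       "iamRole": "arn:aws:iam::111111111111:role/robomaker-role",
#       "simulationApplications": [
#           {"application": "<sim-app-arn>", "launchConfig": {...}}
#       ],
#   }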
def _submit_job_request(self, request: Dict) -> Dict:
return self._rm_client.create_simulation_job(**request)
def _after_submit_job_request(
self,
job: Dict,
request: Dict,
inputs: RoboMakerSimulationJobInputs,
outputs: RoboMakerSimulationJobOutputs,
):
outputs.arn = self._arn = job["arn"]
outputs.job_id = self._job_id = job["arn"].split("/")[-1]
logging.info(f"Started Robomaker Simulation Job with ID: {self._job_id}")
logging.info(
"Simulation Job in RoboMaker: https://{}.console.aws.amazon.com/robomaker/home?region={}#/simulationJobs/{}".format(
inputs.region, inputs.region, self._job_id
)
)
def _print_logs_for_job(self):
self._print_cloudwatch_logs("/aws/robomaker/SimulationJobs", self._job_id)
def _get_job_outputs(self):
"""Map the S3 outputs of a simulation job to a dictionary object.
Returns:
dict: A dictionary of output S3 URIs.
"""
response = self._rm_client.describe_simulation_job(job=self._arn)
artifact_uri = f"s3://{response['outputLocation']['s3Bucket']}/{response['outputLocation']['s3Prefix']}"
return artifact_uri
if __name__ == "__main__":
import sys
spec = RoboMakerSimulationJobSpec(sys.argv[1:])
component = RoboMakerSimulationJobComponent()
component.Do(spec)
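# Minimal invocation sketch (hedged: the exact flag names are defined by
# RoboMakerSimulationJobSpec and are assumed here, not verified):
#
#   python robomaker_simulation_job_component.py \
#       --region us-west-2 \
#       --role <iam-role-arn> \
#       --output_bucket my-bucket --output_path sim-outputs \
#       --sim_app_arn <simulation-application-arn> \
#       --sim_app_launch_config '{"packageName": "pkg", "launchFile": "sim.launch"}'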
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from unittest.mock import call
from watchtower import CloudWatchLogHandler
from airflow.models import DAG, TaskInstance
from airflow.operators.dummy_operator import DummyOperator
from airflow.providers.amazon.aws.hooks.logs import AwsLogsHook
from airflow.utils.log.cloudwatch_task_handler import CloudwatchTaskHandler
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from tests.test_utils.config import conf_vars
try:
import boto3
import moto
from moto import mock_logs
except ImportError:
mock_logs = None
@unittest.skipIf(mock_logs is None, "Skipping test because moto.mock_logs is not available")
@mock_logs
class TestCloudwatchTaskHandler(unittest.TestCase):
@conf_vars({('logging', 'remote_log_conn_id'): 'aws_default'})
def setUp(self):
self.remote_log_group = 'log_group_name'
self.region_name = 'us-west-2'
self.local_log_location = 'local/log/location'
self.filename_template = '{dag_id}/{task_id}/{execution_date}/{try_number}.log'
self.cloudwatch_task_handler = CloudwatchTaskHandler(
self.local_log_location,
f"arn:aws:logs:{self.region_name}:11111111:log-group:{self.remote_log_group}",
self.filename_template,
)
self.cloudwatch_task_handler.hook
date = datetime(2020, 1, 1)
dag_id = 'dag_for_testing_file_task_handler'
task_id = 'task_for_testing_file_log_handler'
self.dag = DAG(dag_id=dag_id, start_date=date)
task = DummyOperator(task_id=task_id, dag=self.dag)
self.ti = TaskInstance(task=task, execution_date=date)
self.ti.try_number = 1
self.ti.state = State.RUNNING
self.remote_log_stream = '{}/{}/{}/{}.log'.format(
dag_id, task_id, date.isoformat(), self.ti.try_number
).replace(':', '_')
moto.core.moto_api_backend.reset()
self.conn = boto3.client('logs', region_name=self.region_name)
def tearDown(self):
self.cloudwatch_task_handler.handler = None
def test_hook(self):
self.assertIsInstance(self.cloudwatch_task_handler.hook, AwsLogsHook)
@conf_vars({('logging', 'remote_log_conn_id'): 'aws_default'})
def test_hook_raises(self):
handler = CloudwatchTaskHandler(
self.local_log_location,
f"arn:aws:logs:{self.region_name}:11111111:log-group:{self.remote_log_group}",
self.filename_template,
)
with mock.patch.object(handler.log, 'error') as mock_error:
with mock.patch("airflow.providers.amazon.aws.hooks.logs.AwsLogsHook") as mock_hook:
mock_hook.side_effect = Exception('Failed to connect')
# Initialize the hook
handler.hook
mock_error.assert_called_once_with(
'Could not create an AwsLogsHook with connection id "%s". Please make '
'sure that airflow[aws] is installed and the Cloudwatch logs connection exists.',
'aws_default',
)
def test_handler(self):
self.cloudwatch_task_handler.set_context(self.ti)
self.assertIsInstance(self.cloudwatch_task_handler.handler, CloudWatchLogHandler)
def test_write(self):
handler = self.cloudwatch_task_handler
handler.set_context(self.ti)
messages = [str(i) for i in range(10)]
with mock.patch("watchtower.CloudWatchLogHandler.emit") as mock_emit:
for message in messages:
handler.handle(message)
mock_emit.assert_has_calls([call(message) for message in messages])
def test_read(self):
# Confirmed via AWS Support call:
# CloudWatch events must be ordered chronologically otherwise
# boto3 put_log_event API throws InvalidParameterException
# (moto does not throw this exception)
generate_log_events(
self.conn,
self.remote_log_group,
self.remote_log_stream,
[
{'timestamp': 10000, 'message': 'First'},
{'timestamp': 20000, 'message': 'Second'},
{'timestamp': 30000, 'message': 'Third'},
],
)
expected = (
'*** Reading remote log from Cloudwatch log_group: {} log_stream: {}.\nFirst\nSecond\nThird\n'
)
self.assertEqual(
self.cloudwatch_task_handler.read(self.ti),
(
[[('', expected.format(self.remote_log_group, self.remote_log_stream))]],
[{'end_of_log': True}],
),
)
def test_read_wrong_log_stream(self):
generate_log_events(
self.conn,
self.remote_log_group,
'alternate_log_stream',
[
{'timestamp': 10000, 'message': 'First'},
{'timestamp': 20000, 'message': 'Second'},
{'timestamp': 30000, 'message': 'Third'},
],
)
msg_template = '*** Reading remote log from Cloudwatch log_group: {} log_stream: {}.\n{}\n'
error_msg = 'Could not read remote logs from log_group: {} log_stream: {}.'.format(
self.remote_log_group, self.remote_log_stream
)
self.assertEqual(
self.cloudwatch_task_handler.read(self.ti),
(
[[('', msg_template.format(self.remote_log_group, self.remote_log_stream, error_msg))]],
[{'end_of_log': True}],
),
)
def test_read_wrong_log_group(self):
generate_log_events(
self.conn,
'alternate_log_group',
self.remote_log_stream,
[
{'timestamp': 10000, 'message': 'First'},
{'timestamp': 20000, 'message': 'Second'},
{'timestamp': 30000, 'message': 'Third'},
],
)
msg_template = '*** Reading remote log from Cloudwatch log_group: {} log_stream: {}.\n{}\n'
error_msg = 'Could not read remote logs from log_group: {} log_stream: {}.'.format(
self.remote_log_group, self.remote_log_stream
)
self.assertEqual(
self.cloudwatch_task_handler.read(self.ti),
(
[[('', msg_template.format(self.remote_log_group, self.remote_log_stream, error_msg))]],
[{'end_of_log': True}],
),
)
def test_close_prevents_duplicate_calls(self):
with mock.patch("watchtower.CloudWatchLogHandler.close") as mock_log_handler_close:
with mock.patch("airflow.utils.log.file_task_handler.FileTaskHandler.set_context"):
self.cloudwatch_task_handler.set_context(self.ti)
for _ in range(5):
self.cloudwatch_task_handler.close()
mock_log_handler_close.assert_called_once()
def generate_log_events(conn, log_group_name, log_stream_name, log_events):
conn.create_log_group(logGroupName=log_group_name)
conn.create_log_stream(logGroupName=log_group_name, logStreamName=log_stream_name)
conn.put_log_events(logGroupName=log_group_name, logStreamName=log_stream_name, logEvents=log_events)
|
|
# -*- coding: utf-8 -*-
"""
Bridge to the pandas library.
:copyright: Copyright 2014-2016 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function, unicode_literals
import numpy as np
import pandas as pd
import warnings
import quantities as pq
from elephant.neo_tools import (extract_neo_attributes, get_all_epochs,
get_all_events, get_all_spiketrains)
warnings.simplefilter('once', DeprecationWarning)
warnings.warn("pandas_bridge module will be removed in Elephant v0.8.x",
DeprecationWarning)
def _multiindex_from_dict(inds):
"""Given a dictionary, return a `pandas.MultiIndex`.
Parameters
----------
inds : dict
A dictionary where the keys are annotations or attribute names and
the values are the corresponding annotation or attribute value.
Returns
-------
pandas MultiIndex
"""
names, indexes = zip(*sorted(inds.items()))
return pd.MultiIndex.from_tuples([indexes], names=names)
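# Illustrative sketch (not part of the original module): for
# inds = {'name': 'st1', 't_start': 0.0}, _multiindex_from_dict(inds) returns
# a single-entry MultiIndex whose level names are the sorted keys
# ('name', 't_start') and whose one tuple of values is ('st1', 0.0).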
def _sort_inds(obj, axis=0):
"""Put the indexes and index levels of a pandas object in sorted order.
Parameters
----------
obj : pandas Series, DataFrame, Panel, or Panel4D
The object whose indexes should be sorted.
axis : int, list, optional, 'all'
The axis whose indexes should be sorted. Default is 0.
Can also be a list of indexes, in which case all of those axes
are sorted. If 'all', sort all indexes.
Returns
-------
pandas Series, DataFrame, Panel, or Panel4D
A copy of the object with indexes sorted.
The original object is not modified.
"""
if axis == 'all':
return _sort_inds(obj, axis=range(obj.ndim))
if hasattr(axis, '__iter__'):
for iax in axis:
obj = _sort_inds(obj, iax)
return obj
obj = obj.reorder_levels(sorted(obj.axes[axis].names), axis=axis)
return obj.sort_index(level=0, axis=axis, sort_remaining=True)
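# Example sketch: for a DataFrame whose columns form a MultiIndex with level
# names ('t_stop', 'name', 't_start'), _sort_inds(df, axis=1) first reorders
# the levels alphabetically to ('name', 't_start', 't_stop') and then sorts
# the columns on the first level (and the remaining levels).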
def _extract_neo_attrs_safe(obj, parents=True, child_first=True):
"""Given a neo object, return a dictionary of attributes and annotations.
This is done in a manner that is safe for `pandas` indexes.
Parameters
----------
obj : neo object
parents : bool, optional
Also include attributes and annotations from parent neo
objects (if any).
child_first : bool, optional
If True (default True), values of child attributes are used
over parent attributes in the event of a name conflict.
If False, parent attributes are used.
This parameter does nothing if `parents` is False.
Returns
-------
dict
A dictionary where the keys are annotations or attribute names and
the values are the corresponding annotation or attribute value.
"""
res = extract_neo_attributes(obj, skip_array=True, skip_none=True,
parents=parents, child_first=child_first)
for key, value in res.items():
res[key] = _convert_value_safe(value)
key2 = _convert_value_safe(key)
if key2 is not key:
res[key2] = res.pop(key)
return res
def _convert_value_safe(value):
"""Convert `neo` values to a value compatible with `pandas`.
Some types and dtypes used with neo are not safe to use with pandas in some
or all situations.
`quantities.Quantity` objects do not follow the normal python rule that
values which compare equal should have the same hash, making them
fundamentally incompatible with `pandas`.
On python 3, `pandas` coerces `S` dtypes to bytes, which are not always
safe to use.
Parameters
----------
value : any
Value to convert (if it has any known issues).
Returns
-------
any
`value` or a version of value with potential problems fixed.
"""
if hasattr(value, 'dimensionality'):
return (value.magnitude.tolist(), str(value.dimensionality))
if hasattr(value, 'dtype') and value.dtype.kind == 'S':
return value.astype('U').tolist()
if hasattr(value, 'tolist'):
return value.tolist()
if hasattr(value, 'decode') and not hasattr(value, 'encode'):
return value.decode('UTF8')
return value
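# Illustrative conversions (sketch, assuming numpy and quantities are imported
# as above):
#   _convert_value_safe(3.0 * pq.mV)             -> (3.0, 'mV')
#   _convert_value_safe(np.array([b'a', b'b']))  -> ['a', 'b']
#   _convert_value_safe(b'abc')                  -> 'abc'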
def spiketrain_to_dataframe(spiketrain, parents=True, child_first=True):
"""Convert a `neo.SpikeTrain` to a `pandas.DataFrame`.
The `pandas.DataFrame` object has a single column, with each element
being the spike time converted to a `float` value in seconds.
The column heading is a `pandas.MultiIndex` with one index
for each of the scalar attributes and annotations. The `index`
is the spike number.
Parameters
----------
spiketrain : neo SpikeTrain
The SpikeTrain to convert.
parents : bool, optional
Also include attributes and annotations from parent neo
objects (if any).
Returns
-------
pandas DataFrame
A DataFrame containing the spike times from `spiketrain`.
Notes
-----
The index name is `spike_number`.
Attributes that contain non-scalar values are skipped. So are
annotations or attributes containing a value of `None`.
`quantity.Quantities` types are incompatible with `pandas`, so attributes
and annotations of that type are converted to a tuple where the first
element is the scalar value and the second is the string representation of
the units.
"""
attrs = _extract_neo_attrs_safe(spiketrain,
parents=parents, child_first=child_first)
columns = _multiindex_from_dict(attrs)
times = spiketrain.magnitude
times = pq.Quantity(times, spiketrain.units).rescale('s').magnitude
times = times[np.newaxis].T
index = pd.Index(np.arange(len(spiketrain)), name='spike_number')
pdobj = pd.DataFrame(times, index=index, columns=columns)
return _sort_inds(pdobj, axis=1)
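# Usage sketch (hedged: assumes a neo.SpikeTrain such as
# st = neo.SpikeTrain([0.1, 0.5, 0.9] * pq.s, t_stop=1.0 * pq.s)):
#   df = spiketrain_to_dataframe(st)
# df has shape (3, 1); its single column is a MultiIndex built from the
# train's scalar attributes/annotations, and its 'spike_number' index runs
# 0..2 with the spike times expressed in seconds.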
def event_to_dataframe(event, parents=True, child_first=True):
"""Convert a `neo.core.Event` to a `pandas.DataFrame`.
The `pandas.DataFrame` object has a single column, with each element
being the event label from the `event.labels` attribute.
The column heading is a `pandas.MultiIndex` with one index
for each of the scalar attributes and annotations. The `index`
is the time stamp from the `event.times` attribute.
Parameters
----------
event : neo Event
The Event to convert.
parents : bool, optional
Also include attributes and annotations from parent neo
objects (if any).
child_first : bool, optional
If True (default True), values of child attributes are used
over parent attributes in the event of a name conflict.
If False, parent attributes are used.
This parameter does nothing if `parents` is False.
Returns
-------
pandas DataFrame
A DataFrame containing the labels from `event`.
Notes
-----
If the lengths of `event.times` and `event.labels` are not the same,
the longer will be truncated to the length of the shorter.
The index name is `times`.
Attributes that contain non-scalar values are skipped. So are
annotations or attributes containing a value of `None`.
`quantity.Quantities` types are incompatible with `pandas`, so attributes
and annotations of that type are converted to a tuple where the first
element is the scalar value and the second is the string representation of
the units.
"""
attrs = _extract_neo_attrs_safe(event,
parents=parents, child_first=child_first)
columns = _multiindex_from_dict(attrs)
times = event.times.rescale('s').magnitude
labels = event.labels.astype('U')
times = times[:len(labels)]
labels = labels[:len(times)]
index = pd.Index(times, name='times')
pdobj = pd.DataFrame(labels[np.newaxis].T, index=index, columns=columns)
return _sort_inds(pdobj, axis=1)
def epoch_to_dataframe(epoch, parents=True, child_first=True):
"""Convert a `neo.core.Epoch` to a `pandas.DataFrame`.
The `pandas.DataFrame` object has a single column, with each element
being the epoch label from the `epoch.labels` attribute.
The column heading is a `pandas.MultiIndex` with one index
for each of the scalar attributes and annotations. The `index`
is a `pandas.MultiIndex`, with the first index being the time stamp from
the `epoch.times` attribute and the second being the duration from
the `epoch.durations` attribute.
Parameters
----------
epoch : neo Epoch
The Epoch to convert.
parents : bool, optional
Also include attributes and annotations from parent neo
objects (if any).
child_first : bool, optional
If True (default True), values of child attributes are used
over parent attributes in the event of a name conflict.
If False, parent attributes are used.
This parameter does nothing if `parents` is False.
Returns
-------
pandas DataFrame
A DataFrame containing the labels from `epoch`.
Notes
-----
If the lengths of `epoch.times`, `epoch.durations`, and `epoch.labels` are
not the same, the longer ones will be truncated to the length of the shortest.
The index names for `epoch.times` and `epoch.durations` are `times` and
`durations`, respectively.
Attributes that contain non-scalar values are skipped. So are
annotations or attributes containing a value of `None`.
`quantity.Quantities` types are incompatible with `pandas`, so attributes
and annotations of that type are converted to a tuple where the first
element is the scalar value and the second is the string representation of
the units.
"""
attrs = _extract_neo_attrs_safe(epoch,
parents=parents, child_first=child_first)
columns = _multiindex_from_dict(attrs)
times = epoch.times.rescale('s').magnitude
durs = epoch.durations.rescale('s').magnitude
labels = epoch.labels.astype('U')
minlen = min([len(durs), len(times), len(labels)])
index = pd.MultiIndex.from_arrays([times[:minlen], durs[:minlen]],
names=['times', 'durations'])
pdobj = pd.DataFrame(labels[:minlen][np.newaxis].T,
index=index, columns=columns)
return _sort_inds(pdobj, axis='all')
def _multi_objs_to_dataframe(container, conv_func, get_func,
parents=True, child_first=True):
"""Convert one or more of a given `neo` object to a `pandas.DataFrame`.
The objects can be any list, dict, or other iterable or mapping containing
the object, as well as any neo object that can hold the object.
Objects are searched recursively, so the objects can be nested (such as a
list of blocks).
The column heading is a `pandas.MultiIndex` with one index
for each of the scalar attributes and annotations of the respective
object.
Parameters
----------
container : list, tuple, iterable, dict, neo container object
The container for the objects to convert.
parents : bool, optional
Also include attributes and annotations from parent neo
objects (if any).
child_first : bool, optional
If True (default True), values of child attributes are used
over parent attributes in the event of a name conflict.
If False, parent attributes are used.
This parameter does nothing if `parents` is False.
Returns
-------
pandas DataFrame
A DataFrame containing the converted objects.
Attributes that contain non-scalar values are skipped. So are
annotations or attributes containing a value of `None`.
`quantity.Quantities` types are incompatible with `pandas`, so attributes
and annotations of that type are converted to a tuple where the first
element is the scalar value and the second is the string representation of
the units.
"""
res = pd.concat([conv_func(obj, parents=parents, child_first=child_first)
for obj in get_func(container)], axis=1)
return _sort_inds(res, axis=1)
def multi_spiketrains_to_dataframe(container,
parents=True, child_first=True):
"""Convert one or more `neo.SpikeTrain` objects to a `pandas.DataFrame`.
The objects can be any list, dict, or other iterable or mapping containing
spiketrains, as well as any neo object that can hold spiketrains:
`neo.Block`, `neo.ChannelIndex`, `neo.Unit`, and `neo.Segment`.
Objects are searched recursively, so the objects can be nested (such as a
list of blocks).
The `pandas.DataFrame` object has one column for each spiketrain, with each
element being the spike time converted to a `float` value in seconds.
Columns are padded to the same length with `NaN` values.
The column heading is a `pandas.MultiIndex` with one index
for each of the scalar attributes and annotations of the respective
spiketrain. The `index` is the spike number.
Parameters
----------
container : list, tuple, iterable, dict,
neo Block, neo Segment, neo Unit, neo ChannelIndex
The container for the spiketrains to convert.
parents : bool, optional
Also include attributes and annotations from parent neo
objects (if any).
child_first : bool, optional
If True (default True), values of child attributes are used
over parent attributes in the event of a name conflict.
If False, parent attributes are used.
This parameter does nothing if `parents` is False.
Returns
-------
pandas DataFrame
A DataFrame containing the spike times from `container`.
Notes
-----
The index name is `spike_number`.
Attributes that contain non-scalar values are skipped. So are
annotations or attributes containing a value of `None`.
`quantity.Quantities` types are incompatible with `pandas`, so attributes
and annotations of that type are converted to a tuple where the first
element is the scalar value and the second is the string representation of
the units.
"""
return _multi_objs_to_dataframe(container,
spiketrain_to_dataframe,
get_all_spiketrains,
parents=parents, child_first=child_first)
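# Usage sketch (hedged: `blk` is a hypothetical neo.Block holding several
# spiketrains):
#   df = multi_spiketrains_to_dataframe(blk)
# One column per spiketrain; shorter trains are padded with NaN, and each
# column's MultiIndex levels come from that train's scalar
# attributes/annotations.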
def multi_events_to_dataframe(container, parents=True, child_first=True):
"""Convert one or more `neo.Event` objects to a `pandas.DataFrame`.
The objects can be any list, dict, or other iterable or mapping containing
events, as well as any neo object that can hold events:
`neo.Block` and `neo.Segment`. Objects are searched recursively, so the
objects can be nested (such as a list of blocks).
The `pandas.DataFrame` object has one column for each event, with each
element being the event label. Columns are padded to the same length with
`NaN` values.
The column heading is a `pandas.MultiIndex` with one index
for each of the scalar attributes and annotations of the respective
event. The `index` is the time stamp from the `event.times` attribute.
Parameters
----------
container : list, tuple, iterable, dict, neo Block, neo Segment
The container for the events to convert.
parents : bool, optional
Also include attributes and annotations from parent neo
objects (if any).
child_first : bool, optional
If True (default True), values of child attributes are used
over parent attributes in the event of a name conflict.
If False, parent attributes are used.
This parameter does nothing if `parents` is False.
Returns
-------
pandas DataFrame
A DataFrame containing the labels from `container`.
Notes
-----
If the lengths of `event.times` and `event.labels` are not the same for any
individual event, the longer will be truncated to the length of the
shorter for that event. Between events, lengths can differ.
The index name is `times`.
Attributes that contain non-scalar values are skipped. So are
annotations or attributes containing a value of `None`.
`quantity.Quantities` types are incompatible with `pandas`, so attributes
and annotations of that type are converted to a tuple where the first
element is the scalar value and the second is the string representation of
the units.
"""
return _multi_objs_to_dataframe(container,
event_to_dataframe, get_all_events,
parents=parents, child_first=child_first)
def multi_epochs_to_dataframe(container, parents=True, child_first=True):
"""Convert one or more `neo.Epoch` objects to a `pandas.DataFrame`.
The objects can be any list, dict, or other iterable or mapping containing
epochs, as well as any neo object that can hold epochs:
`neo.Block` and `neo.Segment`. Objects are searched recursively, so the
objects can be nested (such as a list of blocks).
The `pandas.DataFrame` object has one column for each epoch, with each
element being the epoch label. Columns are padded to the same length with
`NaN` values.
The column heading is a `pandas.MultiIndex` with one index
for each of the scalar attributes and annotations of the respective
epoch. The `index` is a `pandas.MultiIndex`, with the first index being
the time stamp from the `epoch.times` attribute and the second being the
duration from the `epoch.durations` attribute.
Parameters
----------
container : list, tuple, iterable, dict, neo Block, neo Segment
The container for the epochs to convert.
parents : bool, optional
Also include attributes and annotations from parent neo
objects (if any).
child_first : bool, optional
If True (default True), values of child attributes are used
over parent attributes in the event of a name conflict.
If False, parent attributes are used.
This parameter does nothing if `parents` is False.
Returns
-------
pandas DataFrame
A DataFrame containing the labels from `container`.
Notes
-----
If the lengths of `epoch.times`, `epoch.durations`, and `epoch.labels` are
not the same for any individual epoch, the longer ones will be truncated to
the length of the shortest for that epoch. Between epochs, lengths can differ.
The index level names for `epoch.times` and `epoch.durations` are
`times` and `durations`, respectively.
Attributes that contain non-scalar values are skipped. So are
annotations or attributes containing a value of `None`.
`quantity.Quantities` types are incompatible with `pandas`, so attributes
and annotations of that type are converted to a tuple where the first
element is the scalar value and the second is the string representation of
the units.
"""
return _multi_objs_to_dataframe(container,
epoch_to_dataframe, get_all_epochs,
parents=parents, child_first=child_first)
def slice_spiketrain(pdobj, t_start=None, t_stop=None):
"""Slice a `pandas.DataFrame`, changing indices appropriately.
Values outside the sliced range are converted to `NaN` values.
Slicing happens over columns.
This sets the `t_start` and `t_stop` column indexes to be the new values.
Otherwise it is the same as setting values outside the range to `NaN`.
Parameters
----------
pdobj : pandas DataFrame
The DataFrame to slice.
t_start : float, optional
If specified, values in the returned DataFrame less than this are set
to `NaN`.
Default is `None` (do not use this argument).
t_stop : float, optional
If specified, values in the returned DataFrame greater than this are set
to `NaN`.
Default is `None` (do not use this argument).
Returns
-------
pdobj : scalar, pandas Series, DataFrame, or Panel
The returned data type is the same as the type of `pdobj`
Notes
-----
The order of the index and/or column levels of the returned object may
differ from the order of the original.
If `t_start` or `t_stop` is specified, all columns indexes will be changed
to the respective values, including those already within the new range.
If `t_start` or `t_stop` is not specified, those column indexes will not
be changed.
Returns a copy, even if `t_start` and `t_stop` are both `None`.
"""
if t_start is None and t_stop is None:
return pdobj.copy()
if t_stop is not None:
pdobj[pdobj > t_stop] = np.nan
pdobj = pdobj.T.reset_index(level='t_stop')
pdobj['t_stop'] = t_stop
pdobj = pdobj.set_index('t_stop', append=True).T
pdobj = _sort_inds(pdobj, axis=1)
if t_start is not None:
pdobj[pdobj < t_start] = np.nan
pdobj = pdobj.T.reset_index(level='t_start')
pdobj['t_start'] = t_start
pdobj = pdobj.set_index('t_start', append=True).T
pdobj = _sort_inds(pdobj, axis=1)
return pdobj
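# Usage sketch (hedged: assumes `df` comes from the spiketrain converters
# above, so its columns carry 't_start' and 't_stop' levels):
#   sliced = slice_spiketrain(df, t_start=0.5, t_stop=1.5)
# Spike times outside [0.5, 1.5] become NaN, and the column-level 't_start'
# and 't_stop' values are rewritten to 0.5 and 1.5, respectively.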
|
|
from logging import getLogger
from django.utils.translation import ugettext_lazy as _
from api.status import HTTP_201_CREATED
from api.api_views import APIView
from api.utils.db import get_object
from api.utils.views import call_api_view
from api.decorators import catch_api_exception
from api.exceptions import ExpectationFailed, InvalidInput, ObjectNotFound
from api.task.response import SuccessTaskResponse, FailureTaskResponse
from api.dns.domain.utils import get_domain
from api.dns.record.serializers import RecordSerializer
from api.dns.messages import LOG_RECORD_CREATE, LOG_RECORD_UPDATE, LOG_RECORD_DELETE, LOG_RECORDS_DELETE
from pdns.models import Domain, Record
logger = getLogger(__name__)
class RecordView(APIView):
Domain = Domain
Record = Record
_log_failure = False
dc_bound = False
order_by_default = ('id',)
order_by_fields = ('id', 'name', 'type', 'ttl', 'disabled', 'changed')
order_by_field_map = {'changed': 'change_date'}
def __init__(self, request, domain_name, record_id, data, record=None, task_id=None, related_obj=None):
super(RecordView, self).__init__(request)
self.domain_name = domain_name
self.record_id = record_id
self.data = data
self.task_id = task_id
self.related_obj = related_obj # Added into detail dict for task log purposes
if record: # Shortcut used by VmDefineSerializer.save_ptr/save_a and NodeDefineView/node_sysinfo_cb
self.record = record
self.domain = record.domain
self._log_failure = True
else:
self._set_record()
def _set_record(self):
request = self.request
record_id = self.record_id
# Check IsSuperAdmin or IsDomainOwner permissions in get_domain
self.domain = get_domain(request, self.domain_name, exists_ok=True, noexists_fail=True)
# Records for slave domains cannot be modified
if request.method != 'GET' and self.domain.type in (Domain.SLAVE, Domain.SUPERSLAVE):
raise ExpectationFailed(_('Changing DNS records is not allowed for %s domain') % self.domain.type)
if record_id is None: # Get many
records = self.data.get('records', None)
qs = self.domain.record_set.select_related('domain').order_by(*self.order_by)
if records is None:
if request.method == 'DELETE':
raise InvalidInput('Invalid records')
self.record = qs
else:
if not isinstance(records, (tuple, list)):
raise InvalidInput('Invalid records')
self.record = qs.filter(id__in=records)
else:
if record_id == 0: # New record
self.record = Record(domain=self.domain)
else: # Existing record
self.record = get_object(request, Record, {'domain': self.domain, 'id': record_id}, sr=('domain',),
noexists_fail=True)
def _fix_detail_dict(self, dd):
related_obj = self.related_obj
if related_obj:
# noinspection PyProtectedMember
dd[related_obj._meta.verbose_name_raw.lower()] = related_obj.log_name
return dd
def log_failure(self, msg):
if self._log_failure:
return {
'detail_dict': self._fix_detail_dict(self.data.copy()),
'msg': msg,
}
else:
return {}
def get(self, many=False):
if many or not self.record_id:
if self.full:
if self.record:
res = RecordSerializer(self.request, self.record, many=True).data
else:
res = []
else:
res = list(self.record.values_list('id', flat=True))
else:
res = RecordSerializer(self.request, self.record).data
return SuccessTaskResponse(self.request, res, dc_bound=False)
def post(self):
ser = RecordSerializer(self.request, self.record, data=self.data)
if not ser.is_valid():
return FailureTaskResponse(self.request, ser.errors, obj=self.domain, dc_bound=False,
task_id=self.task_id, **self.log_failure(LOG_RECORD_CREATE))
ser.object.save()
return SuccessTaskResponse(self.request, ser.data, status=HTTP_201_CREATED, obj=self.domain,
detail_dict=self._fix_detail_dict(ser.detail_dict()), msg=LOG_RECORD_CREATE,
task_id=self.task_id, dc_bound=False)
def put(self):
record = self.record
ser = RecordSerializer(self.request, record, data=self.data, partial=True)
if not ser.is_valid():
return FailureTaskResponse(self.request, ser.errors, obj=self.domain, dc_bound=False,
task_id=self.task_id, **self.log_failure(LOG_RECORD_UPDATE))
ser.object.save()
return SuccessTaskResponse(self.request, ser.data, obj=self.domain, msg=LOG_RECORD_UPDATE, dc_bound=False,
task_id=self.task_id, detail_dict=self._fix_detail_dict(ser.detail_dict()))
def delete(self, many=False):
record = self.record
if many:
assert not self.record_id
if not record: # SELECT count(*) from record ???
raise ObjectNotFound(model=Record)
msg = LOG_RECORDS_DELETE
dd = {'records': [r.desc for r in record]}
else:
msg = LOG_RECORD_DELETE
dd = {'record': record.desc}
record.delete()
return SuccessTaskResponse(self.request, None, obj=self.domain, msg=msg, detail_dict=self._fix_detail_dict(dd),
task_id=self.task_id, dc_bound=False)
@classmethod
def internal_response(cls, request, method, record, data, task_id=None, related_obj=None):
"""Called by VmDefineSerializer"""
return call_api_view(request, method, cls, record.domain.name, record.id, data=data, record=record,
task_id=task_id, related_obj=related_obj, api_class=True, log_response=True)
@classmethod
def internal_domain_get(cls, domain_name, task_id=None):
"""Used internally by some api functions"""
try:
return Domain.objects.get(name=domain_name)
except Domain.DoesNotExist:
raise ObjectNotFound(model=Domain, task_id=task_id)
@classmethod
@catch_api_exception
def add_or_update_record(cls, request, record_type, domain_name, name, content, task_id=None, **kwargs):
"""Called internally by some api functions"""
if not request.dc.settings.DNS_ENABLED:
logger.info('DNS support disabled: skipping add_or_update_record(%r, %r, %r %r)',
record_type, domain_name, name, content)
return None
name = name.lower() # DB constraint c_lowercase_name
domain = cls.internal_domain_get(domain_name, task_id=task_id)
try:
record = Record.objects.get(type=record_type, name=name, domain=domain)
except Record.DoesNotExist:
logger.info('Adding %s record "%s" with content "%s" on domain "%s"', record_type, name, content, domain)
record = Record(domain=domain)
method = 'POST'
data = {'type': record_type, 'domain': domain_name, 'name': name, 'content': content}
else:
logger.info('Updating %s record "%s" with content "%s" on domain "%s"', record_type, name, content, domain)
method = 'PUT'
data = {'content': content}
return cls.internal_response(request, method, record, data, task_id=task_id, **kwargs)
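# Usage sketch (hedged: `request` is the API request object and the domain,
# name, and content values below are placeholders):
#   RecordView.add_or_update_record(request, 'A', 'example.com',
#                                   'vm1.example.com', '10.0.0.5',
#                                   task_id=task_id)
# The record is created via POST when it does not exist yet, otherwise only
# its content is updated via PUT.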
@classmethod
@catch_api_exception
def delete_record(cls, request, record_type, domain_name, name, task_id=None, **kwargs):
"""Called internally by some api functions"""
if not request.dc.settings.DNS_ENABLED:
logger.info('DNS support disabled: skipping delete_record(%r, %r, %r)',
record_type, domain_name, name)
return None
name = name.lower() # DB constraint c_lowercase_name
domain = cls.internal_domain_get(domain_name, task_id=task_id)
try:
record = Record.objects.get(name=name, type=record_type, domain=domain)
except Record.DoesNotExist:
raise ObjectNotFound(model=Record)
logger.info('Deleting %s record "%s" on domain "%s"', record_type, name, domain)
return cls.internal_response(request, 'DELETE', record, {}, task_id=task_id, **kwargs)
|
|
#
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Dockerc plugin."""
import gettext
from otopi import plugin, util
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup import dialog
from ovirt_engine_setup.dockerc import constants as odockerccons
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
"""Dockerc plugin."""
def __init__(self, context):
super(Plugin, self).__init__(context=context)
self._enabled = True
@plugin.event(
stage=plugin.Stages.STAGE_INIT,
)
def _init(self):
self.environment.setdefault(
odockerccons.RemoveEnv.REMOVE_DOCKERC,
None
)
@plugin.event(
stage=plugin.Stages.STAGE_SETUP,
)
def _setup(self):
self._enabled = not self.environment[
osetupcons.CoreEnv.DEVELOPER_MODE
] and self.environment[
odockerccons.RemoveEnv.REMOVE_DCLIST
]
@plugin.event(
stage=plugin.Stages.STAGE_CUSTOMIZATION,
name=odockerccons.Stages.REMOVE_CUSTOMIZATION_DOCKERC,
before=(
osetupcons.Stages.DIALOG_TITLES_E_PRODUCT_OPTIONS,
),
condition=lambda self: self._enabled,
)
def _customization(self):
if self.environment[
osetupcons.RemoveEnv.REMOVE_ALL
]:
self.environment[
odockerccons.RemoveEnv.REMOVE_DOCKERC
] = True
else:
if self.environment[
odockerccons.RemoveEnv.REMOVE_DOCKERC
] is None:
self.environment[
odockerccons.RemoveEnv.REMOVE_DOCKERC
] = dialog.queryBoolean(
dialog=self.dialog,
name='OVESETUP_REMOVE_DOCKERC',
note=_(
'Do you want to remove the Setup-deployed\n'
'Docker containers ({clist})?\n'
'Data will be lost\n'
'(@VALUES@) [@DEFAULT@]: '
).format(
clist=self.environment[
odockerccons.RemoveEnv.REMOVE_DCLIST
]
),
prompt=True,
true=_('Yes'),
false=_('No'),
default=False,
)
if self.environment[
odockerccons.RemoveEnv.REMOVE_DOCKERC
]:
self.environment[
odockerccons.ConfigEnv.DOCKERC_NEEDED
] = True
@plugin.event(
stage=plugin.Stages.STAGE_MISC,
condition=lambda self: (
self.environment[odockerccons.RemoveEnv.REMOVE_DOCKERC] and
self._enabled
),
)
def _misc(self):
import docker
dcli = docker.Client(base_url='unix://var/run/docker.sock')
if self.environment[
odockerccons.RemoveEnv.REMOVE_DCLIST
]:
rlist = [
x.strip()
for x in self.environment[
odockerccons.RemoveEnv.REMOVE_DCLIST
].split(',')
if x
]
else:
rlist = []
for cont in rlist:
self.logger.info(_('Stopping {cname}').format(cname=cont))
try:
dcli.stop(
container=cont,
timeout=60,
)
except docker.errors.APIError as ex:
if ex.response.status_code == 404:
self.logger.warning(
_(
'Unable to stop {cname} container'
).format(
cname=cont,
)
)
else:
raise ex
self.logger.info(_('Removing {cname}').format(cname=cont))
try:
dcli.remove_container(
container=cont,
)
except docker.errors.APIError as ex:
if ex.response.status_code == 404:
self.logger.warning(
_(
'Unable to remove {cname} container'
).format(
cname=cont,
)
)
else:
raise ex
still_deployed = [
str(name).lstrip('/')
for d in dcli.containers(all=True)
for name in d['Names']
]
if still_deployed:
self.logger.info(
_(
'Keeping docker enabled and running because other '
'containers are still present:\n'
'{clist}'
).format(
clist=', '.join(still_deployed),
)
)
else:
self.logger.info(_('Stopping and disabling docker'))
if self.services.exists(
name=odockerccons.Const.DOCKER_SERVICE_NANE
):
self.services.startup(
name=odockerccons.Const.DOCKER_SERVICE_NANE,
state=False,
)
self.services.state(
name=odockerccons.Const.DOCKER_SERVICE_NANE,
state=False,
)
self.environment[
odockerccons.RemoveEnv.REMOVE_DCLIST
] = ''
# vim: expandtab tabstop=4 shiftwidth=4
|
|
"""Collections contain content documents and blueprints."""
from . import documents
from . import messages
from grow.common import structures
from grow.common import utils
from grow.pods import locales
import json
import operator
import os
_all = '__no-locale'
class Error(Exception):
pass
class CollectionNotEmptyError(Error):
pass
class BadCollectionNameError(Error, ValueError):
pass
class CollectionDoesNotExistError(Error, ValueError):
pass
class CollectionExistsError(Error):
pass
class BadFieldsError(Error, ValueError):
pass
class NoLocalesError(Error):
pass
class Collection(object):
def __init__(self, pod_path, _pod):
utils.validate_name(pod_path)
self.pod = _pod
self.collection_path = pod_path.lstrip('/content')
self.pod_path = pod_path
self._default_locale = _pod.podspec.default_locale
self._blueprint_path = os.path.join(self.pod_path, '_blueprint.yaml')
def __repr__(self):
return '<Collection "{}">'.format(self.collection_path)
@classmethod
def list(cls, pod):
paths = pod.list_dir('/content/')
# TODO: replace with depth
clean_paths = set()
for path in paths:
parts = path.split('/')
if len(parts) >= 2: # Disallow files in root-level /content/ dir.
clean_paths.add(os.path.join('/content', parts[0]))
return [cls(pod_path, _pod=pod) for pod_path in clean_paths]
def exists(self):
return self.pod.file_exists(self._blueprint_path)
def create_from_message(self, message):
if self.exists():
raise CollectionExistsError('{} already exists.'.format(self))
self.update_from_message(message)
return self
@classmethod
def get(cls, collection_path, _pod):
collection = cls(collection_path, _pod)
if not collection.exists():
raise CollectionDoesNotExistError('{} does not exist.'.format(collection))
return collection
def get_doc(self, pod_path, locale=None):
doc = documents.Document(pod_path, locale=locale, _pod=self.pod, _collection=self)
if not doc.exists():
raise documents.DocumentDoesNotExistError('{} does not exist.'.format(doc))
return doc
@property
@utils.memoize
def yaml(self):
return utils.parse_yaml(self.pod.read_file(self._blueprint_path))
def list_categories(self):
return self.yaml.get('categories')
@property
def title(self):
return self.yaml.get('title')
def delete(self):
if len(self.list_documents(include_hidden=True)):
text = 'Collections that are not empty cannot be deleted.'
raise CollectionNotEmptyError(text)
self.pod.delete_file(self._blueprint_path)
def update_from_message(self, message):
if not message.fields:
raise BadFieldsError('Fields are required to create a collection.')
fields = json.loads(message.fields)
fields = utils.dump_yaml(fields)
self.pod.write_file(self._blueprint_path, fields)
def get_view(self):
return self.yaml.get('view')
def get_path_format(self):
return self.yaml.get('path')
# TODO(jeremydw): Consolidate list_docs, search_docs, and list_documents!
# Implement a thread pool for document parsing and use a map of files to
# paths for routing efficiency.
def list_docs(self, order_by=None, reverse=None):
# TODO(jeremydw): Implement this, and search, and kill list_documents.
pass
def search_docs(self, order_by=None, locale=None):
order_by = 'order' if order_by is None else order_by
sorted_docs = structures.SortedCollection(key=operator.attrgetter(order_by))
for path in self.pod.list_dir(self.pod_path):
pod_path = os.path.join(self.pod_path, path.lstrip('/'))
slug, ext = os.path.splitext(os.path.basename(pod_path))
if slug.startswith('_') or ext not in messages.extensions_to_formats:
continue
doc = self.get_doc(pod_path)
if locale is None:
sorted_docs.insert(doc)
continue
for each_locale in doc.list_locales():
if each_locale == locale:
sorted_docs.insert(self.get_doc(pod_path, locale=locale))
return sorted_docs
def list_documents(self, order_by=None, reverse=None, include_hidden=False, locale=_all):
if order_by is None:
order_by = 'order'
if reverse is None:
reverse = False
paths = self.pod.list_dir(self.pod_path)
sorted_docs = structures.SortedCollection(key=operator.attrgetter(order_by))
def process(path):
pod_path = os.path.join(self.pod_path, path.lstrip('/'))
slug, ext = os.path.splitext(os.path.basename(pod_path))
if (slug.startswith('_')
or ext not in messages.extensions_to_formats
or not pod_path):
return
doc = self.get_doc(pod_path)
if locale in [_all, None]:
sorted_docs.insert(doc)
if locale is None:
return
for each_locale in doc.list_locales():
if each_locale == self._default_locale:
continue
if each_locale == locale or locale == _all:
doc = self.get_doc(pod_path, locale=each_locale)
sorted_docs.insert(doc)
for path in paths:
process(path)
return reversed(sorted_docs) if reverse else sorted_docs
def list_servable_documents(self, include_hidden=False):
docs = []
for doc in self.list_documents(include_hidden=include_hidden):
if self.yaml.get('draft'):
continue
if not doc.has_url() or not doc.get_view():
continue
docs.append(doc)
return docs
@property
def localization(self):
return self.yaml.get('localization')
def list_locales(self):
if 'localization' in self.yaml:
if self.yaml['localization'].get('use_podspec_locales'):
return self.pod.list_locales()
try:
return locales.Locale.parse_codes(self.localization['locales'])
except KeyError:
# Locales inherited from podspec.
podspec = self.pod.get_podspec()
config = podspec.get_config()
if 'localization' in config and 'locales' in config['localization']:
return locales.Locale.parse_codes(config['localization']['locales'])
raise NoLocalesError('{} has no locales.'.format(self))
return []
def search_documents(self, order_by='order'):
return self.list_documents(order_by=order_by)
def to_message(self):
message = messages.CollectionMessage()
message.title = self.title
message.collection_path = self.collection_path
return message
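# Usage sketch (hedged: `pod` is a hypothetical pod object):
#   collection = Collection.get('/content/pages', _pod=pod)
#   for doc in collection.list_documents(order_by='order'):
#       print(doc)
# list_servable_documents() additionally skips drafts and documents without
# a URL or a view.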
|
|
import datetime
import logging
import os
from typing import List, Optional
from PyQt5 import QtWidgets, QtGui
from .script import ScriptUI
from .wizard.sequencewizard import SequenceWizard
from .scripting_ui import Ui_Form
from ...utils.filebrowsers import getOpenFile, getSaveFile
from ....core2.instrument.components.interpreter import ParsingError
from ....core2.instrument.instrument import Instrument
from ....core2.commands import Command
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Scripting(QtWidgets.QWidget, Ui_Form):
mainwindow: "MainWindow"
instrument: Instrument
scripts: List[ScriptUI]
wizard: Optional[SequenceWizard] = None
def __init__(self, **kwargs):
self.mainwindow = kwargs.pop('mainwindow')
self.instrument = kwargs.pop('instrument')
self.scripts = []
super().__init__(**kwargs)
self.setupUi(self)
def setupUi(self, Form):
super().setupUi(Form)
self.startStopToolButton.clicked.connect(self.startStopScript)
self.newToolButton.clicked.connect(self.newScript)
self.saveToolButton.clicked.connect(self.saveScript)
self.saveAsToolButton.clicked.connect(self.saveScriptAs)
self.loadToolButton.clicked.connect(self.openScript)
self.wizardToolButton.clicked.connect(self.openScriptWizard)
menu = QtWidgets.QMenu()
menu.addAction(self.actionMeasurement_sequence_wizard)
menu.addAction(self.actionScan_wizard)
self.wizardToolButton.setMenu(menu)
self.actionMeasurement_sequence_wizard.triggered.connect(self.openScriptWizard)
self.actionScan_wizard.triggered.connect(self.openScanWizard)
self.copyToolButton.clicked.connect(self.editCopy)
self.cutToolButton.clicked.connect(self.editCut)
self.pasteToolButton.clicked.connect(self.editPaste)
self.undoToolButton.clicked.connect(self.undo)
self.redoToolButton.clicked.connect(self.redo)
self.tabWidget.currentChanged.connect(self.currentTabChanged)
self.tabWidget.tabCloseRequested.connect(self.tabCloseRequested)
QtWidgets.QApplication.clipboard().dataChanged.connect(self.onClipboardDataChanged)
self.newScript()
self.instrument.interpreter.scriptstarted.connect(self.onScriptStarted)
self.instrument.interpreter.scriptfinished.connect(self.onScriptFinished)
self.instrument.interpreter.advance.connect(self.onScriptAdvance)
self.instrument.interpreter.message.connect(self.onScriptMessage)
self.instrument.interpreter.flags.newFlag.connect(self.onNewFlag)
self.instrument.interpreter.flags.flagChanged.connect(self.onFlagChanged)
self.instrument.interpreter.flags.flagRemoved.connect(self.onFlagRemoved)
self.flagsHorizontalLayout.addStretch(1)
self.listWidget.clear()
for command in sorted([c for c in Command.subclasses() if isinstance(c.name, str)], key=lambda c: c.name):
item = QtWidgets.QListWidgetItem(command.name)
item.setToolTip(command.helptext())
self.listWidget.addItem(item)
def onNewFlag(self, flagname: str, flagstate: bool):
child = self.findChild(QtWidgets.QToolButton, f'flag_{flagname}_ToolButton')
assert child is None
tb = QtWidgets.QToolButton(self)
tb.setText(flagname)
tb.setCheckable(True)
tb.setChecked(flagstate)
tb.setObjectName(f'flag_{flagname}_ToolButton')
tb.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred))
tb.toggled.connect(self.onFlagToolButtonToggled)
self.flagsHorizontalLayout.insertWidget(self.flagsHorizontalLayout.count() - 1, tb)
def onFlagToolButtonToggled(self):
flagname = self.sender().objectName().split('_')[1]
self.instrument.interpreter.flags.setFlag(flagname, self.sender().isChecked())
def onFlagChanged(self, flagname: str, flagstate: bool):
logger.debug(f'onFlagChanged {flagname=}, {flagstate=}')
tb = self.findChild(QtWidgets.QToolButton, f'flag_{flagname}_ToolButton')
tb.blockSignals(True)
tb.setChecked(flagstate)
tb.blockSignals(False)
def onFlagRemoved(self, flagname: str):
tb = self.findChild(QtWidgets.QToolButton, f'flag_{flagname}_ToolButton')
self.flagsHorizontalLayout.removeWidget(tb)
tb.deleteLater()
def onScriptMessage(self, message: str):
self.currentScript().addMessage(message)
def onScriptAdvance(self, currentline: int):
logger.debug('onScriptAdvance')
scriptui = self.runningScript()
assert scriptui is not None
scriptui.scriptEditor.highlightRunningLine(currentline)
def onScriptStarted(self):
self.startStopToolButton.setText('Stop')
self.startStopToolButton.setIcon(QtGui.QIcon(QtGui.QPixmap(':/icons/stop.svg')))
self.currentScript().outputPlainTextEdit.setVisible(True)
self.currentScript().addMessage('Script started')
def onScriptFinished(self, success: bool, message: str):
if success:
self.currentScript().addMessage(f'Script finished successfully with message "{message}".')
else:
self.currentScript().addMessage(f'Script failed with message "{message}".')
self.startStopToolButton.setText('Start')
self.startStopToolButton.setIcon(QtGui.QIcon(QtGui.QPixmap(':/icons/start.svg')))
if not success:
QtWidgets.QMessageBox.critical(self, 'Script failed', f'Script failed with message: {message}')
sui = self.runningScript()
sui.scriptEditor.setReadOnly(False)
sui.scriptEditor.highlightCurrentLine()
def onClipboardDataChanged(self):
self.pasteToolButton.setEnabled(self.currentScript().canPaste())
def currentTabChanged(self, index: int):
scriptui = self.currentScript()
self.undoToolButton.setEnabled(scriptui.canUndo())
self.redoToolButton.setEnabled(scriptui.canRedo())
self.pasteToolButton.setEnabled(scriptui.canPaste())
def tabCloseRequested(self, index: int):
if self.scripts[index].isRunning():
QtWidgets.QMessageBox.critical(self, 'Script is running', 'Cannot close the running script!')
return
if self.scripts[index].isModified():
result = QtWidgets.QMessageBox.question(
self, 'Confirm close', 'There are unsaved changes to this script. Do you want to save them?',
buttons=QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)
if result == QtWidgets.QMessageBox.Yes:
self.scripts[index].save()
elif result == QtWidgets.QMessageBox.Cancel:
return
self.tabWidget.removeTab(index)
self.scripts[index].deleteLater()
del self.scripts[index]
def startStopScript(self):
if self.startStopToolButton.text() == 'Start':
try:
self.instrument.interpreter.parseScript(self.currentScript().text())
self.currentScript().scriptEditor.setReadOnly(True)
except ParsingError as pe:
QtWidgets.QMessageBox.critical(self, 'Parsing error',
f'Line {pe.args[0] + 1} is invalid. Error message: {pe.args[1]}')
return
self.instrument.interpreter.execute()
elif self.startStopToolButton.text() == 'Stop':
self.instrument.interpreter.stop()
def openScanWizard(self):
pass
def openScriptWizard(self):
if self.wizard is not None:
QtWidgets.QMessageBox.critical(self, 'Error', 'Another wizard is already open.')
return
self.wizard = SequenceWizard(parent=self)
self.wizard.finished.connect(self.onWizardFinished)
self.wizard.show()
def onWizardFinished(self, result: int):
# ToDo: create script.
logger.debug(f'Wizard finished with result {result}.')
self.wizard.close()
if result:
# try to find an unmodified Untitled script
try:
s = [s for s in self.scripts if (not s.text().strip()) and (not s.isModified())][0]
except IndexError:
s = self.newScript()
self.tabWidget.setCurrentWidget(s)
s.scriptEditor.setPlainText(self.wizard.script())
s.scriptEditor.document().setModified(True)
self.wizard.deleteLater()
self.wizard = None
def stopScriptAfterThisCommand(self):
pass
def newScript(self) -> ScriptUI:
s = ScriptUI()
self._createTab(s)
return s
def openScript(self):
filename = getOpenFile(self, 'Load a script...', '', 'CCT script files (*.cct);;All files (*)')
logger.debug(f'Got filename: {filename}')
if not filename:
return
sui = ScriptUI()
with open(filename, 'rt') as f:
sui.scriptEditor.document().setPlainText(f.read())
sui.scriptEditor.document().setModified(False)
sui.filename = filename
self._createTab(sui)
def saveScript(self):
self.currentScript().save()
def saveScriptAs(self):
self.currentScript().saveas()
def undo(self):
self.currentScript().undo()
def redo(self):
self.currentScript().redo()
def editCopy(self):
self.currentScript().editCopy()
def editCut(self):
self.currentScript().editCut()
def editPaste(self):
self.currentScript().editPaste()
def currentScript(self) -> ScriptUI:
return self.scripts[self.tabWidget.currentIndex()]
def _createTab(self, script: ScriptUI):
self.scripts.append(script)
self.tabWidget.addTab(self.scripts[-1], script.getTitle())
self.tabWidget.setCurrentIndex(len(self.scripts) - 1)
script.modificationChanged.connect(self.onScriptModificationChanged)
script.undoAvailable.connect(self.undoToolButton.setEnabled)
script.redoAvailable.connect(self.redoToolButton.setEnabled)
script.copyAvailable.connect(self.copyToolButton.setEnabled)
script.copyAvailable.connect(self.cutToolButton.setEnabled)
def onScriptModificationChanged(self, modified: bool):
self.tabWidget.setTabText(self.scripts.index(self.sender()), self.sender().getTitle())
def runningScript(self) -> Optional[ScriptUI]:
try:
return [s for s in self.scripts if s.isRunning()][0]
except IndexError:
return None
|
|
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generated client library for storage version v1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
import storage_v1_messages as messages
class StorageV1(base_api.BaseApiClient):
"""Generated client library for service storage version v1."""
MESSAGES_MODULE = messages
_PACKAGE = u'storage'
_SCOPES = [u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/cloud-platform.read-only', u'https://www.googleapis.com/auth/devstorage.full_control', u'https://www.googleapis.com/auth/devstorage.read_only', u'https://www.googleapis.com/auth/devstorage.read_write']
_VERSION = u'v1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_CLIENT_CLASS_NAME = u'StorageV1'
_URL_VERSION = u'v1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None):
"""Create a new storage handle."""
url = url or u'https://www.googleapis.com/storage/v1/'
super(StorageV1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers)
self.bucketAccessControls = self.BucketAccessControlsService(self)
self.buckets = self.BucketsService(self)
self.channels = self.ChannelsService(self)
self.defaultObjectAccessControls = self.DefaultObjectAccessControlsService(self)
self.objectAccessControls = self.ObjectAccessControlsService(self)
self.objects = self.ObjectsService(self)
class BucketAccessControlsService(base_api.BaseApiService):
"""Service class for the bucketAccessControls resource."""
_NAME = u'bucketAccessControls'
def __init__(self, client):
super(StorageV1.BucketAccessControlsService, self).__init__(client)
self._method_configs = {
'Delete': base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'storage.bucketAccessControls.delete',
ordered_params=[u'bucket', u'entity'],
path_params=[u'bucket', u'entity'],
query_params=[],
relative_path=u'b/{bucket}/acl/{entity}',
request_field='',
request_type_name=u'StorageBucketAccessControlsDeleteRequest',
response_type_name=u'StorageBucketAccessControlsDeleteResponse',
supports_download=False,
),
'Get': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'storage.bucketAccessControls.get',
ordered_params=[u'bucket', u'entity'],
path_params=[u'bucket', u'entity'],
query_params=[],
relative_path=u'b/{bucket}/acl/{entity}',
request_field='',
request_type_name=u'StorageBucketAccessControlsGetRequest',
response_type_name=u'BucketAccessControl',
supports_download=False,
),
'Insert': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'storage.bucketAccessControls.insert',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[],
relative_path=u'b/{bucket}/acl',
request_field='<request>',
request_type_name=u'BucketAccessControl',
response_type_name=u'BucketAccessControl',
supports_download=False,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'storage.bucketAccessControls.list',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[],
relative_path=u'b/{bucket}/acl',
request_field='',
request_type_name=u'StorageBucketAccessControlsListRequest',
response_type_name=u'BucketAccessControls',
supports_download=False,
),
'Patch': base_api.ApiMethodInfo(
http_method=u'PATCH',
method_id=u'storage.bucketAccessControls.patch',
ordered_params=[u'bucket', u'entity'],
path_params=[u'bucket', u'entity'],
query_params=[],
relative_path=u'b/{bucket}/acl/{entity}',
request_field='<request>',
request_type_name=u'BucketAccessControl',
response_type_name=u'BucketAccessControl',
supports_download=False,
),
'Update': base_api.ApiMethodInfo(
http_method=u'PUT',
method_id=u'storage.bucketAccessControls.update',
ordered_params=[u'bucket', u'entity'],
path_params=[u'bucket', u'entity'],
query_params=[],
relative_path=u'b/{bucket}/acl/{entity}',
request_field='<request>',
request_type_name=u'BucketAccessControl',
response_type_name=u'BucketAccessControl',
supports_download=False,
),
}
self._upload_configs = {
}
def Delete(self, request, global_params=None):
"""Permanently deletes the ACL entry for the specified entity on the specified bucket.
Args:
request: (StorageBucketAccessControlsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(StorageBucketAccessControlsDeleteResponse) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
def Get(self, request, global_params=None):
"""Returns the ACL entry for the specified entity on the specified bucket.
Args:
request: (StorageBucketAccessControlsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BucketAccessControl) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
def Insert(self, request, global_params=None):
"""Creates a new ACL entry on the specified bucket.
Args:
request: (BucketAccessControl) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BucketAccessControl) The response message.
"""
config = self.GetMethodConfig('Insert')
return self._RunMethod(
config, request, global_params=global_params)
def List(self, request, global_params=None):
"""Retrieves ACL entries on the specified bucket.
Args:
request: (StorageBucketAccessControlsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BucketAccessControls) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
def Patch(self, request, global_params=None):
"""Updates an ACL entry on the specified bucket. This method supports patch semantics.
Args:
request: (BucketAccessControl) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BucketAccessControl) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
def Update(self, request, global_params=None):
"""Updates an ACL entry on the specified bucket.
Args:
request: (BucketAccessControl) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BucketAccessControl) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
class BucketsService(base_api.BaseApiService):
"""Service class for the buckets resource."""
_NAME = u'buckets'
def __init__(self, client):
super(StorageV1.BucketsService, self).__init__(client)
self._method_configs = {
'Delete': base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'storage.buckets.delete',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch'],
relative_path=u'b/{bucket}',
request_field='',
request_type_name=u'StorageBucketsDeleteRequest',
response_type_name=u'StorageBucketsDeleteResponse',
supports_download=False,
),
'Get': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'storage.buckets.get',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'projection'],
relative_path=u'b/{bucket}',
request_field='',
request_type_name=u'StorageBucketsGetRequest',
response_type_name=u'Bucket',
supports_download=False,
),
'Insert': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'storage.buckets.insert',
ordered_params=[u'project'],
path_params=[],
query_params=[u'predefinedAcl', u'predefinedDefaultObjectAcl', u'project', u'projection'],
relative_path=u'b',
request_field=u'bucket',
request_type_name=u'StorageBucketsInsertRequest',
response_type_name=u'Bucket',
supports_download=False,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'storage.buckets.list',
ordered_params=[u'project'],
path_params=[],
query_params=[u'maxResults', u'pageToken', u'prefix', u'project', u'projection'],
relative_path=u'b',
request_field='',
request_type_name=u'StorageBucketsListRequest',
response_type_name=u'Buckets',
supports_download=False,
),
'Patch': base_api.ApiMethodInfo(
http_method=u'PATCH',
method_id=u'storage.buckets.patch',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'predefinedAcl', u'predefinedDefaultObjectAcl', u'projection'],
relative_path=u'b/{bucket}',
request_field=u'bucketResource',
request_type_name=u'StorageBucketsPatchRequest',
response_type_name=u'Bucket',
supports_download=False,
),
'Update': base_api.ApiMethodInfo(
http_method=u'PUT',
method_id=u'storage.buckets.update',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'predefinedAcl', u'predefinedDefaultObjectAcl', u'projection'],
relative_path=u'b/{bucket}',
request_field=u'bucketResource',
request_type_name=u'StorageBucketsUpdateRequest',
response_type_name=u'Bucket',
supports_download=False,
),
}
self._upload_configs = {
}
def Delete(self, request, global_params=None):
"""Permanently deletes an empty bucket.
Args:
request: (StorageBucketsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(StorageBucketsDeleteResponse) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
def Get(self, request, global_params=None):
"""Returns metadata for the specified bucket.
Args:
request: (StorageBucketsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Bucket) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
def Insert(self, request, global_params=None):
"""Creates a new bucket.
Args:
request: (StorageBucketsInsertRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Bucket) The response message.
"""
config = self.GetMethodConfig('Insert')
return self._RunMethod(
config, request, global_params=global_params)
def List(self, request, global_params=None):
"""Retrieves a list of buckets for a given project.
Args:
request: (StorageBucketsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Buckets) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
def Patch(self, request, global_params=None):
"""Updates a bucket. This method supports patch semantics.
Args:
request: (StorageBucketsPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Bucket) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
def Update(self, request, global_params=None):
"""Updates a bucket.
Args:
request: (StorageBucketsUpdateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Bucket) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
class ChannelsService(base_api.BaseApiService):
"""Service class for the channels resource."""
_NAME = u'channels'
def __init__(self, client):
super(StorageV1.ChannelsService, self).__init__(client)
self._method_configs = {
'Stop': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'storage.channels.stop',
ordered_params=[],
path_params=[],
query_params=[],
relative_path=u'channels/stop',
request_field='<request>',
request_type_name=u'Channel',
response_type_name=u'StorageChannelsStopResponse',
supports_download=False,
),
}
self._upload_configs = {
}
def Stop(self, request, global_params=None):
"""Stop watching resources through this channel.
Args:
request: (Channel) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(StorageChannelsStopResponse) The response message.
"""
config = self.GetMethodConfig('Stop')
return self._RunMethod(
config, request, global_params=global_params)
class DefaultObjectAccessControlsService(base_api.BaseApiService):
"""Service class for the defaultObjectAccessControls resource."""
_NAME = u'defaultObjectAccessControls'
def __init__(self, client):
super(StorageV1.DefaultObjectAccessControlsService, self).__init__(client)
self._method_configs = {
'Delete': base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'storage.defaultObjectAccessControls.delete',
ordered_params=[u'bucket', u'entity'],
path_params=[u'bucket', u'entity'],
query_params=[],
relative_path=u'b/{bucket}/defaultObjectAcl/{entity}',
request_field='',
request_type_name=u'StorageDefaultObjectAccessControlsDeleteRequest',
response_type_name=u'StorageDefaultObjectAccessControlsDeleteResponse',
supports_download=False,
),
'Get': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'storage.defaultObjectAccessControls.get',
ordered_params=[u'bucket', u'entity'],
path_params=[u'bucket', u'entity'],
query_params=[],
relative_path=u'b/{bucket}/defaultObjectAcl/{entity}',
request_field='',
request_type_name=u'StorageDefaultObjectAccessControlsGetRequest',
response_type_name=u'ObjectAccessControl',
supports_download=False,
),
'Insert': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'storage.defaultObjectAccessControls.insert',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[],
relative_path=u'b/{bucket}/defaultObjectAcl',
request_field='<request>',
request_type_name=u'ObjectAccessControl',
response_type_name=u'ObjectAccessControl',
supports_download=False,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'storage.defaultObjectAccessControls.list',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[u'ifMetagenerationMatch', u'ifMetagenerationNotMatch'],
relative_path=u'b/{bucket}/defaultObjectAcl',
request_field='',
request_type_name=u'StorageDefaultObjectAccessControlsListRequest',
response_type_name=u'ObjectAccessControls',
supports_download=False,
),
'Patch': base_api.ApiMethodInfo(
http_method=u'PATCH',
method_id=u'storage.defaultObjectAccessControls.patch',
ordered_params=[u'bucket', u'entity'],
path_params=[u'bucket', u'entity'],
query_params=[],
relative_path=u'b/{bucket}/defaultObjectAcl/{entity}',
request_field='<request>',
request_type_name=u'ObjectAccessControl',
response_type_name=u'ObjectAccessControl',
supports_download=False,
),
'Update': base_api.ApiMethodInfo(
http_method=u'PUT',
method_id=u'storage.defaultObjectAccessControls.update',
ordered_params=[u'bucket', u'entity'],
path_params=[u'bucket', u'entity'],
query_params=[],
relative_path=u'b/{bucket}/defaultObjectAcl/{entity}',
request_field='<request>',
request_type_name=u'ObjectAccessControl',
response_type_name=u'ObjectAccessControl',
supports_download=False,
),
}
self._upload_configs = {
}
def Delete(self, request, global_params=None):
"""Permanently deletes the default object ACL entry for the specified entity on the specified bucket.
Args:
request: (StorageDefaultObjectAccessControlsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(StorageDefaultObjectAccessControlsDeleteResponse) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
def Get(self, request, global_params=None):
"""Returns the default object ACL entry for the specified entity on the specified bucket.
Args:
request: (StorageDefaultObjectAccessControlsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ObjectAccessControl) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
def Insert(self, request, global_params=None):
"""Creates a new default object ACL entry on the specified bucket.
Args:
request: (ObjectAccessControl) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ObjectAccessControl) The response message.
"""
config = self.GetMethodConfig('Insert')
return self._RunMethod(
config, request, global_params=global_params)
def List(self, request, global_params=None):
"""Retrieves default object ACL entries on the specified bucket.
Args:
request: (StorageDefaultObjectAccessControlsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ObjectAccessControls) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
def Patch(self, request, global_params=None):
"""Updates a default object ACL entry on the specified bucket. This method supports patch semantics.
Args:
request: (ObjectAccessControl) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ObjectAccessControl) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
def Update(self, request, global_params=None):
"""Updates a default object ACL entry on the specified bucket.
Args:
request: (ObjectAccessControl) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ObjectAccessControl) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
class ObjectAccessControlsService(base_api.BaseApiService):
"""Service class for the objectAccessControls resource."""
_NAME = u'objectAccessControls'
def __init__(self, client):
super(StorageV1.ObjectAccessControlsService, self).__init__(client)
self._method_configs = {
'Delete': base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'storage.objectAccessControls.delete',
ordered_params=[u'bucket', u'object', u'entity'],
path_params=[u'bucket', u'entity', u'object'],
query_params=[u'generation'],
relative_path=u'b/{bucket}/o/{object}/acl/{entity}',
request_field='',
request_type_name=u'StorageObjectAccessControlsDeleteRequest',
response_type_name=u'StorageObjectAccessControlsDeleteResponse',
supports_download=False,
),
'Get': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'storage.objectAccessControls.get',
ordered_params=[u'bucket', u'object', u'entity'],
path_params=[u'bucket', u'entity', u'object'],
query_params=[u'generation'],
relative_path=u'b/{bucket}/o/{object}/acl/{entity}',
request_field='',
request_type_name=u'StorageObjectAccessControlsGetRequest',
response_type_name=u'ObjectAccessControl',
supports_download=False,
),
'Insert': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'storage.objectAccessControls.insert',
ordered_params=[u'bucket', u'object'],
path_params=[u'bucket', u'object'],
query_params=[u'generation'],
relative_path=u'b/{bucket}/o/{object}/acl',
request_field=u'objectAccessControl',
request_type_name=u'StorageObjectAccessControlsInsertRequest',
response_type_name=u'ObjectAccessControl',
supports_download=False,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'storage.objectAccessControls.list',
ordered_params=[u'bucket', u'object'],
path_params=[u'bucket', u'object'],
query_params=[u'generation'],
relative_path=u'b/{bucket}/o/{object}/acl',
request_field='',
request_type_name=u'StorageObjectAccessControlsListRequest',
response_type_name=u'ObjectAccessControls',
supports_download=False,
),
'Patch': base_api.ApiMethodInfo(
http_method=u'PATCH',
method_id=u'storage.objectAccessControls.patch',
ordered_params=[u'bucket', u'object', u'entity'],
path_params=[u'bucket', u'entity', u'object'],
query_params=[u'generation'],
relative_path=u'b/{bucket}/o/{object}/acl/{entity}',
request_field=u'objectAccessControl',
request_type_name=u'StorageObjectAccessControlsPatchRequest',
response_type_name=u'ObjectAccessControl',
supports_download=False,
),
'Update': base_api.ApiMethodInfo(
http_method=u'PUT',
method_id=u'storage.objectAccessControls.update',
ordered_params=[u'bucket', u'object', u'entity'],
path_params=[u'bucket', u'entity', u'object'],
query_params=[u'generation'],
relative_path=u'b/{bucket}/o/{object}/acl/{entity}',
request_field=u'objectAccessControl',
request_type_name=u'StorageObjectAccessControlsUpdateRequest',
response_type_name=u'ObjectAccessControl',
supports_download=False,
),
}
self._upload_configs = {
}
def Delete(self, request, global_params=None):
"""Permanently deletes the ACL entry for the specified entity on the specified object.
Args:
request: (StorageObjectAccessControlsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(StorageObjectAccessControlsDeleteResponse) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
def Get(self, request, global_params=None):
"""Returns the ACL entry for the specified entity on the specified object.
Args:
request: (StorageObjectAccessControlsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ObjectAccessControl) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
def Insert(self, request, global_params=None):
"""Creates a new ACL entry on the specified object.
Args:
request: (StorageObjectAccessControlsInsertRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ObjectAccessControl) The response message.
"""
config = self.GetMethodConfig('Insert')
return self._RunMethod(
config, request, global_params=global_params)
def List(self, request, global_params=None):
"""Retrieves ACL entries on the specified object.
Args:
request: (StorageObjectAccessControlsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ObjectAccessControls) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
def Patch(self, request, global_params=None):
"""Updates an ACL entry on the specified object. This method supports patch semantics.
Args:
request: (StorageObjectAccessControlsPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ObjectAccessControl) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
def Update(self, request, global_params=None):
"""Updates an ACL entry on the specified object.
Args:
request: (StorageObjectAccessControlsUpdateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ObjectAccessControl) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
class ObjectsService(base_api.BaseApiService):
"""Service class for the objects resource."""
_NAME = u'objects'
def __init__(self, client):
super(StorageV1.ObjectsService, self).__init__(client)
self._method_configs = {
'Compose': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'storage.objects.compose',
ordered_params=[u'destinationBucket', u'destinationObject'],
path_params=[u'destinationBucket', u'destinationObject'],
query_params=[u'destinationPredefinedAcl', u'ifGenerationMatch', u'ifMetagenerationMatch'],
relative_path=u'b/{destinationBucket}/o/{destinationObject}/compose',
request_field=u'composeRequest',
request_type_name=u'StorageObjectsComposeRequest',
response_type_name=u'Object',
supports_download=True,
),
'Copy': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'storage.objects.copy',
ordered_params=[u'sourceBucket', u'sourceObject', u'destinationBucket', u'destinationObject'],
path_params=[u'destinationBucket', u'destinationObject', u'sourceBucket', u'sourceObject'],
query_params=[u'destinationPredefinedAcl', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'ifSourceGenerationMatch', u'ifSourceGenerationNotMatch', u'ifSourceMetagenerationMatch', u'ifSourceMetagenerationNotMatch', u'projection', u'sourceGeneration'],
relative_path=u'b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}',
request_field=u'object',
request_type_name=u'StorageObjectsCopyRequest',
response_type_name=u'Object',
supports_download=True,
),
'Delete': base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'storage.objects.delete',
ordered_params=[u'bucket', u'object'],
path_params=[u'bucket', u'object'],
query_params=[u'generation', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch'],
relative_path=u'b/{bucket}/o/{object}',
request_field='',
request_type_name=u'StorageObjectsDeleteRequest',
response_type_name=u'StorageObjectsDeleteResponse',
supports_download=False,
),
'Get': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'storage.objects.get',
ordered_params=[u'bucket', u'object'],
path_params=[u'bucket', u'object'],
query_params=[u'generation', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'projection'],
relative_path=u'b/{bucket}/o/{object}',
request_field='',
request_type_name=u'StorageObjectsGetRequest',
response_type_name=u'Object',
supports_download=True,
),
'Insert': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'storage.objects.insert',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[u'contentEncoding', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'name', u'predefinedAcl', u'projection'],
relative_path=u'b/{bucket}/o',
request_field=u'object',
request_type_name=u'StorageObjectsInsertRequest',
response_type_name=u'Object',
supports_download=True,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'storage.objects.list',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[u'delimiter', u'maxResults', u'pageToken', u'prefix', u'projection', u'versions'],
relative_path=u'b/{bucket}/o',
request_field='',
request_type_name=u'StorageObjectsListRequest',
response_type_name=u'Objects',
supports_download=False,
),
'Patch': base_api.ApiMethodInfo(
http_method=u'PATCH',
method_id=u'storage.objects.patch',
ordered_params=[u'bucket', u'object'],
path_params=[u'bucket', u'object'],
query_params=[u'generation', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'predefinedAcl', u'projection'],
relative_path=u'b/{bucket}/o/{object}',
request_field=u'objectResource',
request_type_name=u'StorageObjectsPatchRequest',
response_type_name=u'Object',
supports_download=False,
),
'Rewrite': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'storage.objects.rewrite',
ordered_params=[u'sourceBucket', u'sourceObject', u'destinationBucket', u'destinationObject'],
path_params=[u'destinationBucket', u'destinationObject', u'sourceBucket', u'sourceObject'],
query_params=[u'destinationPredefinedAcl', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'ifSourceGenerationMatch', u'ifSourceGenerationNotMatch', u'ifSourceMetagenerationMatch', u'ifSourceMetagenerationNotMatch', u'maxBytesRewrittenPerCall', u'projection', u'rewriteToken', u'sourceGeneration'],
relative_path=u'b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}',
request_field=u'object',
request_type_name=u'StorageObjectsRewriteRequest',
response_type_name=u'RewriteResponse',
supports_download=False,
),
'Update': base_api.ApiMethodInfo(
http_method=u'PUT',
method_id=u'storage.objects.update',
ordered_params=[u'bucket', u'object'],
path_params=[u'bucket', u'object'],
query_params=[u'generation', u'ifGenerationMatch', u'ifGenerationNotMatch', u'ifMetagenerationMatch', u'ifMetagenerationNotMatch', u'predefinedAcl', u'projection'],
relative_path=u'b/{bucket}/o/{object}',
request_field=u'objectResource',
request_type_name=u'StorageObjectsUpdateRequest',
response_type_name=u'Object',
supports_download=True,
),
'WatchAll': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'storage.objects.watchAll',
ordered_params=[u'bucket'],
path_params=[u'bucket'],
query_params=[u'delimiter', u'maxResults', u'pageToken', u'prefix', u'projection', u'versions'],
relative_path=u'b/{bucket}/o/watch',
request_field=u'channel',
request_type_name=u'StorageObjectsWatchAllRequest',
response_type_name=u'Channel',
supports_download=False,
),
}
self._upload_configs = {
'Insert': base_api.ApiUploadInfo(
accept=['*/*'],
max_size=None,
resumable_multipart=True,
resumable_path=u'/resumable/upload/storage/v1/b/{bucket}/o',
simple_multipart=True,
simple_path=u'/upload/storage/v1/b/{bucket}/o',
),
}
def Compose(self, request, global_params=None, download=None):
"""Concatenates a list of existing objects into a new object in the same bucket.
Args:
request: (StorageObjectsComposeRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
download: (Download, default: None) If present, download
data from the request via this stream.
Returns:
(Object) The response message.
"""
config = self.GetMethodConfig('Compose')
return self._RunMethod(
config, request, global_params=global_params,
download=download)
def Copy(self, request, global_params=None, download=None):
"""Copies a source object to a destination object. Optionally overrides metadata.
Args:
request: (StorageObjectsCopyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
download: (Download, default: None) If present, download
data from the request via this stream.
Returns:
(Object) The response message.
"""
config = self.GetMethodConfig('Copy')
return self._RunMethod(
config, request, global_params=global_params,
download=download)
def Delete(self, request, global_params=None):
"""Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.
Args:
request: (StorageObjectsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(StorageObjectsDeleteResponse) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
def Get(self, request, global_params=None, download=None):
"""Retrieves an object or its metadata.
Args:
request: (StorageObjectsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
download: (Download, default: None) If present, download
data from the request via this stream.
Returns:
(Object) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params,
download=download)
def Insert(self, request, global_params=None, upload=None, download=None):
"""Stores a new object and metadata.
Args:
request: (StorageObjectsInsertRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
upload: (Upload, default: None) If present, upload
this stream with the request.
download: (Download, default: None) If present, download
data from the request via this stream.
Returns:
(Object) The response message.
"""
config = self.GetMethodConfig('Insert')
upload_config = self.GetUploadConfig('Insert')
return self._RunMethod(
config, request, global_params=global_params,
upload=upload, upload_config=upload_config,
download=download)
def List(self, request, global_params=None):
"""Retrieves a list of objects matching the criteria.
Args:
request: (StorageObjectsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Objects) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
def Patch(self, request, global_params=None):
"""Updates an object's metadata. This method supports patch semantics.
Args:
request: (StorageObjectsPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Object) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
def Rewrite(self, request, global_params=None):
"""Rewrites a source object to a destination object. Optionally overrides metadata.
Args:
request: (StorageObjectsRewriteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(RewriteResponse) The response message.
"""
config = self.GetMethodConfig('Rewrite')
return self._RunMethod(
config, request, global_params=global_params)
def Update(self, request, global_params=None, download=None):
"""Updates an object's metadata.
Args:
request: (StorageObjectsUpdateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
download: (Download, default: None) If present, download
data from the request via this stream.
Returns:
(Object) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params,
download=download)
def WatchAll(self, request, global_params=None):
"""Watch for changes on all objects in a bucket.
Args:
request: (StorageObjectsWatchAllRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Channel) The response message.
"""
config = self.GetMethodConfig('WatchAll')
return self._RunMethod(
config, request, global_params=global_params)
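# --- Illustrative usage sketch; not part of the generated client. ---
# Hedged example: assumes the apitools runtime can locate default
# credentials and that 'my-bucket' names an accessible bucket (both are
# placeholders).
if __name__ == '__main__':
  client = StorageV1()
  # objects.List returns an Objects message; per the list-response schema,
  # its `items` field holds the individual Object messages.
  list_request = messages.StorageObjectsListRequest(bucket='my-bucket')
  for obj in client.objects.List(list_request).items:
    print(obj.name)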
|
|
from __future__ import absolute_import
import six
from django.core.urlresolvers import reverse
from sentry.models import Environment, Rule, RuleStatus
from sentry.testutils import APITestCase
class ProjectRuleDetailsTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
team = self.create_team()
project1 = self.create_project(teams=[team], name="foo")
self.create_project(teams=[team], name="bar")
rule = project1.rule_set.all()[0]
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project1.organization.slug,
"project_slug": project1.slug,
"rule_id": rule.id,
},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == six.text_type(rule.id)
assert response.data["environment"] is None
def test_with_environment(self):
self.login_as(user=self.user)
team = self.create_team()
project1 = self.create_project(teams=[team], name="foo")
self.create_project(teams=[team], name="bar")
rule = project1.rule_set.all()[0]
rule.update(environment_id=Environment.get_or_create(rule.project, "production").id)
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project1.organization.slug,
"project_slug": project1.slug,
"rule_id": rule.id,
},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == six.text_type(rule.id)
assert response.data["environment"] == "production"
def test_with_null_environment(self):
self.login_as(user=self.user)
team = self.create_team()
project1 = self.create_project(teams=[team], name="foo")
self.create_project(teams=[team], name="bar")
rule = project1.rule_set.all()[0]
rule.update(environment_id=None)
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project1.organization.slug,
"project_slug": project1.slug,
"rule_id": rule.id,
},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == six.text_type(rule.id)
assert response.data["environment"] is None
class UpdateProjectRuleTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
project = self.create_project()
rule = Rule.objects.create(project=project, label="foo")
conditions = [
{
"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition",
"key": "foo",
"match": "eq",
"value": "bar",
}
]
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"rule_id": rule.id,
},
)
response = self.client.put(
url,
data={
"name": "hello world",
"actionMatch": "any",
"actions": [{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}],
"conditions": conditions,
},
format="json",
)
assert response.status_code == 200, response.content
assert response.data["id"] == six.text_type(rule.id)
rule = Rule.objects.get(id=rule.id)
assert rule.label == "hello world"
assert rule.environment_id is None
assert rule.data["action_match"] == "any"
assert rule.data["actions"] == [
{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}
]
assert rule.data["conditions"] == conditions
def test_update_name(self):
self.login_as(user=self.user)
project = self.create_project()
rule = Rule.objects.create(project=project, label="foo")
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"rule_id": rule.id,
},
)
response = self.client.put(
url,
data={
"environment": None,
"actionMatch": "all",
"frequency": 30,
"name": "test",
"conditions": [
{
"interval": "1h",
"id": "sentry.rules.conditions.event_frequency.EventFrequencyCondition",
"value": 666,
"name": "An issue is seen more than 30 times in 1m",
}
],
"id": rule.id,
"actions": [
{
"id": "sentry.rules.actions.notify_event.NotifyEventAction",
"name": "Send a notification (for all legacy integrations)",
}
],
"dateCreated": "2018-04-24T23:37:21.246Z",
},
format="json",
)
assert response.status_code == 200, response.content
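        # The stale condition name submitted above ("...30 times in 1m") is
        # regenerated server-side from the submitted value/interval, as the
        # next assertion verifies.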
assert (
response.data["conditions"][0]["name"] == "An issue is seen more than 666 times in 1h"
)
def test_with_environment(self):
self.login_as(user=self.user)
project = self.create_project()
Environment.get_or_create(project, "production")
rule = Rule.objects.create(project=project, label="foo")
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"rule_id": rule.id,
},
)
response = self.client.put(
url,
data={
"name": "hello world",
"environment": "production",
"actionMatch": "any",
"actions": [{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}],
"conditions": [
{"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"}
],
},
format="json",
)
assert response.status_code == 200, response.content
assert response.data["id"] == six.text_type(rule.id)
assert response.data["environment"] == "production"
rule = Rule.objects.get(id=rule.id)
assert rule.label == "hello world"
assert rule.environment_id == Environment.get_or_create(rule.project, "production").id
def test_with_null_environment(self):
self.login_as(user=self.user)
project = self.create_project()
rule = Rule.objects.create(
project=project,
environment_id=Environment.get_or_create(project, "production").id,
label="foo",
)
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"rule_id": rule.id,
},
)
response = self.client.put(
url,
data={
"name": "hello world",
"environment": None,
"actionMatch": "any",
"actions": [{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}],
"conditions": [
{"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition"}
],
},
format="json",
)
assert response.status_code == 200, response.content
assert response.data["id"] == six.text_type(rule.id)
assert response.data["environment"] is None
rule = Rule.objects.get(id=rule.id)
assert rule.label == "hello world"
assert rule.environment_id is None
def test_invalid_rule_node_type(self):
self.login_as(user=self.user)
project = self.create_project()
rule = Rule.objects.create(project=project, label="foo")
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"rule_id": rule.id,
},
)
response = self.client.put(
url,
data={
"name": "hello world",
"actionMatch": "any",
"conditions": [{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}],
"actions": [],
},
format="json",
)
assert response.status_code == 400, response.content
def test_invalid_rule_node(self):
self.login_as(user=self.user)
project = self.create_project()
rule = Rule.objects.create(project=project, label="foo")
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"rule_id": rule.id,
},
)
response = self.client.put(
url,
data={
"name": "hello world",
"actionMatch": "any",
"conditions": [{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}],
"actions": [{"id": "foo"}],
},
format="json",
)
assert response.status_code == 400, response.content
def test_rule_form_not_valid(self):
self.login_as(user=self.user)
project = self.create_project()
rule = Rule.objects.create(project=project, label="foo")
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"rule_id": rule.id,
},
)
response = self.client.put(
url,
data={
"name": "hello world",
"actionMatch": "any",
"conditions": [{"id": "sentry.rules.conditions.tagged_event.TaggedEventCondition"}],
"actions": [],
},
format="json",
)
assert response.status_code == 400, response.content
def test_rule_form_missing_condition(self):
self.login_as(user=self.user)
project = self.create_project()
rule = Rule.objects.create(project=project, label="foo")
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"rule_id": rule.id,
},
)
response = self.client.put(
url,
data={
"name": "hello world",
"actionMatch": "any",
"conditions": [],
"actions": [{"id": "sentry.rules.actions.notify_event.NotifyEventAction"}],
},
format="json",
)
assert response.status_code == 400, response.content
def test_rule_form_missing_action(self):
self.login_as(user=self.user)
project = self.create_project()
rule = Rule.objects.create(project=project, label="foo")
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"rule_id": rule.id,
},
)
response = self.client.put(
url,
data={
"name": "hello world",
"actionMatch": "any",
"action": [],
"conditions": [
{"id": "sentry.rules.conditions.tagged_event.TaggedEventCondition"}
],
},
format="json",
)
assert response.status_code == 400, response.content
class DeleteProjectRuleTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
project = self.create_project()
rule = Rule.objects.create(project=project, label="foo")
url = reverse(
"sentry-api-0-project-rule-details",
kwargs={
"organization_slug": project.organization.slug,
"project_slug": project.slug,
"rule_id": rule.id,
},
)
response = self.client.delete(url)
assert response.status_code == 202, response.content
rule = Rule.objects.get(id=rule.id)
assert rule.status == RuleStatus.PENDING_DELETION
|
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for working with files."""
import os
import pickle
import struct
import numpy as np
import pandas as pd
from PIL import Image
# LDIF is an internal package and should be imported last.
# pylint: disable=g-bad-import-order
from ldif.util import base_util
from ldif.util import mesh_util
# pylint: enable=g-bad-import-order
glob = base_util.FS.glob
exists = base_util.FS.exists
mkdir = base_util.FS.mkdir
makedirs = base_util.FS.makedirs
cp = base_util.FS.cp
rm = base_util.FS.rm
open_file = base_util.FS.open
log = base_util.LOG
def readlines(p):
with base_util.FS.open(p, 'rt') as f:
return [x for x in f.read().split('\n') if x]
def readbin(p):
with base_util.FS.open(p, 'rb') as f:
return f.read()
def writebin(p, s):
with base_util.FS.open(p, 'wb') as f:
f.write(s)
def writetxt(p, s):
with base_util.FS.open(p, 'wt') as f:
f.write(s)
def write_np(path, arr):
with base_util.FS.open(path, 'wb') as f:
np.save(f, arr)
def read_grd(path):
"""Reads a GAPS .grd file into a (tx, grd) pair."""
with base_util.FS.open(path, 'rb') as f:
content = f.read()
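  # On-disk layout: three int32 values giving the grid resolution, then a
  # row-major 4x4 float32 world-to-grid matrix, then res[0]*res[1]*res[2]
  # float32 grid values.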
res = struct.unpack('iii', content[:4 * 3])
vcount = res[0] * res[1] * res[2]
# if res[0] != 32 or res[1] != 32 or res[2] != 32:
# raise ValueError(f'Expected a resolution of 32^3 but got '
# f'({res[0]}, {res[1]}, {res[2]}) for example {path}.')
content = content[4 * 3:]
tx = struct.unpack('f' * 16, content[:4 * 16])
tx = np.array(tx).reshape([4, 4]).astype(np.float32)
content = content[4 * 16:]
grd = struct.unpack('f' * vcount, content[:4 * vcount])
grd = np.array(grd).reshape(res).astype(np.float32)
return tx, grd
def read_sif_v1(path, verbose=False):
"""Reads a version 1 SIF .txt file and returns a numpy array."""
text = readlines(path)
header = text[0]
# header_tokens = header.split(' ')
if header != 'SIF':
raise ValueError(f'Path {path} does not contain a SIF file.')
shape_count, version, implicit_len = [int(x) for x in text[1].split(' ')]
if version != 0:
raise ValueError(f'Expected SIF version identifier 0 but got {version}.')
assert shape_count > 0
assert implicit_len > 0
assert len(text) == shape_count + 2
rep = []
for idx in range(shape_count):
row = text[idx + 2]
elements = row.split(' ')
explicits = [float(x) for x in elements[:10]]
explicits[4] = explicits[4] * explicits[4]
explicits[5] = explicits[5] * explicits[5]
explicits[6] = explicits[6] * explicits[6]
# log.info(elements[10])
if verbose:
symmetry = bool(int(elements[10]))
log.info(f"Row {idx} {'is' if symmetry else 'is not'} symmetric.")
implicits = [float(x) for x in elements[11:]]
if verbose:
has_implicits = bool(implicits)
log.info(
f"Row {idx} {'has' if has_implicits else 'does not have'} implicits.")
# TODO(kgenova) Validate the SIF embedding matches the expected symmetry.
rep.append(explicits + implicits)
rep = np.array(rep, dtype=np.float32)
if verbose:
log.info(f'Representation shape is {rep.shape}')
return rep
def read_lines(p):
with base_util.FS.open(p, 'rt') as f:
contents = f.read()
split_s = '\n'
ls = [x for x in contents.split(split_s) if x]
return ls
def read_image(p):
"""Reads in an image file that PIL.Image supports and converts to an array."""
with base_util.FS.open(p, 'rb') as f:
arr = np.array(Image.open(f), dtype=np.float32)
return arr
def read_npz(p):
if p[-4:] != '.npz':
raise ValueError(f'Expected .npz ending for file {p}.')
with base_util.FS.open(p, 'rb') as f:
arr = dict(np.load(f, allow_pickle=True))
return arr
def read_np(p):
if p[-4:] != '.npy':
raise ValueError(f'Expected .npy ending for file {p}.')
with base_util.FS.open(p, 'rb') as f:
arr = np.load(f)
return arr
def read_txt_to_np(p):
with base_util.FS.open(p, 'rt') as f:
return np.loadtxt(f)
def read_py2_pkl(p):
if p[-4:] != '.pkl':
raise ValueError(f'Expected .pkl ending for file {p}.')
with base_util.FS.open(p, 'rb') as f:
# pkl = dict(np.load(f, allow_pickle=True))
pkl = pickle.load(f, encoding='latin1')
return pkl
def write_mesh(path, mesh):
mesh_str = mesh_util.serialize(mesh)
with base_util.FS.open(path, 'wb') as f:
f.write(mesh_str)
def read_mesh(path):
with base_util.FS.open(path, 'rb') as f:
mesh_str = f.read()
return mesh_util.deserialize(mesh_str)
def read_csv(path):
with base_util.FS.open(path, 'rt') as f:
df = pd.read_csv(f)
return df
def read_normals(path_to_dir, im_count=20, leading='_depth'):
"""Loads gaps normals files from a conf2img output directory."""
# Now load the files:
out = []
for i in range(im_count):
base = f'{path_to_dir}/{str(i).zfill(6)}{leading}'
paths = [base + ext for ext in ['_nx.png', '_ny.png', '_nz.png']]
ns = [read_image(path) / 32768.0 - 1.0 for path in paths]
normals = np.stack(ns, axis=-1)
    # We need to renormalize, but background pixels are zero and we can't
    # divide by zero:
nrm = np.linalg.norm(normals, axis=-1, keepdims=True)
is_background = np.squeeze(nrm > 1.1)
normals /= np.maximum(nrm, 1e-10)
normals[is_background, :] = 0.0
out.append(normals)
return np.stack(out).astype(np.float32)
def write_points(path_ext_optional, points):
"""Writes a pointcloud in the most appropriate GAPS format.
Args:
path_ext_optional: String. The path for the file to write. A file extension
is optional. If there is a file extension, it must match the data
dimensionality.
    points: Numpy array with shape [point_count, 3/4/6]. A set of XYZ points,
      optionally with a weight/value or with normals (supplying both at once
      is not supported).
Returns:
The path to the written file, with an extension.
"""
has_df = points.shape[-1] == 4
path_no_ext, ext = os.path.splitext(path_ext_optional)
has_ext = bool(ext)
if has_df:
ptspath = path_no_ext + '.sdf'
if has_ext:
assert ext == '.sdf'
points = points.reshape([-1, 4])
with base_util.FS.open(ptspath, 'wb') as f:
# GAPS expects the floats serialized to disk to be 32-bit:
points = points.astype(np.float32)
f.write(points.tobytes())
else:
ptspath = path_no_ext + '.pts'
if has_ext:
assert ext == '.pts'
has_normals = points.shape[-1] == 6
if not has_normals:
points = points[..., :3]
normals = np.zeros_like(points)
points = np.concatenate([points, normals], axis=-1)
points = np.reshape(points, [-1, 6])
with base_util.FS.open(ptspath, 'wb') as f:
f.write(points.astype(np.float32).tobytes())
return ptspath
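# Usage sketch for write_points (hedged; the /tmp paths are placeholders):
#
#   xyz = np.random.uniform(size=[100, 3]).astype(np.float32)
#   write_points('/tmp/cloud', xyz)      # -> '/tmp/cloud.pts'
#   sdf = np.concatenate([xyz, np.zeros([100, 1], np.float32)], axis=-1)
#   write_points('/tmp/samples', sdf)    # -> '/tmp/samples.sdf'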
def write_grd(path, volume, world2grid=None):
"""Writes a GAPS .grd file containing a voxel grid and world2grid matrix."""
volume = np.squeeze(volume)
assert len(volume.shape) == 3
header = [int(s) for s in volume.shape]
if world2grid is not None:
header += [x.astype(np.float32) for x in np.reshape(world2grid, [16])]
    log.info('header: %s', repr(header))
else:
header += [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]
header = struct.pack(3*'i' + 16*'f', *header)
  content = volume.astype('f').tobytes()
with base_util.FS.open(path, 'wb') as f:
f.write(header)
f.write(content)
def write_depth_image(path, depth_image):
depth_image = (depth_image * 1000).astype(np.uint16)
array_buffer = depth_image.tobytes()
img = Image.new('I', depth_image.T.shape)
img.frombytes(array_buffer, 'raw', 'I;16')
with base_util.FS.open(path, 'wb') as f:
img.save(f, format='PNG')
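# --- Illustrative round-trip check; not part of the library. ---
# Hedged example: assumes base_util.FS wraps the local filesystem and that
# /tmp is writable (the path is a placeholder).
if __name__ == '__main__':
  demo = np.zeros([32, 32, 32], dtype=np.float32)
  write_grd('/tmp/demo.grd', demo, world2grid=np.eye(4))
  tx, grd = read_grd('/tmp/demo.grd')
  assert tx.shape == (4, 4)
  assert grd.shape == (32, 32, 32)
  log.info('grd round-trip OK')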
|
|
# Copyright 2011 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# for py2/py3 compatibility
from __future__ import print_function
import re
import tempfile
import os
import subprocess
import time
import gdb
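# V8 value-tagging constants: a low bit of 0 marks a Smi (small integer),
# low bits 01 a heap object pointer, and 11 a failure; the Smi payload
# lives in the remaining upper bits.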
kSmiTag = 0
kSmiTagSize = 1
kSmiTagMask = (1 << kSmiTagSize) - 1
kHeapObjectTag = 1
kHeapObjectTagSize = 2
kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1
kFailureTag = 3
kFailureTagSize = 2
kFailureTagMask = (1 << kFailureTagSize) - 1
kSmiShiftSize32 = 0
kSmiValueSize32 = 31
kSmiShiftBits32 = kSmiTagSize + kSmiShiftSize32
kSmiShiftSize64 = 31
kSmiValueSize64 = 32
kSmiShiftBits64 = kSmiTagSize + kSmiShiftSize64
kAllBits = 0xFFFFFFFF
kTopBit32 = 0x80000000
kTopBit64 = 0x8000000000000000
t_u32 = gdb.lookup_type('unsigned int')
t_u64 = gdb.lookup_type('unsigned long long')
def has_smi_tag(v):
return v & kSmiTagMask == kSmiTag
def has_failure_tag(v):
return v & kFailureTagMask == kFailureTag
def has_heap_object_tag(v):
return v & kHeapObjectTagMask == kHeapObjectTag
def raw_heap_object(v):
return v - kHeapObjectTag
def smi_to_int_32(v):
v = v & kAllBits
if (v & kTopBit32) == kTopBit32:
return ((v & kAllBits) >> kSmiShiftBits32) - 2147483648
else:
return (v & kAllBits) >> kSmiShiftBits32
def smi_to_int_64(v):
return (v >> kSmiShiftBits64)
def decode_v8_value(v, bitness):
base_str = 'v8[%x]' % v
if has_smi_tag(v):
if bitness == 32:
return base_str + (" SMI(%d)" % smi_to_int_32(v))
else:
return base_str + (" SMI(%d)" % smi_to_int_64(v))
elif has_failure_tag(v):
return base_str + " (failure)"
elif has_heap_object_tag(v):
return base_str + (" H(0x%x)" % raw_heap_object(v))
else:
return base_str
class V8ValuePrinter(object):
"Print a v8value."
def __init__(self, val):
self.val = val
def to_string(self):
if self.val.type.sizeof == 4:
v_u32 = self.val.cast(t_u32)
return decode_v8_value(int(v_u32), 32)
elif self.val.type.sizeof == 8:
v_u64 = self.val.cast(t_u64)
return decode_v8_value(int(v_u64), 64)
else:
return 'v8value?'
def display_hint(self):
return 'v8value'
def v8_pretty_printers(val):
lookup_tag = val.type.tag
  if lookup_tag is None:
return None
elif lookup_tag == 'v8value':
return V8ValuePrinter(val)
return None
gdb.pretty_printers.append(v8_pretty_printers)
def v8_to_int(v):
if v.type.sizeof == 4:
return int(v.cast(t_u32))
elif v.type.sizeof == 8:
return int(v.cast(t_u64))
else:
return '?'
def v8_get_value(vstring):
v = gdb.parse_and_eval(vstring)
return v8_to_int(v)
class V8PrintObject (gdb.Command):
"""Prints a v8 object."""
def __init__ (self):
super (V8PrintObject, self).__init__ ("v8print", gdb.COMMAND_DATA)
def invoke (self, arg, from_tty):
v = v8_get_value(arg)
gdb.execute('call __gdb_print_v8_object(%d)' % v)
V8PrintObject()
class FindAnywhere (gdb.Command):
"""Search memory for the given pattern."""
MAPPING_RE = re.compile(r"^\s*\[\d+\]\s+0x([0-9A-Fa-f]+)->0x([0-9A-Fa-f]+)")
LIVE_MAPPING_RE = re.compile(r"^\s+0x([0-9A-Fa-f]+)\s+0x([0-9A-Fa-f]+)")
def __init__ (self):
super (FindAnywhere, self).__init__ ("find-anywhere", gdb.COMMAND_DATA)
def find (self, startAddr, endAddr, value):
try:
result = gdb.execute(
"find 0x%s, 0x%s, %s" % (startAddr, endAddr, value),
to_string = True)
if result.find("not found") == -1:
print(result)
except:
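      # gdb's find raises an error for unreadable ranges; skip those.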
pass
def invoke (self, value, from_tty):
for l in gdb.execute("maint info sections", to_string = True).split('\n'):
m = FindAnywhere.MAPPING_RE.match(l)
if m is None:
continue
self.find(m.group(1), m.group(2), value)
for l in gdb.execute("info proc mappings", to_string = True).split('\n'):
m = FindAnywhere.LIVE_MAPPING_RE.match(l)
if m is None:
continue
self.find(m.group(1), m.group(2), value)
FindAnywhere()
class Redirect(gdb.Command):
"""Redirect the subcommand's stdout to a temporary file.
Usage: redirect subcommand...
Example:
redirect job 0x123456789
redirect x/1024xg 0x12345678
  If provided, the generated temporary file is opened directly with the
  editor named by the GDB_EXTERNAL_EDITOR environment variable.
"""
def __init__(self):
super(Redirect, self).__init__("redirect", gdb.COMMAND_USER)
def invoke(self, subcommand, from_tty):
old_stdout = gdb.execute(
"p (int)dup(1)", to_string=True).split("=")[-1].strip()
try:
time_suffix = time.strftime("%Y%m%d-%H%M%S")
fd, file = tempfile.mkstemp(suffix="-%s.gdbout" % time_suffix)
try:
        # Temporarily redirect stdout to the created tmp file for the
        # duration of the subcommand.
gdb.execute('p (int)dup2(open("%s", 1), 1)' % file, to_string=True)
        # Execute the subcommand non-interactively.
result = gdb.execute(subcommand, from_tty=False, to_string=True)
# Write returned string results to the temporary file as well.
with open(file, 'a') as f:
f.write(result)
# Open generated result.
if 'GDB_EXTERNAL_EDITOR' in os.environ:
open_cmd = os.environ['GDB_EXTERNAL_EDITOR']
print("Opening '%s' with %s" % (file, open_cmd))
subprocess.call([open_cmd, file])
else:
print("Open output:\n %s '%s'" % (os.environ['EDITOR'], file))
finally:
# Restore original stdout.
gdb.execute("p (int)dup2(%s, 1)" % old_stdout, to_string=True)
# Close the temporary file.
os.close(fd)
finally:
# Close the originally duplicated stdout descriptor.
gdb.execute("p (int)close(%s)" % old_stdout, to_string=True)
Redirect()
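# Hedged usage sketch: once this script has been source'd into a gdb session
# attached to a V8 embedder, the commands defined above become available:
#   (gdb) v8print <tagged-word>
#   (gdb) find-anywhere 0xdeadbeef
#   (gdb) redirect job 0x123456789
# Small sanity check for the Smi decoding helpers; call manually from gdb's
# python prompt if desired.
def _smi_selftest():
  assert smi_to_int_32(2) == 1             # 1 << kSmiShiftBits32
  assert smi_to_int_32(0xFFFFFFFE) == -1   # -1 encoded as a 32-bit Smi
  assert smi_to_int_64(1 << 32) == 1       # 1 << kSmiShiftBits64
  print('Smi helpers OK')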
|
|
# Copyright 2008-2010 Neil Martinsen-Burrell
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Defines a file-derived class to read/write Fortran unformatted files.
The assumption is that a Fortran unformatted file is being written by
the Fortran runtime as a sequence of records. Each record consists of
an integer (of the default size [usually 32 or 64 bits]) giving the
length of the following data in bytes, then the data itself, then the
same integer as before.
Examples
--------
To use the default endian and precision settings, one can just do::
>>> f = FortranFile('filename')
>>> x = f.readReals()
One can read arrays with varying precisions::
>>> f = FortranFile('filename')
>>> x = f.readInts('h')
>>> y = f.readInts('q')
>>> z = f.readReals('f')
Where the format codes are those used by Python's struct module.
One can change the default endian-ness and header precision::
>>> f = FortranFile('filename', endian='>', header_prec='l')
for a file with big-endian data whose record headers are long
integers.
"""
__docformat__ = "restructuredtext en"
import numpy
class FortranFile(file):
"""File with methods for dealing with fortran unformatted data files"""
def _get_header_length(self):
return numpy.dtype(self._header_prec).itemsize
_header_length = property(fget=_get_header_length)
def _set_endian(self,c):
"""Set endian to big (c='>') or little (c='<') or native (c='=')
:Parameters:
`c` : string
The endian-ness to use when reading from this file.
"""
if c in '<>@=':
if c == '@':
c = '='
self._endian = c
else:
raise ValueError('Cannot set endian-ness')
def _get_endian(self):
return self._endian
ENDIAN = property(fset=_set_endian,
fget=_get_endian,
doc="Possible endian values are '<', '>', '@', '='"
)
def _set_header_prec(self, prec):
if prec in 'hilq':
self._header_prec = prec
else:
raise ValueError('Cannot set header precision')
def _get_header_prec(self):
return self._header_prec
HEADER_PREC = property(fset=_set_header_prec,
fget=_get_header_prec,
doc="Possible header precisions are 'h', 'i', 'l', 'q'"
)
def __init__(self, fname, endian='@', header_prec='i', *args, **kwargs):
"""Open a Fortran unformatted file for writing.
Parameters
----------
endian : character, optional
Specify the endian-ness of the file. Possible values are
'>', '<', '@' and '='. See the documentation of Python's
struct module for their meanings. The default is '@' (native
byte order).
header_prec : character, optional
Specify the precision used for the record headers. Possible
values are 'h', 'i', 'l' and 'q' with their meanings from
Python's struct module. The default is 'i' (the system's
default integer).
"""
file.__init__(self, fname, *args, **kwargs)
self.ENDIAN = endian
self.HEADER_PREC = header_prec
def _read_exactly(self, num_bytes):
"""Read in exactly num_bytes, raising an error if it can't be done."""
data = ''
while True:
l = len(data)
if l == num_bytes:
return data
else:
read_data = self.read(num_bytes - l)
if read_data == '':
raise IOError('Could not read enough data.'
' Wanted %d bytes, got %d.' % (num_bytes, l))
data += read_data
def _read_check(self):
return numpy.fromstring(self._read_exactly(self._header_length),
dtype=self.ENDIAN+self.HEADER_PREC
)[0]
def _write_check(self, number_of_bytes):
"""Write the header for the given number of bytes"""
self.write(numpy.array(number_of_bytes,
dtype=self.ENDIAN+self.HEADER_PREC,).tostring()
)
def readRecord(self):
"""Read a single fortran record"""
l = self._read_check()
data_str = self._read_exactly(l)
check_size = self._read_check()
if check_size != l:
raise IOError('Error reading record from data file')
return data_str
def writeRecord(self,s):
"""Write a record with the given bytes.
Parameters
----------
s : the string to write
"""
length_bytes = len(s)
self._write_check(length_bytes)
self.write(s)
self._write_check(length_bytes)
def readString(self):
"""Read a string."""
return self.readRecord()
def writeString(self,s):
"""Write a string
Parameters
----------
s : the string to write
"""
self.writeRecord(s)
_real_precisions = 'df'
def readReals(self, prec='f'):
"""Read in an array of real numbers.
Parameters
----------
prec : character, optional
Specify the precision of the array using character codes from
Python's struct module. Possible values are 'd' and 'f'.
"""
_numpy_precisions = {'d': numpy.float64,
'f': numpy.float32
}
if prec not in self._real_precisions:
raise ValueError('Not an appropriate precision')
data_str = self.readRecord()
return numpy.fromstring(data_str, dtype=self.ENDIAN+prec)
def writeReals(self, reals, prec='f'):
"""Write an array of floats in given precision
Parameters
----------
reals : array
Data to write
prec : string
Character code for the precision to use in writing
"""
if prec not in self._real_precisions:
raise ValueError('Not an appropriate precision')
nums = numpy.array(reals, dtype=self.ENDIAN+prec)
self.writeRecord(nums.tostring())
_int_precisions = 'hilq'
def readInts(self, prec='i'):
"""Read an array of integers.
Parameters
----------
prec : character, optional
Specify the precision of the data to be read using
character codes from Python's struct module. Possible
values are 'h', 'i', 'l' and 'q'
"""
if prec not in self._int_precisions:
raise ValueError('Not an appropriate precision')
data_str = self.readRecord()
return numpy.fromstring(data_str, dtype=self.ENDIAN+prec)
def writeInts(self, ints, prec='i'):
"""Write an array of integers in given precision
Parameters
----------
ints : array
Data to write
prec : string
Character code for the precision to use in writing
"""
if prec not in self._int_precisions:
raise ValueError('Not an appropriate precision')
nums = numpy.array(ints, dtype=self.ENDIAN+prec)
self.writeRecord(nums.tostring())
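# --- Hedged usage sketch (added example; not part of the original module) ---
# A minimal write/read round-trip for the record format described in the
# module docstring. FortranFile subclasses the built-in ``file`` type, so this
# sketch assumes Python 2; the file name 'example.unf' is a placeholder.
if __name__ == '__main__':
    import os
    f = FortranFile('example.unf', endian='<', header_prec='i', mode='wb')
    f.writeReals([1.0, 2.0, 3.0], prec='d')
    f.close()
    f = FortranFile('example.unf', endian='<', header_prec='i', mode='rb')
    print(f.readReals(prec='d'))  # -> something like [ 1.  2.  3.]
    f.close()
    os.remove('example.unf')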
|
|
"""IEM Tracker Related Stuff."""
import datetime
import smtplib
from email.mime.text import MIMEText
try:
from zoneinfo import ZoneInfo
except ImportError:
from backports.zoneinfo import ZoneInfo
from pyiem.util import get_dbconn
class TrackerEngine:
"""A processing engine of tracking offline/online events"""
def __init__(self, icursor, pcursor, maxactions=0):
"""Constructor of TrackerEngine object
We need to be provided with psycopg2 cursors to both the `iem` database
and the `portfolio` database as we have business logic in both places
Args:
icursor (cursor): psycopg2 cursor to the iem database
pcursor (cursor): psycopg2 cursor to the portfolio database
maxactions (int, optional): threshold for how many email actions we
allow, so that we do not spam our users. 0 implies no limit
"""
self.icursor = icursor
self.pcursor = pcursor
self.maxactions = maxactions
self.action_count = 0
self.emails = {}
def send_emails(self, really_send=True):
"""Send out those SPAM emails!"""
# Don't do anything if we have exceeded maxactions
if self.action_count >= self.maxactions and self.maxactions > 0:
return
if not really_send:
return
s = smtplib.SMTP()
s.connect()
for email in self.emails:
msg = MIMEText(self.emails[email]["body"])
msg["From"] = "akrherz@iastate.edu"
msg["Subject"] = self.emails[email]["subject"]
s.sendmail(msg["From"], email, msg.as_string())
s.close()
def offline_logic(self, sid, ob, pnetwork, nt):
"""offline logic
Args:
sid (str): site identifier
ob (dict): observation dictionary
pnetwork (str): Portfolio name of this network
nt (dict): provider of station metadata
"""
# Get a listing of OPEN tickets
open_tickets = ""
self.pcursor.execute(
"SELECT id, entered, subject from tt_base WHERE portfolio = %s "
"and s_mid = %s and status != 'CLOSED' ORDER by id DESC",
(pnetwork, sid),
)
for row in self.pcursor:
open_tickets += (" %-6s %16s %s\n" "") % (
row[0],
row[1].strftime("%Y-%m-%d %I %p"),
row[2],
)
# Get a listing of past 4 closed tickets
closed_tickets = ""
self.pcursor.execute(
"SELECT id, entered, subject from tt_base WHERE portfolio = %s "
"and s_mid = %s and status = 'CLOSED' ORDER by id DESC LIMIT 5",
(pnetwork, sid),
)
for row in self.pcursor:
closed_tickets += (" %-6s %16s %s\n" "") % (
row[0],
row[1].strftime("%Y-%m-%d %I %p"),
row[2],
)
if closed_tickets == "":
closed_tickets = " --None-- "
if open_tickets == "":
open_tickets = " --None-- "
# Create an entry in tt_base
self.pcursor.execute(
"INSERT into tt_base (portfolio, s_mid, subject, "
"status, author) VALUES (%s, %s, %s, %s, %s) RETURNING id",
(pnetwork, sid, "Site Offline", "OPEN", "mesonet"),
)
trackerid = self.pcursor.fetchone()[0]
# Create a tt_log entry
lts = ob["valid"].astimezone(ZoneInfo(nt.sts[sid]["tzname"]))
msg = "Site Offline since %s" % (lts.strftime("%d %b %Y %H:%M %Z"),)
self.pcursor.execute(
"INSERT into tt_log (portfolio, s_mid, author, status_c, "
"comments, tt_id) VALUES (%s, %s, %s, %s, %s, %s)",
(pnetwork, sid, "mesonet", "OKAY", msg, trackerid),
)
# Update iemaccess
self.icursor.execute(
"INSERT into offline(station, network, "
"valid, trackerid) VALUES (%s, %s, %s, %s)",
(sid, nt.sts[sid]["network"], ob["valid"], trackerid),
)
mailstr = f"""
----------------------
| IEM TRACKER REPORT | New Ticket Generated: # {trackerid}
================================================================
ID : {sid} [IEM Network: {nt.sts[sid]["network"]}]
Station Name : {nt.sts[sid]["name"]}
Status Change : [OFFLINE] Site is NOT reporting to the IEM
Last Observation : {lts.strftime("%d %b %Y %I:%M %p %Z")}
Other Currently 'Open' Tickets for this Site:
# OPENED_ON TICKET TITLE
{open_tickets}
Most Recently 'Closed' Trouble Tickets for this Site:
# CLOSED_ON TICKET TITLE
{closed_tickets}
================================================================
"""
# Get contacts for site
self.pcursor.execute(
"SELECT distinct email from iem_site_contacts "
"WHERE s_mid = %s and email is not NULL",
(sid,),
)
for row in self.pcursor:
email = row[0].lower()
if email not in self.emails:
subject = f"[IEM] {nt.sts[sid]['name']} Offline"
self.emails[email] = {"subject": subject, "body": mailstr}
else:
subject = "[IEM] Multiple Sites"
self.emails[email]["subject"] = subject
self.emails[email]["body"] += "\n=========\n"
self.emails[email]["body"] += mailstr
def online_logic(self, sid, offline, ob, pnetwork, nt):
"""online logic
Args:
sid (str): site identifier
offline (dict): dictionary of offline metadata
ob (dict): observation dictionary
pnetwork (str): Portfolio name of this network
nt (dict): provider of station metadata
"""
trackerid = offline[sid]["trackerid"]
# Create Log Entry
cmt = ("Site Back Online at: %s" "") % (
ob["valid"].strftime("%Y-%m-%d %H:%M:%S"),
)
self.pcursor.execute(
"INSERT into tt_log (portfolio, s_mid, author, status_c, "
"comments, tt_id) VALUES (%s, %s, %s, %s, %s, %s)",
(pnetwork, sid, "mesonet", "CLOSED", cmt, trackerid),
)
# Update tt_base
self.pcursor.execute(
"UPDATE tt_base SET last = now(), status = 'CLOSED' WHERE id = %s",
(trackerid,),
)
# Update iemaccess
self.icursor.execute(
"DELETE from offline where station = %s and network = %s",
(sid, nt.sts[sid]["network"]),
)
ltz = ZoneInfo(nt.sts[sid]["tzname"])
lts = ob["valid"].astimezone(ltz)
delta = ob["valid"] - offline[sid]["valid"]
days = delta.days
hours = delta.seconds // 3600
minutes = (delta.seconds % 3600) // 60
duration = "%.0f days %.0f hours %.0f minutes" % (days, hours, minutes)
mailstr = f"""
---------------------------------
| *** IEM TRACKER REPORT *** |
------------------------------------------------------------
ID : {sid} [IEM Network: {nt.sts[sid]["network"]}]
Station Name : {nt.sts[sid]["name"]}
Status Change : [ONLINE] Site is reporting to the IEM
Trouble Ticket# : {trackerid}
Last Observation : {lts.strftime("%d %b %Y %I:%M %p %Z")}
Outage Duration : {duration}
IEM Tracker Action: This trouble ticket has been marked
CLOSED pending any further information.
------------------------------------------------------------
* If you have any information pertaining to this outage,
please directly respond to this email.
* Questions about this alert? Email: akrherz@iastate.edu
* Thanks!!!
"""
# Get contacts for site
self.pcursor.execute(
"SELECT distinct email from iem_site_contacts WHERE "
"s_mid = %s and email is not NULL",
(sid,),
)
for row in self.pcursor:
email = row[0].lower()
if email not in self.emails:
subject = ("[IEM] %s Online" "") % (nt.sts[sid]["name"],)
self.emails[email] = {"subject": subject, "body": mailstr}
else:
subject = "[IEM] Multiple Sites"
self.emails[email]["subject"] = subject
self.emails[email]["body"] += "\n=========\n"
self.emails[email]["body"] += mailstr
def process_network(self, obs, pnetwork, nt, threshold):
"""Process a list of dicts representing the network's observations
Args:
obs (list): list of dicts representing the network obs
pnetwork (str): the identifier of this network used in Portfolio DB
nt (NetworkTable): dictionary provider of station metadata
threshold (datetime): timezone-aware datetime instance representing
the earliest observation time for a site to be considered 'online'
"""
network = nt.sts[list(nt.sts.keys())[0]]["network"]
self.icursor.execute(
"SELECT station, trackerid, valid from offline WHERE network = %s",
(network,),
)
offline = {}
for row in self.icursor:
offline[row[0]] = {"trackerid": row[1], "valid": row[2]}
for sid in obs:
ob = obs[sid]
if ob["valid"] > threshold:
# print '%s is online, offlinekeys: %s' % (sid,
# str(offline.keys()))
if sid in offline:
self.action_count += 1
self.online_logic(sid, offline, ob, pnetwork, nt)
continue
if sid in offline:
# NOOP
# print '%s is offline and known offline' % (sid, )
continue
# We must act!
# print '%s is offline' % (sid, )
self.action_count += 1
self.offline_logic(sid, ob, pnetwork, nt)
def loadqc(cursor=None, date=None):
"""Load the current IEM Tracker QC'd variables
Args:
cursor (cursor,optional): Optionally provided database cursor
date (date, optional): date for which tickets should be considered valid; defaults to today
"""
if date is None:
date = datetime.date.today()
qdict = {}
if cursor is None:
portfolio = get_dbconn("portfolio", user="nobody")
cursor = portfolio.cursor()
cursor.execute(
"select s_mid, sensor, status from tt_base WHERE sensor is not null "
"and date(entered) <= %s and (status != 'CLOSED' or closed > %s) "
"and s_mid is not null",
(date, date),
)
for row in cursor:
sid = row[0]
if row[0] not in qdict:
qdict[sid] = {}
for vname in row[1].split(","):
qdict[sid][vname.strip()] = True
return qdict
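# --- Hedged usage sketch (added example; not part of the original module) ---
# Illustrates how the engine is typically driven. The network name "IA_ASOS",
# the portfolio identifier "iaasos", and the `pyiem.network.Table` import are
# illustrative assumptions, not fixtures from this module.
if __name__ == "__main__":
    from pyiem.network import Table as NetworkTable  # assumed helper

    IEM = get_dbconn("iem")
    PORTFOLIO = get_dbconn("portfolio")
    nt = NetworkTable("IA_ASOS")
    engine = TrackerEngine(IEM.cursor(), PORTFOLIO.cursor(), maxactions=10)
    # Sites reporting within the last three hours are considered 'online'.
    threshold = datetime.datetime.now(
        datetime.timezone.utc
    ) - datetime.timedelta(hours=3)
    obs = {}  # normally filled with the network's current observations
    engine.process_network(obs, "iaasos", nt, threshold)
    engine.send_emails(really_send=False)
    print(loadqc())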
|
|
##############################################################################
# Copyright 2016-2018 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""
Contains the core pyQuil objects that correspond to Quil instructions.
"""
import sys
import collections
import numpy as np
from numbers import Complex
from typing import (
Any,
Callable,
ClassVar,
Container,
Dict,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
from warnings import warn
from pyquil.quilatom import (
Expression,
ExpressionDesignator,
Label,
LabelPlaceholder,
MemoryReference,
Parameter,
ParameterDesignator,
Frame,
Waveform,
Qubit,
QubitDesignator,
QubitPlaceholder,
FormalArgument,
_contained_parameters,
format_parameter,
unpack_qubit,
_complex_str,
)
if TYPE_CHECKING:
from pyquil.paulis import PauliSum
if sys.version_info < (3, 7):
from pyquil.external.dataclasses import dataclass
else:
from dataclasses import dataclass
class AbstractInstruction(object):
"""
Abstract class for representing single instructions.
"""
def out(self) -> str:
pass
def __str__(self) -> str:
return self.out()
def __eq__(self, other: object) -> bool:
return isinstance(other, self.__class__) and self.out() == other.out()
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
def __hash__(self) -> int:
return hash(self.out())
RESERVED_WORDS: Container[str] = [
"DEFGATE",
"DEFCIRCUIT",
"MEASURE",
"LABEL",
"HALT",
"JUMP",
"JUMP-WHEN",
"JUMP-UNLESS",
"RESET",
"WAIT",
"NOP",
"INCLUDE",
"PRAGMA",
"DECLARE",
"NEG",
"NOT",
"AND",
"IOR",
"XOR",
"MOVE",
"EXCHANGE",
"CONVERT",
"ADD",
"SUB",
"MUL",
"DIV",
"EQ",
"GT",
"GE",
"LT",
"LE",
"LOAD",
"STORE",
# Quil-T additions:
"DEFCAL",
"DEFFRAME",
"DEFWAVEFORM",
"PULSE",
"CAPTURE",
"RAW-CAPTURE",
"DELAY",
"FENCE",
"SET-FREQUENCY",
"SET-PHASE",
"SHIFT-PHASE",
"SWAP-PHASES",
"SET-SCALE",
"SAMPLE-RATE",
"INITIAL-FREQUENCY",
# to be removed:
"TRUE",
"FALSE",
"OR",
]
def _extract_qubit_index(
qubit: Union[Qubit, QubitPlaceholder, FormalArgument], index: bool = True
) -> QubitDesignator:
if index and isinstance(qubit, Qubit):
return qubit.index
return qubit
def _get_frame_qubits(frame: Frame, index: bool = True) -> Set[QubitDesignator]:
for q in frame.qubits:
if isinstance(q, FormalArgument):
raise ValueError("Attempted to extract FormalArgument where a Qubit is expected.")
return {_extract_qubit_index(q, index) for q in cast(List[Qubit], frame.qubits)}
def _format_qubit_str(qubit: Union[Qubit, QubitPlaceholder, FormalArgument]) -> str:
if isinstance(qubit, QubitPlaceholder):
return "{%s}" % str(qubit)
return str(qubit)
def _format_qubits_str(qubits: Iterable[Union[Qubit, QubitPlaceholder, FormalArgument]]) -> str:
return " ".join([_format_qubit_str(qubit) for qubit in qubits])
def _format_qubits_out(qubits: Iterable[Union[Qubit, QubitPlaceholder, FormalArgument]]) -> str:
return " ".join([qubit.out() for qubit in qubits])
def _format_params(params: Iterable[ParameterDesignator]) -> str:
return "(" + ",".join(format_parameter(param) for param in params) + ")"
def _join_strings(*args: str) -> str:
return " ".join(map(str, args))
class Gate(AbstractInstruction):
"""
This is the pyQuil object for a quantum gate instruction.
"""
def __init__(
self,
name: str,
params: Iterable[ParameterDesignator],
qubits: Iterable[Union[Qubit, QubitPlaceholder, FormalArgument]],
):
if not isinstance(name, str):
raise TypeError("Gate name must be a string")
if name in RESERVED_WORDS:
raise ValueError(
"Cannot use {} for a gate name since it's a reserved word".format(name)
)
if not isinstance(params, collections.abc.Iterable):
raise TypeError("Gate params must be an Iterable")
if not isinstance(qubits, collections.abc.Iterable):
raise TypeError("Gate arguments must be an Iterable")
for qubit in qubits:
if not isinstance(qubit, (Qubit, QubitPlaceholder, FormalArgument)):
raise TypeError("Gate arguments must all be Qubits")
qubits_list = list(qubits)
if len(qubits_list) == 0:
raise TypeError("Gate arguments must be non-empty")
self.name = name
self.params = list(params)
self.qubits = qubits_list
self.modifiers: List[str] = []
def get_qubits(self, indices: bool = True) -> Set[QubitDesignator]:
return {_extract_qubit_index(q, indices) for q in self.qubits}
def out(self) -> str:
if self.params:
return "{}{}{} {}".format(
" ".join(self.modifiers) + " " if self.modifiers else "",
self.name,
_format_params(self.params),
_format_qubits_out(self.qubits),
)
else:
return "{}{} {}".format(
" ".join(self.modifiers) + " " if self.modifiers else "",
self.name,
_format_qubits_out(self.qubits),
)
def controlled(
self, control_qubit: Union[QubitDesignator, Sequence[QubitDesignator]]
) -> "Gate":
"""
Add the CONTROLLED modifier to the gate with the given control qubit or Sequence of control
qubits.
"""
control_qubit = control_qubit if isinstance(control_qubit, Sequence) else [control_qubit]
for qubit in control_qubit:
qubit = unpack_qubit(qubit)
self.modifiers.insert(0, "CONTROLLED")
self.qubits.insert(0, qubit)
return self
def forked(self, fork_qubit: QubitDesignator, alt_params: List[ParameterDesignator]) -> "Gate":
"""
Add the FORKED modifier to the gate with the given fork qubit and given additional
parameters.
"""
if not isinstance(alt_params, list):
raise TypeError("Gate params must be a list")
if len(self.params) != len(alt_params):
raise ValueError(
"Expected {} parameters but received {}".format(len(self.params), len(alt_params))
)
fork_qubit = unpack_qubit(fork_qubit)
self.modifiers.insert(0, "FORKED")
self.qubits.insert(0, fork_qubit)
self.params += alt_params
return self
def dagger(self) -> "Gate":
"""
Add the DAGGER modifier to the gate.
"""
self.modifiers.insert(0, "DAGGER")
return self
def __repr__(self) -> str:
return "<Gate " + str(self) + ">"
def __str__(self) -> str:
if self.params:
return "{}{}{} {}".format(
" ".join(self.modifiers) + " " if self.modifiers else "",
self.name,
_format_params(self.params),
_format_qubits_str(self.qubits),
)
else:
return "{}{} {}".format(
" ".join(self.modifiers) + " " if self.modifiers else "",
self.name,
_format_qubits_str(self.qubits),
)
def _strip_modifiers(gate: Gate, limit: Optional[int] = None) -> Gate:
"""
Remove modifiers from :py:class:`Gate`.
This function removes up to ``limit`` gate modifiers from the given gate,
starting from the leftmost gate modifier.
:param gate: A gate.
:param limit: An upper bound on how many modifiers to remove.
"""
if limit is None:
limit = len(gate.modifiers)
# We walk the modifiers from left-to-right, tracking indices to identify
# qubits/params introduced by gate modifiers.
#
# Invariants:
# - gate.qubits[0:qubit_index] are qubits introduced by gate modifiers
# - gate.params[param_index:] are parameters introduced by gate modifiers
qubit_index = 0
param_index = len(gate.params)
for m in gate.modifiers[:limit]:
if m == "CONTROLLED":
qubit_index += 1
elif m == "FORKED":
if param_index % 2 != 0:
raise ValueError("FORKED gate has an invalid number of parameters.")
param_index //= 2
qubit_index += 1
elif m == "DAGGER":
pass
else:
raise TypeError("Unsupported gate modifier {}".format(m))
stripped = Gate(gate.name, gate.params[:param_index], gate.qubits[qubit_index:])
stripped.modifiers = gate.modifiers[limit:]
return stripped
class Measurement(AbstractInstruction):
"""
This is the pyQuil object for a Quil measurement instruction.
"""
def __init__(
self,
qubit: Union[Qubit, QubitPlaceholder, FormalArgument],
classical_reg: Optional[MemoryReference],
):
if not isinstance(qubit, (Qubit, QubitPlaceholder, FormalArgument)):
raise TypeError("qubit should be a Qubit")
if classical_reg is not None and not isinstance(classical_reg, MemoryReference):
raise TypeError("classical_reg should be None or a MemoryReference instance")
self.qubit = qubit
self.classical_reg = classical_reg
def out(self) -> str:
if self.classical_reg:
return "MEASURE {} {}".format(self.qubit.out(), self.classical_reg.out())
else:
return "MEASURE {}".format(self.qubit.out())
def __str__(self) -> str:
if self.classical_reg:
return "MEASURE {} {}".format(_format_qubit_str(self.qubit), str(self.classical_reg))
else:
return "MEASURE {}".format(_format_qubit_str(self.qubit))
def get_qubits(self, indices: bool = True) -> Set[QubitDesignator]:
return {_extract_qubit_index(self.qubit, indices)}
class ResetQubit(AbstractInstruction):
"""
This is the pyQuil object for a Quil targeted reset instruction.
"""
def __init__(self, qubit: Union[Qubit, QubitPlaceholder, FormalArgument]):
if not isinstance(qubit, (Qubit, QubitPlaceholder, FormalArgument)):
raise TypeError("qubit should be a Qubit")
self.qubit = qubit
def out(self) -> str:
return "RESET {}".format(self.qubit.out())
def __str__(self) -> str:
return "RESET {}".format(_format_qubit_str(self.qubit))
def get_qubits(self, indices: bool = True) -> Set[QubitDesignator]:
return {_extract_qubit_index(self.qubit, indices)}
class DefGate(AbstractInstruction):
"""
A DEFGATE directive.
:param name: The name of the newly defined gate.
:param matrix: The matrix defining this gate.
:param parameters: list of parameters that are used in this gate
"""
def __init__(
self,
name: str,
matrix: Union[List[List[Any]], np.ndarray, np.matrix],
parameters: Optional[List[Parameter]] = None,
):
if not isinstance(name, str):
raise TypeError("Gate name must be a string")
if name in RESERVED_WORDS:
raise ValueError(
"Cannot use {} for a gate name since it's a reserved word".format(name)
)
if isinstance(matrix, list):
rows = len(matrix)
if not all([len(row) == rows for row in matrix]):
raise ValueError("Matrix must be square.")
elif isinstance(matrix, (np.ndarray, np.matrix)):
rows, cols = matrix.shape
if rows != cols:
raise ValueError("Matrix must be square.")
else:
raise TypeError("Matrix argument must be a list or NumPy array/matrix")
if 0 != rows & (rows - 1):
raise ValueError("Dimension of matrix must be a power of 2, got {0}".format(rows))
self.name = name
self.matrix = np.asarray(matrix)
if parameters:
if not isinstance(parameters, list):
raise TypeError("Paramaters must be a list")
expressions = [
elem for row in self.matrix for elem in row if isinstance(elem, Expression)
]
used_params = {param for exp in expressions for param in _contained_parameters(exp)}
if set(parameters) != used_params:
raise ValueError(
"Parameters list does not match parameters actually used in gate matrix:\n"
"Parameters in argument: {}, Parameters in matrix: {}".format(
parameters, used_params
)
)
else:
is_unitary = np.allclose(np.eye(rows), self.matrix.dot(self.matrix.T.conj()))
if not is_unitary:
raise ValueError("Matrix must be unitary.")
self.parameters = parameters
def out(self) -> str:
"""
Prints a readable Quil string representation of this gate.
:returns: String representation of a gate
"""
def format_matrix_element(element: Union[ExpressionDesignator, str]) -> str:
"""
Formats a parameterized matrix element.
:param element: The parameterized element to format.
"""
if isinstance(element, (int, float, complex, np.int_)):
return format_parameter(element)
elif isinstance(element, str):
return element
elif isinstance(element, Expression):
return str(element)
else:
raise TypeError("Invalid matrix element: %r" % element)
if self.parameters:
result = "DEFGATE {}({}):\n".format(self.name, ", ".join(map(str, self.parameters)))
else:
result = "DEFGATE {}:\n".format(self.name)
for row in self.matrix:
result += " "
fcols = [format_matrix_element(col) for col in row]
result += ", ".join(fcols)
result += "\n"
return result
def get_constructor(self) -> Union[Callable[..., Gate], Callable[..., Callable[..., Gate]]]:
"""
:returns: A function that constructs this gate on variable qubit indices. E.g.
`mygate.get_constructor()(1)` applies the gate to qubit 1.
"""
if self.parameters:
return lambda *params: lambda *qubits: Gate(
name=self.name, params=list(params), qubits=list(map(unpack_qubit, qubits))
)
else:
return lambda *qubits: Gate(
name=self.name, params=[], qubits=list(map(unpack_qubit, qubits))
)
def num_args(self) -> int:
"""
:return: The number of qubit arguments the gate takes.
"""
rows = len(self.matrix)
return int(np.log2(rows))
class DefPermutationGate(DefGate):
def __init__(self, name: str, permutation: Union[List[Union[int, np.int_]], np.ndarray]):
if not isinstance(name, str):
raise TypeError("Gate name must be a string")
if name in RESERVED_WORDS:
raise ValueError(f"Cannot use {name} for a gate name since it's a reserved word")
if not isinstance(permutation, (list, np.ndarray)):
raise ValueError(
f"Permutation must be a list or NumPy array, got value of type {type(permutation)}"
)
permutation = np.asarray(permutation)
ndim = permutation.ndim
if 1 != ndim:
raise ValueError(f"Permutation must have dimension 1, got {permutation.ndim}")
elts = permutation.shape[0]
if 0 != elts & (elts - 1):
raise ValueError(f"Dimension of permutation must be a power of 2, got {elts}")
self.name = name
self.permutation = permutation
self.parameters = None
def out(self) -> str:
body = ", ".join([str(p) for p in self.permutation])
return f"DEFGATE {self.name} AS PERMUTATION:\n {body}"
def num_args(self) -> int:
"""
:return: The number of qubit arguments the gate takes.
"""
return int(np.log2(len(self.permutation)))
class DefGateByPaulis(DefGate):
"""
Records a gate definition as the exponentiation of a PauliSum.
"""
def __init__(
self,
gate_name: str,
parameters: List[Parameter],
arguments: List[QubitDesignator],
body: "PauliSum",
):
if not isinstance(gate_name, str):
raise TypeError("Gate name must be a string")
if gate_name in RESERVED_WORDS:
raise ValueError(f"Cannot use {gate_name} for a gate name since it's a reserved word")
self.name = gate_name
self.parameters = parameters
self.arguments = arguments
self.body = body
def out(self) -> str:
out = f"DEFGATE {self.name}"
if self.parameters is not None:
out += f"({', '.join(map(str, self.parameters))}) "
out += f"{' '.join(map(str, self.arguments))} AS PAULI-SUM:\n"
for term in self.body:
args = term._ops.keys()
word = term._ops.values()
out += f" {''.join(word)}({term.coefficient}) " + " ".join(map(str, args)) + "\n"
return out
def num_args(self) -> int:
return len(self.arguments)
class JumpTarget(AbstractInstruction):
"""
Representation of a target that can be jumped to.
"""
def __init__(self, label: Union[Label, LabelPlaceholder]):
if not isinstance(label, (Label, LabelPlaceholder)):
raise TypeError("label must be a Label")
self.label = label
def __repr__(self) -> str:
return "<JumpTarget {0}>".format(str(self.label))
def out(self) -> str:
return "LABEL {0}".format(str(self.label))
class JumpConditional(AbstractInstruction):
"""
Abstract representation of a conditional jump instruction.
"""
op: ClassVar[str]
def __init__(self, target: Union[Label, LabelPlaceholder], condition: MemoryReference):
if not isinstance(target, (Label, LabelPlaceholder)):
raise TypeError("target should be a Label")
if not isinstance(condition, MemoryReference):
raise TypeError("condition should be an MemoryReference")
self.target = target
self.condition = condition
def out(self) -> str:
return "%s %s %s" % (self.op, self.target, self.condition)
class JumpWhen(JumpConditional):
"""
The JUMP-WHEN instruction.
"""
op = "JUMP-WHEN"
class JumpUnless(JumpConditional):
"""
The JUMP-UNLESS instruction.
"""
op = "JUMP-UNLESS"
class SimpleInstruction(AbstractInstruction):
"""
Abstract class for simple instructions with no arguments.
"""
op: ClassVar[str]
def out(self) -> str:
return self.op
class Halt(SimpleInstruction):
"""
The HALT instruction.
"""
op = "HALT"
class Wait(SimpleInstruction):
"""
The WAIT instruction.
"""
op = "WAIT"
class Reset(SimpleInstruction):
"""
The RESET instruction.
"""
op = "RESET"
class Nop(SimpleInstruction):
"""
The NOP instruction.
"""
op = "NOP"
class UnaryClassicalInstruction(AbstractInstruction):
"""
The abstract class for unary classical instructions.
"""
op: ClassVar[str]
def __init__(self, target: MemoryReference):
if not isinstance(target, MemoryReference):
raise TypeError("target operand should be an MemoryReference")
self.target = target
def out(self) -> str:
return "%s %s" % (self.op, self.target)
class ClassicalNeg(UnaryClassicalInstruction):
"""
The NEG instruction.
"""
op = "NEG"
class ClassicalNot(UnaryClassicalInstruction):
"""
The NOT instruction.
"""
op = "NOT"
class LogicalBinaryOp(AbstractInstruction):
"""
The abstract class for binary logical classical instructions.
"""
op: ClassVar[str]
def __init__(self, left: MemoryReference, right: Union[MemoryReference, int]):
if not isinstance(left, MemoryReference):
raise TypeError("left operand should be an MemoryReference")
if not isinstance(right, MemoryReference) and not isinstance(right, int):
raise TypeError("right operand should be an MemoryReference or an Int")
self.left = left
self.right = right
def out(self) -> str:
return "%s %s %s" % (self.op, self.left, self.right)
class ClassicalAnd(LogicalBinaryOp):
"""
WARNING: The operand order for ClassicalAnd has changed. In pyQuil versions <= 1.9, AND had
signature
AND %source %target
Now, AND has signature
AND %target %source
"""
op = "AND"
class ClassicalInclusiveOr(LogicalBinaryOp):
"""
The IOR instruction.
"""
op = "IOR"
class ClassicalExclusiveOr(LogicalBinaryOp):
"""
The XOR instruction.
"""
op = "XOR"
class ClassicalOr(ClassicalInclusiveOr):
"""
Deprecated class.
"""
def __init__(self, left: MemoryReference, right: MemoryReference):
warn(
"ClassicalOr has been deprecated. Replacing with "
"ClassicalInclusiveOr. Use ClassicalInclusiveOr instead. "
"NOTE: The operands to ClassicalInclusiveOr are inverted from "
"ClassicalOr."
)
super().__init__(right, left)
class ArithmeticBinaryOp(AbstractInstruction):
"""
The abstract class for binary arithmetic classical instructions.
"""
op: ClassVar[str]
def __init__(self, left: MemoryReference, right: Union[MemoryReference, int, float]):
if not isinstance(left, MemoryReference):
raise TypeError("left operand should be an MemoryReference")
if (
not isinstance(right, MemoryReference)
and not isinstance(right, int)
and not isinstance(right, float)
):
raise TypeError("right operand should be an MemoryReference or a numeric literal")
self.left = left
self.right = right
def out(self) -> str:
return "%s %s %s" % (self.op, self.left, self.right)
class ClassicalAdd(ArithmeticBinaryOp):
"""
The ADD instruction.
"""
op = "ADD"
class ClassicalSub(ArithmeticBinaryOp):
"""
The SUB instruction.
"""
op = "SUB"
class ClassicalMul(ArithmeticBinaryOp):
"""
The MUL instruction.
"""
op = "MUL"
class ClassicalDiv(ArithmeticBinaryOp):
"""
The DIV instruction.
"""
op = "DIV"
class ClassicalMove(AbstractInstruction):
"""
The MOVE instruction.
WARNING: In pyQuil 2.0, the order of operands is as MOVE <target> <source>.
In pyQuil 1.9, the order of operands was MOVE <source> <target>.
These have reversed.
"""
op = "MOVE"
def __init__(self, left: MemoryReference, right: Union[MemoryReference, int, float]):
if not isinstance(left, MemoryReference):
raise TypeError(
"Left operand of MOVE should be an MemoryReference. "
"Note that the order of the operands in pyQuil 2.0 has reversed from "
"the order of pyQuil 1.9 ."
)
if (
not isinstance(right, MemoryReference)
and not isinstance(right, int)
and not isinstance(right, float)
):
raise TypeError(
"Right operand of MOVE should be an MemoryReference or a numeric literal"
)
self.left = left
self.right = right
def out(self) -> str:
return "%s %s %s" % (self.op, self.left, self.right)
class ClassicalFalse(ClassicalMove):
"""
Deprecated class.
"""
def __init__(self, target: MemoryReference):
super().__init__(target, 0)
warn("ClassicalFalse is deprecated in favor of ClassicalMove.")
class ClassicalTrue(ClassicalMove):
"""
Deprecated class.
"""
def __init__(self, target: MemoryReference):
super().__init__(target, 1)
warn("ClassicalTrue is deprecated in favor of ClassicalMove.")
class ClassicalExchange(AbstractInstruction):
"""
The EXCHANGE instruction.
"""
op = "EXCHANGE"
def __init__(self, left: MemoryReference, right: MemoryReference):
if not isinstance(left, MemoryReference):
raise TypeError("left operand should be an MemoryReference")
if not isinstance(right, MemoryReference):
raise TypeError("right operand should be an MemoryReference")
self.left = left
self.right = right
def out(self) -> str:
return "%s %s %s" % (self.op, self.left, self.right)
class ClassicalConvert(AbstractInstruction):
"""
The CONVERT instruction.
"""
op = "CONVERT"
def __init__(self, left: MemoryReference, right: MemoryReference):
if not isinstance(left, MemoryReference):
raise TypeError("left operand should be an MemoryReference")
if not isinstance(right, MemoryReference):
raise TypeError("right operand should be an MemoryReference")
self.left = left
self.right = right
def out(self) -> str:
return "%s %s %s" % (self.op, self.left, self.right)
class ClassicalLoad(AbstractInstruction):
"""
The LOAD instruction.
"""
op = "LOAD"
def __init__(self, target: MemoryReference, left: str, right: MemoryReference):
if not isinstance(target, MemoryReference):
raise TypeError("target operand should be an MemoryReference")
if not isinstance(right, MemoryReference):
raise TypeError("right operand should be an MemoryReference")
self.target = target
self.left = left
self.right = right
def out(self) -> str:
return "%s %s %s %s" % (self.op, self.target, self.left, self.right)
class ClassicalStore(AbstractInstruction):
"""
The STORE instruction.
"""
op = "STORE"
def __init__(
self, target: str, left: MemoryReference, right: Union[MemoryReference, int, float]
):
if not isinstance(left, MemoryReference):
raise TypeError("left operand should be an MemoryReference")
if not (
isinstance(right, MemoryReference) or isinstance(right, int) or isinstance(right, float)
):
raise TypeError("right operand should be an MemoryReference or an int or float.")
self.target = target
self.left = left
self.right = right
def out(self) -> str:
return "%s %s %s %s" % (self.op, self.target, self.left, self.right)
class ClassicalComparison(AbstractInstruction):
"""
Abstract class for ternary comparison instructions.
"""
op: ClassVar[str]
def __init__(
self,
target: MemoryReference,
left: MemoryReference,
right: Union[MemoryReference, int, float],
):
if not isinstance(target, MemoryReference):
raise TypeError("target operand should be an MemoryReference")
if not isinstance(left, MemoryReference):
raise TypeError("left operand should be an MemoryReference")
if not (
isinstance(right, MemoryReference) or isinstance(right, int) or isinstance(right, float)
):
raise TypeError("right operand should be an MemoryReference or an int or float.")
self.target = target
self.left = left
self.right = right
def out(self) -> str:
return "%s %s %s %s" % (self.op, self.target, self.left, self.right)
class ClassicalEqual(ClassicalComparison):
"""
The EQ comparison instruction.
"""
op = "EQ"
class ClassicalLessThan(ClassicalComparison):
"""
The LT comparison instruction.
"""
op = "LT"
class ClassicalLessEqual(ClassicalComparison):
"""
The LE comparison instruction.
"""
op = "LE"
class ClassicalGreaterThan(ClassicalComparison):
"""
The GT comparison instruction.
"""
op = "GT"
class ClassicalGreaterEqual(ClassicalComparison):
"""
The GE comparison instruction.
"""
op = "GE"
class Jump(AbstractInstruction):
"""
Representation of an unconditional jump instruction (JUMP).
"""
def __init__(self, target: Union[Label, LabelPlaceholder]):
if not isinstance(target, (Label, LabelPlaceholder)):
raise TypeError("target should be a Label: {target}")
self.target = target
def out(self) -> str:
return "JUMP %s" % self.target
class Pragma(AbstractInstruction):
"""
A PRAGMA instruction.
This is printed in QUIL as::
PRAGMA <command> <arg1> <arg2> ... <argn> "<freeform_string>"
"""
def __init__(
self,
command: str,
args: Iterable[Union[QubitDesignator, str]] = (),
freeform_string: str = "",
):
if not isinstance(command, str):
raise TypeError(f"Pragma's require an identifier: {command}")
if not isinstance(args, collections.abc.Iterable):
raise TypeError(f"Pragma arguments must be an Iterable: {args}")
for a in args:
if not (
isinstance(a, str)
or isinstance(a, int)
or isinstance(a, QubitPlaceholder)
or isinstance(a, Qubit)
):
raise TypeError(f"Pragma arguments must be strings or integers: {a}")
if not isinstance(freeform_string, str):
raise TypeError(f"The freeform string argument must be a string: {freeform_string}")
self.command = command
self.args = tuple(args)
self.freeform_string = freeform_string
def out(self) -> str:
ret = "PRAGMA {}".format(self.command)
if self.args:
ret += " {}".format(" ".join(str(a) for a in self.args))
if self.freeform_string:
ret += ' "{}"'.format(self.freeform_string)
return ret
def __repr__(self) -> str:
return "<PRAGMA {}>".format(self.command)
class Declare(AbstractInstruction):
"""
A DECLARE directive.
This is printed in Quil as::
DECLARE <name> <memory-type> (SHARING <other-name> (OFFSET <amount> <type>)* )?
"""
def __init__(
self,
name: str,
memory_type: str,
memory_size: int = 1,
shared_region: Optional[str] = None,
offsets: Optional[Iterable[Tuple[int, str]]] = None,
):
self.name = name
self.memory_type = memory_type
self.memory_size = memory_size
self.shared_region = shared_region
if offsets is None:
offsets = []
self.offsets = offsets
def asdict(self) -> Dict[str, Union[Iterable[Tuple[int, str]], Optional[str], int]]:
return {
"name": self.name,
"memory_type": self.memory_type,
"memory_size": self.memory_size,
"shared_region": self.shared_region,
"offsets": self.offsets,
}
def out(self) -> str:
ret = "DECLARE {} {}[{}]".format(self.name, self.memory_type, self.memory_size)
if self.shared_region:
ret += " SHARING {}".format(self.shared_region)
for offset in self.offsets:
ret += " OFFSET {} {}".format(offset[0], offset[1])
return ret
def __repr__(self) -> str:
return "<DECLARE {}>".format(self.name)
class RawInstr(AbstractInstruction):
"""
A raw instruction represented as a string.
"""
def __init__(self, instr_str: str):
if not isinstance(instr_str, str):
raise TypeError("Raw instructions require a string.")
self.instr = instr_str
def out(self) -> str:
return self.instr
def __repr__(self) -> str:
return "<RawInstr {}>".format(self.instr)
class Pulse(AbstractInstruction):
def __init__(self, frame: Frame, waveform: Waveform, nonblocking: bool = False):
self.frame = frame
self.waveform = waveform
self.nonblocking = nonblocking
def out(self) -> str:
result = "NONBLOCKING " if self.nonblocking else ""
result += f"PULSE {self.frame} {self.waveform.out()}"
return result
def get_qubits(self, indices: bool = True) -> Set[QubitDesignator]:
return _get_frame_qubits(self.frame, indices)
class SetFrequency(AbstractInstruction):
def __init__(self, frame: Frame, freq: ParameterDesignator):
self.frame = frame
self.freq = freq
def out(self) -> str:
return f"SET-FREQUENCY {self.frame} {self.freq}"
def get_qubits(self, indices: bool = True) -> Set[QubitDesignator]:
return _get_frame_qubits(self.frame, indices)
class ShiftFrequency(AbstractInstruction):
def __init__(self, frame: Frame, freq: ParameterDesignator):
self.frame = frame
self.freq = freq
def out(self) -> str:
return f"SHIFT-FREQUENCY {self.frame} {self.freq}"
def get_qubits(self, indices: bool = True) -> Set[QubitDesignator]:
return _get_frame_qubits(self.frame, indices)
class SetPhase(AbstractInstruction):
def __init__(self, frame: Frame, phase: ParameterDesignator):
self.frame = frame
self.phase = phase
def out(self) -> str:
return f"SET-PHASE {self.frame} {self.phase}"
def get_qubits(self, indices: bool = True) -> Set[QubitDesignator]:
return _get_frame_qubits(self.frame, indices)
class ShiftPhase(AbstractInstruction):
def __init__(self, frame: Frame, phase: ParameterDesignator):
self.frame = frame
self.phase = phase
def out(self) -> str:
return f"SHIFT-PHASE {self.frame} {self.phase}"
def get_qubits(self, indices: bool = True) -> Set[QubitDesignator]:
return _get_frame_qubits(self.frame, indices)
class SwapPhase(AbstractInstruction):
def __init__(self, frameA: Frame, frameB: Frame):
self.frameA = frameA
self.frameB = frameB
def out(self) -> str:
return f"SWAP-PHASE {self.frameA} {self.frameB}"
def get_qubits(self, indices: bool = True) -> Set[QubitDesignator]:
return _get_frame_qubits(self.frameA, indices) | _get_frame_qubits(self.frameB, indices)
class SetScale(AbstractInstruction):
def __init__(self, frame: Frame, scale: ParameterDesignator):
self.frame = frame
self.scale = scale
def out(self) -> str:
return f"SET-SCALE {self.frame} {self.scale}"
def get_qubits(self, indices: bool = True) -> Set[QubitDesignator]:
return _get_frame_qubits(self.frame, indices)
class Capture(AbstractInstruction):
def __init__(
self,
frame: Frame,
kernel: Waveform,
memory_region: MemoryReference,
nonblocking: bool = False,
):
self.frame = frame
self.kernel = kernel
self.memory_region = memory_region
self.nonblocking = nonblocking
def out(self) -> str:
result = "NONBLOCKING " if self.nonblocking else ""
result += f"CAPTURE {self.frame} {self.kernel.out()}"
result += f" {self.memory_region.out()}" if self.memory_region else ""
return result
def get_qubits(self, indices: bool = True) -> Set[QubitDesignator]:
return _get_frame_qubits(self.frame, indices)
class RawCapture(AbstractInstruction):
def __init__(
self,
frame: Frame,
duration: float,
memory_region: MemoryReference,
nonblocking: bool = False,
):
self.frame = frame
self.duration = duration
self.memory_region = memory_region
self.nonblocking = nonblocking
def out(self) -> str:
result = "NONBLOCKING " if self.nonblocking else ""
result += f"RAW-CAPTURE {self.frame} {self.duration} {self.memory_region.out()}"
return result
def get_qubits(self, indices: bool = True) -> Set[QubitDesignator]:
return _get_frame_qubits(self.frame, indices)
class DelayFrames(AbstractInstruction):
def __init__(self, frames: List[Frame], duration: float):
# all frames should be on the same qubits
if len(frames) == 0:
raise ValueError("DELAY expected nonempty list of frames.")
if len(set(tuple(f.qubits) for f in frames)) != 1:
raise ValueError(
"DELAY with explicit frames requires all frames are on the same qubits."
)
self.frames = frames
self.duration = duration
def out(self) -> str:
qubits = self.frames[0].qubits
ret = "DELAY " + _format_qubits_str(qubits)
for f in self.frames:
ret += f' "{f.name}"'
ret += f" {self.duration}"
return ret
class DelayQubits(AbstractInstruction):
def __init__(self, qubits: List[Union[Qubit, FormalArgument]], duration: float):
self.qubits = qubits
self.duration = duration
def out(self) -> str:
return f"DELAY {_format_qubits_str(self.qubits)} {self.duration}"
class FenceAll(SimpleInstruction):
"""
The FENCE instruction.
"""
op = "FENCE"
class Fence(AbstractInstruction):
def __init__(self, qubits: List[Union[Qubit, FormalArgument]]):
self.qubits = qubits
def out(self) -> str:
ret = "FENCE " + _format_qubits_str(self.qubits)
return ret
class DefWaveform(AbstractInstruction):
def __init__(
self, name: str, parameters: List[Parameter], entries: List[Union[Complex, Expression]],
):
self.name = name
self.parameters = parameters
self.entries = entries
for e in entries:
if not isinstance(e, (Complex, Expression)):
raise TypeError(f"Unsupported waveform entry {e}")
def out(self) -> str:
ret = f"DEFWAVEFORM {self.name}"
# TODO: simplify this
if len(self.parameters) > 0:
first_param, *params = self.parameters
ret += f"({first_param}"
for param in params:
ret += f", {param}"
ret += ")"
ret += ":\n "
ret += ", ".join(map(_complex_str, self.entries))
return ret
class DefCalibration(AbstractInstruction):
def __init__(
self,
name: str,
parameters: List[ParameterDesignator],
qubits: List[Union[Qubit, FormalArgument]],
instrs: List[AbstractInstruction],
):
self.name = name
self.parameters = parameters
self.qubits = qubits
self.instrs = instrs
def out(self) -> str:
ret = f"DEFCAL {self.name}"
if len(self.parameters) > 0:
ret += _format_params(self.parameters)
ret += " " + _format_qubits_str(self.qubits) + ":\n"
for instr in self.instrs:
ret += f" {instr.out()}\n"
return ret
class DefMeasureCalibration(AbstractInstruction):
def __init__(
self,
qubit: Union[Qubit, FormalArgument],
memory_reference: Optional[MemoryReference],
instrs: List[AbstractInstruction],
):
self.qubit = qubit
self.memory_reference = memory_reference
self.instrs = instrs
def out(self) -> str:
ret = f"DEFCAL MEASURE {self.qubit}"
if self.memory_reference is not None:
ret += f" {self.memory_reference}"
ret += ":\n"
for instr in self.instrs:
ret += f" {instr.out()}\n"
return ret
@dataclass
class DefFrame(AbstractInstruction):
frame: Frame
""" The frame being defined. """
direction: Optional[str] = None
""" The direction of the frame, i.e. 'tx' or 'rx'. """
initial_frequency: Optional[float] = None
""" The initial frequency of the frame. """
hardware_object: Optional[str] = None
""" The name of the hardware object associated to the frame. """
sample_rate: Optional[float] = None
""" The sample rate of the frame [Hz]. """
center_frequency: Optional[float] = None
""" The 'center' frequency of the frame, used for detuning arithmetic. """
def out(self) -> str:
r = f"DEFFRAME {self.frame.out()}"
options = [
(self.direction, "DIRECTION"),
(self.initial_frequency, "INITIAL-FREQUENCY"),
(self.center_frequency, "CENTER-FREQUENCY"),
(self.hardware_object, "HARDWARE-OBJECT"),
(self.sample_rate, "SAMPLE-RATE"),
]
if any(value for (value, name) in options):
r += ":"
for value, name in options:
if value is None:
continue
if isinstance(value, str):
value = f'"{value}"'
r += f"\n {name}: {value}"
return r + "\n"
|
|
from matplotlib.colors import ListedColormap
from numpy import nan, inf
# Used to reconstruct the colormap in pycam02ucs.cm.viscm
parameters = {'xp': [16.121891585344997, 33.901145962549492, 5.5873058066040926, -14.703203914141397, -17.875928056390336, -5.3288735306278738],
'yp': [-2.5423728813559308, -13.425925925925895, -42.422027290448327, -35.333333333333314, -8.83264462809916, -2.1686159844054487],
'min_Jp': 15.0,
'max_Jp': 95.0}
cm_data = [[ 0.21298394, 0.05589169, 0.14220951],
[ 0.21780744, 0.0570005 , 0.14665582],
[ 0.22261214, 0.05808842, 0.15115908],
[ 0.22739756, 0.05915624, 0.15572185],
[ 0.23216536, 0.06020099, 0.16034977],
[ 0.23691745, 0.06121879, 0.1650498 ],
[ 0.24164654, 0.06222163, 0.169816 ],
[ 0.24635153, 0.06321115, 0.17465056],
[ 0.25103114, 0.06418929, 0.1795555 ],
[ 0.25568737, 0.06515168, 0.1845388 ],
[ 0.26031556, 0.06610638, 0.18959733],
[ 0.26491272, 0.06705861, 0.19473015],
[ 0.26947709, 0.0680114 , 0.19993831],
[ 0.27400681, 0.06896804, 0.20522255],
[ 0.27849993, 0.06993211, 0.21058327],
[ 0.28295501, 0.07090603, 0.21602205],
[ 0.28737014, 0.0718934 , 0.22153921],
[ 0.29174204, 0.07290112, 0.22713094],
[ 0.29606871, 0.07393344, 0.23279613],
[ 0.30034822, 0.07499465, 0.23853326],
[ 0.30457867, 0.07608911, 0.24434046],
[ 0.30875826, 0.07722111, 0.25021549],
[ 0.31288529, 0.0783949 , 0.25615575],
[ 0.3169582 , 0.07961456, 0.26215837],
[ 0.32097556, 0.08088399, 0.26822019],
[ 0.32493609, 0.08220684, 0.27433782],
[ 0.3288387 , 0.08358647, 0.28050768],
[ 0.33268245, 0.08502593, 0.28672608],
[ 0.33646657, 0.08652789, 0.2929892 ],
[ 0.34019047, 0.08809468, 0.29929318],
[ 0.34385372, 0.08972821, 0.30563417],
[ 0.34745604, 0.09143006, 0.31200825],
[ 0.35099729, 0.0932014 , 0.31841152],
[ 0.35447749, 0.09504303, 0.32484029],
[ 0.35789677, 0.09695535, 0.33129096],
[ 0.36125536, 0.09893846, 0.33776007],
[ 0.36455362, 0.10099212, 0.34424427],
[ 0.36779195, 0.10311585, 0.35074041],
[ 0.37097085, 0.10530889, 0.35724546],
[ 0.37409088, 0.10757029, 0.36375657],
[ 0.37715263, 0.10989888, 0.37027108],
[ 0.38015674, 0.11229336, 0.37678646],
[ 0.38310387, 0.11475229, 0.38330035],
[ 0.38599472, 0.11727411, 0.38981058],
[ 0.38882999, 0.1198572 , 0.3963151 ],
[ 0.39161037, 0.12249987, 0.402812 ],
[ 0.3943366 , 0.12520039, 0.40929955],
[ 0.39700936, 0.12795703, 0.41577611],
[ 0.39962936, 0.13076802, 0.42224018],
[ 0.40219729, 0.13363161, 0.42869038],
[ 0.40471394, 0.13654614, 0.43512488],
[ 0.40717995, 0.13950986, 0.44154258],
[ 0.4095959 , 0.14252107, 0.44794287],
[ 0.41196239, 0.14557814, 0.45432475],
[ 0.41428002, 0.1486795 , 0.4606873 ],
[ 0.41654936, 0.15182361, 0.46702967],
[ 0.41877098, 0.15500903, 0.47335108],
[ 0.4209454 , 0.15823432, 0.4796508 ],
[ 0.42307313, 0.16149814, 0.48592814],
[ 0.42515465, 0.16479918, 0.49218247],
[ 0.42719043, 0.1681362 , 0.49841321],
[ 0.42918111, 0.17150798, 0.50461925],
[ 0.431127 , 0.17491341, 0.5108004 ],
[ 0.43302838, 0.17835141, 0.5169565 ],
[ 0.43488561, 0.18182099, 0.52308708],
[ 0.43669905, 0.18532117, 0.5291917 ],
[ 0.43846903, 0.18885105, 0.53526994],
[ 0.44019583, 0.19240976, 0.54132138],
[ 0.44187976, 0.19599648, 0.54734563],
[ 0.44352106, 0.19961045, 0.5533423 ],
[ 0.44512012, 0.2032509 , 0.55931077],
[ 0.44667705, 0.20691717, 0.56525088],
[ 0.44819199, 0.21060865, 0.57116243],
[ 0.44966511, 0.21432473, 0.57704502],
[ 0.45109659, 0.21806485, 0.58289828],
[ 0.45248658, 0.22182847, 0.58872183],
[ 0.45383521, 0.2256151 , 0.59451528],
[ 0.45514261, 0.22942427, 0.60027826],
[ 0.45640887, 0.23325554, 0.60601037],
[ 0.45763398, 0.23710854, 0.61171135],
[ 0.45881803, 0.24098289, 0.61738074],
[ 0.4599611 , 0.24487823, 0.62301809],
[ 0.46106323, 0.24879421, 0.62862296],
[ 0.46212445, 0.25273054, 0.63419487],
[ 0.46314479, 0.25668693, 0.63973335],
[ 0.46412426, 0.2606631 , 0.6452379 ],
[ 0.46506286, 0.2646588 , 0.650708 ],
[ 0.46596031, 0.26867393, 0.65614343],
[ 0.46681665, 0.27270825, 0.66154354],
[ 0.467632 , 0.27676148, 0.66690758],
[ 0.46840632, 0.28083345, 0.67223496],
[ 0.46913959, 0.28492398, 0.67752502],
[ 0.46983176, 0.28903289, 0.68277713],
[ 0.47048281, 0.29316004, 0.68799058],
[ 0.4710927 , 0.29730529, 0.69316468],
[ 0.47166137, 0.30146848, 0.69829868],
[ 0.47218867, 0.30564956, 0.70339194],
[ 0.47267406, 0.30984863, 0.70844403],
[ 0.47311806, 0.3140653 , 0.71345366],
[ 0.47352067, 0.31829946, 0.71841996],
[ 0.47388188, 0.322551 , 0.72334205],
[ 0.47420168, 0.32681981, 0.728219 ],
[ 0.47448009, 0.33110575, 0.73304987],
[ 0.47471715, 0.33540873, 0.73783366],
[ 0.4749129 , 0.33972863, 0.74256938],
[ 0.47506742, 0.34406531, 0.74725597],
[ 0.4751808 , 0.34841867, 0.75189235],
[ 0.47525316, 0.35278857, 0.75647742],
[ 0.47528466, 0.35717487, 0.76101004],
[ 0.47527514, 0.36157758, 0.76548918],
[ 0.47522479, 0.36599656, 0.76991363],
[ 0.47513427, 0.37043147, 0.77428199],
[ 0.47500393, 0.37488213, 0.77859297],
[ 0.47483412, 0.37934834, 0.7828453 ],
[ 0.4746253 , 0.38382989, 0.78703766],
[ 0.47437795, 0.38832654, 0.7911687 ],
[ 0.47409263, 0.39283807, 0.79523708],
[ 0.47376999, 0.39736419, 0.79924139],
[ 0.47341074, 0.40190463, 0.80318024],
[ 0.47301567, 0.40645908, 0.80705223],
[ 0.47258566, 0.41102721, 0.81085591],
[ 0.47212171, 0.41560865, 0.81458986],
[ 0.4716249 , 0.42020304, 0.81825263],
[ 0.47109642, 0.42480997, 0.82184277],
[ 0.47053758, 0.42942898, 0.82535887],
[ 0.4699498 , 0.43405962, 0.82879947],
[ 0.46933466, 0.43870139, 0.83216318],
[ 0.46869383, 0.44335376, 0.83544858],
[ 0.46802917, 0.44801616, 0.83865432],
[ 0.46734263, 0.45268799, 0.84177905],
[ 0.46663636, 0.45736864, 0.84482148],
[ 0.46591265, 0.46205743, 0.84778034],
[ 0.46517394, 0.46675366, 0.85065444],
[ 0.46442285, 0.47145661, 0.85344263],
[ 0.46366216, 0.4761655 , 0.85614385],
[ 0.46289481, 0.48087955, 0.85875708],
[ 0.46212297, 0.48559831, 0.8612812 ],
[ 0.4613509 , 0.49032052, 0.86371555],
[ 0.46058208, 0.49504528, 0.86605942],
[ 0.45982017, 0.49977167, 0.86831217],
[ 0.45906898, 0.50449872, 0.87047333],
[ 0.4583325 , 0.50922545, 0.87254251],
[ 0.45761487, 0.51395086, 0.87451947],
[ 0.45692037, 0.51867392, 0.87640412],
[ 0.45625342, 0.52339359, 0.87819649],
[ 0.45561856, 0.52810881, 0.87989676],
[ 0.45502044, 0.53281852, 0.88150529],
[ 0.45446291, 0.53752203, 0.8830221 ],
[ 0.45395166, 0.5422179 , 0.88444824],
[ 0.45349173, 0.54690499, 0.88578463],
[ 0.45308803, 0.55158223, 0.88703226],
[ 0.45274551, 0.55624857, 0.8881923 ],
[ 0.45246908, 0.56090297, 0.88926607],
[ 0.45226366, 0.5655444 , 0.89025507],
[ 0.45213406, 0.57017185, 0.89116092],
[ 0.45208461, 0.57478456, 0.89198505],
[ 0.45212047, 0.57938135, 0.89272981],
[ 0.45224622, 0.5839613 , 0.89339735],
[ 0.45246621, 0.58852353, 0.89398987],
[ 0.45278458, 0.59306722, 0.89450974],
[ 0.45320531, 0.59759159, 0.89495941],
[ 0.45373211, 0.60209592, 0.89534144],
[ 0.45436847, 0.60657953, 0.8956585 ],
[ 0.45511768, 0.61104174, 0.89591342],
[ 0.45598269, 0.61548199, 0.89610905],
[ 0.45696613, 0.61989976, 0.89624827],
[ 0.45807033, 0.62429458, 0.89633399],
[ 0.45929732, 0.62866605, 0.89636919],
[ 0.46064879, 0.63301382, 0.89635684],
[ 0.46212629, 0.6373375 , 0.89630027],
[ 0.46373081, 0.6416369 , 0.89620239],
[ 0.46546305, 0.64591186, 0.89606608],
[ 0.46732345, 0.65016224, 0.89589433],
[ 0.46931216, 0.65438798, 0.89569008],
[ 0.47142903, 0.65858902, 0.89545627],
[ 0.47367364, 0.66276538, 0.89519579],
[ 0.47604536, 0.66691708, 0.89491161],
[ 0.47854335, 0.67104413, 0.89460702],
[ 0.48116628, 0.67514678, 0.89428415],
[ 0.48391278, 0.67922522, 0.89394566],
[ 0.48678129, 0.68327963, 0.89359417],
[ 0.48977007, 0.68731025, 0.89323218],
[ 0.4928772 , 0.69131735, 0.89286215],
[ 0.49610063, 0.69530122, 0.89248647],
[ 0.49943822, 0.69926217, 0.89210744],
[ 0.50288765, 0.70320047, 0.89172772],
[ 0.50644655, 0.70711649, 0.89134936],
[ 0.51011248, 0.71101066, 0.8909741 ],
[ 0.51388294, 0.71488334, 0.89060393],
[ 0.51775541, 0.71873493, 0.89024078],
[ 0.52172732, 0.72256583, 0.8898865 ],
[ 0.5257961 , 0.72637645, 0.88954287],
[ 0.52995915, 0.7301672 , 0.8892116 ],
[ 0.53421391, 0.7339385 , 0.88889434],
[ 0.5385578 , 0.73769077, 0.88859267],
[ 0.5429883 , 0.74142444, 0.88830811],
[ 0.54750281, 0.74513991, 0.88804246],
[ 0.5520989 , 0.74883762, 0.88779685],
[ 0.55677422, 0.75251799, 0.88757251],
[ 0.56152638, 0.75618144, 0.88737072],
[ 0.56635309, 0.75982839, 0.88719273],
[ 0.57125208, 0.76345922, 0.88703974],
[ 0.57622118, 0.76707435, 0.8869129 ],
[ 0.58125826, 0.77067417, 0.88681333],
[ 0.58636126, 0.77425906, 0.88674212],
[ 0.59152819, 0.7778294 , 0.88670031],
[ 0.59675713, 0.78138555, 0.88668891],
[ 0.60204624, 0.78492789, 0.88670892],
[ 0.60739371, 0.78845676, 0.88676131],
[ 0.61279785, 0.79197249, 0.886847 ],
[ 0.61825699, 0.79547544, 0.88696697],
[ 0.62376953, 0.79896592, 0.88712212],
[ 0.62933401, 0.80244424, 0.88731328],
[ 0.63494897, 0.80591071, 0.88754133],
[ 0.64061303, 0.80936562, 0.88780715],
[ 0.64632485, 0.81280925, 0.88811162],
[ 0.65208315, 0.81624189, 0.88845562],
[ 0.65788673, 0.81966379, 0.88884001],
[ 0.6637344 , 0.82307522, 0.88926568],
[ 0.66962506, 0.82647642, 0.88973352],
[ 0.67555762, 0.82986764, 0.89024441],
[ 0.68153106, 0.83324911, 0.89079928],
[ 0.68754438, 0.83662105, 0.89139904],
[ 0.69359663, 0.83998369, 0.89204464],
[ 0.69968688, 0.84333724, 0.89273702],
[ 0.70581423, 0.84668191, 0.89347718],
[ 0.71197782, 0.85001791, 0.8942661 ],
[ 0.7181769 , 0.85334541, 0.89510469],
[ 0.72441053, 0.85666464, 0.89599414],
[ 0.73067788, 0.8599758 , 0.89693553],
[ 0.73697811, 0.8632791 , 0.89793 ],
[ 0.74331039, 0.86657473, 0.89897869],
[ 0.74967389, 0.86986292, 0.90008279],
[ 0.75606778, 0.87314387, 0.90124351],
[ 0.76249117, 0.87641781, 0.90246212],
[ 0.7689432 , 0.87968498, 0.90373988],
[ 0.77542295, 0.88294564, 0.9050781 ],
[ 0.78192947, 0.88620003, 0.90647814],
[ 0.78846179, 0.88944845, 0.90794134],
[ 0.79501887, 0.89269119, 0.9094691 ],
[ 0.80159965, 0.89592859, 0.91106281],
[ 0.80820295, 0.899161 , 0.91272391],
[ 0.81482754, 0.90238881, 0.91445386],
[ 0.82147215, 0.90561245, 0.91625407],
[ 0.82813543, 0.90883237, 0.91812595],
[ 0.83481598, 0.91204906, 0.92007088],
[ 0.84151229, 0.91526306, 0.92209023],
[ 0.84822279, 0.91847494, 0.92418529],
[ 0.85494584, 0.92168533, 0.92635732],
[ 0.8616797 , 0.9248949 , 0.92860749],
[ 0.86842255, 0.92810438, 0.9309369 ],
[ 0.87517248, 0.93131455, 0.93334654],
[ 0.88192751, 0.93452625, 0.93583728],
[ 0.88868558, 0.93774038, 0.93840987],
[ 0.89544454, 0.94095789, 0.94106488],
[ 0.90220216, 0.9441798 , 0.94380273]]
test_cm = ListedColormap(cm_data, name=__file__)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from viscm import viscm
viscm(test_cm)
except ImportError:
print("viscm not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
|
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""gen_gtest_pred_impl.py v0.1
Generates the implementation of Google Test predicate assertions and
accompanying tests.
Usage:
gen_gtest_pred_impl.py MAX_ARITY
where MAX_ARITY is a positive integer.
The command generates the implementation of up-to MAX_ARITY-ary
predicate assertions, and writes it to file gtest_pred_impl.h in the
directory where the script is. It also generates the accompanying
unit test in file gtest_pred_impl_unittest.cc.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
import time
# Where this script is.
SCRIPT_DIR = os.path.dirname(sys.argv[0])
# Where to store the generated header.
HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h')
# Where to store the generated unit test.
UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc')
def HeaderPreamble(n):
"""Returns the preamble for the header file.
Args:
n: the maximum arity of the predicate macros to be generated.
"""
# A map that defines the values used in the preamble template.
DEFS = {
'today' : time.strftime('%m/%d/%Y'),
'year' : time.strftime('%Y'),
'command' : '%s %s' % (os.path.basename(sys.argv[0]), n),
'n' : n
}
return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
//
// Implements a family of generic predicate assertion macros.
#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
#include "gtest/gtest.h"
namespace testing {
// This header implements a family of generic predicate assertion
// macros:
//
// ASSERT_PRED_FORMAT1(pred_format, v1)
// ASSERT_PRED_FORMAT2(pred_format, v1, v2)
// ...
//
// where pred_format is a function or functor that takes n (in the
// case of ASSERT_PRED_FORMATn) values and their source expression
// text, and returns a testing::AssertionResult. See the definition
// of ASSERT_EQ in gtest.h for an example.
//
// If you don't care about formatting, you can use the more
// restrictive version:
//
// ASSERT_PRED1(pred, v1)
// ASSERT_PRED2(pred, v1, v2)
// ...
//
// where pred is an n-ary function or functor that returns bool,
// and the values v1, v2, ..., must support the << operator for
// streaming to std::ostream.
//
// We also define the EXPECT_* variations.
//
// For now we only support predicates whose arity is at most %(n)s.
// Please email googletestframework@googlegroups.com if you need
// support for higher arities.
// GTEST_ASSERT_ is the basic statement to which all of the assertions
// in this file reduce. Don't use this in your code.
#define GTEST_ASSERT_(expression, on_failure) \\
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\
if (const ::testing::AssertionResult gtest_ar = (expression)) \\
; \\
else \\
on_failure(gtest_ar.failure_message())
""" % DEFS)
def Arity(n):
"""Returns the English name of the given arity."""
if n < 0:
return None
elif n <= 3:
return ['nullary', 'unary', 'binary', 'ternary'][n]
else:
return '%s-ary' % n
def Title(word):
"""Returns the given word in title case. The difference between
this and string's title() method is that Title('4-ary') is '4-ary'
while '4-ary'.title() is '4-Ary'."""
return word[0].upper() + word[1:]
def OneTo(n):
"""Returns the list [1, 2, 3, ..., n]."""
return range(1, n + 1)
def Iter(n, format, sep=''):
"""Given a positive integer n, a format string that contains 0 or
more '%s' format specs, and optionally a separator string, returns
the join of n strings, each formatted with the format string on an
iterator ranged from 1 to n.
Example:
Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'.
"""
# How many '%s' specs are in format?
spec_count = len(format.split('%s')) - 1
return sep.join([format % (spec_count * (i,)) for i in OneTo(n)])
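# Illustrative expectations for Iter (a sketch that follows from the
# implementation above; these calls are not made anywhere in this script):
#
#   Iter(2, 'const T%s& v%s', sep=', ')  ->  'const T1& v1, const T2& v2'
#   Iter(3, 'v%s')                       ->  'v1v2v3'   (default sep is '')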
def ImplementationForArity(n):
"""Returns the implementation of n-ary predicate assertions."""
  # A map that defines the values used in the implementation template.
DEFS = {
'n' : str(n),
'vs' : Iter(n, 'v%s', sep=', '),
'vts' : Iter(n, '#v%s', sep=', '),
'arity' : Arity(n),
'Arity' : Title(Arity(n))
}
impl = """
// Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
template <typename Pred""" % DEFS
impl += Iter(n, """,
typename T%s""")
impl += """>
AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS
impl += Iter(n, """,
const char* e%s""")
impl += """,
Pred pred"""
impl += Iter(n, """,
const T%s& v%s""")
impl += """) {
if (pred(%(vs)s)) return AssertionSuccess();
""" % DEFS
impl += ' return AssertionFailure() << pred_text << "("'
impl += Iter(n, """
<< e%s""", sep=' << ", "')
impl += ' << ") evaluates to false, where"'
impl += Iter(n, """
<< "\\n" << e%s << " evaluates to " << v%s""")
impl += """;
}
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
// Don't use this in your code.
#define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\
GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s), \\
on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
#define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\
GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS
impl += Iter(n, """, \\
#v%s""")
impl += """, \\
pred"""
impl += Iter(n, """, \\
v%s""")
impl += """), on_failure)
// %(Arity)s predicate assertion macros.
#define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_)
""" % DEFS
return impl
def HeaderPostamble():
"""Returns the postamble for the header file."""
return """
} // namespace testing
#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
"""
def GenerateFile(path, content):
"""Given a file path and a content string
overwrites it with the given content.
"""
print 'Updating file %s . . .' % path
f = file(path, 'w+')
print >>f, content,
f.close()
print 'File %s has been updated.' % path
def GenerateHeader(n):
"""Given the maximum arity n, updates the header file that implements
the predicate assertions.
"""
GenerateFile(HEADER,
HeaderPreamble(n)
+ ''.join([ImplementationForArity(i) for i in OneTo(n)])
+ HeaderPostamble())
def UnitTestPreamble():
"""Returns the preamble for the unit test file."""
# A map that defines the values used in the preamble template.
DEFS = {
'today' : time.strftime('%m/%d/%Y'),
'year' : time.strftime('%Y'),
'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]),
}
return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
// Regression test for gtest_pred_impl.h
//
// This file is generated by a script and quite long. If you intend to
// learn how Google Test works by reading its unit tests, read
// gtest_unittest.cc instead.
//
// This is intended as a regression test for the Google Test predicate
// assertions. We compile it as part of the gtest_unittest target
// only to keep the implementation tidy and compact, as it is quite
// involved to set up the stage for testing Google Test using Google
// Test itself.
//
// Currently, gtest_unittest takes ~11 seconds to run in the testing
// daemon. In the future, if it grows too large and needs much more
// time to finish, we should consider separating this file into a
// stand-alone regression test.
#include <iostream>
#include "gtest/gtest.h"
#include "gtest/gtest-spi.h"
// A user-defined data type.
struct Bool {
explicit Bool(int val) : value(val != 0) {}
bool operator>(int n) const { return value > Bool(n).value; }
Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }
bool operator==(const Bool& rhs) const { return value == rhs.value; }
bool value;
};
// Enables Bool to be used in assertions.
std::ostream& operator<<(std::ostream& os, const Bool& x) {
return os << (x.value ? "true" : "false");
}
""" % DEFS)
def TestsForArity(n):
"""Returns the tests for n-ary predicate assertions."""
# A map that defines the values used in the template for the tests.
DEFS = {
'n' : n,
'es' : Iter(n, 'e%s', sep=', '),
'vs' : Iter(n, 'v%s', sep=', '),
'vts' : Iter(n, '#v%s', sep=', '),
'tvs' : Iter(n, 'T%s v%s', sep=', '),
'int_vs' : Iter(n, 'int v%s', sep=', '),
'Bool_vs' : Iter(n, 'Bool v%s', sep=', '),
'types' : Iter(n, 'typename T%s', sep=', '),
'v_sum' : Iter(n, 'v%s', sep=' + '),
'arity' : Arity(n),
'Arity' : Title(Arity(n)),
}
tests = (
"""// Sample functions/functors for testing %(arity)s predicate assertions.
// A %(arity)s predicate function.
template <%(types)s>
bool PredFunction%(n)s(%(tvs)s) {
return %(v_sum)s > 0;
}
// The following two functions are needed to circumvent a bug in
// gcc 2.95.3, which sometimes has problem with the above template
// function.
bool PredFunction%(n)sInt(%(int_vs)s) {
return %(v_sum)s > 0;
}
bool PredFunction%(n)sBool(%(Bool_vs)s) {
return %(v_sum)s > 0;
}
""" % DEFS)
tests += """
// A %(arity)s predicate functor.
struct PredFunctor%(n)s {
template <%(types)s>
bool operator()(""" % DEFS
tests += Iter(n, 'const T%s& v%s', sep=""",
""")
tests += """) {
return %(v_sum)s > 0;
}
};
""" % DEFS
tests += """
// A %(arity)s predicate-formatter function.
template <%(types)s>
testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS
tests += Iter(n, 'const char* e%s', sep=""",
""")
tests += Iter(n, """,
const T%s& v%s""")
tests += """) {
if (PredFunction%(n)s(%(vs)s))
return testing::AssertionSuccess();
return testing::AssertionFailure()
<< """ % DEFS
tests += Iter(n, 'e%s', sep=' << " + " << ')
tests += """
<< " is expected to be positive, but evaluates to "
<< %(v_sum)s << ".";
}
""" % DEFS
tests += """
// A %(arity)s predicate-formatter functor.
struct PredFormatFunctor%(n)s {
template <%(types)s>
testing::AssertionResult operator()(""" % DEFS
tests += Iter(n, 'const char* e%s', sep=""",
""")
tests += Iter(n, """,
const T%s& v%s""")
tests += """) const {
return PredFormatFunction%(n)s(%(es)s, %(vs)s);
}
};
""" % DEFS
tests += """
// Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
class Predicate%(n)sTest : public testing::Test {
protected:
virtual void SetUp() {
expected_to_finish_ = true;
finished_ = false;""" % DEFS
tests += """
""" + Iter(n, 'n%s_ = ') + """0;
}
"""
tests += """
virtual void TearDown() {
// Verifies that each of the predicate's arguments was evaluated
// exactly once."""
tests += ''.join(["""
EXPECT_EQ(1, n%s_) <<
"The predicate assertion didn't evaluate argument %s "
"exactly once.";""" % (i, i + 1) for i in OneTo(n)])
tests += """
// Verifies that the control flow in the test function is expected.
if (expected_to_finish_ && !finished_) {
FAIL() << "The predicate assertion unexpactedly aborted the test.";
} else if (!expected_to_finish_ && finished_) {
FAIL() << "The failed predicate assertion didn't abort the test "
"as expected.";
}
}
// true iff the test function is expected to run to finish.
static bool expected_to_finish_;
// true iff the test function did run to finish.
static bool finished_;
""" % DEFS
tests += Iter(n, """
static int n%s_;""")
tests += """
};
bool Predicate%(n)sTest::expected_to_finish_;
bool Predicate%(n)sTest::finished_;
""" % DEFS
tests += Iter(n, """int Predicate%%(n)sTest::n%s_;
""") % DEFS
tests += """
typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest;
""" % DEFS
def GenTest(use_format, use_assert, expect_failure,
use_functor, use_user_type):
"""Returns the test for a predicate assertion macro.
Args:
use_format: true iff the assertion is a *_PRED_FORMAT*.
use_assert: true iff the assertion is a ASSERT_*.
expect_failure: true iff the assertion is expected to fail.
use_functor: true iff the first argument of the assertion is
a functor (as opposed to a function)
use_user_type: true iff the predicate functor/function takes
argument(s) of a user-defined type.
Example:
GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior
of a successful EXPECT_PRED_FORMATn() that takes a functor
whose arguments have built-in types."""
if use_assert:
assrt = 'ASSERT' # 'assert' is reserved, so we cannot use
# that identifier here.
else:
assrt = 'EXPECT'
assertion = assrt + '_PRED'
if use_format:
pred_format = 'PredFormat'
assertion += '_FORMAT'
else:
pred_format = 'Pred'
assertion += '%(n)s' % DEFS
if use_functor:
pred_format_type = 'functor'
pred_format += 'Functor%(n)s()'
else:
pred_format_type = 'function'
pred_format += 'Function%(n)s'
if not use_format:
if use_user_type:
pred_format += 'Bool'
else:
pred_format += 'Int'
test_name = pred_format_type.title()
if use_user_type:
arg_type = 'user-defined type (Bool)'
test_name += 'OnUserType'
if expect_failure:
arg = 'Bool(n%s_++)'
else:
arg = 'Bool(++n%s_)'
else:
arg_type = 'built-in type (int)'
test_name += 'OnBuiltInType'
if expect_failure:
arg = 'n%s_++'
else:
arg = '++n%s_'
if expect_failure:
successful_or_failed = 'failed'
expected_or_not = 'expected.'
test_name += 'Failure'
else:
successful_or_failed = 'successful'
expected_or_not = 'UNEXPECTED!'
test_name += 'Success'
# A map that defines the values used in the test template.
defs = DEFS.copy()
defs.update({
'assert' : assrt,
'assertion' : assertion,
'test_name' : test_name,
'pf_type' : pred_format_type,
'pf' : pred_format,
'arg_type' : arg_type,
'arg' : arg,
'successful' : successful_or_failed,
'expected' : expected_or_not,
})
test = """
// Tests a %(successful)s %(assertion)s where the
// predicate-formatter is a %(pf_type)s on a %(arg_type)s.
TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs
indent = (len(assertion) + 3)*' '
extra_indent = ''
if expect_failure:
extra_indent = ' '
if use_assert:
test += """
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE({ // NOLINT"""
else:
test += """
EXPECT_NONFATAL_FAILURE({ // NOLINT"""
test += '\n' + extra_indent + """ %(assertion)s(%(pf)s""" % defs
test = test % defs
test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs)
test += ');\n' + extra_indent + ' finished_ = true;\n'
if expect_failure:
test += ' }, "");\n'
test += '}\n'
return test
# Generates tests for all 2**6 = 64 combinations.
tests += ''.join([GenTest(use_format, use_assert, expect_failure,
use_functor, use_user_type)
for use_format in [0, 1]
for use_assert in [0, 1]
for expect_failure in [0, 1]
for use_functor in [0, 1]
for use_user_type in [0, 1]
])
return tests
def UnitTestPostamble():
"""Returns the postamble for the tests."""
return ''
def GenerateUnitTest(n):
"""Returns the tests for up-to n-ary predicate assertions."""
GenerateFile(UNIT_TEST,
UnitTestPreamble()
+ ''.join([TestsForArity(i) for i in OneTo(n)])
+ UnitTestPostamble())
def _Main():
"""The entry point of the script. Generates the header file and its
unit test."""
if len(sys.argv) != 2:
print __doc__
print 'Author: ' + __author__
sys.exit(1)
n = int(sys.argv[1])
GenerateHeader(n)
GenerateUnitTest(n)
if __name__ == '__main__':
_Main()
|
|
import boto3
import json
import os
import shutil
from botocore.stub import Stubber, ANY
from datetime import date
from django.conf import settings
from django.core import management
from django.test.testcases import TransactionTestCase
from django.test.utils import override_settings
from unittest import mock
from olympia import amo
from olympia.amo.tests import TestCase, addon_factory
from olympia.stats.management.commands import get_stats_data
from olympia.stats.management.commands.download_counts_from_file import \
is_valid_source # noqa
from olympia.stats.management.commands.update_counts_from_file import Command
from olympia.stats.models import DownloadCount, UpdateCount
hive_folder = os.path.join(settings.ROOT, 'src/olympia/stats/fixtures/files')
class FixturesFolderMixin(object):
    # You have to define these three values in your subclasses.
date = 'YYYY-MM-DD'
source_folder = 'dummy'
stats_source = 'dummy'
def clean_up_files(self):
dirpath = os.path.join(hive_folder, self.date)
if os.path.isdir(dirpath):
for name in os.listdir(dirpath):
os.unlink(os.path.join(dirpath, name))
os.rmdir(dirpath)
def setUp(self):
super(FixturesFolderMixin, self).setUp()
self.clean_up_files()
shutil.copytree(os.path.join(hive_folder, self.source_folder),
os.path.join(hive_folder, self.date))
def tearDown(self):
self.clean_up_files()
super(FixturesFolderMixin, self).tearDown()
class TestADICommand(FixturesFolderMixin, TransactionTestCase):
fixtures = ('base/addon_3615', 'base/featured', 'base/appversion.json')
date = '2014-07-10'
source_folder = 'src'
stats_source = 'file'
def setUp(self):
super(TestADICommand, self).setUp()
self.command = Command()
def test_update_counts_from_file(self):
management.call_command('update_counts_from_file', hive_folder,
date=self.date, stats_source=self.stats_source)
assert UpdateCount.objects.all().count() == 1
update_count = UpdateCount.objects.last()
# should be identical to `statuses.userEnabled`
assert update_count.count == 4
assert update_count.date == date(2014, 7, 10)
assert update_count.versions == {u'3.8': 2, u'3.7': 3}
assert update_count.statuses == {u'userDisabled': 1, u'userEnabled': 4}
application = u'{ec8030f7-c20a-464f-9b0e-13a3a9e97384}'
assert update_count.applications[application] == {u'3.6': 18}
assert update_count.oses == {u'WINNT': 5}
assert update_count.locales == {u'en-us': 1, u'en-US': 4}
def test_update_counts_from_file_includes_disabled_addons(self):
addon_factory(
guid='{39e6cf40-02f6-4bda-b1ee-409910ffd9f9}',
slug='disabled-addon',
status=amo.STATUS_DISABLED)
addon_factory(
guid='9c444b87-1124-4fd2-b97f-8fb7e9be1820',
slug='incomplete-addon', status=amo.STATUS_NULL)
management.call_command('update_counts_from_file', hive_folder,
date=self.date, stats_source=self.stats_source)
assert UpdateCount.objects.all().count() == 2
update_count = UpdateCount.objects.get(addon_id=3615)
# should be identical to `statuses.userEnabled`
assert update_count.count == 4
assert update_count.date == date(2014, 7, 10)
assert update_count.versions == {u'3.8': 2, u'3.7': 3}
assert update_count.statuses == {u'userDisabled': 1, u'userEnabled': 4}
update_count = UpdateCount.objects.get(addon__slug='disabled-addon')
assert update_count.count == 2
assert update_count.date == date(2014, 7, 10)
assert update_count.versions == {}
assert update_count.statuses == {u'userEnabled': 2}
# Make sure we didn't generate any stats for incomplete add-ons
assert not UpdateCount.objects.filter(
addon__slug='incomplete-addon').exists()
def test_update_version(self):
# Initialize the known addons and their versions.
self.command.addons_versions = {3615: ['3.5', '3.6']}
uc = UpdateCount(addon_id=3615)
self.command.update_version(uc, '3.6', 123)
assert uc.versions == {'3.6': 123}
# Test very long version:
self.command.update_version(uc, '1' * 33, 1)
assert uc.versions == {'3.6': 123, '1' * 32: 1} # Trimmed.
def test_update_status(self):
uc = UpdateCount(addon_id=3615)
self.command.update_status(uc, 'foobar', 123) # Non-existent status.
assert not uc.statuses
self.command.update_status(uc, 'userEnabled', 123)
assert uc.statuses == {'userEnabled': 123}
def test_update_app(self):
firefox_guid = '{ec8030f7-c20a-464f-9b0e-13a3a9e97384}'
uc = UpdateCount(addon_id=3615)
self.command.update_app(uc, 'foobar', '1.0', 123) # Non-existent app.
assert not uc.applications
# Malformed versions.
self.command.update_app(uc, firefox_guid, '3.0.1.2', 123)
self.command.update_app(uc, firefox_guid, '3.0123', 123)
self.command.update_app(uc, firefox_guid, '3.0c2', 123)
self.command.update_app(uc, firefox_guid, 'a.b.c', 123)
assert not uc.applications
# Well formed versions.
self.command.update_app(uc, firefox_guid, '1.0', 123)
self.command.update_app(uc, firefox_guid, '1.0.1', 124)
self.command.update_app(uc, firefox_guid, '1.0a1', 125)
self.command.update_app(uc, firefox_guid, '1.0b2', 126)
assert uc.applications == {firefox_guid: {
'1.0': 123,
'1.0.1': 124,
'1.0a1': 125,
'1.0b2': 126}}
def test_update_os(self):
uc = UpdateCount(addon_id=3615)
self.command.update_os(uc, 'foobar', 123) # Non-existent OS.
assert not uc.oses
self.command.update_os(uc, 'WINNT', 123)
assert uc.oses == {'WINNT': 123}
def test_update_locale(self):
current_locales = [ # Taken from the language pack index.
'ach', 'af', 'ak', 'an', 'ar', 'as', 'ast', 'ast-ES', 'az',
'bb-BK', 'be', 'bg', 'bn', 'br', 'bs', 'ca',
'ca-valencia', 'cs', 'csb', 'cy', 'cy-GB', 'da', 'de', 'dsb', 'el',
'en-GB', 'en-ZA', 'eo', 'es-AR', 'es-CL', 'es-ES', 'es-MX', 'et',
'eu', 'fa', 'ff', 'fi', 'fj-FJ', 'fr', 'fur-IT', 'fy-NL', 'ga-IE',
'gd', 'gl', 'gu-IN', 'he', 'hi', 'hi-IN', 'hr', 'hsb', 'hu',
'hy-AM', 'id', 'is', 'it', 'ja', 'kk', 'km', 'kn', 'ko', 'ku',
'lg', 'lij', 'lt', 'lv', 'mai', 'mg', 'mk', 'ml', 'mr', 'ms',
'nb-NO', 'nl', 'nn-NO', 'nr', 'nso', 'or', 'pa-IN', 'pl', 'pt-BR',
'pt-PT', 'rm', 'ro', 'ru', 'si', 'sk', 'sl', 'son', 'sq', 'sr',
'ss', 'st', 'sv-SE', 'sw', 'sw-TZ', 'ta', 'ta-IN', 'ta-LK', 'te',
'th', 'tn', 'tr', 'ts', 'uk', 'ur', 've', 'vi', 'wa', 'wo-SN',
'xh', 'zap-MX-diiste', 'zh-CN', 'zh-TW', 'zu']
uc = UpdateCount(addon_id=3615)
self.command.update_locale(uc, 'foobar', 123) # Non-existent locale.
assert not uc.locales
for locale in current_locales:
self.command.update_locale(uc, locale, 1)
assert len(uc.locales) == len(current_locales)
def test_trim_field(self):
uc = UpdateCount(addon_id=3615, count=1, date='2015-01-11')
self.command.trim_field(uc.versions) # Empty field.
assert not uc.versions
uc.versions = {'3.6': 123, '3.7': 321}
self.command.trim_field(uc.versions) # Small enough to fit in the db.
assert uc.versions == {'3.6': 123, '3.7': 321} # Unchanged.
very_long_key = 'x' * (2 ** 16)
uc.versions[very_long_key] = 1
self.command.trim_field(uc.versions) # Too big, must be trimmed.
assert uc.versions == {'3.6': 123, '3.7': 321} # Keep the most used.
uc.versions[very_long_key] = 1000 # Most used.
self.command.trim_field(uc.versions) # Too big, must be trimmed.
# Nothing left: least used removed, but still too big, so all the keys
# were removed.
assert uc.versions == {}
# Make sure we can store a very large field in the database.
long_key = 'x' * 65528 # This makes the dict barely fit in the db.
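        # Why 65528: json.dumps({'x' * 65528: 1}) serializes to
        # '{"' + 'x' * 65528 + '": 1}', which is 65528 + 7 = 65535 characters,
        # i.e. exactly (2 ** 16) - 1, the largest payload the (assumed 64KB)
        # text column can hold, as asserted below.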
uc.versions[long_key] = 1
assert len(json.dumps(uc.versions)) == (2 ** 16) - 1
uc.save()
uc = UpdateCount.objects.get(pk=uc.pk) # Reload
# Fits in the database, so no truncation.
assert len(json.dumps(uc.versions)) == (2 ** 16) - 1
def test_download_counts_from_file(self):
management.call_command('download_counts_from_file', hive_folder,
date=self.date, stats_source=self.stats_source)
assert DownloadCount.objects.all().count() == 2
download_count = DownloadCount.objects.get(addon_id=3615)
assert download_count.count == 3
assert download_count.date == date(2014, 7, 10)
assert download_count.sources == {u'search': 2, u'cb-dl-bob': 1}
def test_download_counts_from_file_includes_disabled_addons(self):
# We only exclude STATUS_NULL add-ons
addon_factory(slug='disabled-addon', status=amo.STATUS_DISABLED)
addon_factory(slug='incomplete-addon', status=amo.STATUS_NULL)
management.call_command('download_counts_from_file', hive_folder,
date=self.date, stats_source=self.stats_source)
assert DownloadCount.objects.all().count() == 3
download_count = DownloadCount.objects.get(addon_id=3615)
assert download_count.count == 3
assert download_count.date == date(2014, 7, 10)
assert download_count.sources == {u'search': 2, u'cb-dl-bob': 1}
download_count = DownloadCount.objects.get(
addon__slug='disabled-addon')
assert download_count.count == 1
assert download_count.date == date(2014, 7, 10)
assert download_count.sources == {u'search': 1}
# Make sure we didn't generate any stats for incomplete add-ons
assert not DownloadCount.objects.filter(
addon__slug='incomplete-addon').exists()
@mock.patch(
'olympia.stats.management.commands.download_counts_from_file.'
'close_old_connections')
def test_download_counts_from_file_closes_old_connections(
self, close_old_connections_mock):
management.call_command('download_counts_from_file', hive_folder,
date=self.date, stats_source=self.stats_source)
assert DownloadCount.objects.all().count() == 2
close_old_connections_mock.assert_called_once()
def test_is_valid_source(self):
assert is_valid_source('foo',
fulls=['foo', 'bar'],
prefixes=['baz', 'cruux'])
assert not is_valid_source('foob',
fulls=['foo', 'bar'],
prefixes=['baz', 'cruux'])
assert is_valid_source('foobaz',
fulls=['foo', 'bar'],
prefixes=['baz', 'cruux'])
assert not is_valid_source('ba',
fulls=['foo', 'bar'],
prefixes=['baz', 'cruux'])
class TestThemeADICommand(FixturesFolderMixin, TestCase):
date = '2014-11-06'
fixtures = ['base/appversion.json']
source_folder = '1093699'
stats_source = 'file'
def test_update_counts_from_file_bug_1093699(self):
addon_factory(guid='{fe9e9f88-42f0-40dc-970b-4b0e6b7a3d0b}',
type=amo.ADDON_THEME)
management.call_command('update_counts_from_file', hive_folder,
date=self.date, stats_source=self.stats_source)
assert UpdateCount.objects.all().count() == 1
uc = UpdateCount.objects.last()
# should be identical to `statuses.userEnabled`
assert uc.count == 1259
assert uc.date == date(2014, 11, 6)
assert (uc.versions ==
{u'1.7.16': 1, u'userEnabled': 3, u'1.7.13': 2, u'1.7.11': 3,
u'1.6.0': 1, u'1.7.14': 1304, u'1.7.6': 6})
assert (uc.statuses ==
{u'Unknown': 3, u'userEnabled': 1259, u'userDisabled': 58})
assert uc.oses == {u'WINNT': 1122, u'Darwin': 114, u'Linux': 84}
assert uc.locales[u'es-ES'] == 20
assert (uc.applications[u'{aa3c5121-dab2-40e2-81ca-7ea25febc110}'] ==
{u'2.0': 3})
class TestADICommandS3(TransactionTestCase):
fixtures = ('base/addon_3615', 'base/featured', 'base/appversion.json')
date = '2014-07-10'
stats_source = 's3'
def add_response(self, stat):
stat_path = os.path.join(hive_folder, 'src', '%s.hive' % stat)
data = get_stats_data(stat_path)
response = {
'Body': data,
}
expected_params = {'Bucket': 'test-bucket',
'Key': os.path.join('amo_stats', stat,
self.date, '000000_0'),
'Range': ANY}
self.stubber.add_response('get_object', response, expected_params)
def setUp(self):
self.client = boto3.client('s3')
self.stubber = Stubber(self.client)
self.stubber.activate()
def tearDown(self):
self.stubber.deactivate()
@override_settings(AWS_STATS_S3_BUCKET='test-bucket')
@mock.patch('olympia.stats.management.commands.boto3')
def test_update_counts_from_s3(self, mock_boto3):
stats = ['app', 'locale', 'os', 'status', 'version']
for x in range(2):
for stat in stats:
self.add_response('update_counts_by_%s' % stat)
mock_boto3.client.return_value = self.client
management.call_command('update_counts_from_file',
date=self.date, stats_source=self.stats_source)
assert UpdateCount.objects.all().count() == 1
update_count = UpdateCount.objects.last()
# should be identical to `statuses.userEnabled`
assert update_count.count == 4
assert update_count.date == date(2014, 7, 10)
assert update_count.versions == {u'3.8': 2, u'3.7': 3}
assert update_count.statuses == {u'userDisabled': 1, u'userEnabled': 4}
application = u'{ec8030f7-c20a-464f-9b0e-13a3a9e97384}'
assert update_count.applications[application] == {u'3.6': 18}
assert update_count.oses == {u'WINNT': 5}
assert update_count.locales == {u'en-us': 1, u'en-US': 4}
@override_settings(AWS_STATS_S3_BUCKET='test-bucket')
@mock.patch('olympia.stats.management.commands.boto3')
def test_download_counts_from_s3(self, mock_boto3):
for x in range(2):
self.add_response('download_counts')
mock_boto3.client.return_value = self.client
management.call_command('download_counts_from_file',
date=self.date, stats_source=self.stats_source)
assert DownloadCount.objects.all().count() == 2
download_count = DownloadCount.objects.get(addon_id=3615)
assert download_count.count == 3
assert download_count.date == date(2014, 7, 10)
assert download_count.sources == {u'search': 2, u'cb-dl-bob': 1}
|
|
#
# cbpro/order_book.py
# David Caseria
#
# Live order book updated from the Coinbase Websocket Feed
from sortedcontainers import SortedDict
from decimal import Decimal
import pickle
from cbpro.public_client import PublicClient
from cbpro.websocket_client import WebsocketClient
class OrderBook(WebsocketClient):
def __init__(self, product_id='BTC-USD', log_to=None):
super(OrderBook, self).__init__(products=product_id)
self._asks = SortedDict()
self._bids = SortedDict()
self._client = PublicClient()
self._sequence = -1
self._log_to = log_to
if self._log_to:
assert hasattr(self._log_to, 'write')
self._current_ticker = None
@property
def product_id(self):
''' Currently OrderBook only supports a single product even though it is stored as a list of products. '''
return self.products[0]
def on_open(self):
self._sequence = -1
print("-- Subscribed to OrderBook! --\n")
def on_close(self):
print("\n-- OrderBook Socket Closed! --")
def reset_book(self):
self._asks = SortedDict()
self._bids = SortedDict()
res = self._client.get_product_order_book(product_id=self.product_id, level=3)
for bid in res['bids']:
self.add({
'id': bid[2],
'side': 'buy',
'price': Decimal(bid[0]),
'size': Decimal(bid[1])
})
for ask in res['asks']:
self.add({
'id': ask[2],
'side': 'sell',
'price': Decimal(ask[0]),
'size': Decimal(ask[1])
})
self._sequence = res['sequence']
def on_message(self, message):
if self._log_to:
pickle.dump(message, self._log_to)
sequence = message.get('sequence', -1)
if self._sequence == -1:
self.reset_book()
return
if sequence <= self._sequence:
# ignore older messages (e.g. before order book initialization from getProductOrderBook)
return
elif sequence > self._sequence + 1:
self.on_sequence_gap(self._sequence, sequence)
return
msg_type = message['type']
if msg_type == 'open':
self.add(message)
elif msg_type == 'done' and 'price' in message:
self.remove(message)
elif msg_type == 'match':
self.match(message)
self._current_ticker = message
elif msg_type == 'change':
self.change(message)
self._sequence = sequence
def on_sequence_gap(self, gap_start, gap_end):
self.reset_book()
        print('Error: messages missing ({} - {}). Re-initializing book at sequence {}.'.format(
            gap_start, gap_end, self._sequence))
def add(self, order):
order = {
'id': order.get('order_id') or order['id'],
'side': order['side'],
'price': Decimal(order['price']),
'size': Decimal(order.get('size') or order['remaining_size'])
}
if order['side'] == 'buy':
bids = self.get_bids(order['price'])
if bids is None:
bids = [order]
else:
bids.append(order)
self.set_bids(order['price'], bids)
else:
asks = self.get_asks(order['price'])
if asks is None:
asks = [order]
else:
asks.append(order)
self.set_asks(order['price'], asks)
def remove(self, order):
price = Decimal(order['price'])
if order['side'] == 'buy':
bids = self.get_bids(price)
if bids is not None:
bids = [o for o in bids if o['id'] != order['order_id']]
if len(bids) > 0:
self.set_bids(price, bids)
else:
self.remove_bids(price)
else:
asks = self.get_asks(price)
if asks is not None:
asks = [o for o in asks if o['id'] != order['order_id']]
if len(asks) > 0:
self.set_asks(price, asks)
else:
self.remove_asks(price)
def match(self, order):
size = Decimal(order['size'])
price = Decimal(order['price'])
if order['side'] == 'buy':
bids = self.get_bids(price)
if not bids:
return
assert bids[0]['id'] == order['maker_order_id']
if bids[0]['size'] == size:
self.set_bids(price, bids[1:])
else:
bids[0]['size'] -= size
self.set_bids(price, bids)
else:
asks = self.get_asks(price)
if not asks:
return
assert asks[0]['id'] == order['maker_order_id']
if asks[0]['size'] == size:
self.set_asks(price, asks[1:])
else:
asks[0]['size'] -= size
self.set_asks(price, asks)
def change(self, order):
try:
new_size = Decimal(order['new_size'])
except KeyError:
return
try:
price = Decimal(order['price'])
except KeyError:
return
if order['side'] == 'buy':
bids = self.get_bids(price)
if bids is None or not any(o['id'] == order['order_id'] for o in bids):
return
index = [b['id'] for b in bids].index(order['order_id'])
bids[index]['size'] = new_size
self.set_bids(price, bids)
else:
asks = self.get_asks(price)
if asks is None or not any(o['id'] == order['order_id'] for o in asks):
return
index = [a['id'] for a in asks].index(order['order_id'])
asks[index]['size'] = new_size
self.set_asks(price, asks)
def get_current_ticker(self):
return self._current_ticker
def get_current_book(self):
result = {
'sequence': self._sequence,
'asks': [],
'bids': [],
}
for ask in self._asks:
try:
# There can be a race condition here, where a price point is removed
# between these two ops
this_ask = self._asks[ask]
except KeyError:
continue
for order in this_ask:
result['asks'].append([order['price'], order['size'], order['id']])
for bid in self._bids:
try:
# There can be a race condition here, where a price point is removed
# between these two ops
this_bid = self._bids[bid]
except KeyError:
continue
for order in this_bid:
result['bids'].append([order['price'], order['size'], order['id']])
return result
def get_ask(self):
return self._asks.peekitem(0)[0]
def get_asks(self, price):
return self._asks.get(price)
def remove_asks(self, price):
del self._asks[price]
def set_asks(self, price, asks):
self._asks[price] = asks
def get_bid(self):
return self._bids.peekitem(-1)[0]
def get_bids(self, price):
return self._bids.get(price)
def remove_bids(self, price):
del self._bids[price]
def set_bids(self, price, bids):
self._bids[price] = bids
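# A minimal usage sketch (an illustration only; it assumes a live Coinbase
# websocket feed and is not executed by this module; see the fuller console
# example under __main__ below):
#
#   book = OrderBook(product_id='BTC-USD')
#   book.start()                 # spawns the websocket thread
#   ...                          # wait for messages to populate the book
#   best_bid = book.get_bid()    # highest bid price as a Decimal
#   best_ask = book.get_ask()    # lowest ask price as a Decimal
#   book.close()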
if __name__ == '__main__':
import sys
import time
import datetime as dt
class OrderBookConsole(OrderBook):
''' Logs real-time changes to the bid-ask spread to the console '''
def __init__(self, product_id=None):
super(OrderBookConsole, self).__init__(product_id=product_id)
# latest values of bid-ask spread
self._bid = None
self._ask = None
self._bid_depth = None
self._ask_depth = None
def on_message(self, message):
super(OrderBookConsole, self).on_message(message)
# Calculate newest bid-ask spread
bid = self.get_bid()
bids = self.get_bids(bid)
bid_depth = sum([b['size'] for b in bids])
ask = self.get_ask()
asks = self.get_asks(ask)
ask_depth = sum([a['size'] for a in asks])
if self._bid == bid and self._ask == ask and self._bid_depth == bid_depth and self._ask_depth == ask_depth:
# If there are no changes to the bid-ask spread since the last update, no need to print
pass
else:
# If there are differences, update the cache
self._bid = bid
self._ask = ask
self._bid_depth = bid_depth
self._ask_depth = ask_depth
print('{} {} bid: {:.3f} @ {:.2f}\task: {:.3f} @ {:.2f}'.format(
dt.datetime.now(), self.product_id, bid_depth, bid, ask_depth, ask))
order_book = OrderBookConsole()
order_book.start()
try:
while True:
time.sleep(10)
except KeyboardInterrupt:
order_book.close()
if order_book.error:
sys.exit(1)
else:
sys.exit(0)
|
|
from evennia import Command as BaseCommand
from evennia import utils
from evennia.commands.default.muxcommand import MuxCommand
from world import rules
from world import english_utils
import time
class Command(BaseCommand):
"""
Inherit from this if you want to create your own command styles
from scratch. Note that Evennia's default commands inherits from
MuxCommand instead.
Note that the class's `__doc__` string (this text) is
used by Evennia to create the automatic help entry for
the command, so make sure to document consistently here.
Each Command implements the following methods, called
in this order (only func() is actually required):
- at_pre_cmd(): If this returns True, execution is aborted.
- parse(): Should perform any extra parsing needed on self.args
and store the result on self.
- func(): Performs the actual work.
- at_post_cmd(): Extra actions, often things done after
every command, like prompts.
"""
def at_post_cmd(self):
"""
This hook is called after the command has finished executing
(after self.func()).
"""
caller = self.caller
        if caller.db.health is not None:
if (float(caller.db.health) / float(caller.db.max_health)) > 0.80:
prompt_hp_color = "|g"
elif (float(caller.db.health) / float(caller.db.max_health)) > 0.36:
prompt_hp_color = "|y"
else:
prompt_hp_color = "|r"
if caller.db.stamina > 6:
prompt_stamina_color = "|g"
elif caller.db.stamina > 3:
prompt_stamina_color = "|y"
else:
prompt_stamina_color = "|r"
prompt = "%sHealth|n: %s%s|n - |gMagic|n: Asleep - %sStamina|n: %s%s." % (
prompt_hp_color, prompt_hp_color, caller.db.health, prompt_stamina_color, prompt_stamina_color,
caller.db.stamina)
caller.msg(prompt)
@staticmethod
def show_balance(self):
caller = self.caller
# This happens if someone doesn't have balance back yet: the skill gives a message and aborts.
if time.time() < caller.db.balance_time:
if caller.db.balance_time - time.time() > 3:
caller.msg("You need 3 more seconds!")
elif caller.db.balance_time - time.time() > 2:
caller.msg("You need 2 more seconds!")
elif caller.db.balance_time - time.time() > 1:
caller.msg("You need 1 more second!")
elif caller.db.balance_time - time.time() > 0:
caller.msg("You've almost regained balance!")
return True
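# Illustrative only: a minimal command following the hook order documented in
# the Command class above (parse() runs before func()). It is not added to any
# command set in this module, so defining it here has no in-game effect; the
# key and help_category are assumptions.
class CmdEcho(BaseCommand):
    """
    Echo your input back to you.
    Usage:
      echo <text>
    """
    key = "echo"
    locks = "cmd:all()"
    arg_regex = r"\s|$"
    help_category = "General"
    def parse(self):
        # Store the trimmed argument string for func() to use.
        self.target = self.args.strip()
    def func(self):
        caller = self.caller
        if not self.target:
            caller.msg("Echo what?")
            return
        caller.msg("You said: %s" % self.target)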
class CmdDeposit(BaseCommand):
"""
Deposit some silver sovereigns into the bank.
Usage:
deposit <silver>
Hint: The fruit is fake.
"""
key = "deposit"
#aliases = ["lend", "donate"]
locks = "cmd:all()"
arg_regex = r"\s|$"
help_category = "Business"
def parse(self):
"Very trivial parser"
self.target = self.args.strip()
def func(self):
value = self.target
value_s = str(value)
caller = self.caller
if not value:
caller.msg("Deposit how much?")
return
elif not str.isdigit(value_s):
caller.msg("You must specify a number (and only a number) that you wish to deposit.")
# caller.search handles error messages
return
elif caller.db.silver_carried < int(value):
caller.msg("That's more silver than you're carrying!")
return
elif caller.db.silver_carried >= int(value):
string = "You deposit {:,} silver sovereigns into your Tower bank account.".format(int(value))
caller.msg(string)
caller.db.silver_carried = caller.db.silver_carried - int(value)
caller.db.tower_bank_account = caller.db.tower_bank_account + int(value)
return
class CmdWithdraw(BaseCommand):
"""
Withdraw some silver sovereigns from the bank.
Usage:
withdraw <silver>
Hint: The fruit is fake.
"""
key = "withdraw"
#aliases = ["lend", "donate"]
locks = "cmd:all()"
arg_regex = r"\s|$"
help_category = "Business"
def parse(self):
"Very trivial parser"
self.target = self.args.strip()
def func(self):
value = self.target
value_s = str(value)
caller = self.caller
if not value:
caller.msg("Withdraw how much?")
return
elif not str.isdigit(value_s):
caller.msg("You must specify a number (and only a number) that you wish to withdraw.")
# caller.search handles error messages
return
elif caller.db.tower_bank_account < int(value):
caller.msg("That's more silver than you have in your account!")
return
elif caller.db.tower_bank_account >= int(value):
string = "You withdraw {:,} silver sovereigns from your Tower bank account.".format(int(value))
caller.msg(string)
caller.db.silver_carried = caller.db.silver_carried + int(value)
caller.db.tower_bank_account = caller.db.tower_bank_account - int(value)
return
class CmdBalance(BaseCommand):
"""
Check the outstanding balance in your bank account.
"""
key = "balance"
#aliases = ["lend", "donate"]
locks = "cmd:all()"
arg_regex = r"\s|$"
help_category = "Business"
def func(self):
string = "You have {:,} silver sovereigns available for withdrawal from your Tower bank account.".format(self.caller.db.tower_bank_account)
self.caller.msg(string)
class CmdDonate(BaseCommand):
"""
    Donate some silver sovereigns to Corinth's city coffers. City coffers are primarily used for law enforcement and military purposes.
Usage:
donate <silver>
"""
key = "donate"
#aliases = ["lend", "donate"]
locks = "cmd:all()"
arg_regex = r"\s|$"
help_category = "Business"
def parse(self):
"Very trivial parser"
self.target = self.args.strip()
def func(self):
value = self.target
value_s = str(value)
caller = self.caller
sphere = caller.search("#609", global_search=True)
if not value:
caller.msg("Donate how much to Corinth?")
return
elif not str.isdigit(value_s):
caller.msg("You must specify a number (and only a number) that you wish to donate.")
# caller.search handles error messages
return
elif caller.db.silver_carried < int(value):
caller.msg("That's more silver than you're carrying!")
return
elif caller.db.silver_carried >= int(value):
string = "You deposit {:,} silver sovereigns into Corinth's city coffers.".format(int(value))
caller.msg(string)
caller.db.silver_carried = caller.db.silver_carried - int(value)
sphere.db.coffers = sphere.db.coffers + int(value)
return
# class CmdBuy(BaseCommand):
# """
# Buy an item from a shop.
#
# Usage:
# deposit <silver>
#
# Hint: The fruit is fake.
# """
# key = "buy"
# #aliases = ["lend", "donate"]
# locks = "cmd:all()"
# arg_regex = r"\s|$"
#
# def parse(self):
# "Very trivial parser"
# self.target = self.args.strip()
#
# def func(self):
#
# value = self.target
# value_s = str(value)
#
# caller = self.caller
#
# if not value:
# caller.msg("Deposit how much?")
# return
# elif not str.isdigit(value_s):
# caller.msg("You must specify a number (and only a number) that you wish to deposit.")
# # caller.search handles error messages
# return
# elif caller.db.silver_carried < int(value):
# caller.msg("That's more silver than you're carrying!")
# return
# elif caller.db.silver_carried >= int(value):
# string = "You deposit {:,} silver sovereigns into your Tower bank account.".format(int(value))
# caller.msg(string)
# caller.db.silver_carried = caller.db.silver_carried - int(value)
# caller.db.tower_bank_account = caller.db.tower_bank_account + int(value)
# return
|
|
import itertools
from typing import List, Optional, Union
import numpy as np
import pandas._libs.algos as libalgos
import pandas._libs.reshape as libreshape
from pandas._libs.sparse import IntIndex
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.common import (
ensure_platform_int,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_object_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import notna
import pandas.core.algorithms as algos
from pandas.core.arrays import SparseArray
from pandas.core.arrays.categorical import factorize_from_iterable
from pandas.core.frame import DataFrame
from pandas.core.indexes.api import Index, MultiIndex
from pandas.core.series import Series
from pandas.core.sorting import (
compress_group_index,
decons_obs_group_ids,
get_compressed_ids,
get_group_index,
)
class _Unstacker:
"""
Helper class to unstack data / pivot with multi-level index
Parameters
----------
index : object
Pandas ``Index``
level : int or str, default last level
Level to "unstack". Accepts a name for the level.
    fill_value : scalar, optional
        Default value to fill in missing values if subgroups do not have the
        same set of labels. By default, missing values will be replaced with
        the default fill value for that data type, NaN for float, NaT for
        datetimelike, etc. For integer types, by default data will be
        converted to float and missing values will be set to NaN. (This value
        is consumed by ``get_result``, not by the constructor.)
constructor : object
Pandas ``DataFrame`` or subclass used to create unstacked
response. If None, DataFrame will be used.
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1, 5, dtype=np.int64), index=index)
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
Returns
-------
unstacked : DataFrame
"""
def __init__(
self, index, level=-1, constructor=None,
):
if constructor is None:
constructor = DataFrame
self.constructor = constructor
self.index = index.remove_unused_levels()
self.level = self.index._get_level_number(level)
# when index includes `nan`, need to lift levels/strides by 1
self.lift = 1 if -1 in self.index.codes[self.level] else 0
# Note: the "pop" below alters these in-place.
self.new_index_levels = list(self.index.levels)
self.new_index_names = list(self.index.names)
self.removed_name = self.new_index_names.pop(self.level)
self.removed_level = self.new_index_levels.pop(self.level)
self.removed_level_full = index.levels[self.level]
# Bug fix GH 20601
# If the data frame is too big, the number of unique index combination
# will cause int32 overflow on windows environments.
# We want to check and raise an error before this happens
num_rows = np.max([index_level.size for index_level in self.new_index_levels])
num_columns = self.removed_level.size
# GH20601: This forces an overflow if the number of cells is too high.
num_cells = np.multiply(num_rows, num_columns, dtype=np.int32)
if num_rows > 0 and num_columns > 0 and num_cells <= 0:
raise ValueError("Unstacked DataFrame is too big, causing int32 overflow")
self._make_selectors()
@cache_readonly
def _indexer_and_to_sort(self):
v = self.level
codes = list(self.index.codes)
levs = list(self.index.levels)
to_sort = codes[:v] + codes[v + 1 :] + [codes[v]]
sizes = [len(x) for x in levs[:v] + levs[v + 1 :] + [levs[v]]]
comp_index, obs_ids = get_compressed_ids(to_sort, sizes)
ngroups = len(obs_ids)
indexer = libalgos.groupsort_indexer(comp_index, ngroups)[0]
indexer = ensure_platform_int(indexer)
return indexer, to_sort
@cache_readonly
def sorted_labels(self):
indexer, to_sort = self._indexer_and_to_sort
return [l.take(indexer) for l in to_sort]
def _make_sorted_values(self, values: np.ndarray) -> np.ndarray:
indexer, _ = self._indexer_and_to_sort
sorted_values = algos.take_nd(values, indexer, axis=0)
return sorted_values
def _make_selectors(self):
new_levels = self.new_index_levels
# make the mask
remaining_labels = self.sorted_labels[:-1]
level_sizes = [len(x) for x in new_levels]
comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes)
ngroups = len(obs_ids)
comp_index = ensure_platform_int(comp_index)
stride = self.index.levshape[self.level] + self.lift
self.full_shape = ngroups, stride
selector = self.sorted_labels[-1] + stride * comp_index + self.lift
mask = np.zeros(np.prod(self.full_shape), dtype=bool)
mask.put(selector, True)
if mask.sum() < len(self.index):
raise ValueError("Index contains duplicate entries, cannot reshape")
self.group_index = comp_index
self.mask = mask
self.unique_groups = obs_ids
self.compressor = comp_index.searchsorted(np.arange(ngroups))
def get_result(self, values, value_columns, fill_value):
if values.ndim == 1:
values = values[:, np.newaxis]
if value_columns is None and values.shape[1] != 1: # pragma: no cover
raise ValueError("must pass column labels for multi-column data")
values, _ = self.get_new_values(values, fill_value)
columns = self.get_new_columns(value_columns)
index = self.new_index
return self.constructor(values, index=index, columns=columns)
def get_new_values(self, values, fill_value=None):
if values.ndim == 1:
values = values[:, np.newaxis]
sorted_values = self._make_sorted_values(values)
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
result_shape = (length, result_width)
mask = self.mask
mask_all = mask.all()
# we can simply reshape if we don't have a mask
if mask_all and len(values):
# TODO: Under what circumstances can we rely on sorted_values
# matching values? When that holds, we can slice instead
# of take (in particular for EAs)
new_values = (
sorted_values.reshape(length, width, stride)
.swapaxes(1, 2)
.reshape(result_shape)
)
new_mask = np.ones(result_shape, dtype=bool)
return new_values, new_mask
# if our mask is all True, then we can use our existing dtype
if mask_all:
dtype = values.dtype
new_values = np.empty(result_shape, dtype=dtype)
else:
dtype, fill_value = maybe_promote(values.dtype, fill_value)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
new_mask = np.zeros(result_shape, dtype=bool)
name = np.dtype(dtype).name
# we need to convert to a basic dtype
# and possibly coerce an input to our output dtype
# e.g. ints -> floats
if needs_i8_conversion(values.dtype):
sorted_values = sorted_values.view("i8")
new_values = new_values.view("i8")
elif is_bool_dtype(values.dtype):
sorted_values = sorted_values.astype("object")
new_values = new_values.astype("object")
else:
sorted_values = sorted_values.astype(name, copy=False)
# fill in our values & mask
libreshape.unstack(
sorted_values,
mask.view("u1"),
stride,
length,
width,
new_values,
new_mask.view("u1"),
)
# reconstruct dtype if needed
if needs_i8_conversion(values.dtype):
new_values = new_values.view(values.dtype)
return new_values, new_mask
def get_new_columns(self, value_columns):
if value_columns is None:
if self.lift == 0:
return self.removed_level._shallow_copy(name=self.removed_name)
lev = self.removed_level.insert(0, item=self.removed_level._na_value)
return lev.rename(self.removed_name)
stride = len(self.removed_level) + self.lift
width = len(value_columns)
propagator = np.repeat(np.arange(width), stride)
if isinstance(value_columns, MultiIndex):
new_levels = value_columns.levels + (self.removed_level_full,)
new_names = value_columns.names + (self.removed_name,)
new_codes = [lab.take(propagator) for lab in value_columns.codes]
else:
new_levels = [value_columns, self.removed_level_full]
new_names = [value_columns.name, self.removed_name]
new_codes = [propagator]
# The two indices differ only if the unstacked level had unused items:
if len(self.removed_level_full) != len(self.removed_level):
# In this case, we remap the new codes to the original level:
repeater = self.removed_level_full.get_indexer(self.removed_level)
if self.lift:
repeater = np.insert(repeater, 0, -1)
else:
# Otherwise, we just use each level item exactly once:
repeater = np.arange(stride) - self.lift
# The entire level is then just a repetition of the single chunk:
new_codes.append(np.tile(repeater, width))
return MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
@cache_readonly
def new_index(self):
# Does not depend on values or value_columns
result_codes = [lab.take(self.compressor) for lab in self.sorted_labels[:-1]]
# construct the new index
if len(self.new_index_levels) == 1:
level, level_codes = self.new_index_levels[0], result_codes[0]
if (level_codes == -1).any():
level = level.insert(len(level), level._na_value)
return level.take(level_codes).rename(self.new_index_names[0])
return MultiIndex(
levels=self.new_index_levels,
codes=result_codes,
names=self.new_index_names,
verify_integrity=False,
)
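# A minimal sketch (assuming the internal API stays as defined above) of
# driving _Unstacker directly rather than through Series.unstack():
#
#   idx = MultiIndex.from_product([["one", "two"], ["a", "b"]])
#   s = Series(np.arange(4), index=idx)
#   unstacker = _Unstacker(s.index, level=-1,
#                          constructor=s._constructor_expanddim)
#   df = unstacker.get_result(s.values, value_columns=None, fill_value=None)
#
# This mirrors what unstack() further below does for a plain (non-extension)
# Series.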
def _unstack_multiple(data, clocs, fill_value=None):
if len(clocs) == 0:
return data
# NOTE: This doesn't deal with hierarchical columns yet
index = data.index
# GH 19966 Make sure if MultiIndexed index has tuple name, they will be
# recognised as a whole
if clocs in index.names:
clocs = [clocs]
clocs = [index._get_level_number(i) for i in clocs]
rlocs = [i for i in range(index.nlevels) if i not in clocs]
clevels = [index.levels[i] for i in clocs]
ccodes = [index.codes[i] for i in clocs]
cnames = [index.names[i] for i in clocs]
rlevels = [index.levels[i] for i in rlocs]
rcodes = [index.codes[i] for i in rlocs]
rnames = [index.names[i] for i in rlocs]
shape = [len(x) for x in clevels]
group_index = get_group_index(ccodes, shape, sort=False, xnull=False)
comp_ids, obs_ids = compress_group_index(group_index, sort=False)
recons_codes = decons_obs_group_ids(comp_ids, obs_ids, shape, ccodes, xnull=False)
if not rlocs:
# Everything is in clocs, so the dummy df has a regular index
dummy_index = Index(obs_ids, name="__placeholder__")
else:
dummy_index = MultiIndex(
levels=rlevels + [obs_ids],
codes=rcodes + [comp_ids],
names=rnames + ["__placeholder__"],
verify_integrity=False,
)
if isinstance(data, Series):
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack("__placeholder__", fill_value=fill_value)
new_levels = clevels
new_names = cnames
new_codes = recons_codes
else:
if isinstance(data.columns, MultiIndex):
result = data
for i in range(len(clocs)):
val = clocs[i]
result = result.unstack(val, fill_value=fill_value)
clocs = [v if v < val else v - 1 for v in clocs]
return result
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack("__placeholder__", fill_value=fill_value)
if isinstance(unstacked, Series):
unstcols = unstacked.index
else:
unstcols = unstacked.columns
assert isinstance(unstcols, MultiIndex) # for mypy
new_levels = [unstcols.levels[0]] + clevels
new_names = [data.columns.name] + cnames
new_codes = [unstcols.codes[0]]
for rec in recons_codes:
new_codes.append(rec.take(unstcols.codes[-1]))
new_columns = MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
if isinstance(unstacked, Series):
unstacked.index = new_columns
else:
unstacked.columns = new_columns
return unstacked
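# Sketch (illustrative): passing a list of levels to the public API routes
# through _unstack_multiple above.
#
#   idx = pd.MultiIndex.from_product([["x"], ["a", "b"], [1, 2]],
#                                    names=["l0", "l1", "l2"])
#   s = pd.Series(range(4), index=idx)
#   s.unstack(["l1", "l2"])   # column index becomes a MultiIndex over (l1, l2)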
def unstack(obj, level, fill_value=None):
if isinstance(level, (tuple, list)):
if len(level) != 1:
# _unstack_multiple only handles MultiIndexes,
# and isn't needed for a single level
return _unstack_multiple(obj, level, fill_value=fill_value)
else:
level = level[0]
# Prioritize integer interpretation (GH #21677):
if not is_integer(level) and not level == "__placeholder__":
level = obj.index._get_level_number(level)
if isinstance(obj, DataFrame):
if isinstance(obj.index, MultiIndex) or not obj._can_fast_transpose:
return _unstack_frame(obj, level, fill_value=fill_value)
else:
return obj.T.stack(dropna=False)
else:
if is_extension_array_dtype(obj.dtype):
return _unstack_extension_series(obj, level, fill_value)
unstacker = _Unstacker(
obj.index, level=level, constructor=obj._constructor_expanddim,
)
return unstacker.get_result(
obj.values, value_columns=None, fill_value=fill_value
)
def _unstack_frame(obj, level, fill_value=None):
if not obj._can_fast_transpose:
unstacker = _Unstacker(obj.index, level=level)
mgr = obj._mgr.unstack(unstacker, fill_value=fill_value)
return obj._constructor(mgr)
else:
return _Unstacker(
obj.index, level=level, constructor=obj._constructor,
).get_result(obj._values, value_columns=obj.columns, fill_value=fill_value)
def _unstack_extension_series(series, level, fill_value):
"""
Unstack an ExtensionArray-backed Series.
The ExtensionDtype is preserved.
Parameters
----------
series : Series
A Series with an ExtensionArray for values
level : Any
The level name or number.
fill_value : Any
The user-level (not physical storage) fill value to use for
missing values introduced by the reshape. Passed to
``series.values.take``.
Returns
-------
DataFrame
Each column of the DataFrame will have the same dtype as
the input Series.
"""
# Defer to the logic in ExtensionBlock._unstack
df = series.to_frame()
result = df.unstack(level=level, fill_value=fill_value)
return result.droplevel(level=0, axis=1)
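# Sketch (illustrative): the extension dtype survives the reshape.
#
#   idx = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
#   s = pd.Series([1, 2, 3], dtype="Int64", index=idx)
#   s.unstack()   # each resulting column keeps dtype Int64; cell ("b", 2) is <NA>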
def stack(frame, level=-1, dropna=True):
"""
Convert DataFrame to Series with multi-level Index. Columns become the
second level of the resulting hierarchical index
Returns
-------
stacked : Series
"""
def factorize(index):
if index.is_unique:
return index, np.arange(len(index))
codes, categories = factorize_from_iterable(index)
return categories, codes
N, K = frame.shape
# Will also convert negative level numbers and check if out of bounds.
level_num = frame.columns._get_level_number(level)
if isinstance(frame.columns, MultiIndex):
return _stack_multi_columns(frame, level_num=level_num, dropna=dropna)
elif isinstance(frame.index, MultiIndex):
new_levels = list(frame.index.levels)
new_codes = [lab.repeat(K) for lab in frame.index.codes]
clev, clab = factorize(frame.columns)
new_levels.append(clev)
new_codes.append(np.tile(clab, N).ravel())
new_names = list(frame.index.names)
new_names.append(frame.columns.name)
new_index = MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
else:
levels, (ilab, clab) = zip(*map(factorize, (frame.index, frame.columns)))
codes = ilab.repeat(K), np.tile(clab, N).ravel()
new_index = MultiIndex(
levels=levels,
codes=codes,
names=[frame.index.name, frame.columns.name],
verify_integrity=False,
)
if frame._is_homogeneous_type:
# For homogeneous EAs, frame._values will coerce to object. So
# we concatenate instead.
dtypes = list(frame.dtypes._values)
dtype = dtypes[0]
if is_extension_array_dtype(dtype):
arr = dtype.construct_array_type()
new_values = arr._concat_same_type(
[col._values for _, col in frame.items()]
)
new_values = _reorder_for_extension_array_stack(new_values, N, K)
else:
# homogeneous, non-EA
new_values = frame._values.ravel()
else:
# non-homogeneous
new_values = frame._values.ravel()
if dropna:
mask = notna(new_values)
new_values = new_values[mask]
new_index = new_index[mask]
return frame._constructor_sliced(new_values, index=new_index)
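# Sketch (illustrative): stack() and unstack() are inverse-direction reshapes.
#
#   df = pd.DataFrame([[0, 1], [2, 3]], index=["r0", "r1"], columns=["c0", "c1"])
#   stacked = df.stack()   # Series indexed by (row label, column label) pairs
#   stacked.unstack()      # recovers a frame equal to `df`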
def stack_multiple(frame, level, dropna=True):
# If all passed levels match up to column names, no
# ambiguity about what to do
if all(lev in frame.columns.names for lev in level):
result = frame
for lev in level:
result = stack(result, lev, dropna=dropna)
# Otherwise, level numbers may change as each successive level is stacked
elif all(isinstance(lev, int) for lev in level):
# As each stack is done, the level numbers decrease, so we need
# to account for that when level is a sequence of ints
result = frame
# _get_level_number() checks level numbers are in range and converts
# negative numbers to positive
level = [frame.columns._get_level_number(lev) for lev in level]
# Can't iterate directly through level as we might need to change
# values as we go
for index in range(len(level)):
lev = level[index]
result = stack(result, lev, dropna=dropna)
# Decrement all level numbers greater than current, as these
# have now shifted down by one
updated_level = []
for other in level:
if other > lev:
updated_level.append(other - 1)
else:
updated_level.append(other)
level = updated_level
else:
raise ValueError(
"level should contain all level names or all level "
"numbers, not a mixture of the two."
)
return result
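# Sketch (illustrative): the renumbering above is what makes stacking by level
# number work, since each completed stack removes one column level.
#
#   cols = pd.MultiIndex.from_product([["A", "B"], ["x", "y"]])
#   df = pd.DataFrame([[1, 2, 3, 4]], columns=cols)
#   df.stack([0, 1])   # stacks level 0, then what was level 1 (now level 0)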
def _stack_multi_columns(frame, level_num=-1, dropna=True):
def _convert_level_number(level_num, columns):
"""
Logic for converting the level number to something we can safely pass
to swaplevel:
We generally want to convert the level number into a level name, except
when columns do not have names, in which case we must leave as a level
number
"""
if level_num in columns.names:
return columns.names[level_num]
else:
if columns.names[level_num] is None:
return level_num
else:
return columns.names[level_num]
this = frame.copy()
# this makes life much simpler
if level_num != frame.columns.nlevels - 1:
# roll levels to put selected level at end
roll_columns = this.columns
for i in range(level_num, frame.columns.nlevels - 1):
# Need to check if the ints conflict with level names
lev1 = _convert_level_number(i, roll_columns)
lev2 = _convert_level_number(i + 1, roll_columns)
roll_columns = roll_columns.swaplevel(lev1, lev2)
this.columns = roll_columns
if not this.columns.is_lexsorted():
# Workaround the edge case where 0 is one of the column names,
# which interferes with trying to sort based on the first
# level
level_to_sort = _convert_level_number(0, this.columns)
this = this.sort_index(level=level_to_sort, axis=1)
# tuple list excluding level for grouping columns
if len(frame.columns.levels) > 2:
tuples = list(
zip(
*[
lev.take(level_codes)
for lev, level_codes in zip(
this.columns.levels[:-1], this.columns.codes[:-1]
)
]
)
)
unique_groups = [key for key, _ in itertools.groupby(tuples)]
new_names = this.columns.names[:-1]
new_columns = MultiIndex.from_tuples(unique_groups, names=new_names)
else:
new_columns = this.columns.levels[0]._shallow_copy(name=this.columns.names[0])
unique_groups = new_columns
# time to ravel the values
new_data = {}
level_vals = this.columns.levels[-1]
level_codes = sorted(set(this.columns.codes[-1]))
level_vals_used = level_vals[level_codes]
levsize = len(level_codes)
drop_cols = []
for key in unique_groups:
try:
loc = this.columns.get_loc(key)
except KeyError:
drop_cols.append(key)
continue
# can make more efficient?
# we almost always return a slice
# but if unsorted can get a boolean
# indexer
if not isinstance(loc, slice):
slice_len = len(loc)
else:
slice_len = loc.stop - loc.start
if slice_len != levsize:
chunk = this.loc[:, this.columns[loc]]
chunk.columns = level_vals.take(chunk.columns.codes[-1])
value_slice = chunk.reindex(columns=level_vals_used).values
else:
if frame._is_homogeneous_type and is_extension_array_dtype(
frame.dtypes.iloc[0]
):
dtype = this[this.columns[loc]].dtypes.iloc[0]
subset = this[this.columns[loc]]
value_slice = dtype.construct_array_type()._concat_same_type(
[x._values for _, x in subset.items()]
)
N, K = this.shape
idx = np.arange(N * K).reshape(K, N).T.ravel()
value_slice = value_slice.take(idx)
elif frame._is_mixed_type:
value_slice = this[this.columns[loc]].values
else:
value_slice = this.values[:, loc]
if value_slice.ndim > 1:
# i.e. not extension
value_slice = value_slice.ravel()
new_data[key] = value_slice
if len(drop_cols) > 0:
new_columns = new_columns.difference(drop_cols)
N = len(this)
if isinstance(this.index, MultiIndex):
new_levels = list(this.index.levels)
new_names = list(this.index.names)
new_codes = [lab.repeat(levsize) for lab in this.index.codes]
else:
old_codes, old_levels = factorize_from_iterable(this.index)
new_levels = [old_levels]
new_codes = [old_codes.repeat(levsize)]
new_names = [this.index.name] # something better?
new_levels.append(level_vals)
new_codes.append(np.tile(level_codes, N))
new_names.append(frame.columns.names[level_num])
new_index = MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
result = frame._constructor(new_data, index=new_index, columns=new_columns)
# more efficient way to go about this? can do the whole masking biz but
# will only save a small amount of time...
if dropna:
result = result.dropna(axis=0, how="all")
return result
def get_dummies(
data,
prefix=None,
prefix_sep="_",
dummy_na=False,
columns=None,
sparse=False,
drop_first=False,
dtype=None,
) -> "DataFrame":
"""
Convert categorical variable into dummy/indicator variables.
Parameters
----------
data : array-like, Series, or DataFrame
Data of which to get dummy indicators.
prefix : str, list of str, or dict of str, default None
String to prepend to the generated dummy column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : str, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix`.
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
sparse : bool, default False
Whether the dummy-encoded columns should be backed by
a :class:`SparseArray` (True) or a regular NumPy array (False).
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
dtype : dtype, default np.uint8
Data type for new columns. Only a single dtype is allowed.
.. versionadded:: 0.23.0
Returns
-------
DataFrame
Dummy-coded data.
See Also
--------
Series.str.get_dummies : Convert Series to dummy codes.
Examples
--------
>>> s = pd.Series(list('abca'))
>>> pd.get_dummies(s)
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
>>> s1 = ['a', 'b', np.nan]
>>> pd.get_dummies(s1)
a b
0 1 0
1 0 1
2 0 0
>>> pd.get_dummies(s1, dummy_na=True)
a b NaN
0 1 0 0
1 0 1 0
2 0 0 1
>>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
... 'C': [1, 2, 3]})
>>> pd.get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
2 3 1 0 0 0 1
>>> pd.get_dummies(pd.Series(list('abcaa')))
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
4 1 0 0
>>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True)
b c
0 0 0
1 1 0
2 0 1
3 0 0
4 0 0
>>> pd.get_dummies(pd.Series(list('abc')), dtype=float)
a b c
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
from pandas.core.reshape.concat import concat
dtypes_to_encode = ["object", "category"]
if isinstance(data, DataFrame):
# determine columns being encoded
if columns is None:
data_to_encode = data.select_dtypes(include=dtypes_to_encode)
elif not is_list_like(columns):
raise TypeError("Input must be a list-like for parameter `columns`")
else:
data_to_encode = data[columns]
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
if is_list_like(item):
if not len(item) == data_to_encode.shape[1]:
len_msg = (
f"Length of '{name}' ({len(item)}) did not match the "
"length of the columns being encoded "
f"({data_to_encode.shape[1]})."
)
raise ValueError(len_msg)
check_len(prefix, "prefix")
check_len(prefix_sep, "prefix_sep")
if isinstance(prefix, str):
prefix = itertools.cycle([prefix])
if isinstance(prefix, dict):
prefix = [prefix[col] for col in data_to_encode.columns]
if prefix is None:
prefix = data_to_encode.columns
# validate separators
if isinstance(prefix_sep, str):
prefix_sep = itertools.cycle([prefix_sep])
elif isinstance(prefix_sep, dict):
prefix_sep = [prefix_sep[col] for col in data_to_encode.columns]
with_dummies: List[DataFrame]
if data_to_encode.shape == data.shape:
# Encoding the entire df, do not prepend any dropped columns
with_dummies = []
elif columns is not None:
# Encoding only cols specified in columns. Get all cols not in
# columns to prepend to result.
with_dummies = [data.drop(columns, axis=1)]
else:
# Encoding only object and category dtype columns. Get remaining
# columns to prepend to result.
with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)]
for (col, pre, sep) in zip(data_to_encode.items(), prefix, prefix_sep):
# col is (column_name, column), use just column data here
dummy = _get_dummies_1d(
col[1],
prefix=pre,
prefix_sep=sep,
dummy_na=dummy_na,
sparse=sparse,
drop_first=drop_first,
dtype=dtype,
)
with_dummies.append(dummy)
result = concat(with_dummies, axis=1)
else:
result = _get_dummies_1d(
data,
prefix,
prefix_sep,
dummy_na,
sparse=sparse,
drop_first=drop_first,
dtype=dtype,
)
return result
def _get_dummies_1d(
data,
prefix,
prefix_sep="_",
dummy_na=False,
sparse=False,
drop_first=False,
dtype=None,
):
from pandas.core.reshape.concat import concat
# Series avoids inconsistent NaN handling
codes, levels = factorize_from_iterable(Series(data))
if dtype is None:
dtype = np.uint8
dtype = np.dtype(dtype)
if is_object_dtype(dtype):
raise ValueError("dtype=object is not a valid dtype for get_dummies")
def get_empty_frame(data) -> DataFrame:
if isinstance(data, Series):
index = data.index
else:
index = np.arange(len(data))
return DataFrame(index=index)
# if all NaN
if not dummy_na and len(levels) == 0:
return get_empty_frame(data)
codes = codes.copy()
if dummy_na:
codes[codes == -1] = len(levels)
levels = np.append(levels, np.nan)
# if dummy_na, we just fake a nan level. drop_first will drop it again
if drop_first and len(levels) == 1:
return get_empty_frame(data)
number_of_cols = len(levels)
if prefix is None:
dummy_cols = levels
else:
dummy_cols = [f"{prefix}{prefix_sep}{level}" for level in levels]
index: Optional[Index]
if isinstance(data, Series):
index = data.index
else:
index = None
if sparse:
fill_value: Union[bool, float, int]
if is_integer_dtype(dtype):
fill_value = 0
elif dtype == bool:
fill_value = False
else:
fill_value = 0.0
sparse_series = []
N = len(data)
sp_indices: List[List] = [[] for _ in range(len(dummy_cols))]
mask = codes != -1
codes = codes[mask]
n_idx = np.arange(N)[mask]
for ndx, code in zip(n_idx, codes):
sp_indices[code].append(ndx)
if drop_first:
# remove first categorical level to avoid perfect collinearity
# GH12042
sp_indices = sp_indices[1:]
dummy_cols = dummy_cols[1:]
for col, ixs in zip(dummy_cols, sp_indices):
sarr = SparseArray(
np.ones(len(ixs), dtype=dtype),
sparse_index=IntIndex(N, ixs),
fill_value=fill_value,
dtype=dtype,
)
sparse_series.append(Series(data=sarr, index=index, name=col))
out = concat(sparse_series, axis=1, copy=False)
return out
else:
dummy_mat = np.eye(number_of_cols, dtype=dtype).take(codes, axis=0)
if not dummy_na:
# reset NaN GH4446
dummy_mat[codes == -1] = 0
if drop_first:
# remove first GH12042
dummy_mat = dummy_mat[:, 1:]
dummy_cols = dummy_cols[1:]
return DataFrame(dummy_mat, index=index, columns=dummy_cols)
def _reorder_for_extension_array_stack(arr, n_rows: int, n_columns: int):
"""
Re-orders the values when stacking multiple extension-arrays.
The indirect stacking method used for EAs requires a followup
take to get the order correct.
Parameters
----------
arr : ExtensionArray
n_rows, n_columns : int
The number of rows and columns in the original DataFrame.
Returns
-------
taken : ExtensionArray
The original `arr` with elements re-ordered appropriately
Examples
--------
>>> arr = np.array(['a', 'b', 'c', 'd', 'e', 'f'])
>>> _reorder_for_extension_array_stack(arr, 2, 3)
array(['a', 'c', 'e', 'b', 'd', 'f'], dtype='<U1')
>>> _reorder_for_extension_array_stack(arr, 3, 2)
array(['a', 'd', 'b', 'e', 'c', 'f'], dtype='<U1')
"""
# final take to get the order correct.
# idx is an indexer like
# [c0r0, c1r0, c2r0, ...,
# c0r1, c1r1, c2r1, ...]
idx = np.arange(n_rows * n_columns).reshape(n_columns, n_rows).T.ravel()
return arr.take(idx)
|
|
#!/usr/bin/python -B
__author__ = "Daniel Ralston"
__copyright__ = "2012, Daniel Ralston"
__version__ = "0.1.0"
import hashlib
import os
import pickle
import shutil
import string
import subprocess
import sys # for platform
from helpers import (get_called_script_dir, get_config_path,
read_config, save_config)
# Compensate for Python 2.x and 3.x having different module names, and
# no good way to make the same imports work on both.
try:
# Widgets
from tkinter import (Button, Entry, Frame, Label, Listbox,
Scrollbar, Tk, StringVar)
from tkinter import messagebox, filedialog
# Constants
from tkinter import BOTH, BOTTOM, END, LEFT, N, S, W, E, X, Y
except ImportError:
# Widgets
from Tkinter import (Button, Entry, Frame, Label, Listbox,
Scrollbar, Tk, StringVar)
import tkMessageBox as messagebox, tkFileDialog as filedialog
# Constants
from Tkinter import BOTH, BOTTOM, END, LEFT, N, S, W, E, X, Y
## GLOBAL VARS
CONFIG_FILENAME = "NoteBag.ini"
TEMPLATE_CONFIG_FILENAME = "Template-NoteBag.ini"
# Pickle protocol 2 is a common denominator between Python 2.x and 3.x.
PICKLE_PROTOCOL = 2
def notes_checksum(notes):
"""
Return the hash digest of a list of notes as a string.
"""
digest = hashlib.sha1()
for note_name in notes:
note_name_bytes = note_name.encode('utf-8')
filename_bytes = notes[note_name].encode('utf-8')
digest.update(note_name_bytes)
digest.update(filename_bytes)
return digest.hexdigest()
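# Example (illustrative): the digest is a 40-character hex SHA-1 string, so a
# notes list that is altered on disk will fail the comparison on load.
#
#   notes_checksum({"Groceries": "groceries.rtf"})   # e.g. 'd1a0...' (40 hex chars)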
def save_notes_list(notes, file_path):
"""
Write a list of notes out to a file. Also write a checksum of the
notes list, so read_notes_list() can validate what it reads.
"""
with open(file_path, "wb") as f:
pickle.dump(notes_checksum(notes), f, PICKLE_PROTOCOL)
pickle.dump(notes, f, PICKLE_PROTOCOL)
def read_notes_list(file_path):
"""
Read a list of notes back from a file.
If the checksum from the file doesn't match the checksum of all of
the notes that were read from it, raise a ValueError.
"""
with open(file_path, "rb") as f:
saved_checksum = pickle.load(f)
notes = pickle.load(f)
loaded_checksum = notes_checksum(notes)
if loaded_checksum != saved_checksum:
raise ValueError("The list of notes has been corrupted")
return notes
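# Example round trip (illustrative; "notes.pkl" is a hypothetical path):
#
#   save_notes_list({"Todo": "todo.rtf"}, "notes.pkl")
#   read_notes_list("notes.pkl")   # -> {"Todo": "todo.rtf"}, ValueError if tampered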
def sanitize_note_name(note_name):
"""
Very conservatively remove any characters from a note name that
might not play nice with a filesystem.
"""
note_name = note_name.strip()
def okay_filename_char(c):
return c.lower() in "abcdefghijklmnopqrstuvwxyz.-_"
return "".join(list(filter(okay_filename_char, tuple(note_name))))
def create_skeleton_note(note_name, note_path, template_file_path):
"""
Create a skeleton note document, containing just the note's name.
"""
with open(template_file_path) as tf:
template_lines = tf.readlines()
skeleton_lines = [line.replace("%(NOTE NAME)%", note_name)
for line in template_lines]
skeleton_lines = [line.encode('utf-8') for line in skeleton_lines]
with open(note_path, 'wb') as f:
for line in skeleton_lines:
f.write(line)
def open_note(note_path, document_editor=None):
"""
Open a note for editing with an external program.
Use the document_editor arg if available; otherwise, use a command
appropriate to the operating system that will open the default
program for the file type.
"""
if not os.path.isfile(note_path):
raise EnvironmentError("File {0} doesn't exist".format(note_path))
# Choose the document editor and Popen() settings based on the
# operating system.
creationflags = 0
if document_editor:
program = [os.path.expandvars(document_editor)]
elif os.name.lower() == "nt":
# I'm not using the win32process library, so I can't just
# import DETACHED_PROCESS from there.
DETACHED_PROCESS = 0x8
creationflags |= DETACHED_PROCESS
program = ["cmd", "/c", "start"]
elif sys.platform.lower() == "darwin":
# macOS (Darwin)
program = ["open"]
elif os.name.lower() == "posix":
program = ["xdg-open"]
else:
messagebox.showerror("OS Not Supported",
"Your operating system is not supported")
return
cmd = program + [note_path]
subprocess.Popen(cmd, creationflags=creationflags,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
class NoteBag:
config = None
notes = None
# Config Options
notes_list_filename = None
notes_dir = None
note_template_filename = None
document_editor = None
# GUI Elements
note_name_action_strvar = None
note_name_entry_strvar = None
note_name_entry = None
note_names_label_strvar = None
note_names_listbox = None
## Config/Init Methods
def load_config(self):
"""
Load NoteBag's config file, and use it to set config options.
"""
config = self.config = read_config(CONFIG_FILENAME)
self.notes_list_filename = config.get("NoteBag", "Notes List File")
self.notes_dir = config.get("NoteBag", "Notes Directory")
self.note_template_filename = config.get("NoteBag", "Note Template Filename")
self.document_editor = config.get("NoteBag", "Document Editor")
def save_config(self):
"""
Save NoteBag's current configuration to its config file.
"""
save_config(self.config, CONFIG_FILENAME)
def load_notes_list(self):
"""
Load the list of notes.
"""
# TODO handle exceptions
notes_list_path = self.notes_list_path()
if not os.path.isfile(notes_list_path):
self.notes = {}
else:
self.notes = read_notes_list(notes_list_path)
def save_notes_list(self):
"""
Save the list of notes.
"""
save_notes_list(self.notes, self.notes_list_path())
## Back-End Methods
def notes_list_path(self):
"""
Return the path to the notes list file.
"""
return os.path.join(self.notes_dir, self.notes_list_filename)
def template_note_path(self):
"""
Return the path to the template note file.
"""
return os.path.join(get_called_script_dir(), self.note_template_filename)
def note_filename_exists(self, filename):
"""
If a note filename already exists case-insensitively, return
the proper filename from self.notes.
"""
for existing in self.notes.values():
if filename.lower() == existing.lower():
return existing
return False
def note_name_exists(self, note_name):
"""
If the given note name matches an existing note name
case-insensitively, return the "proper" note name from
self.notes; if the given note name does not exist, return
None.
"""
note_names = self.notes.keys()
for existing_note_name in note_names:
if note_name.lower() == existing_note_name.lower():
return existing_note_name
return None
def get_note_path(self, note_name):
"""
Return the path to an existing note document.
"""
note_filename = self.notes[note_name]
note_path = os.path.join(self.notes_dir, note_filename)
return note_path
def new_note_filename(self, note_name):
"""
Return an unused filename appropriate for the given note name.
Note filenames are .rtf files. All "un-kosher" characters are
stripped out of the note name, so the filesystem doesn't choke
on them.
"""
filename_base = sanitize_note_name(note_name)
filename = filename_base + ".rtf"
if not self.note_filename_exists(filename):
return filename
suffix_num = 2
while self.note_filename_exists(filename):
filename = "{0}-{1}.rtf".format(filename_base, str(suffix_num))
suffix_num += 1
return filename
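# Example (illustrative): if "shopping.rtf" is already taken in self.notes,
# the candidates tried are "shopping-2.rtf", "shopping-3.rtf", and so on.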
def get_listbox_selected_note_name(self):
"""
Return the note name that is selected in the listbox; if there
is no selection, return None.
"""
selections = self.note_names_listbox.curselection()
if not selections:
return None
selection = selections[0]
note_name = self.note_names_listbox.get(selection)
return note_name
def get_entered_note_name(self):
"""
Get the text that has been entered into the "Note Name" text
entry box, with all leading and trailing spaces stripped off.
"""
return self.note_name_entry.get().strip("\t ")
def add_note(self, note_name, note_filename=None):
"""
Add a note document, and save the list of notes.
"""
if not note_filename:
note_filename = self.new_note_filename(note_name)
note_path = os.path.join(self.notes_dir, note_filename)
create_skeleton_note(note_name, note_path, self.template_note_path())
self.notes[note_name] = note_filename
self.save_notes_list()
def update_note_names_list(self):
"""
Update the listbox of the existing notes, and the list's
label. If there is any text entered into the "Note Name" text
entry box, only list note names that contain the entered
text. (This is where incremental search happens.)
"""
search_str = self.get_entered_note_name()
note_names = self.notes.keys()
# Remove strings that don't match
if search_str:
def string_matches_search(s):
return search_str.lower() in s.lower()
note_names = filter(string_matches_search, note_names)
# Sort Alphabetically
note_names = sorted(note_names, key=lambda s: s.lower())
# Update the note name listbox
note_names_listbox = self.note_names_listbox
note_names_listbox.delete(0, END)
for note_name in note_names:
note_names_listbox.insert(END, note_name)
# Update the note name list label
if search_str:
s = "All Note Names Containing '{0}':".format(search_str)
else:
s = "All Existing Notes:"
self.note_names_label_strvar.set(s)
def open_note(self, note_name):
"""
Open a note for editing.
"""
note_filename = self.notes[note_name]
note_path = os.path.join(self.notes_dir, note_filename)
open_note(note_path, self.document_editor)
## GUI Callbacks
def note_name_action_callback(self, *_args, **_kwargs):
"""
A callback to perform an action based on the text in the "Note
Name" text entry box.
If the name of an existing note has been entered into the text
box, open the note; if some other text has been entered,
create a note with the entered text as a name; if no text has
been entered, show a warning dialog box and do nothing.
"""
note_name = self.get_entered_note_name()
if not note_name:
messagebox.showwarning("Error", "Can't add note: no note name entered")
return
key = self.note_name_exists(note_name)
if key:
# The note exists; open it.
self.open_note(key)
else:
# The note doesn't exist; create it.
# TODO popup a small confirmation/note setup dialog.
self.add_note(note_name)
self.clear_note_name_entry()
self.open_note(note_name)
self.clear_note_name_entry()
def note_name_entry_changed(self, *_args, **_kwargs):
"""
A callback to update the text entry action ("Open"/"Add")
button's label, and update the incremental note list search,
based on the text in the "Note Name" text entry box.
"""
self.update_note_names_list()
entered_note_name = self.get_entered_note_name()
if self.note_name_exists(entered_note_name):
self.note_name_action_strvar.set("Open")
else:
self.note_name_action_strvar.set("Add")
def clear_note_name_entry(self):
"""
Clear the "Note Name" text entry box.
"""
self.note_name_entry.delete(0, END)
def open_note_from_listbox(self, *_args, **_kwargs):
"""
If a note name has been selected in the note name list, open
it; otherwise, show a warning dialog box and do nothing.
"""
note_name = self.get_listbox_selected_note_name()
if not note_name:
# TODO show a warning dialog box or something
messagebox.showwarning("Error", "Can't Open: No note selected")
return
self.open_note(note_name)
def delete_note_from_listbox(self, *_args, **_kwargs):
"""
If a note name has been selected in the note name list, delete
it after prompting the user to confirm; otherwise, show a
warning dialog box and do nothing.
"""
note_name = self.get_listbox_selected_note_name()
if not note_name:
messagebox.showwarning("Error", "Can't Delete: No note selected")
return
if not messagebox.askyesno("Really Delete Note?",
"WARNING: This will remove the note document file from your hard drive! You cannot undo this!\n\nReally remove '{0}'?".format(note_name),
icon=messagebox.ERROR):
return
note_path = self.get_note_path(note_name)
del(self.notes[note_name])
self.save_notes_list()
os.remove(note_path)
self.update_note_names_list()
## Main Code
def __init__(self, master):
## High-level Layout
input_frame = Frame(master)
notes_frame = Frame(master)
input_frame.pack(fill=X, padx=15)
notes_frame.pack(fill=BOTH, expand=True, padx=10, pady=10)
## Input Frame Setup
note_name_label = Label(input_frame, text="Note Name: ")
note_name_label.pack(side=LEFT)
self.note_name_entry_strvar = StringVar()
self.note_name_entry_strvar.set("")
self.note_name_entry_strvar.trace("w", self.note_name_entry_changed)
self.note_name_entry = Entry(input_frame,
textvariable=self.note_name_entry_strvar)
note_name_entry = self.note_name_entry
note_name_entry.pack(side=LEFT, fill=X, expand=True)
note_name_entry.focus_set()
note_name_entry.bind("<Return>", self.note_name_action_callback)
note_name_entry.bind("<KP_Enter>", self.note_name_action_callback)
self.note_name_action_strvar = StringVar()
note_name_action_strvar = self.note_name_action_strvar
note_name_action_strvar.set("Add")
note_name_action_button = Button(input_frame,
textvar=note_name_action_strvar,
command=self.note_name_action_callback)
note_name_action_button.pack(side=LEFT)
clear_note_name_button = Button(input_frame, text="Clear",
command=self.clear_note_name_entry)
clear_note_name_button.pack(side=LEFT)
## Notes Frame Setup
# List of existing notes
self.note_names_label_strvar = StringVar()
note_names_label_strvar = self.note_names_label_strvar
note_names_label = Label(notes_frame,
textvar=note_names_label_strvar)
note_names_label.pack(anchor=W)
note_names_listbox = self.note_names_listbox = Listbox(notes_frame)
note_names_listbox.pack(side=LEFT, fill=BOTH, expand=True)
note_names_listbox.bind("<Return>", self.open_note_from_listbox)
note_names_listbox.bind("<KP_Enter>", self.open_note_from_listbox)
note_names_listbox.bind("<Double-Button-1>", self.open_note_from_listbox)
# Add scrollbar to list of notes
notes_scrollbar = Scrollbar(notes_frame)
notes_scrollbar.pack(side=LEFT, fill=Y)
note_names_listbox.config(yscrollcommand=notes_scrollbar.set)
notes_scrollbar.config(command=note_names_listbox.yview)
## Controls
note_controls = Frame(notes_frame)
note_controls.pack(side=LEFT, fill=Y)
open_note_button = Button(note_controls, text="Open",
command=self.open_note_from_listbox)
open_note_button.pack(fill=X)
delete_note_button = Button(note_controls, text="Delete",
command=self.delete_note_from_listbox)
delete_note_button.pack(fill=X)
## Final Initialization
self.load_config()
self.load_notes_list()
self.update_note_names_list()
def maybe_first_time_setup():
"""
Set up the user's notes directory/folder the first time they run
NoteBag.
Returns False if it failed, or needs to try again; returns True if
it succeeds, or doesn't need to happen at all.
"""
if not os.path.isfile(get_config_path(CONFIG_FILENAME)):
shutil.copy2(get_config_path(TEMPLATE_CONFIG_FILENAME),
get_config_path(CONFIG_FILENAME))
config = read_config(CONFIG_FILENAME)
if config.get("NoteBag", "Notes Directory"):
return True
if not messagebox.askokcancel(
"NoteBag Setup",
"Hi! It looks like this is your first time running NoteBag!\n"
"Please choose the folder where you would like NoteBag to keep your notes."
):
return False
notes_dir = filedialog.askdirectory(title="Notes Folder")
print(notes_dir)
if not notes_dir:
return False
config.set("NoteBag", "Notes Directory", notes_dir)
save_config(config, CONFIG_FILENAME)
return True
if __name__ == "__main__":
print("NoteBag {0}".format(__version__))
print("Copyright (C) {0}".format(__copyright__))
# Hide the main root window, and only show the dialogs.
root = Tk()
root.withdraw()
while not maybe_first_time_setup():
success = messagebox.askretrycancel(
"Try Again?",
"It looks like your first-time setup failed. Would you like to try setting up NoteBag again?")
if not success:
root.destroy()
exit(1)
root.destroy()
# Create the main window
root = Tk()
root.title("NoteBag")
notebag = NoteBag(root)
root.mainloop()
|
|
# Copyright (c) 2012 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import oslo_i18n
from webob import exc
import webtest
from neutron.api.v2 import resource as wsgi_resource
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.tests import base
from neutron import wsgi
class RequestTestCase(base.BaseTestCase):
def setUp(self):
super(RequestTestCase, self).setUp()
self.req = wsgi_resource.Request({'foo': 'bar'})
def test_content_type_missing(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.body = b"<body />"
self.assertIsNone(request.get_content_type())
def test_content_type_with_charset(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/json; charset=UTF-8"
result = request.get_content_type()
self.assertEqual(result, "application/json")
def test_content_type_from_accept(self):
content_type = 'application/json'
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = content_type
result = request.best_match_content_type()
self.assertEqual(result, content_type)
def test_content_type_from_accept_best(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = ("application/json; q=0.3, "
"application/xml; q=0.9")
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_content_type_from_query_extension(self):
request = wsgi.Request.blank('/tests/123.json')
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
request = wsgi.Request.blank('/tests/123.invalid')
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_content_type_accept_and_query_extension(self):
request = wsgi.Request.blank('/tests/123.json')
request.headers["Accept"] = "application/xml"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_content_type_accept_default(self):
request = wsgi.Request.blank('/tests/123.unsupported')
request.headers["Accept"] = "application/unsupported1"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_context_with_neutron_context(self):
ctxt = context.Context('fake_user', 'fake_tenant')
self.req.environ['neutron.context'] = ctxt
self.assertEqual(self.req.context, ctxt)
def test_context_without_neutron_context(self):
self.assertTrue(self.req.context.is_admin)
def test_request_context_elevated(self):
user_context = context.Context(
'fake_user', 'fake_project', admin=False)
self.assertFalse(user_context.is_admin)
admin_context = user_context.elevated()
self.assertFalse(user_context.is_admin)
self.assertTrue(admin_context.is_admin)
self.assertNotIn('admin', user_context.roles)
self.assertIn('admin', admin_context.roles)
def test_best_match_language(self):
# Test that we are actually invoking language negotiation via webob
request = wsgi.Request.blank('/')
oslo_i18n.get_available_languages = mock.MagicMock()
oslo_i18n.get_available_languages.return_value = ['known-language',
'es', 'zh']
request.headers['Accept-Language'] = 'known-language'
language = request.best_match_language()
self.assertEqual(language, 'known-language')
# If the Accept-Language header is an unknown language, missing or empty,
# the best match language should be None
request.headers['Accept-Language'] = 'unknown-language'
language = request.best_match_language()
self.assertIsNone(language)
request.headers['Accept-Language'] = ''
language = request.best_match_language()
self.assertIsNone(language)
request.headers.pop('Accept-Language')
language = request.best_match_language()
self.assertIsNone(language)
class ResourceTestCase(base.BaseTestCase):
@staticmethod
def _get_deserializer():
return wsgi.JSONDeserializer()
def test_unmapped_neutron_error_with_json(self):
msg = u'\u7f51\u7edc'
class TestException(n_exc.NeutronException):
message = msg
expected_res = {'body': {
'NeutronError': {
'type': 'TestException',
'message': msg,
'detail': ''}}}
controller = mock.MagicMock()
controller.test.side_effect = TestException()
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'test',
'format': 'json'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
self.assertEqual(wsgi.JSONDeserializer().deserialize(res.body),
expected_res)
@mock.patch('oslo_i18n.translate')
def test_unmapped_neutron_error_localized(self, mock_translation):
msg_translation = 'Translated error'
mock_translation.return_value = msg_translation
msg = _('Unmapped error')
class TestException(n_exc.NeutronException):
message = msg
controller = mock.MagicMock()
controller.test.side_effect = TestException()
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'test',
'format': 'json'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
self.assertIn(msg_translation,
str(wsgi.JSONDeserializer().deserialize(res.body)))
def test_mapped_neutron_error_with_json(self):
msg = u'\u7f51\u7edc'
class TestException(n_exc.NeutronException):
message = msg
expected_res = {'body': {
'NeutronError': {
'type': 'TestException',
'message': msg,
'detail': ''}}}
controller = mock.MagicMock()
controller.test.side_effect = TestException()
faults = {TestException: exc.HTTPGatewayTimeout}
resource = webtest.TestApp(wsgi_resource.Resource(controller,
faults=faults))
environ = {'wsgiorg.routing_args': (None, {'action': 'test',
'format': 'json'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPGatewayTimeout.code)
self.assertEqual(wsgi.JSONDeserializer().deserialize(res.body),
expected_res)
@mock.patch('oslo_i18n.translate')
def test_mapped_neutron_error_localized(self, mock_translation):
msg_translation = 'Translated error'
mock_translation.return_value = msg_translation
msg = _('Unmapped error')
class TestException(n_exc.NeutronException):
message = msg
controller = mock.MagicMock()
controller.test.side_effect = TestException()
faults = {TestException: exc.HTTPGatewayTimeout}
resource = webtest.TestApp(wsgi_resource.Resource(controller,
faults=faults))
environ = {'wsgiorg.routing_args': (None, {'action': 'test',
'format': 'json'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPGatewayTimeout.code)
self.assertIn(msg_translation,
str(wsgi.JSONDeserializer().deserialize(res.body)))
@staticmethod
def _make_request_with_side_effect(side_effect):
controller = mock.MagicMock()
controller.test.side_effect = side_effect
resource = webtest.TestApp(wsgi_resource.Resource(controller))
routing_args = {'action': 'test'}
environ = {'wsgiorg.routing_args': (None, routing_args)}
res = resource.get('', extra_environ=environ, expect_errors=True)
return res
def test_http_error(self):
res = self._make_request_with_side_effect(exc.HTTPGatewayTimeout())
# verify that the exception structure is the one expected
# by the python-neutronclient
self.assertEqual(exc.HTTPGatewayTimeout().explanation,
res.json['NeutronError']['message'])
self.assertEqual('HTTPGatewayTimeout',
res.json['NeutronError']['type'])
self.assertEqual('', res.json['NeutronError']['detail'])
self.assertEqual(exc.HTTPGatewayTimeout.code, res.status_int)
def test_unhandled_error(self):
expected_res = {'body': {'NeutronError':
{'detail': '',
'message': _(
'Request Failed: internal server '
'error while processing your request.'),
'type': 'HTTPInternalServerError'}}}
res = self._make_request_with_side_effect(side_effect=Exception())
self.assertEqual(exc.HTTPInternalServerError.code,
res.status_int)
self.assertEqual(expected_res,
self._get_deserializer().deserialize(res.body))
def test_not_implemented_error(self):
expected_res = {'body': {'NeutronError':
{'detail': '',
'message': _(
'The server has either erred or is '
'incapable of performing the requested '
'operation.'),
'type': 'HTTPNotImplemented'}}}
res = self._make_request_with_side_effect(exc.HTTPNotImplemented())
self.assertEqual(exc.HTTPNotImplemented.code, res.status_int)
self.assertEqual(expected_res,
self._get_deserializer().deserialize(res.body))
def test_status_200(self):
controller = mock.MagicMock()
controller.test = lambda request: {'foo': 'bar'}
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
res = resource.get('', extra_environ=environ)
self.assertEqual(res.status_int, 200)
def test_status_204(self):
controller = mock.MagicMock()
controller.test = lambda request: {'foo': 'bar'}
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'delete'})}
res = resource.delete('', extra_environ=environ)
self.assertEqual(res.status_int, 204)
def _test_error_log_level(self, expected_webob_exc, expect_log_info=False,
use_fault_map=True, exc_raised=None):
if not exc_raised:
class TestException(n_exc.NeutronException):
message = 'Test Exception'
exc_raised = TestException
controller = mock.MagicMock()
controller.test.side_effect = exc_raised()
faults = {exc_raised: expected_webob_exc} if use_fault_map else {}
resource = webtest.TestApp(wsgi_resource.Resource(controller, faults))
environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
with mock.patch.object(wsgi_resource, 'LOG') as log:
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, expected_webob_exc.code)
self.assertEqual(expect_log_info, log.info.called)
self.assertNotEqual(expect_log_info, log.exception.called)
def test_4xx_error_logged_info_level(self):
self._test_error_log_level(exc.HTTPNotFound, expect_log_info=True)
def test_non_4xx_error_logged_exception_level(self):
self._test_error_log_level(exc.HTTPServiceUnavailable,
expect_log_info=False)
def test_unmapped_error_logged_exception_level(self):
self._test_error_log_level(exc.HTTPInternalServerError,
expect_log_info=False, use_fault_map=False)
def test_webob_4xx_logged_info_level(self):
self._test_error_log_level(exc.HTTPNotFound,
use_fault_map=False, expect_log_info=True,
exc_raised=exc.HTTPNotFound)
def test_webob_5xx_logged_info_level(self):
self._test_error_log_level(exc.HTTPServiceUnavailable,
use_fault_map=False, expect_log_info=False,
exc_raised=exc.HTTPServiceUnavailable)
def test_no_route_args(self):
controller = mock.MagicMock()
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
def test_post_with_body(self):
controller = mock.MagicMock()
controller.test = lambda request, body: {'foo': 'bar'}
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
res = resource.post('', params='{"key": "val"}',
extra_environ=environ)
self.assertEqual(res.status_int, 200)
|
|
#
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
# type: ignore
"""
Test factory for creating test cases from lists of paths to XSD or XML files.
The list of cases can be defined within files named "testfiles". These are text files
that contain a list of relative paths to XSD or XML files, which are used to dynamically
build a set of test classes. Each path is followed by a list of options that defines a
custom setting for each test.
"""
import re
import argparse
import os
import fileinput
import logging
from xmlschema.cli import xsd_version_number, defuse_data
from xmlschema.validators import XMLSchema10, XMLSchema11
from ._observers import ObservedXMLSchema10, ObservedXMLSchema11
logger = logging.getLogger(__file__)
def get_test_args(args_line):
"""Returns the list of arguments from provided text line."""
try:
args_line, _ = args_line.split('#', 1) # Strip optional ending comment
except ValueError:
pass
return re.split(r'(?<!\\) ', args_line.strip())
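# Example (illustrative; the path is hypothetical): a "testfiles" line like
#
#   cases/examples/vehicles/vehicles.xsd --errors 3  # an inline comment
#
# loses its comment, is stripped, and is split on unescaped spaces, giving
# ['cases/examples/vehicles/vehicles.xsd', '--errors', '3'].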
def get_test_program_args_parser(default_testfiles):
"""
Gets an argument parser for building test scripts for schemas and xml files.
The returned parser has many arguments of unittest's TestProgram plus some
arguments for selecting testfiles and XML schema options.
"""
parser = argparse.ArgumentParser(add_help=True)
# unittest's arguments
parser.add_argument('-v', '--verbose', dest='verbosity', default=1,
action='store_const', const=2, help='Verbose output')
parser.add_argument('-q', '--quiet', dest='verbosity',
action='store_const', const=0, help='Quiet output')
parser.add_argument('--locals', dest='tb_locals', action='store_true',
help='Show local variables in tracebacks')
parser.add_argument('-f', '--failfast', dest='failfast',
action='store_true', help='Stop on first fail or error')
parser.add_argument('-c', '--catch', dest='catchbreak',
action='store_true', help='Catch Ctrl-C and display results so far')
parser.add_argument('-b', '--buffer', dest='buffer', action='store_true',
help='Buffer stdout and stderr during tests')
parser.add_argument('-k', dest='patterns', action='append', default=list(),
help='Only run tests which match the given substring')
# xmlschema's arguments
parser.add_argument('--lxml', dest='lxml', action='store_true', default=False,
help='Check also with lxml.etree.XMLSchema (for XSD 1.0)')
parser.add_argument('--codegen', action="store_true", default=False,
help="Test code generation with XML data bindings module.")
parser.add_argument('testfiles', type=str, nargs='*', default=default_testfiles,
help="Test cases directory.")
return parser
def get_test_line_args_parser():
"""Gets an arguments parser for uncommented on not blank "testfiles" lines."""
parser = argparse.ArgumentParser(add_help=True)
parser.usage = "TEST_FILE [OPTIONS]\nTry 'TEST_FILE --help' for more information."
parser.add_argument('filename', metavar='TEST_FILE', type=str,
help="Test filename (relative path).")
parser.add_argument(
'-L', dest='locations', nargs=2, type=str, default=None, action='append',
metavar="URI-URL", help="Schema location hint overrides."
)
parser.add_argument(
'--version', dest='version', metavar='VERSION', type=xsd_version_number, default='1.0',
help="XSD schema version to use for the test case (default is 1.0)."
)
parser.add_argument(
'--errors', type=int, default=0, metavar='NUM',
help="Number of errors expected (default=0)."
)
parser.add_argument(
'--warnings', type=int, default=0, metavar='NUM',
help="Number of warnings expected (default=0)."
)
parser.add_argument(
'--inspect', action="store_true", default=False,
help="Inspect using an observed custom schema class."
)
parser.add_argument(
'--defuse', metavar='(always, remote, never)', type=defuse_data, default='remote',
help="Define when to use the defused XML data loaders."
)
parser.add_argument(
'--timeout', type=int, default=300, metavar='SEC',
help="Timeout for fetching resources (default=300)."
)
parser.add_argument(
'--validation-only', action="store_true", default=False,
help="Skip decode/encode tests on XML data."
)
parser.add_argument(
'--no-pickle', action="store_true", default=False,
help="Skip pickling/unpickling test on schema (max recursion exceeded)."
)
parser.add_argument(
'--lax-encode', action="store_true", default=False,
help="Use lax mode on encode checks (for cases where test data uses default or "
"fixed values or some test data are skipped by wildcards processContents). "
"Ignored on schema tests."
)
parser.add_argument(
'--debug', action="store_true", default=False,
help="Activate the debug mode (only the cases with --debug are executed).",
)
parser.add_argument(
'--codegen', action="store_true", default=False,
help="Test code generation with XML data bindings module. For default "
"test code generation if the same command option is provided.",
)
return parser
def factory_tests(test_class_builder, testfiles, suffix,
check_with_lxml=False, codegen=False, verbosity=1):
"""
Factory function for file based schema/validation cases.
:param test_class_builder: the test class builder function.
:param testfiles: a single or a list of testfiles indexes.
:param suffix: the suffix ('xml' or 'xsd') to consider for cases.
:param check_with_lxml: if `True` compare with lxml XMLSchema class, \
reporting anomalies. Works only for XSD 1.0 tests.
:param codegen: if `True`, checks code generation with the XML data \
bindings module for all tests. Defaults to `False`, in which case code \
generation is tested only for the cases that provide the same option.
:param verbosity: the unittest's verbosity, can be 0, 1 or 2.
:return: a list of test classes.
"""
test_classes = {}
test_num = 0
debug_mode = False
line_buffer = []
test_line_parser = get_test_line_args_parser()
for line in fileinput.input(testfiles):
line = line.strip()
if not line or line[0] == '#':
if not line_buffer:
continue
else:
raise SyntaxError("Empty continuation at line %d!" % fileinput.filelineno())
elif '#' in line:
line = line.split('#', 1)[0].rstrip()
# Process line continuations
if line[-1] == '\\':
line_buffer.append(line[:-1].strip())
continue
elif line_buffer:
line_buffer.append(line)
line = ' '.join(line_buffer)
del line_buffer[:]
test_args = test_line_parser.parse_args(get_test_args(line))
if test_args.locations is not None:
test_args.locations = {k.strip('\'"'): v for k, v in test_args.locations}
if codegen:
test_args.codegen = True
test_file = os.path.join(os.path.dirname(fileinput.filename()), test_args.filename)
if os.path.isdir(test_file):
logger.debug("Skip %s: is a directory.", test_file)
continue
elif os.path.splitext(test_file)[1].lower() != '.%s' % suffix:
logger.debug("Skip %s: wrong suffix.", test_file)
continue
elif not os.path.isfile(test_file):
logger.error("Skip %s: is not a file.", test_file)
continue
test_num += 1
# Debug mode activation
if debug_mode:
if not test_args.debug:
continue
elif test_args.debug:
debug_mode = True
msg = "Debug mode activated: discard previous %r test classes."
logger.debug(msg, len(test_classes))
test_classes.clear()
if test_args.version == '1.0':
schema_class = ObservedXMLSchema10 if test_args.inspect else XMLSchema10
test_class = test_class_builder(
test_file, test_args, test_num, schema_class, check_with_lxml
)
else:
schema_class = ObservedXMLSchema11 if test_args.inspect else XMLSchema11
test_class = test_class_builder(
test_file, test_args, test_num, schema_class, check_with_lxml=False
)
test_classes[test_class.__name__] = test_class
if verbosity == 2:
print(f"Create case {test_class.__name__} for file {os.path.relpath(test_file)}")
logger.debug("Add XSD %s test class %r.", test_args.version, test_class.__name__)
if line_buffer:
raise ValueError("Not completed line continuation at the end!")
return test_classes
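# Example (illustrative; the path is hypothetical) of a "testfiles" entry with
# a backslash continuation, which the loop above joins into one logical line:
#
#   cases/features/decoder/data.xml \
#       --version 1.1 --errors 2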
|
|
"""
This is a collection of classes for a general optimization problem.
The Scorer function should be minimized by changing the input parameters
that are inside the trial point class instance. The user can define
different search algorithms that are suitable for the particular problem.
Also there are set of solver stoppers that will stop optimization process
immediately (but stopper cannot interrupt processes inside the Scorer).
"""
import os
import math
import sys
import time
from orbit.utils import NamedObject, ParamsDictObject
# import the finalization function
from orbit.utils import orbitFinalize
#====================================================================
# class Solver
#====================================================================
class Solver:
"""
The class is a main class of the general fitting package.
It keeps references to all other components
scoreboard
search algorithm
solve stopper
scorer
TrialPoint
"""
def __init__(self):
"""
Constructor of the main class of the general fitting package.
"""
self.scoreboard = Scoreboard(self)
self.search_algorithm = None
self.stopper = SolveStopperFactory.runForeverStopper()
self.scorer = None
self.is_running = False
def getScoreboard(self):
"""
This method returns the Scoreboard.
"""
return self.scoreboard
def _setScorer(self, scorer):
"""
This method sets the scorer.
"""
self.scorer = scorer
def getScorer(self):
"""
This method returns the scorer.
"""
return self.scorer
def setAlgorithm(self, search_algorithm):
"""
This method sets the search algorithm.
"""
self.search_algorithm = search_algorithm
def getAlgorithm(self):
"""
This method returns the search algorithm.
"""
return self.search_algorithm
def setStopper(self, stopper):
"""
This method sets the stopper.
"""
self.stopper = stopper
def getStopper(self):
"""
This method returns the solver stopper class instance.
"""
return self.stopper
def isRunning(self):
"""
This method returns true or false.
"""
return self.is_running
def solve(self,scorer,initTrialPoint):
"""
This method applies the fitting algorithm to the problem.
"""
self.is_running = True
self.scoreboard.init()
self._setScorer(scorer)
self.search_algorithm.setSolver(self)
res = self.search_algorithm.setTrialPoint(initTrialPoint)
if(not res):
msg = "============ Solver class: method solve(...)=============="
msg += os.linesep
msg += "Cannot initialize the search algorithm"
msg += os.linesep
msg += "==== Trial Point ====="
msg += os.linesep
msg += initTrialPoint.textDesciption()
msg += os.linesep
msg += "Stop."
msg += os.linesep
orbitFinalize(msg)
while(not self.stopper.getShouldStop()):
self.search_algorithm.makeStep()
self.is_running = False
self._setScorer(None)
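# Illustrative wiring (a sketch, not part of this module): my_algorithm,
# my_scorer and trial_point stand for user-supplied objects.
#
#   solver = Solver()
#   solver.setAlgorithm(my_algorithm)   # a search-algorithm instance
#   solver.setStopper(SolveStopperFactory.runForeverStopper())
#   solver.solve(my_scorer, trial_point)   # runs until the stopper fires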
#-----------------------------------------------------------
# Class TrialPoint
#-----------------------------------------------------------
class TrialPoint:
"""
This is a container class for VariableProxy instances. It keeps the information about
the values of the parameters inside VariableProxy instances. It keeps the references
in two forms - dictionary and array to facilitate usage in different types of
score functions.
"""
def __init__(self):
"""
The constructor of empty TrialPoint container.
"""
self._varProxy_arr = []
self._varProxy_dict= {}
def clean(self):
"""
"""
self._varProxy_arr = []
self._varProxy_dict= {}
def addVariableProxy(self,variableProxy):
		if(variableProxy.getName() in self._varProxy_dict):
msg = "============ TrialPoint class. Method addVariableProxy(...)=============="
msg += os.linesep
msg += "============ WARNING START=============="
msg += os.linesep
msg += "You added two VariableProxy instances with the same name to TrialPoint."
msg += os.linesep
msg += "That is dangerous!"
msg += os.linesep
msg += self.textDesciption()
msg += "============ WARNING STOP=============="
msg += os.linesep
			print(msg)
self._varProxy_arr.append(variableProxy)
self._varProxy_dict[variableProxy.getName()] = variableProxy
def addVariableProxyArr(self,variableProxy_arr):
"""
		Adds all VariableProxy instances from the array to the container.
"""
for variableProxy in variableProxy_arr:
self.addVariableProxy(variableProxy)
def getCopy(self):
"""
		Returns a copy of this instance of TrialPoint.
"""
trialPoint_new = TrialPoint()
for variableProxy in self._varProxy_arr:
variableProxy_new = VariableProxy(variableProxy)
trialPoint_new.addVariableProxy(variableProxy_new)
return trialPoint_new
def getVariableProxyArr(self):
"""
Returns the reference to the inner array of VariableProxy instances.
"""
return self._varProxy_arr
def getVariableProxyValuesArr(self):
"""
		Returns a new (unbound) array of values from the VariableProxy variables.
		This is a convenience method.
"""
values_arr = []
for variableProxy in self._varProxy_arr:
values_arr.append(variableProxy.getValue())
return values_arr
def getVariableProxyDict(self):
"""
Returns the reference to the inner dictionary of VariableProxy instances.
"""
return self._varProxy_dict
def getVariablesUsedInOptArr(self):
"""
		Returns a new array with the values of the VariableProxy instances that
		should be used in the optimization process (those for which
		variableProxy.getUseInSolver() is True). This is a convenience method.
"""
val_arr = []
for variableProxy in self._varProxy_arr:
if(variableProxy.getUseInSolver()):
val_arr.append(variableProxy.getValue())
return val_arr
def getStepsUsedInOptArr(self):
"""
		Returns a new array of steps for the variables that should be used in
		the optimization process (those for which variableProxy.getUseInSolver() is True).
"""
step_arr = []
for variableProxy in self._varProxy_arr:
if(variableProxy.getUseInSolver()):
step_arr.append(variableProxy.getStep())
return step_arr
def setVariablesUsedInOptArr(self,val_arr):
"""
Sets the values to the VariableProxy instances that are used
in the optimization process (using variableProxy.getUseInSolver()).
"""
nUsedInOptVars = len(self.getVariablesUsedInOptArr())
if(len(val_arr) != nUsedInOptVars):
msg = "============ TrialPoint class. Method setVariablesUsedInOptArr(...)=============="
msg += os.linesep
msg += "============ WARNING START=============="
msg += os.linesep
msg += "The number of variables is different from the number of VariableProxies."
msg += os.linesep
msg += "That is wrong! Stop"
msg += os.linesep
msg += str(val_arr)
msg += os.linesep
msg += "n used in optimization variables ="+str(nUsedInOptVars)
msg += os.linesep
st = self.textDesciption()
msg += st
msg += "============ WARNING STOP=============="
msg += os.linesep
			print(msg)
return False
#------------------------------------------------
count = 0
for variableProxy in self._varProxy_arr:
if(variableProxy.getUseInSolver()):
variableProxy.setValue(val_arr[count])
count += 1
		return True
def setStepsUsedInOptArr(self,step_arr):
"""
		Sets the array of steps for the variables that should be used in
the optimization process (using variableProxy.getUseInSolver()).
"""
nUsedInOptVars = len(self.getVariablesUsedInOptArr())
if(len(step_arr) != nUsedInOptVars):
msg = "============ TrialPoint class. Method setStepsUsedInOptArr(...)=============="
msg += os.linesep
msg += "============ WARNING START=============="
msg += os.linesep
msg += "The number of steps variables is different from the number of VariableProxies."
msg += os.linesep
msg += "That is wrong! Stop"
msg += os.linesep
msg += str(step_arr)
msg += os.linesep
msg += "n used in optimization variables ="+str(nUsedInOptVars)
msg += os.linesep
st = self.textDesciption()
msg += st
msg += "============ WARNING STOP=============="
msg += os.linesep
			print(msg)
return False
#------------------------------------------------
count = 0
for variableProxy in self._varProxy_arr:
if(variableProxy.getUseInSolver()):
variableProxy.setStep(step_arr[count])
count += 1
		return True
def isAcceptable(self):
"""
		Returns True if all VariableProxy values are inside their limits, False otherwise.
"""
for variableProxy in self._varProxy_arr:
if(not variableProxy.isAcceptable()):
return False
return True
def textDesciption(self):
"""
		Returns the text description of the VariableProxy instances inside this TrialPoint.
"""
st = "======== TrialPoint ==========="
st = st + os.linesep
st = st + " Name Value Step Use Limit_Min Limit_Max "
for variableProxy in self._varProxy_arr:
st += os.linesep
st += "%25s "%variableProxy.getName()
st += " %14.7g %14.7g "%(variableProxy.getValue(),variableProxy.getStep())
st += " %1d "%variableProxy.getUseInSolver()
st += " %14.7g %14.7g "%(variableProxy.getLowerLimit(),variableProxy.getUpperLimit())
return st
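#-----------------------------------------------------------
# Usage sketch (illustrative only, not part of the package):
# build a TrialPoint from VariableProxy instances and print its
# text description. The variable names and values are assumptions
# introduced for this example.
#-----------------------------------------------------------
def _trial_point_example():
	trialPoint = TrialPoint()
	trialPoint.addVariableProxy(VariableProxy(name = "quad_grad", value = 0.5, step = 0.05))
	trialPoint.addVariableProxy(VariableProxy(name = "quad_field", value = 1.2, step = 0.1))
	print(trialPoint.textDesciption())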
#-----------------------------------------------------------
# Class SolveStopper abstract class
#-----------------------------------------------------------
class SolveStopper:
"""
	The SolveStopper defines whether we should stop the solver's
	optimization process because of some condition.
"""
def __init__(self):
"""
		Constructor of a stopper that always returns False
as the answer to "Should I Stop?" question.
"""
self._shouldStop = False
def getShouldStop(self):
"""
		Returns True or False as the answer to the "Should I Stop?" question.
"""
return self._shouldStop
def setShouldStop(self,shouldStop):
"""
		Sets True or False as the answer to the "Should I Stop?" question.
"""
self._shouldStop = shouldStop
def checkStopConditions(self,solver):
"""
		This is an abstract method. The derived classes should implement it.
		Here you can get the Scoreboard from the solver to examine the stack
		of best scores and TrialPoints. Inside this method you should set
		the answer to the "Should I Stop?" question via setShouldStop(...).
"""
scoreBoard = solver.getScoreboard()
(score,trialPoint) = scoreBoard.getCurrentScoreAndTrialPoint()
scoresHistoryStack = scoreBoard.getHistoryStack()
#---- do something in your stopper
#---- if you want to stop optimization use setShouldStop(True)
#---- method.
#-----------------------------------------------------------
# Class ForeverStopper
#-----------------------------------------------------------
class RunForeverStopper(SolveStopper):
"""
	The stopper implementation that always answers False
	to the "Should I Stop?" question.
"""
def __init__(self):
SolveStopper.__init__(self)
self.setShouldStop(False)
#-----------------------------------------------------------
# Class Max Iterations Stopper
#-----------------------------------------------------------
class MaxIterationStopper(SolveStopper):
"""
	The stopper implementation that answers True
	to the "Should I Stop?" question once the number of Scorer
	evaluations exceeds the maximum allowed number of iterations.
"""
def __init__(self,max_iteration):
SolveStopper.__init__(self)
self.max_iteration = max_iteration
self.setShouldStop(False)
def checkStopConditions(self,solver):
"""
Implementation of the abstract method of the parent class.
"""
iteration = solver.getScoreboard().getIteration()
if(iteration >= self.max_iteration):
self.setShouldStop(True)
#-----------------------------------------------------------
# Class Max Time Stopper
#-----------------------------------------------------------
class MaxTimeStopper(SolveStopper):
"""
	The stopper implementation that answers True
	to the "Should I Stop?" question when the maximum run time is exceeded.
"""
def __init__(self,max_time):
SolveStopper.__init__(self)
self.max_time = max_time
self.setShouldStop(False)
def checkStopConditions(self,solver):
"""
Implementation of the abstract method of the parent class.
"""
tm = solver.getScoreboard().getRunTime()
if(tm >= self.max_time):
self.setShouldStop(True)
#-----------------------------------------------------------
# Class Max Accuracy Stopper
#-----------------------------------------------------------
class MaxAccuracyStopper(SolveStopper):
"""
	The stopper implementation that answers True
	to the "Should I Stop?" question when the difference between
	the two last best scores is less than the maximum accuracy.
"""
def __init__(self,max_accuracy):
SolveStopper.__init__(self)
self.max_accuracy = max_accuracy
self.setShouldStop(False)
def checkStopConditions(self,solver):
"""
Implementation of the abstract method of the parent class.
"""
scoresHistoryStack = solver.getScoreboard().getHistoryStack()
n_scores = len(scoresHistoryStack)
if(n_scores < 2): return
score1 = scoresHistoryStack[n_scores-2][0]
score2 = scoresHistoryStack[n_scores-1][0]
if(abs(score1 - score2) < self.max_accuracy):
self.setShouldStop(True)
#-----------------------------------------------------------
# Class Combo Stopper
#-----------------------------------------------------------
class ComboStopper(SolveStopper):
"""
	The stopper implementation that answers True to the "Should I Stop?"
	question if at least one of the stoppers registered in it answers True.
"""
def __init__(self):
SolveStopper.__init__(self)
self.stopper_arr = []
	def addStopper(self,stopper):
		"""
		Adds a stopper to this combo stopper. Returns False if the argument
		is not a SolveStopper instance, and True otherwise.
		"""
		if(not isinstance(stopper,SolveStopper)):
			return False
		self.stopper_arr.append(stopper)
		return True
def getShouldStop(self):
"""
		Returns True or False as the answer to the "Should I Stop?" question.
"""
for stopper in self.stopper_arr:
if(stopper.getShouldStop()):
return True
return False
def setShouldStop(self,shouldStop):
"""
		Sets True or False as the answer to the "Should I Stop?" question.
"""
for stopper in self.stopper_arr:
stopper.setShouldStop(shouldStop)
def checkStopConditions(self,solver):
"""
Implementation of the abstract method of the parent class.
"""
for stopper in self.stopper_arr:
stopper.checkStopConditions(solver)
#-----------------------------------------------------------
# Class SolveStopperFactory
#-----------------------------------------------------------
class SolveStopperFactory:
"""
The Factory for stoppers.
"""
@classmethod
	def runForeverStopper(cls):
"""
Returns the Run Forever stopper.
"""
return RunForeverStopper()
@classmethod
	def maxIterationStopper(cls,max_iteration):
"""
Returns the Max Iterations stopper.
"""
return MaxIterationStopper(max_iteration)
@classmethod
	def maxTimeStopper(cls,max_time):
"""
Returns the Max Time stopper.
"""
return MaxTimeStopper(max_time)
@classmethod
	def maxAccuracyStopper(cls,max_accuracy):
"""
Returns the Max Accuracy stopper.
"""
return MaxAccuracyStopper(max_accuracy)
@classmethod
	def comboStopper(cls):
"""
Returns the Combo stopper.
By itself this stopper is not operational.
You have to add stoppers into this combo
stopper.
"""
return ComboStopper()
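#-----------------------------------------------------------
# Usage sketch (illustrative only, not part of the package):
# a combo stopper that stops after 500 iterations or 60 seconds,
# whichever comes first. The limits are assumptions for this example.
#-----------------------------------------------------------
def _combo_stopper_example():
	stopper = SolveStopperFactory.comboStopper()
	stopper.addStopper(SolveStopperFactory.maxIterationStopper(500))
	stopper.addStopper(SolveStopperFactory.maxTimeStopper(60.))
	return stopper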
#-----------------------------------------------------------
# ScoreBoard Action listeners
#-----------------------------------------------------------
class ScoreboardActionListener:
"""
This is an abstract class for actions inside Scoreboard.
"""
def __init__(self):
pass
def performAction(self,solver):
"""
Perform necessary action. This method should be implemented
in the children classes.
"""
pass
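#-----------------------------------------------------------
# Illustrative sketch only (not part of the package): a concrete
# listener that prints the iteration and the best score. Register it
# with scoreboard.addBestScoreListener(ProgressPrintListener()).
#-----------------------------------------------------------
class ProgressPrintListener(ScoreboardActionListener):
	"""
	Example listener: prints the iteration and the best score.
	"""
	def performAction(self,solver):
		scoreboard = solver.getScoreboard()
		print("iteration %5d best score = %12.5g"%(scoreboard.getIteration(),scoreboard.getBestScore()))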
#-----------------------------------------------------------
# Class Scoreboard
#----------------------------------------------------------
class Scoreboard:
"""
	The Scoreboard class keeps a trace of all best scores
	(as tuples (score, iteration, TrialPoint)) in the Scores History Stack.
	The maximal size of the stack is 100 by default.
	The user can define his/her own stack size.
	The best score and TrialPoint are returned by the getBestScore() and
	getBestTrialPoint() methods.
"""
def __init__(self,solver):
self.solver = solver
self.init()
#---- listeners
self.newTrialPointListener_arr = []
self.bestScoreListener_arr = []
def init(self):
"""
Sets the clean state of the Scoreboard.
"""
self.start_time = time.time()
self.run_time = 0.
self.iterations = 0
self.bestScore = 0.00000001*sys.float_info.max
self.currentScore = self.bestScore
self.currentTrialPoint = None
self.bestTrialPoint = None
#------------------------------
self.historyStackSize = 100
		#---- self.scoresHistoryStack = [(score,iteration,TrialPoint),...]
self.scoresHistoryStack = []
def addScoreTrialPoint(self,score,trialPoint):
"""
Adds the new score and Trial Point combination. If this score is the best it
will add them to the best scores & trial points history stack.
"""
self.currentScore = score
self.currentTrialPoint = trialPoint
self.run_time = time.time() - self.start_time
self.iterations += 1
#---- call performAction() method of a new trial point listeners
for listener in self.newTrialPointListener_arr:
listener.performAction(self.solver)
#---------------------------------------------------------------
self.solver.getStopper().checkStopConditions(self.solver)
if(score <= self.bestScore):
self.bestTrialPoint = trialPoint.getCopy()
self.bestScore = score
if(len(self.scoresHistoryStack) >= self.historyStackSize):
self.scoresHistoryStack.pop()
self.scoresHistoryStack.append((score,self.iterations,self.bestTrialPoint.getCopy()))
#---- call performAction() method of a new best score listeners
for listener in self.bestScoreListener_arr:
listener.performAction(self.solver)
#---------------------------------------------------------------
def getIteration(self):
"""
		Returns the number of iterations so far.
"""
return self.iterations
def setHistoryStackSize(self,historyStackSize):
"""
		Changes the size of the history stack.
"""
self.historyStackSize = historyStackSize
self.scoresHistoryStack = self.scoresHistoryStack[:self.historyStackSize]
def getHistoryStackSize(self):
"""
		Returns the size of the history stack.
"""
return self.historyStackSize
def getHistoryStack(self):
"""
		Returns the history stack with (score, iteration, trial_point) data.
"""
return self.scoresHistoryStack
def setRunTime(self):
"""
Method sets the run-time.
"""
self.run_time = time.time() - self.start_time
def getRunTime(self):
"""
Returns the run-time.
"""
return self.run_time
def getBestTrialPoint(self):
"""
		Returns a copy (unbound) of the best trial point.
"""
return self.bestTrialPoint.getCopy()
def getBestTrialPointReference(self):
"""
Returns the reference (not copy) to the best trial point.
"""
return self.bestTrialPoint
def getBestScore(self):
"""
Returns the best score.
"""
return self.bestScore
def getCurrentScoreAndTrialPoint(self):
"""
Method is used by stopper to check stop conditions.
"""
return (self.currentScore,self.currentTrialPoint)
def printScoreBoard(self):
"""
Prints the scoreboard state.
"""
print "==================== Score Board Stack ======START==========="
for (score,iteration,trialPoint) in self.scoresHistoryStack:
st = "===== score = %15.8g "%score+ " iter.="+str(iteration)+" "
st += trialPoint.textDesciption()
print st
print "==================== Score Board Stack =======STOP==========="
def addNewTrialPointListener(self,newTrialPointListener):
"""
Adds a new trial point listener.
"""
self.newTrialPointListener_arr.append(newTrialPointListener)
def addBestScoreListener(self,bestScoreListener):
"""
Adds a new best score listener.
"""
self.bestScoreListener_arr.append(bestScoreListener)
#====================================================================
# class VariableProxy
#====================================================================
class VariableProxy(NamedObject,ParamsDictObject):
"""
This class represents the parameter for the score function in the fitting process.
"""
def __init__(self, *arg, **kwargs):
"""
		The constructor of the VariableProxy class has the following signatures:
		VariableProxy(parameterProxy_in)
		VariableProxy(name, value)
		VariableProxy(name, value, step)
		VariableProxy(name = "???", value = ???, step = ???)
"""
ParamsDictObject.__init__(self)
self.setName("unknown")
self.value = 0.
self.step = 0.
self.useInSolver = True
self.lowerLimit = - 0.00000001*sys.float_info.max
self.upperLimit = + 0.00000001*sys.float_info.max
#print "debug len(arg)=",len(arg)," arg=",arg
#print "debug len(kwargs)=",len(kwargs)," kwargs=",kwargs
		if(len(arg) in (1, 2, 3)):
if(len(arg) == 1):
varProxy = arg[0]
if(isinstance(varProxy,VariableProxy)):
self.setName(varProxy.getName())
self.value = varProxy.getValue()
self.step = varProxy.getStep()
self.useInSolver = varProxy.getUseInSolver()
self.lowerLimit = varProxy.getLowerLimit()
self.upperLimit = varProxy.getUpperLimit()
self.updateParamsDict(varProxy.getParamsDict())
else:
msg = "VariableProxy constructor. If there is only one argument it should be only VariableProxy."
msg = msg + os.linesep
msg = "This argument is not!"
msg = msg + os.linesep
msg = msg + "Stop."
msg = msg + os.linesep
orbitFinalize(msg)
elif(len(arg) == 2):
self.setName(arg[0])
self.value = arg[1]
self.step = 0.
else:
self.setName(arg[0])
self.value = arg[1]
self.step = arg[2]
		elif(len(arg) == 0 and len(kwargs) == 3 and "name" in kwargs and "value" in kwargs and "step" in kwargs):
self.setName(kwargs["name"])
self.value = kwargs["value"]
self.step = kwargs["step"]
else:
msg = "VariableProxy constructor. It should have one of the forms:"
msg += os.linesep
msg += "1. Copy constructor: VariableProxy(parameterProxy_in)"
msg += os.linesep
msg += "2. VariableProxy(name, value)"
msg += os.linesep
msg += "3. VariableProxy(name, value, step)"
msg += os.linesep
msg += "4. VariableProxy(name = ""???"", value = ???, step = ???)"
msg += os.linesep
msg += "Stop."
msg += os.linesep
orbitFinalize(msg)
def setValue(self, value):
"""
This method sets the value.
"""
self.value = value
def getValue(self):
"""
This method returns the value.
"""
return self.value
def isAcceptable(self):
"""
		Returns True if self.value is inside the value limits, False otherwise.
"""
if(self.value < self.lowerLimit): return False
if(self.value > self.upperLimit): return False
return True
def setStep(self, step):
"""
This method sets the step.
"""
self.step = step
def getStep(self):
"""
This method returns the step.
"""
return self.step
def setLowerLimit(self, lowerLimit):
"""
This method sets the lowerLimit.
"""
self.lowerLimit = lowerLimit
def getLowerLimit(self):
"""
This method returns the lowerLimit.
"""
return self.lowerLimit
def setUpperLimit(self, upperLimit):
"""
This method sets the upperLimit.
"""
self.upperLimit = upperLimit
def getUpperLimit(self):
"""
This method returns the upperLimit.
"""
return self.upperLimit
def setUseInSolver(self,useInSolver):
"""
Set the Boolean variable that defines if the VariableProxy
will be used in Solver for optimization.
"""
self.useInSolver = useInSolver
def getUseInSolver(self):
"""
Returns the Boolean variable that defines if the VariableProxy
will be used in Solver for optimization.
"""
return self.useInSolver
#====================================================================
# Class Scorer
#====================================================================
class Scorer:
"""
This class calculates the score for the trial point instance.
	The score should be minimized by the Solver with the help of the
	Search Algorithm. The time to end the minimization process is defined
	by the Solve Stopper.
"""
def __init__(self):
pass
def getScore(self,trialPoint):
"""
		This is an abstract method. The child classes should implement it.
"""
pass
#====================================================================
# Class SearchAgorithm
#====================================================================
class SearchAgorithm(NamedObject):
"""
The SearchAgorithm is an abstract class. A concrete implementation is
needed to provide functionality of the search algorithm.
"""
def __init__(self):
self.solver = None
self.initTrialPoint = None
self.setName("No Algorithm")
def setSolver(self,solver):
"""
Sets the solver instance for the search algorithm.
"""
self.solver = solver
def setTrialPoint(self,initTrialPoint):
"""
		Derived classes should add functionality to this method as needed.
"""
self.initTrialPoint = initTrialPoint.getCopy()
res = self.init()
return res
def init(self):
		if(self.initTrialPoint is None or self.solver is None): return False
return True
def makeStep(self):
"""
		This is an abstract method. The derived classes should implement it.
"""
pass
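#====================================================================
# End-to-end usage sketch (illustrative only, not part of the package).
# RandomSearchAlgorithm and SumSquaresScorer are hypothetical names
# introduced here to show how the components fit together.
#====================================================================
import random
class RandomSearchAlgorithm(SearchAgorithm):
	"""
	Toy algorithm: samples a random point around the initial point.
	"""
	def makeStep(self):
		trialPoint = self.initTrialPoint.getCopy()
		for variableProxy in trialPoint.getVariableProxyArr():
			if(variableProxy.getUseInSolver()):
				shift = (random.random() - 0.5)*variableProxy.getStep()
				variableProxy.setValue(variableProxy.getValue() + shift)
		score = self.solver.getScorer().getScore(trialPoint)
		self.solver.getScoreboard().addScoreTrialPoint(score,trialPoint)
class SumSquaresScorer(Scorer):
	"""
	Toy scorer: the sum of squares of all variable values.
	"""
	def getScore(self,trialPoint):
		return sum([v*v for v in trialPoint.getVariableProxyValuesArr()])
def _solver_example():
	solver = Solver()
	solver.setAlgorithm(RandomSearchAlgorithm())
	solver.setStopper(SolveStopperFactory.maxIterationStopper(200))
	trialPoint = TrialPoint()
	trialPoint.addVariableProxy(VariableProxy(name = "x", value = 1.0, step = 0.2))
	trialPoint.addVariableProxy(VariableProxy(name = "y", value = -2.0, step = 0.2))
	solver.solve(SumSquaresScorer(),trialPoint)
	solver.getScoreboard().printScoreBoard()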
|
|
"""
Automatically package and test a Python project against configurable
Python2 and Python3 based virtual environments. Environments are
set up by using virtualenv. Configuration is generally done through an
INI-style "tox.ini" file.
"""
from __future__ import print_function
import os
import re
import shutil
import subprocess
import sys
import time
import py
import tox
from tox._verlib import IrrationalVersionError
from tox._verlib import NormalizedVersion
from tox.config import parseconfig
from tox.result import ResultLog
from tox.venv import VirtualEnv
def prepare(args):
config = parseconfig(args)
if config.option.help:
show_help(config)
raise SystemExit(0)
elif config.option.helpini:
show_help_ini(config)
raise SystemExit(0)
return config
def main(args=None):
try:
config = prepare(args)
retcode = Session(config).runcommand()
raise SystemExit(retcode)
except KeyboardInterrupt:
raise SystemExit(2)
except tox.exception.MinVersionError as e:
r = Reporter(None)
r.error(str(e))
raise SystemExit(1)
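# Illustrative sketch only (not part of tox): invoking the command line
# entry point programmatically. The environment names are assumptions for
# the example; main() always terminates by raising SystemExit.
def _example_run_tox():
    try:
        main(["-e", "py27,py36"])
    except SystemExit as exc:
        print("tox finished with exit code %s" % exc.code)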
def show_help(config):
tw = py.io.TerminalWriter()
tw.write(config._parser._format_help())
tw.line()
tw.line("Environment variables", bold=True)
tw.line("TOXENV: comma separated list of environments "
"(overridable by '-e')")
tw.line("TOX_TESTENV_PASSENV: space-separated list of extra "
"environment variables to be passed into test command "
"environments")
def show_help_ini(config):
tw = py.io.TerminalWriter()
tw.sep("-", "per-testenv attributes")
for env_attr in config._testenv_attr:
tw.line("%-15s %-8s default: %s" %
(env_attr.name, "<" + env_attr.type + ">", env_attr.default), bold=True)
tw.line(env_attr.help)
tw.line()
class Action(object):
def __init__(self, session, venv, msg, args):
self.venv = venv
self.msg = msg
self.activity = msg.split(" ", 1)[0]
self.session = session
self.report = session.report
self.args = args
self.id = venv and venv.envconfig.envname or "tox"
self._popenlist = []
if self.venv:
self.venvname = self.venv.name
else:
self.venvname = "GLOB"
if msg == "runtests":
cat = "test"
else:
cat = "setup"
envlog = session.resultlog.get_envlog(self.venvname)
self.commandlog = envlog.get_commandlog(cat)
def __enter__(self):
self.report.logaction_start(self)
def __exit__(self, *args):
self.report.logaction_finish(self)
def setactivity(self, name, msg):
self.activity = name
self.report.verbosity0("%s %s: %s" % (self.venvname, name, msg), bold=True)
def info(self, name, msg):
self.report.verbosity1("%s %s: %s" % (self.venvname, name, msg), bold=True)
def _initlogpath(self, actionid):
if self.venv:
logdir = self.venv.envconfig.envlogdir
else:
logdir = self.session.config.logdir
try:
l = logdir.listdir("%s-*" % actionid)
except (py.error.ENOENT, py.error.ENOTDIR):
logdir.ensure(dir=1)
l = []
num = len(l)
path = logdir.join("%s-%s.log" % (actionid, num))
f = path.open('w')
f.flush()
return f
def popen(self, args, cwd=None, env=None, redirect=True, returnout=False, ignore_ret=False):
stdout = outpath = None
resultjson = self.session.config.option.resultjson
if resultjson or redirect:
fout = self._initlogpath(self.id)
fout.write("actionid: %s\nmsg: %s\ncmdargs: %r\n\n" % (self.id, self.msg, args))
fout.flush()
self.popen_outpath = outpath = py.path.local(fout.name)
fin = outpath.open()
fin.read() # read the header, so it won't be written to stdout
stdout = fout
elif returnout:
stdout = subprocess.PIPE
if cwd is None:
# XXX cwd = self.session.config.cwd
cwd = py.path.local()
try:
popen = self._popen(args, cwd, env=env,
stdout=stdout, stderr=subprocess.STDOUT)
except OSError as e:
self.report.error("invocation failed (errno %d), args: %s, cwd: %s" %
(e.errno, args, cwd))
raise
popen.outpath = outpath
popen.args = [str(x) for x in args]
popen.cwd = cwd
popen.action = self
self._popenlist.append(popen)
try:
self.report.logpopen(popen, env=env)
try:
if resultjson and not redirect:
assert popen.stderr is None # prevent deadlock
out = None
last_time = time.time()
while 1:
fin_pos = fin.tell()
# we have to read one byte at a time, otherwise there
# might be no output for a long time with slow tests
data = fin.read(1)
if data:
sys.stdout.write(data)
if '\n' in data or (time.time() - last_time) > 1:
# we flush on newlines or after 1 second to
# provide quick enough feedback to the user
# when printing a dot per test
sys.stdout.flush()
last_time = time.time()
elif popen.poll() is not None:
if popen.stdout is not None:
popen.stdout.close()
break
else:
time.sleep(0.1)
fin.seek(fin_pos)
fin.close()
else:
out, err = popen.communicate()
except KeyboardInterrupt:
self.report.keyboard_interrupt()
popen.wait()
raise KeyboardInterrupt()
ret = popen.wait()
finally:
self._popenlist.remove(popen)
if ret and not ignore_ret:
invoked = " ".join(map(str, popen.args))
if outpath:
self.report.error("invocation failed (exit code %d), logfile: %s" %
(ret, outpath))
out = outpath.read()
self.report.error(out)
if hasattr(self, "commandlog"):
self.commandlog.add_command(popen.args, out, ret)
raise tox.exception.InvocationError(
"%s (see %s)" % (invoked, outpath), ret)
else:
raise tox.exception.InvocationError("%r" % (invoked, ), ret)
if not out and outpath:
out = outpath.read()
if hasattr(self, "commandlog"):
self.commandlog.add_command(popen.args, out, ret)
return out
def _rewriteargs(self, cwd, args):
newargs = []
for arg in args:
if sys.platform != "win32" and isinstance(arg, py.path.local):
arg = cwd.bestrelpath(arg)
newargs.append(str(arg))
# subprocess does not always take kindly to .py scripts
# so adding the interpreter here.
if sys.platform == "win32":
ext = os.path.splitext(str(newargs[0]))[1].lower()
if ext == '.py' and self.venv:
newargs = [str(self.venv.envconfig.envpython)] + newargs
return newargs
def _popen(self, args, cwd, stdout, stderr, env=None):
args = self._rewriteargs(cwd, args)
if env is None:
env = os.environ.copy()
return self.session.popen(args, shell=False, cwd=str(cwd),
universal_newlines=True,
stdout=stdout, stderr=stderr, env=env)
class Reporter(object):
actionchar = "-"
def __init__(self, session):
self.tw = py.io.TerminalWriter()
self.session = session
self._reportedlines = []
# self.cumulated_time = 0.0
@property
def verbosity(self):
if self.session:
return self.session.config.option.verbosity
else:
return 2
def logpopen(self, popen, env):
""" log information about the action.popen() created process. """
cmd = " ".join(map(str, popen.args))
if popen.outpath:
self.verbosity1(" %s$ %s >%s" % (popen.cwd, cmd, popen.outpath,))
else:
self.verbosity1(" %s$ %s " % (popen.cwd, cmd))
def logaction_start(self, action):
msg = action.msg + " " + " ".join(map(str, action.args))
self.verbosity2("%s start: %s" % (action.venvname, msg), bold=True)
assert not hasattr(action, "_starttime")
action._starttime = time.time()
def logaction_finish(self, action):
duration = time.time() - action._starttime
# self.cumulated_time += duration
self.verbosity2("%s finish: %s after %.2f seconds" % (
action.venvname, action.msg, duration), bold=True)
delattr(action, '_starttime')
def startsummary(self):
self.tw.sep("_", "summary")
def info(self, msg):
if self.verbosity >= 2:
self.logline(msg)
def using(self, msg):
if self.verbosity >= 1:
self.logline("using %s" % (msg,), bold=True)
def keyboard_interrupt(self):
self.error("KEYBOARDINTERRUPT")
# def venv_installproject(self, venv, pkg):
# self.logline("installing to %s: %s" % (venv.envconfig.envname, pkg))
def keyvalue(self, name, value):
if name.endswith(":"):
name += " "
self.tw.write(name, bold=True)
self.tw.write(value)
self.tw.line()
def line(self, msg, **opts):
self.logline(msg, **opts)
def good(self, msg):
self.logline(msg, green=True)
def warning(self, msg):
self.logline("WARNING:" + msg, red=True)
def error(self, msg):
self.logline("ERROR: " + msg, red=True)
def skip(self, msg):
self.logline("SKIPPED:" + msg, yellow=True)
def logline(self, msg, **opts):
self._reportedlines.append(msg)
self.tw.line("%s" % msg, **opts)
def verbosity0(self, msg, **opts):
if self.verbosity >= 0:
self.logline("%s" % msg, **opts)
def verbosity1(self, msg, **opts):
if self.verbosity >= 1:
self.logline("%s" % msg, **opts)
def verbosity2(self, msg, **opts):
if self.verbosity >= 2:
self.logline("%s" % msg, **opts)
# def log(self, msg):
# print(msg, file=sys.stderr)
class Session:
""" (unstable API). the session object that ties
together configuration, reporting, venv creation, testing. """
def __init__(self, config, popen=subprocess.Popen, Report=Reporter):
self.config = config
self.popen = popen
self.resultlog = ResultLog()
self.report = Report(self)
self.make_emptydir(config.logdir)
config.logdir.ensure(dir=1)
# self.report.using("logdir %s" %(self.config.logdir,))
self.report.using("tox.ini: %s" % (self.config.toxinipath,))
self._spec2pkg = {}
self._name2venv = {}
try:
self.venvlist = [
self.getvenv(x)
for x in self.config.envlist
]
except LookupError:
raise SystemExit(1)
except tox.exception.ConfigError as e:
self.report.error(str(e))
raise SystemExit(1)
self._actions = []
@property
def hook(self):
return self.config.pluginmanager.hook
def _makevenv(self, name):
envconfig = self.config.envconfigs.get(name, None)
if envconfig is None:
self.report.error("unknown environment %r" % name)
raise LookupError(name)
elif envconfig.envdir == self.config.toxinidir:
self.report.error(
"venv %r in %s would delete project" % (name, envconfig.envdir))
raise tox.exception.ConfigError('envdir must not equal toxinidir')
venv = VirtualEnv(envconfig=envconfig, session=self)
self._name2venv[name] = venv
return venv
def getvenv(self, name):
""" return a VirtualEnv controler object for the 'name' env. """
try:
return self._name2venv[name]
except KeyError:
return self._makevenv(name)
def newaction(self, venv, msg, *args):
action = Action(self, venv, msg, args)
self._actions.append(action)
return action
def runcommand(self):
self.report.using("tox-%s from %s" % (tox.__version__, tox.__file__))
if self.config.option.showconfig:
self.showconfig()
elif self.config.option.listenvs:
self.showenvs(all_envs=False, description=self.config.option.verbosity > 0)
elif self.config.option.listenvs_all:
self.showenvs(all_envs=True, description=self.config.option.verbosity > 0)
else:
return self.subcommand_test()
def _copyfiles(self, srcdir, pathlist, destdir):
for relpath in pathlist:
src = srcdir.join(relpath)
if not src.check():
self.report.error("missing source file: %s" % (src,))
raise SystemExit(1)
target = destdir.join(relpath)
target.dirpath().ensure(dir=1)
src.copy(target)
def _makesdist(self):
setup = self.config.setupdir.join("setup.py")
if not setup.check():
raise tox.exception.MissingFile(setup)
action = self.newaction(None, "packaging")
with action:
action.setactivity("sdist-make", setup)
self.make_emptydir(self.config.distdir)
action.popen([sys.executable, setup, "sdist", "--formats=zip",
"--dist-dir", self.config.distdir, ],
cwd=self.config.setupdir)
try:
return self.config.distdir.listdir()[0]
except py.error.ENOENT:
# check if empty or comment only
data = []
with open(str(setup)) as fp:
for line in fp:
if line and line[0] == '#':
continue
data.append(line)
if not ''.join(data).strip():
self.report.error(
'setup.py is empty'
)
raise SystemExit(1)
self.report.error(
'No dist directory found. Please check setup.py, e.g with:\n'
' python setup.py sdist'
)
raise SystemExit(1)
def make_emptydir(self, path):
if path.check():
self.report.info(" removing %s" % path)
shutil.rmtree(str(path), ignore_errors=True)
path.ensure(dir=1)
def setupenv(self, venv):
if venv.envconfig.missing_subs:
venv.status = (
"unresolvable substitution(s): %s. "
"Environment variables are missing or defined recursively." %
(','.join(["'%s'" % m for m in venv.envconfig.missing_subs])))
return
if not venv.matching_platform():
venv.status = "platform mismatch"
return # we simply omit non-matching platforms
action = self.newaction(venv, "getenv", venv.envconfig.envdir)
with action:
venv.status = 0
envlog = self.resultlog.get_envlog(venv.name)
try:
status = venv.update(action=action)
except IOError as e:
if e.args[0] != 2:
raise
status = (
"Error creating virtualenv. Note that spaces in paths are "
"not supported by virtualenv. Error details: %r" % e)
except tox.exception.InvocationError as e:
status = (
"Error creating virtualenv. Note that some special "
"characters (e.g. ':' and unicode symbols) in paths are "
"not supported by virtualenv. Error details: %r" % e)
if status:
commandlog = envlog.get_commandlog("setup")
commandlog.add_command(["setup virtualenv"], str(status), 1)
venv.status = status
self.report.error(str(status))
return False
commandpath = venv.getcommandpath("python")
envlog.set_python_info(commandpath)
return True
def finishvenv(self, venv):
action = self.newaction(venv, "finishvenv")
with action:
venv.finish()
return True
def developpkg(self, venv, setupdir):
action = self.newaction(venv, "developpkg", setupdir)
with action:
try:
venv.developpkg(setupdir, action)
return True
except tox.exception.InvocationError:
venv.status = sys.exc_info()[1]
return False
def installpkg(self, venv, path):
"""Install package in the specified virtual environment.
:param VenvConfig venv: Destination environment
:param str path: Path to the distribution package.
:return: True if package installed otherwise False.
:rtype: bool
"""
self.resultlog.set_header(installpkg=py.path.local(path))
action = self.newaction(venv, "installpkg", path)
with action:
try:
venv.installpkg(path, action)
return True
except tox.exception.InvocationError:
venv.status = sys.exc_info()[1]
return False
def get_installpkg_path(self):
"""
:return: Path to the distribution
:rtype: py.path.local
"""
if not self.config.option.sdistonly and (self.config.sdistsrc or
self.config.option.installpkg):
path = self.config.option.installpkg
if not path:
path = self.config.sdistsrc
path = self._resolve_pkg(path)
self.report.info("using package %r, skipping 'sdist' activity " %
str(path))
else:
try:
path = self._makesdist()
except tox.exception.InvocationError:
v = sys.exc_info()[1]
self.report.error("FAIL could not package project - v = %r" %
v)
return
sdistfile = self.config.distshare.join(path.basename)
if sdistfile != path:
self.report.info("copying new sdistfile to %r" %
str(sdistfile))
try:
sdistfile.dirpath().ensure(dir=1)
except py.error.Error:
self.report.warning("could not copy distfile to %s" %
sdistfile.dirpath())
else:
path.copy(sdistfile)
return path
def subcommand_test(self):
if self.config.skipsdist:
self.report.info("skipping sdist step")
path = None
else:
path = self.get_installpkg_path()
if not path:
return 2
if self.config.option.sdistonly:
return
for venv in self.venvlist:
if self.setupenv(venv):
if venv.envconfig.skip_install:
self.finishvenv(venv)
else:
if venv.envconfig.usedevelop:
self.developpkg(venv, self.config.setupdir)
elif self.config.skipsdist:
self.finishvenv(venv)
else:
self.installpkg(venv, path)
# write out version dependency information
action = self.newaction(venv, "envreport")
with action:
args = venv.envconfig.list_dependencies_command
output = venv._pcall(args,
cwd=self.config.toxinidir,
action=action)
# the output contains a mime-header, skip it
output = output.split("\n\n")[-1]
packages = output.strip().split("\n")
action.setactivity("installed", ",".join(packages))
envlog = self.resultlog.get_envlog(venv.name)
envlog.set_installed(packages)
self.runtestenv(venv)
retcode = self._summary()
return retcode
def runtestenv(self, venv, redirect=False):
if not self.config.option.notest:
if venv.status:
return
self.hook.tox_runtest_pre(venv=venv)
self.hook.tox_runtest(venv=venv, redirect=redirect)
self.hook.tox_runtest_post(venv=venv)
else:
venv.status = "skipped tests"
def _summary(self):
self.report.startsummary()
retcode = 0
for venv in self.venvlist:
status = venv.status
if isinstance(status, tox.exception.InterpreterNotFound):
msg = " %s: %s" % (venv.envconfig.envname, str(status))
if self.config.option.skip_missing_interpreters:
self.report.skip(msg)
else:
retcode = 1
self.report.error(msg)
elif status == "platform mismatch":
msg = " %s: %s" % (venv.envconfig.envname, str(status))
self.report.skip(msg)
elif status and status == "ignored failed command":
msg = " %s: %s" % (venv.envconfig.envname, str(status))
self.report.good(msg)
elif status and status != "skipped tests":
msg = " %s: %s" % (venv.envconfig.envname, str(status))
self.report.error(msg)
retcode = 1
else:
if not status:
status = "commands succeeded"
self.report.good(" %s: %s" % (venv.envconfig.envname, status))
if not retcode:
self.report.good(" congratulations :)")
path = self.config.option.resultjson
if path:
path = py.path.local(path)
path.write(self.resultlog.dumps_json())
self.report.line("wrote json report at: %s" % path)
return retcode
def showconfig(self):
self.info_versions()
self.report.keyvalue("config-file:", self.config.option.configfile)
self.report.keyvalue("toxinipath: ", self.config.toxinipath)
self.report.keyvalue("toxinidir: ", self.config.toxinidir)
self.report.keyvalue("toxworkdir: ", self.config.toxworkdir)
self.report.keyvalue("setupdir: ", self.config.setupdir)
self.report.keyvalue("distshare: ", self.config.distshare)
self.report.keyvalue("skipsdist: ", self.config.skipsdist)
self.report.tw.line()
for envconfig in self.config.envconfigs.values():
self.report.line("[testenv:%s]" % envconfig.envname, bold=True)
for attr in self.config._parser._testenv_attr:
self.report.line(" %-15s = %s"
% (attr.name, getattr(envconfig, attr.name)))
def showenvs(self, all_envs=False, description=False):
env_conf = self.config.envconfigs # this contains all environments
        default = self.config.envlist  # this contains only the defaults
extra = sorted([e for e in env_conf if e not in default]) if all_envs else []
if description:
self.report.line('default environments:')
max_length = max(len(env) for env in (default + extra))
def report_env(e):
if description:
text = env_conf[e].description or '[no description]'
msg = '{0} -> {1}'.format(e.ljust(max_length), text).strip()
else:
msg = e
self.report.line(msg)
for e in default:
report_env(e)
if all_envs and extra:
if description:
self.report.line('')
self.report.line('additional environments:')
for e in extra:
report_env(e)
def info_versions(self):
versions = ['tox-%s' % tox.__version__]
proc = subprocess.Popen(
(sys.executable, '-m', 'virtualenv', '--version'),
stdout=subprocess.PIPE,
)
out, _ = proc.communicate()
versions.append('virtualenv-{0}'.format(out.decode('UTF-8').strip()))
self.report.keyvalue("tool-versions:", " ".join(versions))
def _resolve_pkg(self, pkgspec):
try:
return self._spec2pkg[pkgspec]
except KeyError:
self._spec2pkg[pkgspec] = x = self._resolvepkg(pkgspec)
return x
def _resolvepkg(self, pkgspec):
if not os.path.isabs(str(pkgspec)):
return pkgspec
p = py.path.local(pkgspec)
if p.check():
return p
if not p.dirpath().check(dir=1):
raise tox.exception.MissingDirectory(p.dirpath())
self.report.info("determining %s" % p)
candidates = p.dirpath().listdir(p.basename)
if len(candidates) == 0:
raise tox.exception.MissingDependency(pkgspec)
if len(candidates) > 1:
items = []
for x in candidates:
ver = getversion(x.basename)
if ver is not None:
items.append((ver, x))
else:
self.report.warning("could not determine version of: %s" %
str(x))
items.sort()
if not items:
raise tox.exception.MissingDependency(pkgspec)
return items[-1][1]
else:
return candidates[0]
_rex_getversion = re.compile(r"[\w_\-\+\.]+-(.*)\.(zip|tar\.gz)")
def getversion(basename):
m = _rex_getversion.match(basename)
if m is None:
return None
version = m.group(1)
try:
return NormalizedVersion(version)
except IrrationalVersionError:
return None
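# Example (illustrative): getversion("mypkg-1.2.3.zip") yields
# NormalizedVersion("1.2.3"); basenames that do not match yield None.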
|
|
'''
synbiochem (c) University of Manchester 2015
synbiochem is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
# pylint: disable=broad-except
# pylint: disable=invalid-name
# pylint: disable=no-member
# pylint: disable=protected-access
# pylint: disable=redefined-builtin
# pylint: disable=superfluous-parens
# pylint: disable=too-many-arguments
# pylint: disable=too-many-locals
# pylint: disable=ungrouped-imports
# pylint: disable=wrong-import-order
from collections import defaultdict
import itertools
import operator
import os
import random
import re
import ssl
from subprocess import call
import tempfile
from urllib import parse, request
from Bio import Seq, SeqIO, SeqRecord
from Bio.Blast import NCBIXML
from Bio.Data import CodonTable
from Bio.Restriction import Restriction, Restriction_Dictionary
from Bio.SeqUtils.MeltingTemp import Tm_NN
import requests
from synbiochem.biochem4j import taxonomy
from synbiochem.utils import thread_utils
import numpy as np
NUCLEOTIDES = ['A', 'C', 'G', 'T']
AA_CODES = {'Ala': 'A',
'Cys': 'C',
'Asp': 'D',
'Glu': 'E',
'Phe': 'F',
'Gly': 'G',
'His': 'H',
'Ile': 'I',
'Lys': 'K',
'Leu': 'L',
'Met': 'M',
'Asn': 'N',
'Pro': 'P',
'Gln': 'Q',
'Arg': 'R',
'Ser': 'S',
'Thr': 'T',
'Val': 'V',
'Trp': 'W',
'Tyr': 'Y',
'Stop': '*'}
NA = 'NA'
K = 'K'
TRIS = 'TRIS'
MG = 'MG'
DNTP = 'DNTP'
__DEFAULT_REAG_CONC = {NA: 0.05, K: 0, TRIS: 0, MG: 0.01, DNTP: 0}
AA_COD = defaultdict(list)
for cod, am_ac in \
CodonTable.unambiguous_dna_by_name['Standard'].forward_table.items():
AA_COD[am_ac].append(cod)
ssl._create_default_https_context = ssl._create_unverified_context
def get_codon_usage_organisms(expand=False, verbose=False):
'''Gets name to taxonomy id dictionary of available codon usage tables.'''
destination = os.path.dirname(os.path.realpath(__file__))
filename = 'expand.txt' if expand else 'normal.txt'
filepath = os.path.join(destination, filename)
if not os.path.exists(filepath):
# Download:
if not os.path.exists(destination):
os.makedirs(destination)
url = 'ftp://ftp.kazusa.or.jp/pub/codon/current/species.table'
tmp = tempfile.NamedTemporaryFile(delete=False)
        # requests does not support ftp:// URLs, and file objects have no
        # write_bytes() method, so fetch the table with urlretrieve instead:
        request.urlretrieve(url, tmp.name)
# Read:
codon_orgs = _read_codon_usage_orgs_file(tmp.name)
# Expand:
if expand:
_expand_codon_usage_orgs(codon_orgs, verbose)
# Save:
_write_codon_usage_orgs_file(codon_orgs, filepath)
return codon_orgs
return _read_codon_usage_orgs_file(filepath)
class CodonOptimiser():
'''Class to support codon optimisation.'''
def __init__(self, taxonomy_id):
self.__taxonomy_id = taxonomy_id
self.__aa_to_codon_prob = self.__get_codon_usage()
self.__codon_prob = {item[0]: item[1]
for lst in self.__aa_to_codon_prob.values()
for item in lst}
self.__codon_to_w = {}
for key in self.__aa_to_codon_prob:
aa_dict = {a: b / self.__aa_to_codon_prob[key][0][1]
for a, b in self.__aa_to_codon_prob[key]}
self.__codon_to_w.update(aa_dict)
def get_codon_prob(self, codon):
'''Gets the codon probability.'''
return self.__codon_prob[codon]
def get_codon_optim_seq(self, protein_seq, excl_codons=None,
max_repeat_nuc=float('inf'), restr_enzyms=None,
max_attempts=1000, tolerant=False, stepback=3):
'''Returns a codon optimised DNA sequence.'''
if max_repeat_nuc == float('inf') and restr_enzyms is None:
return ''.join([self.get_random_codon(aa, excl_codons)
for aa in protein_seq])
attempts = 0
seq = ''
i = 0
blockage_i = -1
inv_patterns = 0
while attempts < max_attempts:
amino_acid = protein_seq[i]
new_seq = seq + self.get_random_codon(amino_acid, excl_codons)
invalids = find_invalid(new_seq, max_repeat_nuc,
restr_enzyms)
if len(invalids) == inv_patterns or \
(attempts == max_attempts - 1 and tolerant):
if i == blockage_i:
if attempts == max_attempts - 1:
inv_patterns = inv_patterns + 1
attempts = 0
seq = new_seq
if i == len(protein_seq) - 1:
return seq
i += 1
else:
blockage_i = max(i, blockage_i)
i = max(0, (invalids[-1] // 3) - stepback)
seq = seq[:i * 3]
attempts += 1
raise ValueError('Unable to generate codon-optimised sequence with '
'%i maximum repeating nucleotides.' % max_repeat_nuc)
def get_cai(self, dna_seq):
'''Gets the CAI for a given DNA sequence.'''
ws = []
for i in range(0, len(dna_seq), 3):
codon = dna_seq[i:i + 3]
if codon in self.__codon_to_w:
ws.append(self.__codon_to_w[codon])
return np.mean(ws)
def mutate(self, protein_seq, dna_seq, mutation_rate):
'''Mutate a protein-encoding DNA sequence according to a
supplied mutation rate.'''
return ''.join([self.get_random_codon(amino_acid)
if random.random() < mutation_rate
else dna_seq[3 * i:3 * (i + 1)]
for i, amino_acid in enumerate(protein_seq)])
def get_all_codons(self, amino_acid):
'''Returns all codons for a given amino acid.'''
return [t[0] for t in self.__aa_to_codon_prob[amino_acid]]
def get_best_codon(self, amino_acid):
'''Get 'best' codon for a given amino acid.'''
return self.__aa_to_codon_prob[amino_acid][0][0]
def get_random_codon(self, amino_acid, excl_codons=None):
'''Returns a random codon for a given amino acid,
based on codon probability from the codon usage table.'''
if excl_codons is None:
excl_codons = []
codon_usage = [codon_usage
for codon_usage in self.__aa_to_codon_prob[amino_acid]
if codon_usage[0] not in excl_codons]
if not codon_usage:
raise ValueError('No codons available for ' + amino_acid +
' after excluding ' + str(excl_codons))
while True:
rand = random.random()
cumulative_prob = 0
            for codon, prob in reversed(codon_usage):
cumulative_prob += prob
if cumulative_prob > rand:
return codon
def __get_codon_usage(self):
'''Gets the codon usage table for a given taxonomy id.'''
aa_to_codon_prob = {aa_code: {} for aa_code in AA_CODES.values()}
url = 'http://www.kazusa.or.jp/codon/cgi-bin/showcodon.cgi?species=' \
+ self.__taxonomy_id + '&aa=1&style=GCG'
in_codons = False
resp = requests.get(url, allow_redirects=True)
for line in resp.iter_lines():
line = line.decode('utf-8')
if line == '<PRE>':
in_codons = True
elif line == '</PRE>':
break
elif in_codons:
values = re.split('\\s+', line)
am_acid = 'Stop' if values[0] == 'End' else values[0]
if am_acid in AA_CODES:
codon_prob = aa_to_codon_prob[AA_CODES[am_acid]]
codon_prob[values[1]] = float(values[3])
aa_to_codon_prob.update((x, _scale(y))
for x, y in aa_to_codon_prob.items())
return aa_to_codon_prob
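def _example_codon_optimise():
    '''Illustrative sketch only (not part of the library): codon-optimise a
    short peptide and report its CAI. The taxonomy id (83333, assumed to be
    E. coli K-12 in the Codon Usage Database) and the peptide sequence are
    assumptions for this example; an HTTP request to kazusa.or.jp is made.'''
    optimiser = CodonOptimiser('83333')
    dna = optimiser.get_codon_optim_seq('MKLVGA', max_repeat_nuc=4)
    print(dna, optimiser.get_cai(dna))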
def find_invalid(seq, max_repeat_nuc=float('inf'), restr_enzyms=None):
'''Finds invalid sequences.'''
inv = []
seq = seq.upper()
# Invalid repeating nucleotides:
if max_repeat_nuc != float('inf'):
pattern = [''.join([nucl] * (max_repeat_nuc + 1))
for nucl in NUCLEOTIDES]
pattern = re.compile(r'(?=(' + '|'.join(pattern) + '))')
inv = [m.start() for m in pattern.finditer(seq)]
# Invalid restriction sites:
if restr_enzyms:
for rest_enz in [_get_restr_type(name) for name in restr_enzyms]:
inv.extend(rest_enz.search(Seq.Seq(seq)))
return inv
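# Example (illustrative): find_invalid('AAAAAA', max_repeat_nuc=3) returns
# [0, 1, 2], the start positions of every overlapping run of four identical
# nucleotides.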
def is_invalid(seq, max_repeat_nuc=float('inf'), restr_enzyms=None):
    '''Returns True if the sequence is invalid (repeats or restriction sites).'''
return len(find_invalid(seq, max_repeat_nuc, restr_enzyms)) > 0
def get_all_rev_trans(aa_seq):
'''Returns all reverse translations of amino acid sequence.'''
codons = [AA_COD[aa] for aa in aa_seq.strip()]
return [''.join(t) for t in list(itertools.product(*codons))]
def get_random_dna(length, max_repeat_nuc=float('inf'), restr_enzyms=None):
'''Returns a random sequence of DNA of the supplied length,
while adhering to a maximum number of repeating nucleotides.'''
max_attempts = 100
attempts = 0
len_add = 16
seq = ''
while True:
attempts += 1
if attempts > max_attempts:
            raise ValueError('Unable to generate valid random sequence.')
while len(seq) < length:
seq += _get_random_dna(len_add)
if is_invalid(seq, max_repeat_nuc, restr_enzyms):
seq = seq[:-len_add]
if not is_invalid(seq, max_repeat_nuc, restr_enzyms):
return seq[:length]
return None
def mutate_seq(seq, mutations=1, alphabet=None):
'''Mutates sequence.'''
if alphabet is None:
alphabet = NUCLEOTIDES
seq_new = seq
for _ in range(mutations):
move = random.random()
pos = int(random.random() * len(seq))
base = random.choice(alphabet)
        # Insert (the first base is dropped so the length is preserved):
        if move < 0.1:
            seq_new = seq_new[1:pos + 1] + base + seq_new[pos + 1:]
        # Delete (a random base is prepended so the length is preserved):
        elif move < 0.2:
            seq_new = base + seq_new[:pos] + seq_new[pos + 1:]
# Replace:
else:
seq_new = seq_new[:pos] + base + seq_new[pos + 1:]
return seq_new
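# Example (illustrative): mutate_seq('ACGTACGT', mutations=2) returns a
# sequence of the same length with up to two point changes; the insert and
# delete moves above drop or prepend a base so the length is preserved.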
def get_melting_temp(dna1, dna2=None, reag_concs=None, strict=True):
    '''Calculates the melting temperature of a DNA sequence against its
    complement, or against a second DNA sequence, using the Nearest-Neighbour
    method.'''
assert len(dna1) > 1
    # Copy the defaults so the module-level dictionary is not mutated:
    reagent_concs = dict(__DEFAULT_REAG_CONC)
    if reag_concs is not None:
        reagent_concs.update(reag_concs)
reagent_conc = {k: v * 1000 for k, v in reagent_concs.items()}
dnac1 = 30
return Tm_NN(dna1, check=True, strict=strict, c_seq=dna2, shift=0,
Na=reagent_conc[NA], K=reagent_conc[K],
Tris=reagent_conc[TRIS], Mg=reagent_conc[MG],
dNTPs=reagent_conc[DNTP],
dnac1=dnac1, dnac2=dnac1, selfcomp=dna2 is None,
saltcorr=7)
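def _example_melting_temp():
    '''Illustrative sketch only (not part of the library): melting temperature
    of a primer against its own complement, using the default reagent
    concentrations. The primer sequence is made up for this example.'''
    print(get_melting_temp('AGCGGATAACAATTTCACACAGGA'))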
def get_seq_by_melt_temp(seq, target_melt_temp, forward=True,
terminii=None,
reagent_concs=None,
tol=0.025):
'''Returns a subsequence close to desired melting temperature.'''
if terminii is None:
terminii = ['A', 'C', 'G', 'T']
else:
terminii = [term.upper() for term in terminii]
best_delta_tm = float('inf')
best_subseq = ''
best_melt_temp = float('NaN')
in_tol = False
for i in range(3, len(seq)):
subseq = seq[:(i + 1)] if forward else seq[-(i + 1):]
melt_temp = get_melting_temp(subseq, None, reagent_concs)
if subseq[-1 if forward else 0].upper() in terminii:
delta_tm = abs(melt_temp - target_melt_temp)
if delta_tm / target_melt_temp < tol:
in_tol = True
if delta_tm < best_delta_tm:
best_delta_tm = delta_tm
best_subseq = subseq
best_melt_temp = melt_temp
elif in_tol:
break
if in_tol:
return best_subseq, best_melt_temp
raise ValueError('Unable to get sequence of required melting temperature')
def get_rand_seq_by_melt_temp(target_melt_temp,
max_repeat_nuc=float('inf'),
restr_enzyms=None,
reagent_concs=None,
tol=0.025):
    '''Returns a random sequence close to the desired melting temperature.'''
seq = random.choice(NUCLEOTIDES)
while True:
seq += random.choice(NUCLEOTIDES)
if is_invalid(seq, max_repeat_nuc, restr_enzyms):
seq = random.choice(NUCLEOTIDES)
continue
melt_temp = get_melting_temp(seq, None, reagent_concs)
delta_tm = abs(melt_temp - target_melt_temp)
if delta_tm / target_melt_temp < tol:
return seq, melt_temp
raise ValueError('Unable to get sequence of required melting temperature')
def get_uniprot_values(uniprot_ids, fields, batch_size=128, verbose=False,
num_threads=0):
'''Gets dictionary of ids to values from Uniprot.'''
values = []
if num_threads:
thread_pool = thread_utils.ThreadPool(num_threads)
for i in range(0, len(uniprot_ids), batch_size):
thread_pool.add_task(_get_uniprot_batch, uniprot_ids, i,
batch_size, fields, values, verbose)
thread_pool.wait_completion()
else:
for i in range(0, len(uniprot_ids), batch_size):
_get_uniprot_batch(uniprot_ids, i, batch_size, fields, values,
verbose)
return {value['Entry']: value for value in values}
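# Illustrative usage sketch (not part of the library): fetch protein names
# for two hypothetical accessions using two worker threads. The accession
# ids and the 'protein names' column are assumptions for this example.
#
#     values = get_uniprot_values(['P12345', 'Q67890'], ['protein names'],
#                                 num_threads=2)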
def search_uniprot(query, fields, limit=128):
    '''Gets a list of result dictionaries from Uniprot for a search query.'''
values = []
url = 'http://www.uniprot.org/uniprot/?query=' + parse.quote(query) + \
'&sort=score&limit=' + str(limit) + \
'&format=tab&columns=id,' + ','.join([parse.quote(field)
for field in fields])
_parse_uniprot_data(url, values)
return values
def do_blast(id_seqs_subjects, id_seqs_queries, program='blastn',
dbtype='nucl', evalue=1.0, word_size=28):
'''Performs BLAST of query sequences against subject sequences.'''
db_filename = write_fasta(id_seqs_subjects)
query_filename = write_fasta(id_seqs_queries)
result_file = tempfile.NamedTemporaryFile(prefix='blast_result_',
suffix='.xml',
delete=False)
log_file = tempfile.NamedTemporaryFile(prefix='makeblastdb_log',
suffix='.txt',
delete=False)
call(['makeblastdb',
'-in', db_filename,
'-out', db_filename,
'-dbtype', dbtype,
'-logfile', log_file.name])
call([program,
'-query', query_filename,
'-db', db_filename,
'-out', result_file.name,
'-evalue', str(evalue),
'-word_size', str(word_size),
'-outfmt', '5'])
return NCBIXML.parse(open(result_file.name))
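# Illustrative usage sketch (not part of the library): BLAST a query against
# a subject sequence. It assumes the NCBI BLAST+ binaries (makeblastdb,
# blastn) are on the PATH; the ids and sequences are made up for the example.
#
#     hits = do_blast({'subj': 'ACGT' * 100}, {'query': 'ACGTACGTACGT'})
#     for record in hits:
#         print(record.query, len(record.alignments))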
def do_clustal(in_data, is_fasta_file=False, result_file=None,
guidetree_file=None):
'''Performs Clustal Omega multiple sequence alignment.'''
result_file = tempfile.NamedTemporaryFile(prefix='clustalo_result_',
suffix='.fasta',
delete=False).name \
if result_file is None \
else result_file
guidetree_file = tempfile.NamedTemporaryFile(prefix='clustalo_tree_',
suffix='.dnd',
delete=False).name \
if guidetree_file is None \
else guidetree_file
call(['clustalo',
'-i', in_data if is_fasta_file else write_fasta(in_data),
'-o', result_file,
'--guidetree-out=' + guidetree_file,
'--force'])
return read_fasta(result_file)
def read_fasta(filename):
'''Reads a fasta file.'''
    with open(filename, 'r') as fle:  # 'U' mode was removed in Python 3.11
seqs = {record.id: str(record.seq)
for record in SeqIO.parse(fle, 'fasta')}
return seqs
def write_fasta(id_seqs, filename=None):
'''Writes a fasta file.'''
if filename is None:
temp_file = tempfile.NamedTemporaryFile(prefix='fasta_', suffix='.txt',
delete=False)
filename = temp_file.name
records = [SeqRecord.SeqRecord(Seq.Seq(seq), str(seq_id), '', '')
for seq_id, seq in id_seqs.items()]
SeqIO.write(records, filename, 'fasta')
return filename
def pcr(seq, forward_primer, reverse_primer):
    '''Apply in silico PCR (a circular template is assumed when both primers are found).'''
for_primer_pos = seq.find(forward_primer.upper())
rev_primer_pos = \
seq.find(str(Seq.Seq(reverse_primer).reverse_complement().upper()))
if for_primer_pos > -1 and rev_primer_pos > -1:
seq = seq[for_primer_pos:] + \
seq[:rev_primer_pos + len(reverse_primer)]
elif for_primer_pos > -1:
seq = seq[for_primer_pos:]
elif rev_primer_pos > -1:
seq = seq[:rev_primer_pos + len(reverse_primer)]
return seq, for_primer_pos
def _scale(codon_usage):
    '''Scales codon usage values so that they sum to 1.'''
sum_cdn_usage = sum(codon_usage.values())
if sum_cdn_usage:
codon_usage = {key: value / sum_cdn_usage
for key, value in codon_usage.items()}
else:
codon_usage = {key: 1 / len(codon_usage)
for key in codon_usage}
return sorted(codon_usage.items(), key=operator.itemgetter(1),
reverse=True)
def _get_random_dna(length):
'''Returns a random sequence of DNA of the supplied length.'''
return ''.join(random.choice(['A', 'C', 'G', 'T']) for _ in range(length))
def _get_uniprot_batch(uniprot_ids, i, batch_size, fields, values, verbose):
'''Get batch of Uniprot data.'''
if verbose:
print('seq_utils: getting Uniprot values ' + str(i) + ' - ' +
str(min(i + batch_size, len(uniprot_ids))) + ' / ' +
str(len(uniprot_ids)))
batch = uniprot_ids[i:min(i + batch_size, len(uniprot_ids))]
query = '+or+'.join(['id:' + uniprot_id for uniprot_id in batch])
url = 'https://www.uniprot.org/uniprot/?query=' + query + \
'&format=tab&columns=id,' + ','.join([parse.quote(field)
for field in fields])
_parse_uniprot_data(url, values)
def _parse_uniprot_data(url, values):
'''Parses Uniprot data.'''
headers = None
try:
resp = requests.get(url, allow_redirects=True)
for line in resp.iter_lines():
line = line.decode('utf-8')
tokens = line.strip().split('\t')
if headers is None:
headers = tokens
else:
                value = dict(zip(headers, tokens))
                if 'Protein names' in value:
                    regexp = re.compile(r'(?<=\()[^)]*(?=\))|^[^(][^()]*')
                    names = regexp.findall(value.pop('Protein names'))
                    value['Protein names'] = [nme.strip() for nme in names]
                for key in value:
                    if key.startswith('Cross-reference'):
                        value[key] = value[key].split(';')
                values.append(value)
except Exception as err:
print(err)
def _read_codon_usage_orgs_file(filename):
'''Reads Codon Usage Database table of species file.'''
codon_orgs = {}
with open(filename, 'r') as textfile:
next(textfile)
for line in textfile:
tokens = line.strip().split('\t')
codon_orgs[tokens[0]] = tokens[1]
return codon_orgs
def _expand_codon_usage_orgs(codon_orgs, verbose, max_errors=16):
'''Expand Codon Usage Db table of species with children and synonyms.'''
for tax_id in codon_orgs.values():
if verbose:
print('Expanding codon usage for NCBI Taxonomy id: ' + tax_id)
errors = 0
success = False
while not success:
try:
for name in taxonomy.get_synonyms_by_id(tax_id):
_add_codon_usage_org(codon_orgs, name, tax_id)
for child in taxonomy.get_children_by_id(tax_id):
_add_codon_usage_org(codon_orgs, child['name'], tax_id)
for name in child['names']:
_add_codon_usage_org(codon_orgs, name, tax_id)
success = True
except ConnectionError as err:
errors += 1
if errors == max_errors:
raise err
def _add_codon_usage_org(codon_orgs, name, tax_id):
'''Adds name to codon_orgs.'''
if name not in codon_orgs:
codon_orgs[name] = tax_id
def _write_codon_usage_orgs_file(codon_orgs, filepath):
'''Writes Codon Usage Database table of species file.'''
with open(filepath, 'w+') as fle:
fle.write('Name\tId\n')
for name, tax_id in codon_orgs.items():
fle.write(name + '\t' + tax_id + '\n')
def _get_restr_type(name):
'''Gets RestrictionType from name.'''
types = [
x for _, (x, y) in Restriction_Dictionary.typedict.items()
if name in y][0]
enz_types = tuple(getattr(Restriction, typ)
for typ in types)
return Restriction.RestrictionType(
str(name), enz_types, Restriction_Dictionary.rest_dict[name])
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class ShortCodeTestCase(IntegrationTestCase):
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.proxy.v1.services("KSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.short_codes.create(sid="SCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
values = {'Sid': "SCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", }
self.holodeck.assert_has_request(Request(
'post',
'https://proxy.twilio.com/v1/Services/KSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/ShortCodes',
data=values,
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"sid": "SCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "KSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"short_code": "12345",
"iso_country": "US",
"capabilities": {
"sms_outbound": true,
"voice_inbound": false
},
"url": "https://proxy.twilio.com/v1/Services/KSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/ShortCodes/SCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"is_reserved": false
}
'''
))
actual = self.client.proxy.v1.services("KSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.short_codes.create(sid="SCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.proxy.v1.services("KSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.short_codes("SCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://proxy.twilio.com/v1/Services/KSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/ShortCodes/SCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.proxy.v1.services("KSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.short_codes("SCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.proxy.v1.services("KSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.short_codes.list()
self.holodeck.assert_has_request(Request(
'get',
'https://proxy.twilio.com/v1/Services/KSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/ShortCodes',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://proxy.twilio.com/v1/Services/KSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/ShortCodes?PageSize=50&Page=0",
"previous_page_url": null,
"next_page_url": null,
"key": "short_codes",
"url": "https://proxy.twilio.com/v1/Services/KSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/ShortCodes?PageSize=50&Page=0"
},
"short_codes": [
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "KSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "SCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"short_code": "12345",
"iso_country": "US",
"capabilities": {
"sms_outbound": true,
"voice_inbound": false
},
"url": "https://proxy.twilio.com/v1/Services/KSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/ShortCodes/SCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"is_reserved": false
}
]
}
'''
))
actual = self.client.proxy.v1.services("KSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.short_codes.list()
self.assertIsNotNone(actual)
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.proxy.v1.services("KSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.short_codes("SCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://proxy.twilio.com/v1/Services/KSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/ShortCodes/SCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "SCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "KSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"short_code": "12345",
"iso_country": "US",
"capabilities": {
"sms_outbound": true,
"voice_inbound": false
},
"url": "https://proxy.twilio.com/v1/Services/KSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/ShortCodes/SCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"is_reserved": false
}
'''
))
actual = self.client.proxy.v1.services("KSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.short_codes("SCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.proxy.v1.services("KSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.short_codes("SCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.holodeck.assert_has_request(Request(
'post',
'https://proxy.twilio.com/v1/Services/KSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/ShortCodes/SCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_update_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "SCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "KSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z",
"short_code": "12345",
"iso_country": "US",
"capabilities": {
"sms_outbound": true,
"voice_inbound": false
},
"url": "https://proxy.twilio.com/v1/Services/KSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/ShortCodes/SCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"is_reserved": true
}
'''
))
actual = self.client.proxy.v1.services("KSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.short_codes("SCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.assertIsNotNone(actual)
|
|
# This file is part of Scapy
# Scapy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# Scapy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Scapy. If not, see <http://www.gnu.org/licenses/>.
# author: <jellch@harris.com>
# scapy.contrib.description = PPI GEOLOCATION
# scapy.contrib.status = loads
"""
PPI-GEOLOCATION tags
"""
from __future__ import absolute_import
import struct, time
from scapy.packet import *
from scapy.fields import *
from scapy.contrib.ppi import PPIGenericFldHdr,addPPIType
from scapy.error import warning
import scapy.modules.six as six
from scapy.modules.six.moves import range
CURR_GEOTAG_VER = 2 #Major revision of specification
PPI_GPS = 30002
PPI_VECTOR = 30003
PPI_SENSOR = 30004
PPI_ANTENNA = 30005
#The FixedX_Y Fields are used to store fixed point numbers in a variety of fields in the GEOLOCATION-TAGS specification
class Fixed3_6Field(LEIntField):
def i2h(self, pkt, x):
if x is not None:
if (x < 0):
warning("Fixed3_6: Internal value too negative: %d" % x)
x = 0
elif (x > 999999999):
warning("Fixed3_6: Internal value too positive: %d" % x)
x = 999999999
x = x * 1e-6
return x
def h2i(self, pkt, x):
if x is not None:
if (x <= -0.5e-6):
warning("Fixed3_6: Input value too negative: %.7f" % x)
x = 0
elif (x >= 999.9999995):
warning("Fixed3_6: Input value too positive: %.7f" % x)
x = 999.999999
x = int(round(x * 1e6))
return x
def i2m(self, pkt, x):
"""Convert internal value to machine value"""
if x is None:
#Try to return zero if undefined
x = self.h2i(pkt, 0)
return x
def i2repr(self,pkt,x):
if x is None:
y=0
else:
y=self.i2h(pkt,x)
return "%3.6f"%(y)
class Fixed3_7Field(LEIntField):
def i2h(self, pkt, x):
if x is not None:
if (x < 0):
warning("Fixed3_7: Internal value too negative: %d" % x)
x = 0
elif (x > 3600000000):
warning("Fixed3_7: Internal value too positive: %d" % x)
x = 3600000000
x = (x - 1800000000) * 1e-7
return x
def h2i(self, pkt, x):
if x is not None:
if (x <= -180.00000005):
warning("Fixed3_7: Input value too negative: %.8f" % x)
x = -180.0
elif (x >= 180.00000005):
warning("Fixed3_7: Input value too positive: %.8f" % x)
x = 180.0
x = int(round((x + 180.0) * 1e7))
return x
def i2m(self, pkt, x):
"""Convert internal value to machine value"""
if x is None:
#Try to return zero if undefined
x = self.h2i(pkt, 0)
return x
def i2repr(self,pkt,x):
if x is None:
y=0
else:
y=self.i2h(pkt,x)
return "%3.7f"%(y)
class Fixed6_4Field(LEIntField):
def i2h(self, pkt, x):
if x is not None:
if (x < 0):
warning("Fixed6_4: Internal value too negative: %d" % x)
x = 0
elif (x > 3600000000):
warning("Fixed6_4: Internal value too positive: %d" % x)
x = 3600000000
x = (x - 1800000000) * 1e-4
return x
def h2i(self, pkt, x):
if x is not None:
if (x <= -180000.00005):
warning("Fixed6_4: Input value too negative: %.5f" % x)
x = -180000.0
elif (x >= 180000.00005):
warning("Fixed6_4: Input value too positive: %.5f" % x)
x = 180000.0
x = int(round((x + 180000.0) * 1e4))
return x
def i2m(self, pkt, x):
"""Convert internal value to machine value"""
if x is None:
#Try to return zero if undefined
x = self.h2i(pkt, 0)
return x
def i2repr(self,pkt,x):
if x is None:
y=0
else:
y=self.i2h(pkt,x)
return "%6.4f"%(y)
#The GPS timestamp's fractional time counter is stored in a 32-bit unsigned ns counter.
#The ept field is stored the same way.
class NSCounter_Field(LEIntField):
def i2h(self, pkt, x): #converts nano-seconds to seconds for output
if x is not None:
if (x < 0):
warning("NSCounter_Field: Internal value too negative: %d" % x)
x = 0
elif (x >= 2**32):
warning("NSCounter_Field: Internal value too positive: %d" % x)
x = 2**32-1
x = (x / 1e9)
return x
def h2i(self, pkt, x): #converts input in seconds into nano-seconds for storage
if x is not None:
if (x < 0):
warning("NSCounter_Field: Input value too negative: %.10f" % x)
x = 0
elif (x >= (2**32) / 1e9):
warning("NSCounter_Field: Input value too positive: %.10f" % x)
x = (2**32-1) / 1e9
x = int(round((x * 1e9)))
return x
def i2repr(self,pkt,x):
if x is None:
y=0
else:
y=self.i2h(pkt,x)
return "%1.9f"%(y)
class LETimeField(UTCTimeField,LEIntField):
__slots__ = ["epoch", "delta", "strf"]
def __init__(self, name, default, epoch=None, strf="%a, %d %b %Y %H:%M:%S +0000"):
LEIntField.__init__(self, name, default)
UTCTimeField.__init__(self, name, default, epoch=epoch, strf=strf)
class SignedByteField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "b")
def randval(self):
return RandSByte()
class XLEShortField(LEShortField,XShortField):
def i2repr(self, pkt, x):
return XShortField.i2repr(self, pkt, x)
class XLEIntField(LEIntField,XIntField):
def i2repr(self, pkt, x):
return XIntField.i2repr(self, pkt, x)
class GPSTime_Field(LETimeField):
def __init__(self, name, default):
return LETimeField.__init__(self, name, default, strf="%a, %d %b %Y %H:%M:%S UTC")
class VectorFlags_Field(XLEIntField):
"""Represents te VectorFlags field. Handles the RelativeTo:sub-field"""
_fwdstr = "DefinesForward"
_resmask = 0xfffffff8
_relmask = 0x6
_relnames = ["RelativeToForward", "RelativeToEarth", "RelativeToCurrent", "RelativeToReserved"]
_relvals = [0x00, 0x02, 0x04, 0x06]
def i2repr(self, pkt, x):
if x is None:
return str(x)
r = []
if (x & 0x1):
r.append(self._fwdstr)
i = (x & self._relmask) >> 1
r.append(self._relnames[i])
i = x & self._resmask
if (i):
r.append("ReservedBits:%08X" % i)
sout = "+".join(r)
return sout
def any2i(self, pkt, x):
if isinstance(x, str):
r = x.split("+")
y = 0
for value in r:
if (value == self._fwdstr):
y |= 0x1
elif (value in self._relnames):
i = self._relnames.index(value)
y &= (~self._relmask)
y |= self._relvals[i]
else:
#logging.warning("Unknown VectorFlags Argument: %s" % value)
pass
else:
y = x
#print "any2i: %s --> %s" % (str(x), str(y))
return y
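# Worked example of the VectorFlags string form (a sketch based on the i2repr
# and any2i methods above): machine value 0x03 renders as
# "DefinesForward+RelativeToEarth", and any2i maps that string back to 0x03.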
class HCSIFlagsField(FlagsField):
""" A FlagsField where each bit/flag turns a conditional field on or off.
If the value is None when building a packet, i2m() will check the value of
every field in self.names. If the field's value is not None, the corresponding
flag will be set. """
def i2m(self, pkt, val):
if val is None:
val = 0
if (pkt):
for i, name in enumerate(self.names):
value = pkt.getfieldval(name)
if value is not None:
val |= 1 << i
return val
class HCSINullField(StrFixedLenField):
def __init__(self, name, default):
return StrFixedLenField.__init__(self, name, default, length=0)
class HCSIDescField(StrFixedLenField):
def __init__(self, name, default):
return StrFixedLenField.__init__(self, name, default, length=32)
class HCSIAppField(StrFixedLenField):
def __init__(self, name, default):
return StrFixedLenField.__init__(self, name, default, length=60)
def _FlagsList(myfields):
flags = ["Reserved%02d" % i for i in range(32)]
for i, value in six.iteritems(myfields):
flags[i] = value
return flags
# Define all geolocation-tag flags lists
_hcsi_gps_flags = _FlagsList({0:"No Fix Available", 1:"GPS", 2:"Differential GPS",
3:"Pulse Per Second", 4:"Real Time Kinematic",
5:"Float Real Time Kinematic", 6:"Estimated (Dead Reckoning)",
7:"Manual Input", 8:"Simulation"})
#_hcsi_vector_flags = _FlagsList({0:"ForwardFrame", 1:"RotationsAbsoluteXYZ", 5:"OffsetFromGPS_XYZ"})
#This has been replaced with the VectorFlags_Field class, in order to handle the RelativeTo:subfield
_hcsi_vector_char_flags = _FlagsList({0:"Antenna", 1:"Direction of Travel",
2:"Front of Vehicle", 3:"Angle of Arrival", 4:"Transmitter Position",
8:"GPS Derived", 9:"INS Derived", 10:"Compass Derived",
11:"Acclerometer Derived", 12:"Human Derived"})
_hcsi_antenna_flags = _FlagsList({ 1:"Horizontal Polarization", 2:"Vertical Polarization",
3:"Circular Polarization Left", 4:"Circular Polarization Right",
16:"Electronically Steerable", 17:"Mechanically Steerable"})
""" HCSI PPI Fields are similar to RadioTap. A mask field called "present" specifies if each field
is present. All other fields are conditional. When dissecting a packet, each field is present if
"present" has the corresponding bit set. When building a packet, if "present" is None, the mask is
set to include every field that does not have a value of None. Otherwise, if the mask field is
not None, only the fields specified by "present" will be added to the packet.
To build each Packet type, build a list of the fields normally, excluding the present bitmask field.
The code will then construct conditional versions of each field and add the present field.
See GPS_Fields as an example. """
# Conditional test for all HCSI Fields
def _HCSITest(pkt, ibit, name):
if pkt.present is None:
return (pkt.getfieldval(name) is not None)
return pkt.present & ibit
# Wrap optional fields in ConditionalField, add HCSIFlagsField
def _HCSIBuildFields(fields):
names = [f.name for f in fields]
cond_fields = [HCSIFlagsField('present', None, -len(names), names)]
for i, name in enumerate(names):
ibit = 1 << i
seval = "lambda pkt:_HCSITest(pkt,%s,'%s')" % (ibit, name)
test = eval(seval)
cond_fields.append(ConditionalField(fields[i], test))
return cond_fields
class HCSIPacket(Packet):
name = "PPI HCSI"
fields_desc = [ LEShortField('pfh_type', None),
LEShortField('pfh_length', None),
ByteField('geotag_ver', CURR_GEOTAG_VER),
ByteField('geotag_pad', 0),
LEShortField('geotag_len', None)]
def post_build(self, p, pay):
if self.pfh_length is None:
l = len(p) - 4
sl = struct.pack('<H',l)
p = p[:2] + sl + p[4:]
if self.geotag_len is None:
l_g = len(p) - 4
sl_g = struct.pack('<H',l_g)
p = p[:6] + sl_g + p[8:]
p += pay
return p
def extract_padding(self, p):
return "",p
#GPS Fields
GPS_Fields = [FlagsField("GPSFlags", None, -32, _hcsi_gps_flags),
Fixed3_7Field("Latitude", None),
Fixed3_7Field("Longitude", None), Fixed6_4Field("Altitude", None),
Fixed6_4Field("Altitude_g", None), GPSTime_Field("GPSTime", None),
NSCounter_Field("FractionalTime", None), Fixed3_6Field("eph", None),
Fixed3_6Field("epv", None), NSCounter_Field("ept", None),
HCSINullField("Reserved10", None), HCSINullField("Reserved11", None),
HCSINullField("Reserved12", None), HCSINullField("Reserved13", None),
HCSINullField("Reserved14", None), HCSINullField("Reserved15", None),
HCSINullField("Reserved16", None), HCSINullField("Reserved17", None),
HCSINullField("Reserved18", None), HCSINullField("Reserved19", None),
HCSINullField("Reserved20", None), HCSINullField("Reserved21", None),
HCSINullField("Reserved22", None), HCSINullField("Reserved23", None),
HCSINullField("Reserved24", None), HCSINullField("Reserved25", None),
HCSINullField("Reserved26", None), HCSINullField("Reserved27", None),
HCSIDescField("DescString", None), XLEIntField("AppId", None),
HCSIAppField("AppData", None), HCSINullField("Extended", None)]
class GPS(HCSIPacket):
name = "PPI GPS"
fields_desc = [ LEShortField('pfh_type', PPI_GPS), #pfh_type
LEShortField('pfh_length', None), #pfh_len
ByteField('geotag_ver', CURR_GEOTAG_VER), #base_geotag_header.ver
ByteField('geotag_pad', 0), #base_geotag_header.pad
LEShortField('geotag_len', None)] + _HCSIBuildFields(GPS_Fields)
#Vector Fields
VEC_Fields = [VectorFlags_Field("VectorFlags", None),
FlagsField("VectorChars", None, -32, _hcsi_vector_char_flags),
Fixed3_6Field("Pitch", None), Fixed3_6Field("Roll", None),
Fixed3_6Field("Heading", None), Fixed6_4Field("Off_X", None),
Fixed6_4Field("Off_Y", None), Fixed6_4Field("Off_Z", None),
HCSINullField("Reserved08", None), HCSINullField("Reserved09", None),
HCSINullField("Reserved10", None), HCSINullField("Reserved11", None),
HCSINullField("Reserved12", None), HCSINullField("Reserved13", None),
HCSINullField("Reserved14", None), HCSINullField("Reserved15", None),
Fixed3_6Field("Err_Rot", None), Fixed6_4Field("Err_Off", None),
HCSINullField("Reserved18", None), HCSINullField("Reserved19", None),
HCSINullField("Reserved20", None), HCSINullField("Reserved21", None),
HCSINullField("Reserved22", None), HCSINullField("Reserved23", None),
HCSINullField("Reserved24", None), HCSINullField("Reserved25", None),
HCSINullField("Reserved26", None), HCSINullField("Reserved27", None),
HCSIDescField("DescString", None), XLEIntField("AppId", None),
HCSIAppField("AppData", None), HCSINullField("Extended", None)]
class Vector(HCSIPacket):
name = "PPI Vector"
fields_desc = [ LEShortField('pfh_type', PPI_VECTOR), #pfh_type
LEShortField('pfh_length', None), #pfh_len
ByteField('geotag_ver', CURR_GEOTAG_VER), #base_geotag_header.ver
ByteField('geotag_pad', 0), #base_geotag_header.pad
LEShortField('geotag_len', None)] + _HCSIBuildFields(VEC_Fields)
#Sensor Fields
# Sensor type codes used by the PPI-GEOLOCATION sensor tag
sensor_types= { 1 : "Velocity",
2 : "Acceleration",
3 : "Jerk",
100 : "Rotation",
101 : "Magnetic",
1000: "Temperature",
1001: "Barometer",
1002: "Humidity",
2000: "TDOA_Clock",
2001: "Phase"
}
SENS_Fields = [ LEShortEnumField('SensorType', None, sensor_types),
SignedByteField('ScaleFactor', None),
Fixed6_4Field('Val_X', None),
Fixed6_4Field('Val_Y', None),
Fixed6_4Field('Val_Z', None),
Fixed6_4Field('Val_T', None),
Fixed6_4Field('Val_E', None),
HCSINullField("Reserved07", None), HCSINullField("Reserved08", None),
HCSINullField("Reserved09", None), HCSINullField("Reserved10", None),
HCSINullField("Reserved11", None), HCSINullField("Reserved12", None),
HCSINullField("Reserved13", None), HCSINullField("Reserved14", None),
HCSINullField("Reserved15", None), HCSINullField("Reserved16", None),
HCSINullField("Reserved17", None), HCSINullField("Reserved18", None),
HCSINullField("Reserved19", None), HCSINullField("Reserved20", None),
HCSINullField("Reserved21", None), HCSINullField("Reserved22", None),
HCSINullField("Reserved23", None), HCSINullField("Reserved24", None),
HCSINullField("Reserved25", None), HCSINullField("Reserved26", None),
HCSINullField("Reserved27", None),
HCSIDescField("DescString", None), XLEIntField("AppId", None),
HCSIAppField("AppData", None), HCSINullField("Extended", None)]
class Sensor(HCSIPacket):
name = "PPI Sensor"
fields_desc = [ LEShortField('pfh_type', PPI_SENSOR), #pfh_type
LEShortField('pfh_length', None), #pfh_len
ByteField('geotag_ver', CURR_GEOTAG_VER ), #base_geotag_header.ver
ByteField('geotag_pad', 0), #base_geotag_header.pad
LEShortField('geotag_len', None)] + _HCSIBuildFields(SENS_Fields)
# HCSIAntenna Fields
ANT_Fields = [FlagsField("AntennaFlags", None, -32, _hcsi_antenna_flags),
ByteField("Gain", None),
Fixed3_6Field("HorizBw", None), Fixed3_6Field("VertBw", None),
Fixed3_6Field("PrecisionGain",None), XLEShortField("BeamID", None),
HCSINullField("Reserved06", None), HCSINullField("Reserved07", None),
HCSINullField("Reserved08", None), HCSINullField("Reserved09", None),
HCSINullField("Reserved10", None), HCSINullField("Reserved11", None),
HCSINullField("Reserved12", None), HCSINullField("Reserved13", None),
HCSINullField("Reserved14", None), HCSINullField("Reserved15", None),
HCSINullField("Reserved16", None), HCSINullField("Reserved17", None),
HCSINullField("Reserved18", None), HCSINullField("Reserved19", None),
HCSINullField("Reserved20", None), HCSINullField("Reserved21", None),
HCSINullField("Reserved22", None), HCSINullField("Reserved23", None),
HCSINullField("Reserved24", None), HCSINullField("Reserved25", None),
HCSIDescField("SerialNumber", None), HCSIDescField("ModelName", None),
HCSIDescField("DescString", None), XLEIntField("AppId", None),
HCSIAppField("AppData", None), HCSINullField("Extended", None)]
class Antenna(HCSIPacket):
name = "PPI Antenna"
fields_desc = [ LEShortField('pfh_type', PPI_ANTENNA), #pfh_type
LEShortField('pfh_length', None), #pfh_len
ByteField('geotag_ver', CURR_GEOTAG_VER), #base_geotag_header.ver
ByteField('geotag_pad', 0), #base_geotag_header.pad
LEShortField('geotag_len', None)] + _HCSIBuildFields(ANT_Fields)
addPPIType(PPI_GPS, GPS)
addPPIType(PPI_VECTOR, Vector)
addPPIType(PPI_SENSOR, Sensor)
addPPIType(PPI_ANTENNA,Antenna)
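# Usage sketch (not part of the original module): the "present" bitmask
# described in the HCSI docstring above is derived automatically from whichever
# optional fields are supplied, so a GPS tag only needs the fields of interest.
if __name__ == "__main__":
    _gps = GPS(Latitude=40.123456, Longitude=-75.654321, Altitude=12.5)
    _raw = bytes(_gps)  # HCSIFlagsField.i2m sets the Latitude/Longitude/Altitude bits
    GPS(_raw).show()    # dissection only fills the fields flagged in 'present'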
|
|
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
# ------------------------------------------------------------------
# Helper Functions
def id_func(x):
if isinstance(x, tuple):
assert len(x) == 2
return x[0].__name__ + "-" + str(x[1])
else:
return x.__name__
# ------------------------------------------------------------------
@pytest.fixture(params=[1, np.array(1, dtype=np.int64)])
def one(request):
# zero-dim integer array behaves like an integer
return request.param
zeros = [
box_cls([0] * 5, dtype=dtype)
for box_cls in [pd.Index, np.array]
for dtype in [np.int64, np.uint64, np.float64]
]
zeros.extend(
[box_cls([-0.0] * 5, dtype=np.float64) for box_cls in [pd.Index, np.array]]
)
zeros.extend([np.array(0, dtype=dtype) for dtype in [np.int64, np.uint64, np.float64]])
zeros.extend([np.array(-0.0, dtype=np.float64)])
zeros.extend([0, 0.0, -0.0])
@pytest.fixture(params=zeros)
def zero(request):
# For testing division by (or of) zero for Index with length 5, this
# gives several scalar-zeros and length-5 vector-zeros
return request.param
# ------------------------------------------------------------------
# Vector Fixtures
@pytest.fixture(
params=[
pd.Float64Index(np.arange(5, dtype="float64")),
pd.Int64Index(np.arange(5, dtype="int64")),
pd.UInt64Index(np.arange(5, dtype="uint64")),
pd.RangeIndex(5),
],
ids=lambda x: type(x).__name__,
)
def numeric_idx(request):
"""
Several types of numeric-dtypes Index objects
"""
return request.param
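# Sketch of how a test might consume the fixtures above (hypothetical test, not
# part of this conftest); pytest re-runs it for every parametrized combination:
#
#     def test_numeric_div_zero(numeric_idx, zero):
#         result = numeric_idx / zero
#         assert result.dtype == np.float64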
# ------------------------------------------------------------------
# Scalar Fixtures
@pytest.fixture(
params=[
pd.Timedelta("5m4s").to_pytimedelta(),
pd.Timedelta("5m4s"),
pd.Timedelta("5m4s").to_timedelta64(),
],
ids=lambda x: type(x).__name__,
)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(
params=[
pd.offsets.Day(3),
pd.offsets.Hour(72),
pd.Timedelta(days=3).to_pytimedelta(),
pd.Timedelta("72:00:00"),
np.timedelta64(3, "D"),
np.timedelta64(72, "h"),
],
ids=lambda x: type(x).__name__,
)
def three_days(request):
"""
Several timedelta-like and DateOffset objects that each represent
a 3-day timedelta
"""
return request.param
@pytest.fixture(
params=[
pd.offsets.Hour(2),
pd.offsets.Minute(120),
pd.Timedelta(hours=2).to_pytimedelta(),
pd.Timedelta(seconds=2 * 3600),
np.timedelta64(2, "h"),
np.timedelta64(120, "m"),
],
ids=lambda x: type(x).__name__,
)
def two_hours(request):
"""
Several timedelta-like and DateOffset objects that each represent
a 2-hour timedelta
"""
return request.param
_common_mismatch = [
pd.offsets.YearBegin(2),
pd.offsets.MonthBegin(1),
pd.offsets.Minute(),
]
@pytest.fixture(
params=[
pd.Timedelta(minutes=30).to_pytimedelta(),
np.timedelta64(30, "s"),
pd.Timedelta(seconds=30),
]
+ _common_mismatch
)
def not_hourly(request):
"""
Several timedelta-like and DateOffset instances that are _not_
compatible with Hourly frequencies.
"""
return request.param
@pytest.fixture(
params=[
np.timedelta64(4, "h"),
pd.Timedelta(hours=23).to_pytimedelta(),
pd.Timedelta("23:00:00"),
]
+ _common_mismatch
)
def not_daily(request):
"""
Several timedelta-like and DateOffset instances that are _not_
compatible with Daily frequencies.
"""
return request.param
@pytest.fixture(
params=[
np.timedelta64(365, "D"),
pd.Timedelta(days=365).to_pytimedelta(),
pd.Timedelta(days=365),
]
+ _common_mismatch
)
def mismatched_freq(request):
"""
Several timedelta-like and DateOffset instances that are _not_
compatible with Monthly or Annual frequencies.
"""
return request.param
# ------------------------------------------------------------------
@pytest.fixture(params=[pd.Index, pd.Series, pd.DataFrame], ids=id_func)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(
params=[pd.Index, pd.Series, pytest.param(pd.DataFrame, marks=pytest.mark.xfail)],
ids=id_func,
)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
@pytest.fixture(
params=[
(pd.Index, False),
(pd.Series, False),
(pd.DataFrame, False),
pytest.param((pd.DataFrame, True), marks=pytest.mark.xfail),
],
ids=id_func,
)
def box_transpose_fail(request):
"""
Fixture similar to `box` but testing both transpose cases for DataFrame,
    with the transpose=True case xfailed.
"""
# GH#23620
return request.param
@pytest.fixture(params=[pd.Index, pd.Series, pd.DataFrame, tm.to_array], ids=id_func)
def box_with_array(request):
"""
Fixture to test behavior for Index, Series, DataFrame, and pandas Array
classes
"""
return request.param
# alias so we can use the same fixture for multiple parameters in a test
box_with_array2 = box_with_array
|
|
# Copyright 2012 Tsutomu Uchino
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
import uno
import unohelper
from com.sun.star.frame import XController, XTitle, \
XDispatchProvider, XDispatchInformationProvider, \
XDispatch, FeatureStateEvent, DispatchInformation
from com.sun.star.task import XStatusIndicatorSupplier
from com.sun.star.view import XSelectionSupplier, XViewSettingsSupplier
from com.sun.star.beans import XPropertySet, XMultiPropertySet, \
XPropertySetInfo, \
Property, UnknownPropertyException, PropertyVetoException
class PropertySetInfo(unohelper.Base, XPropertySetInfo):
def __init__(self, props):
self.props = props
def get_index(self, name):
for i, prop in enumerate(self.props):
if name == prop[0]:
return i
return None
def getProperties(self):
_props = []
for prop in self.props:
_props.append(Property(*prop))
return tuple(_props)
def getPropertyByName(self, name):
i = self.get_index(name)
if i is None:
raise UnknownPropertyException(name, self)
p = self.props[i]
return Property(*p)
def hasPropertyByName(self, name):
        return self.get_index(name) is not None
class ViewSettings(unohelper.Base, XPropertySet, XMultiPropertySet):
""" Class provides view settings. """
def __init__(self, controller):
self.controller = controller
# XPropertySet
def getPropertySetInfo(self):
try:
bool_type = uno.getTypeByName("boolean")
return PropertySetInfo(
(
("ShowName", -1, bool_type, 16),
("ShowTags", -1, bool_type, 0),
("ShowValue", -1, bool_type, 0),
("ShowDescription", -1, bool_type, 0)
)
)
except Exception as e:
print(e)
return None
def setPropertyValue(self, name, value):
self.controller.set_view_state(name, value)
def getPropertyValue(self, name):
return self.controller.get_view_state(name)
def addPropertyChangeListener(self, name, listener): pass
def removePropertyChangeListener(self, name, listener): pass
def addVetoableChangeListener(self, name, listener): pass
def removeVetoableChangeListener(self, name, listener): pass
# XMultiPropertySet
def setPropertyValues(self, names, values):
column_changed = False
for name, value in zip(names, values):
if name.startswith("Show"):
self.setPropertyValue(name, value)
column_changed = True
if column_changed:
            self.controller.imple.column_state_changed()
def getPropertyValues(self, names):
return tuple([self.getPropertyValue(name) for name in names])
def addPropertiesChangeListener(self, names, listener): pass
def removePropertiesChangeListener(self, names, listener): pass
class Dispatcher(unohelper.Base, XDispatch):
def __init__(self, controller, processed_commands):
self.controller = controller
self.processed_commands = processed_commands
self.controls = {}
self.urls = {}
def clear(self):
self.urls.clear()
self.controls.clear()
def register_url(self, url):
self.urls[url.Complete] = url
def set_enable(self, complete, state, arg=None):
url = self.urls.get(complete, None)
if url:
self.broadcast_status(
complete,
FeatureStateEvent(self, url, "bar", state, False, arg)
)
def broadcast_status(self, complete, ev):
controls = self.controls.get(complete, None)
if controls:
for control in controls:
control.statusChanged(ev)
# XDispatch
def dispatch(self, url, args):
if url.Complete in self.processed_commands:
try:
self.controller.do_dispatch(url, args)
except Exception as e:
print(e)
def addStatusListener(self, control, url):
complete = url.Complete
if complete in self.processed_commands:
controls = self.controls.get(complete, None)
if not controls:
controls = []
self.controls[complete] = controls
controls.append(control)
state = self.controller.get_command_state(complete)
arg = self.controller.get_command_arg(complete)
self.set_enable(complete, state, arg)
def removeStatusListener(self, control, url):
complete = url.Complete
if complete in self.processed_commands:
try:
controls = self.controls.get(complete, None)
if controls:
while True:
controls.remove(control)
except:
pass
import bookmarks.dispatch as COMMANDS
from bookmarks.tools import get_config
from bookmarks.base import ComponentBase, ServiceInfo
from bookmarks.tree import HistoryRootNode, \
BookmarksNode, BookmarksMenuTreeContainerNode, BookmarksMenuTreeRootNode, \
TagNode, TagsTreeContainerNode, TagsTreeRootNode, UnsortedBookmarksRootNode
class UIController(unohelper.Base, ComponentBase,
XController, XTitle, #XPropertySet,
XDispatchProvider, XDispatchInformationProvider,
XStatusIndicatorSupplier,
XSelectionSupplier, XViewSettingsSupplier, ServiceInfo):
""" Provides controller which connects between frame and model. """
from bookmarks import VIEW_IMPLE_NAME as IMPLE_NAME
from bookmarks import VIEW_SERVICE_NAMES as SERVICE_NAMES
from bookmarks import COMMAND_PROTOCOL as CMD_PROTOCOL
UNO_PROTOCOL = ".uno:"
CMD_UNDO = UNO_PROTOCOL + COMMANDS.CMD_UNDO
CMD_REDO = UNO_PROTOCOL + COMMANDS.CMD_REDO
CMD_COPY = UNO_PROTOCOL + COMMANDS.CMD_COPY
CMD_SAVE = UNO_PROTOCOL + COMMANDS.CMD_SAVE
CMD_BACK = CMD_PROTOCOL + COMMANDS.CMD_BACK
CMD_FORWARD = CMD_PROTOCOL + COMMANDS.CMD_FORWARD
PROCESSED_COMMANDS = None
def __init__(self, ctx, imple, frame, args=()):
ComponentBase.__init__(self)
self.ctx = ctx
self._locked = False
self.suspended = False
self.CreationArguments = args
self.ViewControllerName = "Default"
self.ComponentWindow = None
self.frame = frame
self.model = None
self.imple = imple
if self.__class__.PROCESSED_COMMANDS is None:
uno_commands = [self.UNO_PROTOCOL + key
for key in COMMANDS.UNO_COMMANDS.keys()]
custom_commands = [self.CMD_PROTOCOL + key
for key in COMMANDS.CUSTOM_COMMANDS.keys()]
self.__class__.PROCESSED_COMMANDS = set(uno_commands + custom_commands)
self.dispatcher = Dispatcher(self, self.PROCESSED_COMMANDS)
def lock(self):
self._locked = True
def unlock(self):
self._locked = False
def do_dispatch(self, url, args):
""" Executed dispatch. """
self.do_action_by_name(url.Path)
def enable_command(self, command, state):
""" Change state of the dispatch. """
if command in COMMANDS.UNO_COMMANDS:
command = self.UNO_PROTOCOL + command
else:
command = self.CMD_PROTOCOL + command
self.dispatcher.set_enable(command, state)
def update_undo_redo_state(self):
""" Update state of undo and redo. """
self.dispatcher.set_enable(
self.CMD_UNDO,
self.imple.undostack.can_undo(),
self.get_command_arg(self.CMD_UNDO))
self.dispatcher.set_enable(
self.CMD_REDO,
self.imple.undostack.can_redo(),
self.get_command_arg(self.CMD_REDO))
def update_copy_state(self):
""" Update only copy state. """
self.dispatcher.set_enable(
self.CMD_COPY, self.imple.clipboard.has_data())
def update_save_state(self):
self.dispatcher.set_enable(
self.CMD_SAVE, self.imple.manager.modified)
def update_history_state(self):
self.dispatcher.set_enable(
self.CMD_BACK,
self.imple.history.has_previous(),
self.get_command_arg(self.CMD_BACK))
self.dispatcher.set_enable(
self.CMD_FORWARD,
self.imple.history.has_next(),
self.get_command_arg(self.CMD_FORWARD))
def get_command_arg(self, complete):
""" Get dispatch arguments. """
if complete == self.CMD_UNDO:
name = self.imple.undostack.get_undo_name()
if name:
return self.imple._("Undo: %s") % name
elif complete == self.CMD_REDO:
name = self.imple.undostack.get_redo_name()
if name:
return self.imple._("Redo: %s") % name
elif complete == self.CMD_BACK:
name = self.imple.history.get_previous_name()
if name:
return name
elif complete == self.CMD_FORWARD:
name = self.imple.history.get_next_name()
if name:
return name
return None
def get_command_state(self, complete):
""" Get dispatch state by the command url. """
imple = self.imple
view_mode = imple.get_view_mode()
if complete.startswith(self.UNO_PROTOCOL):
path = complete[5:]
if path == COMMANDS.CMD_EXPORTTO:
return True
elif path == COMMANDS.CMD_UNDO:
return self.imple.undostack.can_undo()
elif path == COMMANDS.CMD_REDO:
return self.imple.undostack.can_redo()
elif path == COMMANDS.CMD_PASTE:
if view_mode & imple.MODE_BOOKMRAKS:
return self.imple.clipboard.has_data()
elif path == COMMANDS.CMD_SELECTALL:
window = self.imple.window
return window.get_mode() == window.MODE_GRID
elif path == COMMANDS.CMD_COPY:
window = imple.window
if window.get_mode() == window.MODE_TREE:
return view_mode & imple.MODE_BOOKMRAKS
else:
return not ((view_mode & imple.MODE_TAG) and \
(view_mode & imple.MODE_ROOT))
elif path == COMMANDS.CMD_CUT or \
path == COMMANDS.CMD_DELETE:
window = imple.window
if window.get_mode() == window.MODE_TREE:
return (view_mode & imple.MODE_BOOKMRAKS) and \
not (view_mode & imple.MODE_ROOT)
elif not (view_mode & imple.MODE_HISTORY):
if path == COMMANDS.CMD_CUT:
if view_mode & imple.MODE_TAG:
return not (view_mode & imple.MODE_ROOT)
return True
elif path == COMMANDS.CMD_INSERTDOC:
return self.imple.get_view_mode() & self.imple.MODE_BOOKMRAKS
elif path == COMMANDS.CMD_SAVE:
return self.imple.manager.modified
return False
elif complete.startswith(self.CMD_PROTOCOL):
path = complete[len(self.CMD_PROTOCOL):]
if path == COMMANDS.CMD_BACK:
return self.imple.history.has_previous()
elif path == COMMANDS.CMD_FORWARD:
return self.imple.history.has_next()
elif path == COMMANDS.CMD_MOVE:
if (view_mode & imple.MODE_BOOKMRAKS) or \
(view_mode & imple.MODE_UNSORTED):
window = self.imple.window
if window.get_mode() == window.MODE_TREE:
return not (view_mode & imple.MODE_ROOT)
return True
elif path.startswith("Insert"):
return view_mode & self.imple.MODE_BOOKMRAKS
elif path == COMMANDS.CMD_MIGRATION:
return True
elif path == COMMANDS.CMD_ABOUT:
return True
elif path == COMMANDS.CMD_NEW_MENU:
return True
return False
def mode_changed(self):
""" Mode changed on the window. """
dispatcher = self.dispatcher
def set_state(command):
state = self.get_command_state(command)
dispatcher.set_enable(command, state)
set_state(self.UNO_PROTOCOL + COMMANDS.CMD_CUT)
set_state(self.UNO_PROTOCOL + COMMANDS.CMD_COPY)
set_state(self.UNO_PROTOCOL + COMMANDS.CMD_PASTE)
set_state(self.UNO_PROTOCOL + COMMANDS.CMD_DELETE)
set_state(self.CMD_PROTOCOL + COMMANDS.CMD_MOVE)
set_state(self.CMD_PROTOCOL + COMMANDS.CMD_INSERT_BOOKMRAK)
set_state(self.CMD_PROTOCOL + COMMANDS.CMD_INSERT_FOLDER)
set_state(self.CMD_PROTOCOL + COMMANDS.CMD_INSERT_SEPARATOR)
# XViewSettingsSupplier
def getViewSettings(self):
return ViewSettings(self)
def get_view_state(self, name):
if name.startswith("Show"):
try:
return self.imple.column_state[name[4:]]
except:
pass
raise UnknownPropertyException(name, self)
def set_view_state(self, name, state):
if name.startswith("Show"):
if name == "ShowName":
raise PropertyVetoException(name, self)
if state != self.imple.column_state[name[4:]]:
self.imple.column_state[name[4:]] = state
self.imple.column_state_changed()
return
raise UnknownPropertyException(name, self)
# XPropertySet
def getPropertySetInfo(self):
return PropertySetInfo(())
def setPropertyValue(self, name, value):
raise UnknownPropertyException(name, self)
def getPropertyValue(self, name):
raise UnknownPropertyException(name, self)
def addPropertyChangeListener(self, name, listener): pass
def removePropertyChangeListener(self, name, listener): pass
def addVetoableChangeListener(self, name, listener): pass
def removeVetoableChangeListener(self, name, listener): pass
# XTitle
def getTitle(self):
return self.imple.manager.bookmark_name
def setTitle(self, title):
self.frame.setTitle(title)
def dispose(self):
try:
ComponentBase.dispose(self)
self.imple.window_closed()
self.dispatcher.clear()
self.frame = None
self.model = None
except Exception as e:
print(e)
# XController2
#ComponentWindow = property()
# XController
def suspend(self, suspend):
_suspend = True
if suspend:
if self.imple.manager.modified and not self.suspended:
_suspend = self.imple.query_saving()
if _suspend:
self.suspended = True
else:
self.suspended = False
return _suspend
def attachFrame(self, frame):
self.frame = frame
def attachModel(self, model):
self.model = model
model.connectController(self)
model.setCurrentController(self)
return False
def getModel(self):
return self.model
def getFrame(self):
return self.frame
def getStatusIndicator(self):
return None
def restoreViewData(self, data):
pass
def getViewData(self):
ps = self.frame.getContainerWindow().getPosSize()
window = self.imple.window
d = self.imple.window.get_column_width()
for k, v in self.imple.column_state.items():
if not k in d:
d[k] = 0
try:
return ";".join((
",".join((str(ps.X), str(ps.Y), str(ps.Width), str(ps.Height))),
str(window.tree.getPosSize().Width),
",".join([str(int(self.imple.column_state[name]))
for name in self.imple.COLUMN_NAMES]),
",".join([str(d[name])
for name in self.imple.COLUMN_NAMES])
))
except:
pass
return ""
# XSelectionSupplier
def select(self, obj):
return False
def getSelection(self):
return None
def addSelectionChangeListener(self, listener): pass
def removeSelectionChangeListener(self, listener): pass
# XDispatchProvider
def queryDispatches(self, requests): pass
def queryDispatch(self, url, name, flags):
command = url.Complete
if command in self.PROCESSED_COMMANDS or \
command.startswith(self.CMD_PROTOCOL):
self.dispatcher.register_url(url)
return self.dispatcher
return None
# XDispatchInformationProvider
def getSupportedCommandGroups(self):
# Application, View, Edit, Insert
return (1, 2, 4, 9,)
def getConfigurableDispatchInformation(self, group):
if group == 1:
return (
DispatchInformation(self.CMD_PROTOCOL + COMMANDS.CMD_ABOUT, 1),
)
elif group == 2:
return (
DispatchInformation(self.CMD_PROTOCOL + COMMANDS.CMD_BACK, 2),
DispatchInformation(self.CMD_PROTOCOL + COMMANDS.CMD_FORWARD, 2),
DispatchInformation(self.CMD_PROTOCOL + COMMANDS.CMD_OPEN, 2),
)
elif group == 4:
return (
DispatchInformation(self.CMD_PROTOCOL + COMMANDS.CMD_MOVE, 4),
)
elif group == 9:
return (
DispatchInformation(self.CMD_PROTOCOL + COMMANDS.CMD_INSERT_BOOKMRAK, 9),
DispatchInformation(self.CMD_PROTOCOL + COMMANDS.CMD_INSERT_SEPARATOR, 9),
DispatchInformation(self.CMD_PROTOCOL + COMMANDS.CMD_INSERT_FOLDER, 9),
)
return ()
def change_display_item(self, mode=None):
""" Update request to show item. """
self.imple.change_display_item(mode)
self.mode_changed()
def change_display_container(self):
""" Update request to show container item. """
self.imple.change_display_container()
self.mode_changed()
def data_update_request(self, mode, update_mode):
""" Request to update data with new value. """
self.imple.data_update_request(mode, update_mode)
def check_item_is_container(self, index):
""" Check the specific item is a container or not. """
return self.imple.check_item_is_container(index)
def move_from_tree(self, data_node, pos_type, dest_node=None, dest_index=None, is_copy=False):
""" Move item from tree by drag and drop. """
self.imple.move_from_tree(data_node, pos_type, dest_node, dest_index, is_copy)
def move_from_grid(self, data_positions, pos_type, dest_node=None, dest_index=None, is_copy=False):
""" Move item from grid by drag and drop. """
self.imple.move_from_grid(data_positions, pos_type, dest_node, dest_index, is_copy)
def get_value1(self):
self.imple.get_value1()
def get_value2(self):
try:
self.imple.get_value2()
except Exception as e:
print(e)
def can_move(self):
""" Check the current item can be moved. """
imple = self.imple
view_mode = imple.get_view_mode()
mode = imple.window.get_mode()
if mode == imple.window.MODE_TREE:
if view_mode & imple.MODE_BOOKMRAKS:
return not (view_mode & imple.MODE_ROOT)
elif view_mode & imple.MODE_TAG:
return not (view_mode & imple.MODE_ROOT)
elif mode == imple.window.MODE_GRID:
if view_mode & imple.MODE_TAG:
return not (view_mode & imple.MODE_ROOT)
return (view_mode & imple.MODE_BOOKMRAKS) or \
(view_mode & imple.MODE_UNSORTED)
return False
def can_move_to(self, node, pos, copy):
""" Check the item can be move to specific position. """
if isinstance(node, BookmarksMenuTreeContainerNode):
return pos
elif isinstance(node, BookmarksMenuTreeRootNode):
return self.imple.window.POSITION_ITEM
elif isinstance(node, TagsTreeContainerNode):
return self.imple.window.POSITION_ITEM
elif isinstance(node, UnsortedBookmarksRootNode):
return self.imple.window.POSITION_ITEM
return self.imple.window.POSITION_NONE
def fill_menu(self, type, menu):
""" Fill menu items. """
commands = COMMANDS
_ = self.imple._
window = self.imple.window
bookmarks_config = get_config(self.imple.ctx,
"/org.openoffice.Office.UI.BookmarksCommands/UserInterface/Commands")
generic_config = get_config(self.imple.ctx,
"/org.openoffice.Office.UI.GenericCommands/UserInterface/Commands")
def get_label(name, default, bookmarks):
if bookmarks:
config = bookmarks_config
else:
config = generic_config
if config.hasByName(name):
return config.getByName(name).Label
return default
items = [
(commands.ID_OPEN,
self.CMD_PROTOCOL + commands.CMD_OPEN, "~Open", 1),
None,
(commands.ID_INSERT_BOOKMRAK,
self.CMD_PROTOCOL + commands.CMD_INSERT_BOOKMRAK, "New ~Bookmark", 1),
(commands.ID_INSERT_SEPARATOR,
self.CMD_PROTOCOL + commands.CMD_INSERT_SEPARATOR, "New ~Separator", 1),
(commands.ID_INSERT_FOLDER,
self.CMD_PROTOCOL + commands.CMD_INSERT_FOLDER, "New ~Folder", 1),
None,
(commands.ID_CUT,
self.UNO_PROTOCOL + commands.CMD_CUT, "Cu~t", 0),
(commands.ID_COPY,
self.UNO_PROTOCOL + commands.CMD_COPY, "~Copy", 0),
(commands.ID_PASTE,
self.UNO_PROTOCOL + commands.CMD_PASTE, "~Paste", 0),
None,
(commands.ID_DELETE,
self.CMD_PROTOCOL + commands.CMD_DELETE, "~Delete", 1),
None,
(commands.ID_SELECTALL,
self.UNO_PROTOCOL + commands.CMD_SELECTALL, "Select ~All", 0),
]
# ToDo HelpCommand
mi = menu.insertItem
msc = menu.setCommand
for i, item in enumerate(items):
if item:
mi(item[0], get_label(item[1], item[2], item[3]), 0, i)
msc(item[0], item[1])
else:
menu.insertSeparator(i)
def update_menu(self, menu, type):
""" Update state of menu items. """
commands = COMMANDS
window = self.imple.window
MODE_NONE = window.MODE_NONE
mode = window.get_mode()
me = menu.enableItem
is_none = type == MODE_NONE
state_delete = True
state_cut = True
state_copy = True
state_new = True
state_open = False
state_select_all = mode != window.MODE_TREE
state_paste = self.imple.clipboard.has_data()
imple = self.imple
view_mode = imple.get_view_mode()
if (view_mode & imple.MODE_TAG) or \
(view_mode & imple.MODE_HISTORY):
state_new = False
if type == window.MODE_TREE or \
mode == window.MODE_TREE:
if view_mode & imple.MODE_TAG:
state_copy = True
state_cut = False
if view_mode & imple.MODE_ROOT:
state_paste = False
state_copy = False
if view_mode & imple.MODE_ROOT:
state_delete = False
state_cut = False
elif type == window.MODE_GRID or \
mode == window.MODE_GRID:
state_open = True
if not window.grid_get_selection_count():
state_delete = False
state_cut = False
state_copy = False
state_open = False
if (view_mode & imple.MODE_TAG) and \
(view_mode & imple.MODE_ROOT):
state_cut = False
state_copy = True
state_paste = False
if view_mode & imple.MODE_HISTORY:
state_copy = True
state_cut = False
state_delete = False
state_select_all = False
state_paste = False
root_selected = False
if mode == window.MODE_TREE:
root_selected = window.tree_is_root_selected()
me(commands.ID_OPEN, state_open)
me(commands.ID_INSERT_BOOKMRAK, state_new)
me(commands.ID_INSERT_FOLDER, state_new)
me(commands.ID_INSERT_SEPARATOR, state_new)
me(commands.ID_CUT, state_cut)
me(commands.ID_COPY, state_copy)
me(commands.ID_PASTE, state_paste)
me(commands.ID_DELETE, state_delete)
me(commands.ID_SELECTALL, state_select_all)
def do_action_by_name(self, command):
""" Execute named action. """
commands = COMMANDS
try:
_command = command.split(":", 1)[1]
except:
_command = command
try:
if hasattr(self.imple, "do_" + _command):
getattr(self.imple, "do_" + _command)()
else:
self.imple.commands.execute_command(command)
except Exception as e:
print(e)
traceback.print_exc()
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from futurist import periodics
from oslo_utils import timeutils
from taskflow.engines.action_engine import executor
from taskflow.engines.worker_based import dispatcher
from taskflow.engines.worker_based import protocol as pr
from taskflow.engines.worker_based import proxy
from taskflow.engines.worker_based import types as wt
from taskflow import exceptions as exc
from taskflow import logging
from taskflow import task as task_atom
from taskflow.utils import kombu_utils as ku
from taskflow.utils import misc
from taskflow.utils import threading_utils as tu
LOG = logging.getLogger(__name__)
class WorkerTaskExecutor(executor.TaskExecutor):
"""Executes tasks on remote workers."""
def __init__(self, uuid, exchange, topics,
transition_timeout=pr.REQUEST_TIMEOUT,
url=None, transport=None, transport_options=None,
retry_options=None):
self._uuid = uuid
self._requests_cache = wt.RequestsCache()
self._transition_timeout = transition_timeout
type_handlers = {
pr.RESPONSE: dispatcher.Handler(self._process_response,
validator=pr.Response.validate),
}
self._proxy = proxy.Proxy(uuid, exchange,
type_handlers=type_handlers,
on_wait=self._on_wait, url=url,
transport=transport,
transport_options=transport_options,
retry_options=retry_options)
        # NOTE(harlowja): This is the simplest finder impl. that
        # doesn't have external dependencies (outside of what this engine
        # already requires); it does, however, create periodic 'polling' traffic
# to workers to 'learn' of the tasks they can perform (and requires
# pre-existing knowledge of the topics those workers are on to gather
# and update this information).
self._finder = wt.ProxyWorkerFinder(uuid, self._proxy, topics)
self._finder.notifier.register(wt.WorkerFinder.WORKER_ARRIVED,
self._on_worker)
self._helpers = tu.ThreadBundle()
self._helpers.bind(lambda: tu.daemon_thread(self._proxy.start),
after_start=lambda t: self._proxy.wait(),
before_join=lambda t: self._proxy.stop())
p_worker = periodics.PeriodicWorker.create([self._finder])
if p_worker:
self._helpers.bind(lambda: tu.daemon_thread(p_worker.start),
before_join=lambda t: p_worker.stop(),
after_join=lambda t: p_worker.reset(),
before_start=lambda t: p_worker.reset())
def _on_worker(self, event_type, details):
"""Process new worker that has arrived (and fire off any work)."""
worker = details['worker']
for request in self._requests_cache.get_waiting_requests(worker):
if request.transition_and_log_error(pr.PENDING, logger=LOG):
self._publish_request(request, worker)
def _process_response(self, response, message):
"""Process response from remote side."""
LOG.debug("Started processing response message '%s'",
ku.DelayedPretty(message))
try:
task_uuid = message.properties['correlation_id']
except KeyError:
LOG.warning("The 'correlation_id' message property is"
" missing in message '%s'",
ku.DelayedPretty(message))
else:
request = self._requests_cache.get(task_uuid)
if request is not None:
response = pr.Response.from_dict(response)
LOG.debug("Response with state '%s' received for '%s'",
response.state, request)
if response.state == pr.RUNNING:
request.transition_and_log_error(pr.RUNNING, logger=LOG)
elif response.state == pr.EVENT:
# Proxy the event + details to the task/request notifier...
event_type = response.data['event_type']
details = response.data['details']
request.notifier.notify(event_type, details)
elif response.state in (pr.FAILURE, pr.SUCCESS):
moved = request.transition_and_log_error(response.state,
logger=LOG)
if moved:
# NOTE(imelnikov): request should not be in the
# cache when another thread can see its result and
# schedule another request with the same uuid; so
# we remove it, then set the result...
del self._requests_cache[request.uuid]
request.set_result(**response.data)
else:
LOG.warning("Unexpected response status '%s'",
response.state)
else:
LOG.debug("Request with id='%s' not found", task_uuid)
@staticmethod
def _handle_expired_request(request):
"""Handle expired request.
When request has expired it is removed from the requests cache and
the `RequestTimeout` exception is set as a request result.
"""
if request.transition_and_log_error(pr.FAILURE, logger=LOG):
# Raise an exception (and then catch it) so we get a nice
# traceback that the request will get instead of it getting
# just an exception with no traceback...
try:
request_age = timeutils.delta_seconds(request.created_on,
timeutils.utcnow())
raise exc.RequestTimeout(
"Request '%s' has expired after waiting for %0.2f"
" seconds for it to transition out of (%s) states"
% (request, request_age, ", ".join(pr.WAITING_STATES)))
except exc.RequestTimeout:
with misc.capture_failure() as failure:
LOG.debug(failure.exception_str)
request.set_result(failure)
def _on_wait(self):
"""This function is called cyclically between draining events."""
self._requests_cache.cleanup(self._handle_expired_request)
def _submit_task(self, task, task_uuid, action, arguments,
progress_callback=None, **kwargs):
"""Submit task request to a worker."""
request = pr.Request(task, task_uuid, action, arguments,
self._transition_timeout, **kwargs)
# Register the callback, so that we can proxy the progress correctly.
if (progress_callback is not None and
request.notifier.can_be_registered(
task_atom.EVENT_UPDATE_PROGRESS)):
request.notifier.register(task_atom.EVENT_UPDATE_PROGRESS,
progress_callback)
cleaner = functools.partial(request.notifier.deregister,
task_atom.EVENT_UPDATE_PROGRESS,
progress_callback)
request.result.add_done_callback(lambda fut: cleaner())
# Get task's worker and publish request if worker was found.
worker = self._finder.get_worker_for_task(task)
if worker is not None:
            # NOTE(skudriashev): Make sure the request is set to the PENDING
            # state before putting it into the requests cache, to prevent the
            # notify processing thread from picking it up from the list of
            # waiting requests and publishing it before it is published here,
            # which would publish it twice.
if request.transition_and_log_error(pr.PENDING, logger=LOG):
self._requests_cache[request.uuid] = request
self._publish_request(request, worker)
else:
LOG.debug("Delaying submission of '%s', no currently known"
" worker/s available to process it", request)
self._requests_cache[request.uuid] = request
return request.result
def _publish_request(self, request, worker):
"""Publish request to a given topic."""
LOG.debug("Submitting execution of '%s' to worker '%s' (expecting"
" response identified by reply_to=%s and"
" correlation_id=%s)", request, worker, self._uuid,
request.uuid)
try:
self._proxy.publish(request, worker.topic,
reply_to=self._uuid,
correlation_id=request.uuid)
except Exception:
with misc.capture_failure() as failure:
LOG.critical("Failed to submit '%s' (transitioning it to"
" %s)", request, pr.FAILURE, exc_info=True)
if request.transition_and_log_error(pr.FAILURE, logger=LOG):
del self._requests_cache[request.uuid]
request.set_result(failure)
def execute_task(self, task, task_uuid, arguments,
progress_callback=None):
return self._submit_task(task, task_uuid, pr.EXECUTE, arguments,
progress_callback=progress_callback)
def revert_task(self, task, task_uuid, arguments, result, failures,
progress_callback=None):
return self._submit_task(task, task_uuid, pr.REVERT, arguments,
progress_callback=progress_callback,
result=result, failures=failures)
def wait_for_workers(self, workers=1, timeout=None):
"""Waits for geq workers to notify they are ready to do work.
NOTE(harlowja): if a timeout is provided this function will wait
until that timeout expires, if the amount of workers does not reach
the desired amount of workers before the timeout expires then this will
return how many workers are still needed, otherwise it will
return zero.
"""
return self._finder.wait_for_workers(workers=workers,
timeout=timeout)
def start(self):
"""Starts proxy thread and associated topic notification thread."""
self._helpers.start()
def stop(self):
"""Stops proxy thread and associated topic notification thread."""
self._helpers.stop()
self._requests_cache.clear(self._handle_expired_request)
self._finder.clear()
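# Usage sketch (not part of the original module; the broker URL and topic names
# below are illustrative): create the executor with the engine uuid, exchange
# and known worker topics, start it, submit work, then stop it.
#
#     executor = WorkerTaskExecutor(uuid='engine-uuid', exchange='taskflow',
#                                   topics=['worker-topic'],
#                                   url='amqp://guest@localhost//')
#     executor.start()
#     try:
#         fut = executor.execute_task(my_task, task_uuid, {'x': 1})
#         fut.result()  # resolves when a worker responds (or the request expires)
#     finally:
#         executor.stop()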
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from oslo_utils import timeutils
import six
import webob
from cinder.api.v2 import types
from cinder.api.v2.views import types as views_types
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.volume import volume_types
def fake_volume_type(id):
specs = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"
}
return dict(
id=id,
name='vol_type_%s' % six.text_type(id),
description='vol_type_desc_%s' % six.text_type(id),
extra_specs=specs,
)
def return_volume_types_get_all_types(context, filters=None, marker=None,
limit=None, sort_keys=None,
sort_dirs=None, offset=None,
list_result=False):
result = dict(vol_type_1=fake_volume_type(1),
vol_type_2=fake_volume_type(2),
vol_type_3=fake_volume_type(3)
)
if list_result:
return list(result.values())
return result
def return_empty_volume_types_get_all_types(context, filters=None, marker=None,
limit=None, sort_keys=None,
sort_dirs=None, offset=None,
list_result=False):
if list_result:
return []
return {}
def return_volume_types_get_volume_type(context, id):
if id == fake.WILL_NOT_BE_FOUND_ID:
raise exception.VolumeTypeNotFound(volume_type_id=id)
return fake_volume_type(id)
def return_volume_types_get_default():
return fake_volume_type(1)
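# For readability, the stubs above produce dictionaries of this shape
# (derived directly from fake_volume_type):
#
#     {'id': 1, 'name': 'vol_type_1', 'description': 'vol_type_desc_1',
#      'extra_specs': {'key1': 'value1', 'key2': 'value2', 'key3': 'value3',
#                      'key4': 'value4', 'key5': 'value5'}}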
class VolumeTypesApiTest(test.TestCase):
def _create_volume_type(self, volume_type_name, extra_specs=None,
is_public=True, projects=None):
return volume_types.create(self.ctxt, volume_type_name, extra_specs,
is_public, projects).get('id')
def setUp(self):
super(VolumeTypesApiTest, self).setUp()
self.controller = types.VolumeTypesController()
self.ctxt = context.RequestContext(user_id=fake.USER_ID,
project_id=fake.PROJECT_ID,
is_admin=True)
self.type_id1 = self._create_volume_type('volume_type1',
{'key1': 'value1'})
self.type_id2 = self._create_volume_type('volume_type2',
{'key2': 'value2'})
self.type_id3 = self._create_volume_type('volume_type3',
{'key3': 'value3'}, False,
[fake.PROJECT_ID])
def test_volume_types_index(self):
self.mock_object(volume_types, 'get_all_types',
return_volume_types_get_all_types)
req = fakes.HTTPRequest.blank('/v2/%s/types' % fake.PROJECT_ID,
use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEqual(3, len(res_dict['volume_types']))
expected_names = ['vol_type_1', 'vol_type_2', 'vol_type_3']
actual_names = map(lambda e: e['name'], res_dict['volume_types'])
self.assertEqual(set(expected_names), set(actual_names))
for entry in res_dict['volume_types']:
self.assertEqual('value1', entry['extra_specs']['key1'])
def test_volume_types_index_no_data(self):
self.mock_object(volume_types, 'get_all_types',
return_empty_volume_types_get_all_types)
req = fakes.HTTPRequest.blank('/v2/%s/types' % fake.PROJECT_ID)
res_dict = self.controller.index(req)
self.assertEqual(0, len(res_dict['volume_types']))
def test_volume_types_index_with_limit(self):
req = fakes.HTTPRequest.blank('/v2/%s/types?limit=1' % fake.PROJECT_ID)
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
self.assertEqual(1, len(res['volume_types']))
self.assertEqual(self.type_id3, res['volume_types'][0]['id'])
expect_next_link = ('http://localhost/v2/%s/types?limit=1'
'&marker=%s' %
(fake.PROJECT_ID, res['volume_types'][0]['id']))
self.assertEqual(expect_next_link, res['volume_type_links'][0]['href'])
def test_volume_types_index_with_offset(self):
req = fakes.HTTPRequest.blank(
'/v2/%s/types?offset=1' % fake.PROJECT_ID)
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
self.assertEqual(2, len(res['volume_types']))
def test_volume_types_index_with_offset_out_of_range(self):
url = '/v2/%s/types?offset=424366766556787' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_volume_types_index_with_limit_and_offset(self):
req = fakes.HTTPRequest.blank(
'/v2/%s/types?limit=2&offset=1' % fake.PROJECT_ID)
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
self.assertEqual(2, len(res['volume_types']))
self.assertEqual(self.type_id2, res['volume_types'][0]['id'])
self.assertEqual(self.type_id1, res['volume_types'][1]['id'])
def test_volume_types_index_with_limit_and_marker(self):
req = fakes.HTTPRequest.blank('/v2/%s/types?limit=1'
'&marker=%s' %
(fake.PROJECT_ID,
self.type_id2))
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
self.assertEqual(1, len(res['volume_types']))
self.assertEqual(self.type_id1, res['volume_types'][0]['id'])
def test_volume_types_index_with_valid_filter(self):
req = fakes.HTTPRequest.blank(
'/v2/%s/types?is_public=True' % fake.PROJECT_ID)
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
self.assertEqual(3, len(res['volume_types']))
self.assertEqual(self.type_id3, res['volume_types'][0]['id'])
self.assertEqual(self.type_id2, res['volume_types'][1]['id'])
self.assertEqual(self.type_id1, res['volume_types'][2]['id'])
def test_volume_types_index_with_invalid_filter(self):
req = fakes.HTTPRequest.blank(
'/v2/%s/types?id=%s' % (fake.PROJECT_ID, self.type_id1))
req.environ['cinder.context'] = context.RequestContext(
user_id=fake.USER_ID, project_id=fake.PROJECT_ID, is_admin=False)
res = self.controller.index(req)
self.assertEqual(3, len(res['volume_types']))
def test_volume_types_index_with_sort_keys(self):
req = fakes.HTTPRequest.blank('/v2/%s/types?sort=id' % fake.PROJECT_ID)
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
expect_result = [self.type_id1, self.type_id2, self.type_id3]
expect_result.sort(reverse=True)
self.assertEqual(3, len(res['volume_types']))
self.assertEqual(expect_result[0], res['volume_types'][0]['id'])
self.assertEqual(expect_result[1], res['volume_types'][1]['id'])
self.assertEqual(expect_result[2], res['volume_types'][2]['id'])
def test_volume_types_index_with_sort_and_limit(self):
req = fakes.HTTPRequest.blank(
'/v2/%s/types?sort=id&limit=2' % fake.PROJECT_ID)
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
expect_result = [self.type_id1, self.type_id2, self.type_id3]
expect_result.sort(reverse=True)
self.assertEqual(2, len(res['volume_types']))
self.assertEqual(expect_result[0], res['volume_types'][0]['id'])
self.assertEqual(expect_result[1], res['volume_types'][1]['id'])
def test_volume_types_index_with_sort_keys_and_sort_dirs(self):
req = fakes.HTTPRequest.blank(
'/v2/%s/types?sort=id:asc' % fake.PROJECT_ID)
req.environ['cinder.context'] = self.ctxt
res = self.controller.index(req)
expect_result = [self.type_id1, self.type_id2, self.type_id3]
expect_result.sort()
self.assertEqual(3, len(res['volume_types']))
self.assertEqual(expect_result[0], res['volume_types'][0]['id'])
self.assertEqual(expect_result[1], res['volume_types'][1]['id'])
self.assertEqual(expect_result[2], res['volume_types'][2]['id'])
def test_volume_types_show(self):
self.mock_object(volume_types, 'get_volume_type',
return_volume_types_get_volume_type)
type_id = str(uuid.uuid4())
req = fakes.HTTPRequest.blank('/v2/%s/types/' % fake.PROJECT_ID
+ type_id)
res_dict = self.controller.show(req, type_id)
self.assertEqual(1, len(res_dict))
self.assertEqual(type_id, res_dict['volume_type']['id'])
type_name = 'vol_type_' + type_id
self.assertEqual(type_name, res_dict['volume_type']['name'])
def test_volume_types_show_not_found(self):
self.mock_object(volume_types, 'get_volume_type',
return_volume_types_get_volume_type)
req = fakes.HTTPRequest.blank('/v2/%s/types/%s' %
(fake.PROJECT_ID,
fake.WILL_NOT_BE_FOUND_ID))
self.assertRaises(exception.VolumeTypeNotFound, self.controller.show,
req, fake.WILL_NOT_BE_FOUND_ID)
def test_get_default(self):
self.mock_object(volume_types, 'get_default_volume_type',
return_volume_types_get_default)
req = fakes.HTTPRequest.blank('/v2/%s/types/default' % fake.PROJECT_ID)
req.method = 'GET'
res_dict = self.controller.show(req, 'default')
self.assertEqual(1, len(res_dict))
self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
self.assertEqual('vol_type_desc_1',
res_dict['volume_type']['description'])
def test_get_default_not_found(self):
self.mock_object(volume_types, 'get_default_volume_type',
return_value={})
req = fakes.HTTPRequest.blank('/v2/%s/types/default' % fake.PROJECT_ID)
req.method = 'GET'
self.assertRaises(exception.VolumeTypeNotFound,
self.controller.show, req, 'default')
def test_view_builder_show(self):
view_builder = views_types.ViewBuilder()
now = timeutils.utcnow().isoformat()
raw_volume_type = dict(
name='new_type',
description='new_type_desc',
qos_specs_id='new_id',
is_public=True,
deleted=False,
created_at=now,
updated_at=now,
extra_specs={},
deleted_at=None,
id=42,
)
request = fakes.HTTPRequest.blank("/v2")
output = view_builder.show(request, raw_volume_type)
self.assertIn('volume_type', output)
expected_volume_type = dict(
name='new_type',
description='new_type_desc',
is_public=True,
id=42,
)
self.assertDictEqual(expected_volume_type, output['volume_type'])
def test_view_builder_show_admin(self):
view_builder = views_types.ViewBuilder()
now = timeutils.utcnow().isoformat()
raw_volume_type = dict(
name='new_type',
description='new_type_desc',
qos_specs_id='new_id',
is_public=True,
deleted=False,
created_at=now,
updated_at=now,
extra_specs={},
deleted_at=None,
id=42,
)
request = fakes.HTTPRequest.blank("/v2", use_admin_context=True)
output = view_builder.show(request, raw_volume_type)
self.assertIn('volume_type', output)
expected_volume_type = dict(
name='new_type',
description='new_type_desc',
qos_specs_id='new_id',
is_public=True,
extra_specs={},
id=42,
)
self.assertDictEqual(expected_volume_type, output['volume_type'])
def test_view_builder_show_qos_specs_id_policy(self):
with mock.patch('cinder.context.RequestContext.authorize',
side_effect=[False, True]):
view_builder = views_types.ViewBuilder()
now = timeutils.utcnow().isoformat()
raw_volume_type = dict(
name='new_type',
description='new_type_desc',
qos_specs_id='new_id',
is_public=True,
deleted=False,
created_at=now,
updated_at=now,
extra_specs={},
deleted_at=None,
id=42,
)
request = fakes.HTTPRequest.blank("/v2")
output = view_builder.show(request, raw_volume_type)
self.assertIn('volume_type', output)
expected_volume_type = dict(
name='new_type',
description='new_type_desc',
qos_specs_id='new_id',
is_public=True,
id=42,
)
self.assertDictEqual(expected_volume_type, output['volume_type'])
def test_view_builder_show_extra_specs_policy(self):
with mock.patch('cinder.context.RequestContext.authorize',
side_effect=[True, False]):
view_builder = views_types.ViewBuilder()
now = timeutils.utcnow().isoformat()
raw_volume_type = dict(
name='new_type',
description='new_type_desc',
qos_specs_id='new_id',
is_public=True,
deleted=False,
created_at=now,
updated_at=now,
extra_specs={},
deleted_at=None,
id=42,
)
request = fakes.HTTPRequest.blank("/v2")
output = view_builder.show(request, raw_volume_type)
self.assertIn('volume_type', output)
expected_volume_type = dict(
name='new_type',
description='new_type_desc',
extra_specs={},
is_public=True,
id=42,
)
self.assertDictEqual(expected_volume_type, output['volume_type'])
with mock.patch('cinder.context.RequestContext.authorize',
side_effect=[False, False]):
view_builder = views_types.ViewBuilder()
now = timeutils.utcnow().isoformat()
raw_volume_type = dict(
name='new_type',
description='new_type_desc',
qos_specs_id='new_id',
is_public=True,
deleted=False,
created_at=now,
updated_at=now,
extra_specs={},
deleted_at=None,
id=42,
)
request = fakes.HTTPRequest.blank("/v2")
output = view_builder.show(request, raw_volume_type)
self.assertIn('volume_type', output)
expected_volume_type = dict(
name='new_type',
description='new_type_desc',
is_public=True,
id=42,
)
self.assertDictEqual(expected_volume_type, output['volume_type'])
def test_view_builder_show_pass_all_policy(self):
with mock.patch('cinder.context.RequestContext.authorize',
side_effect=[True, True]):
view_builder = views_types.ViewBuilder()
now = timeutils.utcnow().isoformat()
raw_volume_type = dict(
name='new_type',
description='new_type_desc',
qos_specs_id='new_id',
is_public=True,
deleted=False,
created_at=now,
updated_at=now,
extra_specs={},
deleted_at=None,
id=42,
)
request = fakes.HTTPRequest.blank("/v2")
output = view_builder.show(request, raw_volume_type)
self.assertIn('volume_type', output)
expected_volume_type = dict(
name='new_type',
description='new_type_desc',
qos_specs_id='new_id',
extra_specs={},
is_public=True,
id=42,
)
self.assertDictEqual(expected_volume_type, output['volume_type'])
def test_view_builder_list(self):
view_builder = views_types.ViewBuilder()
now = timeutils.utcnow().isoformat()
raw_volume_types = []
for i in range(0, 10):
raw_volume_types.append(
dict(
name='new_type',
description='new_type_desc',
qos_specs_id='new_id',
is_public=True,
deleted=False,
created_at=now,
updated_at=now,
extra_specs={},
deleted_at=None,
id=42 + i
)
)
request = fakes.HTTPRequest.blank("/v2")
output = view_builder.index(request, raw_volume_types)
self.assertIn('volume_types', output)
for i in range(0, 10):
expected_volume_type = dict(
name='new_type',
description='new_type_desc',
is_public=True,
id=42 + i
)
self.assertDictEqual(expected_volume_type,
output['volume_types'][i])
def test_view_builder_list_admin(self):
view_builder = views_types.ViewBuilder()
now = timeutils.utcnow().isoformat()
raw_volume_types = []
for i in range(0, 10):
raw_volume_types.append(
dict(
name='new_type',
description='new_type_desc',
qos_specs_id='new_id',
is_public=True,
deleted=False,
created_at=now,
updated_at=now,
extra_specs={},
deleted_at=None,
id=42 + i
)
)
request = fakes.HTTPRequest.blank("/v2", use_admin_context=True)
output = view_builder.index(request, raw_volume_types)
self.assertIn('volume_types', output)
for i in range(0, 10):
expected_volume_type = dict(
name='new_type',
description='new_type_desc',
qos_specs_id='new_id',
is_public=True,
extra_specs={},
id=42 + i
)
self.assertDictEqual(expected_volume_type,
output['volume_types'][i])
|
|
from .helper import key_for_cypher, value_for_cypher
from .elements import (EqualClauseElement, NotEqualClauseElement, SubsetClauseElement,
NotSubsetClauseElement, NullClauseElement, NotNullClauseElement,
InClauseElement, NotInClauseElement, GtClauseElement, GteClauseElement,
LtClauseElement, LteClauseElement, RegexClauseElement)
class NodeAttribute(object):
has_subquery = False
acoustic = False
def __init__(self, node, label):
self.node = node
self.label = label
self.output_label = None
def __hash__(self):
return hash((self.node, self.label))
def __str__(self):
return '{}.{}'.format(self.node, self.label)
def __repr__(self):
return '<NodeAttribute \'{}\'>'.format(str(self))
def for_cypher(self):
return '{}.{}'.format(self.node.alias, key_for_cypher(self.label))
def for_json(self):
return [[x for x in self.node.for_json()] + [self.label], self.output_label]
def for_filter(self):
return self.for_cypher()
def for_column(self):
return self.for_cypher()
def value_type(self):
a_type = self.node.node_type
if a_type == 'Speaker':
for name, t in self.node.hierarchy.speaker_properties:
if name == self.label:
if t == type(None) or t is None:
return None
return t
elif a_type == 'Discourse':
for name, t in self.node.hierarchy.discourse_properties:
if name == self.label:
if t == type(None) or t is None:
return None
return t
elif self.node.hierarchy.has_token_property(a_type, self.label):
for name, t in self.node.hierarchy.token_properties[a_type]:
if name == self.label:
if t == type(None) or t is None:
return None
return t
elif self.node.hierarchy.has_type_property(a_type, self.label):
for name, t in self.node.hierarchy.type_properties[a_type]:
if name == self.label:
if t == type(None) or t is None:
return None
return t
elif self.node.hierarchy.has_subannotation_property(a_type, self.label):
for name, t in self.node.hierarchy.subannotation_properties[a_type]:
if name == self.label:
if t == type(None) or t is None:
return None
return t
        raise ValueError('Property "{}" not found for type "{}".'.format(self.label, a_type))
def coerce_value(self, value):
if value is None:
return value
t = self.value_type()
if t is None:
return None
if isinstance(value, list):
return [t(x) for x in value]
return t(value)
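    # e.g. if value_type() resolves to float, coerce_value('1.5') -> 1.5 and
    # coerce_value(['1', '2']) -> [1.0, 2.0].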
@property
def alias(self):
""" Removes '`' from annotation, concatenates annotation alias and label"""
return '{}_{}'.format(self.node.alias.replace('`', ''), self.label)
@property
def alias_for_cypher(self):
return '`{}_{}`'.format(self.node.alias.replace('`', ''), self.label)
def aliased_for_cypher(self):
"""
creates cypher string to use in db
Returns
-------
string
string for db
"""
return '{} AS {}'.format(self.for_cypher(), self.alias_for_cypher)
def for_return(self):
return self.for_cypher()
def aliased_for_output(self):
"""
creates cypher string for output
Returns
-------
string
string for output
"""
return '{} AS {}'.format(self.for_return(), self.output_alias_for_cypher)
@property
def output_alias(self):
"""
returns output_label if there is one
return alias otherwise
"""
if self.output_label is not None:
return self.output_label
return self.alias
@property
def output_alias_for_cypher(self):
"""
returns output_label if there is one
return alias otherwise
"""
if self.output_label is not None:
return self.output_label
return self.alias_for_cypher
@property
def with_alias(self):
"""
returns type_alias if there is one
alias otherwise
"""
return self.node.alias
def column_name(self, label):
"""
sets a column name to label
"""
self.output_label = label
return self
def __eq__(self, other):
if self.label == 'subset':
return SubsetClauseElement(self, other)
if other is None:
return NullClauseElement(self, other)
return EqualClauseElement(self, other)
def __ne__(self, other):
if self.label == 'subset':
return NotSubsetClauseElement(self, other)
if other is None:
return NotNullClauseElement(self, other)
return NotEqualClauseElement(self, other)
def __gt__(self, other):
return GtClauseElement(self, other)
def __ge__(self, other):
return GteClauseElement(self, other)
def __lt__(self, other):
return LtClauseElement(self, other)
def __le__(self, other):
return LteClauseElement(self, other)
def in_(self, other):
"""
Checks if the parameter other has a 'cypher' element
executes the query if it does and appends the relevant results
or appends parameter other
Parameters
----------
other : list
attribute will be checked against elements in this list
Returns
-------
string
clause for asserting membership in a filter
"""
if hasattr(other, 'cypher'):
results = other.all()
t = []
for x in results:
t.append(getattr(x, self.label))
else:
t = other
return InClauseElement(self, t)
def not_in_(self, other):
"""
Checks if the parameter other has a 'cypher' element
executes the query if it does and appends the relevant results
or appends parameter other
Parameters
----------
other : list
attribute will be checked against elements in this list
Returns
-------
string
clause for asserting non-membership in a filter
"""
if hasattr(other, 'cypher'):
results = other.all()
t = []
for x in results:
t.append(getattr(x, self.label))
else:
t = other
return NotInClauseElement(self, t)
def regex(self, pattern):
""" Returns a clause for filtering based on regular expressions."""
return RegexClauseElement(self, pattern)
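    # Usage sketch (the node construction below is an assumption for
    # illustration; see the Node class later in this module):
    #
    #     word = Node('word', corpus='corpus', hierarchy=hierarchy)
    #     word.begin > 0.5             # -> GtClauseElement
    #     word.label.in_(['a', 'b'])   # -> InClauseElement
    #     word.label.regex('^th')      # -> RegexClauseElement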
@property
def nodes(self):
return self.node.nodes
cache_alias = alias
class CollectionAttribute(NodeAttribute):
collapsing = True
acoustic = False
filter_template = '{alias}.{property}'
return_template = 'extract(n in {alias}|n.{property})'
def __repr__(self):
return '<CollectionAttribute \'{}\'>'.format(str(self))
def value_type(self):
n = self.node.collected_node
a_type = n.node_type
if a_type == 'Speaker':
for name, t in self.node.hierarchy.speaker_properties:
if name == self.label:
if t == type(None) or t is None:
return None
return t
elif a_type == 'Discourse':
for name, t in self.node.hierarchy.discourse_properties:
if name == self.label:
if t == type(None) or t is None:
return None
return t
elif self.node.hierarchy.has_token_property(a_type, self.label):
for name, t in self.node.hierarchy.token_properties[a_type]:
if name == self.label:
if t == type(None) or t is None:
return None
return t
elif self.node.hierarchy.has_type_property(a_type, self.label):
for name, t in self.node.hierarchy.type_properties[a_type]:
if name == self.label:
if t == type(None) or t is None:
return None
return t
elif self.node.hierarchy.has_subannotation_property(a_type, self.label):
for name, t in self.node.hierarchy.subannotation_properties[a_type]:
if name == self.label:
if t == type(None) or t is None:
return None
return t
        raise ValueError('Property "{}" not found for type "{}".'.format(self.label, a_type))
def for_cypher(self):
return self.for_return()
def for_filter(self):
return self.filter_template.format(alias=self.node.collection_alias, property=self.label)
def for_return(self):
return self.return_template.format(alias=self.node.collection_alias, property=self.label)
@property
def with_aliases(self):
"""Returns annotation withs list """
return self.node.withs
@property
def with_alias(self):
"""returns annotation path_alias """
return self.node.collection_alias
@property
def cache_alias(self):
return self.node.anchor_node.alias
class Node(object):
non_optional = True
has_subquery = False
alias_template = 'node_{t}'
match_template = '({alias})'
def __init__(self, node_type, corpus=None, hierarchy=None):
self.node_type = node_type
self.corpus = corpus
self.hierarchy = hierarchy
self.subset_labels = []
def __eq__(self, other):
if not isinstance(other, Node):
return False
if self.node_type != other.node_type:
return False
if self.corpus != other.corpus:
return False
if self.subset_labels != other.subset_labels:
return False
return True
def __hash__(self):
return hash(self.key)
def __str__(self):
return self.key
def __repr__(self):
        return '<Node of {} in {} corpus>'.format(self.node_type, self.corpus)
def __getattr__(self, key):
return NodeAttribute(self, key)
@property
def key(self):
key = self.node_type
if self.subset_labels:
key += '_' + '_'.join(self.subset_labels)
return key
def for_json(self):
return [self.node_type]
def for_match(self):
return self.match_template.format(alias=self.define_alias)
@property
def alias(self):
return key_for_cypher(self.alias_template.format(t=self.key))
@property
def define_alias(self):
label_string = ':{}'.format(self.node_type)
if self.corpus is not None:
label_string += ':{}'.format(key_for_cypher(self.corpus))
if self.subset_labels:
label_string += ':' + ':'.join(map(key_for_cypher, self.subset_labels))
return '{}{}'.format(self.alias, label_string)
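    # e.g. a Node('word', corpus='c') with subset label 'content' has the
    # alias ``node_word_content`` and the define_alias
    # ``node_word_content:word:c:content`` (modulo any backquoting done by
    # key_for_cypher).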
def filter_by_subset(self, *args):
""" adds each item in args to the hierarchy type_labels"""
self.subset_labels = sorted(set(self.subset_labels + list(args)))
return self
@property
def with_alias(self):
return self.alias
@property
def withs(self):
return [self.alias]
@property
def nodes(self):
return [self]
class CollectionNode(object):
has_subquery = True
non_optional = False
subquery_match_template = '({anchor_node_alias})-->({def_collection_alias})'
subquery_order_by_template = ''
subquery_template = '''{optional}MATCH {for_match}
{where_string}
WITH {input_with_string}, {with_pre_collection}
{sub_query}
{order_by}
WITH {output_with_string}'''
collect_template = 'collect({a}) as {a}'
def __init__(self, anchor_node, collected_node):
self.anchor_node = anchor_node
self.collected_node = collected_node
def subquery(self, withs, filters=None, optional=False):
input_with = ', '.join(withs)
new_withs = withs - {self.collection_alias}
output_with = ', '.join(new_withs) + ', ' + self.with_statement()
where_string = ''
if filters is not None:
relevant = []
for c in filters:
if c.involves(self):
relevant.append(c.for_cypher())
if relevant:
where_string = 'WHERE ' + '\nAND '.join(relevant)
for_match = self.subquery_match_template.format(anchor_node_alias=self.anchor_node.alias,
def_collection_alias=self.def_collection_alias)
order_by = self.subquery_order_by_template
kwargs = {'for_match': for_match,
'where_string': where_string,
'input_with_string': input_with,
'order_by': order_by,
'sub_query': '',
'optional': '',
'with_pre_collection': self.with_pre_collection,
'output_with_string': output_with}
if optional:
            kwargs['optional'] = 'OPTIONAL '
return self.subquery_template.format(**kwargs)
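    # Roughly, for an anchor alias ``node_word`` and a collection alias
    # ``node_phone_in_node_word``, the rendered subquery looks like this
    # (an illustrative sketch, not verbatim output):
    #
    #     MATCH (node_word)-->(node_phone_in_node_word:phone:corpus)
    #     WHERE ...
    #     WITH node_word, node_phone_in_node_word
    #     WITH node_word, collect(node_phone_in_node_word) as node_phone_in_node_word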
@property
def with_pre_collection(self):
return self.collection_alias
def __eq__(self, other):
if not isinstance(other, CollectionNode):
return False
if self.anchor_node != other.anchor_node:
return False
if self.collected_node != other.collected_node:
return False
return True
@property
def nodes(self):
return [self] + self.anchor_node.nodes + self.collected_node.nodes
@property
def hierarchy(self):
return self.anchor_node.hierarchy
@property
def corpus(self):
return self.anchor_node.corpus
@property
def node_type(self):
return self.anchor_node.node_type
def __str__(self):
return '{}.{}'.format(self.anchor_node, self.collected_node)
def __repr__(self):
        return '<CollectionNode of {} under {}>'.format(str(self.collected_node), str(self.anchor_node))
def __hash__(self):
return hash((self.anchor_node, self.collected_node))
@property
def withs(self):
withs = [self.collection_alias]
return withs
def with_statement(self):
withs = [self.collect_template.format(a=self.collection_alias)
]
return ', '.join(withs)
@property
def def_collection_alias(self):
label_string = ':{}'.format(self.collected_node.node_type)
if self.corpus is not None:
label_string += ':{}'.format(key_for_cypher(self.collected_node.corpus))
if self.collected_node.subset_labels:
label_string += ':' + ':'.join(map(key_for_cypher, self.collected_node.subset_labels))
return '{}{}'.format(self.collection_alias, label_string)
@property
def collection_alias(self):
return key_for_cypher('{}_in_{}'.format(self.collected_node.alias, self.anchor_node.alias))
alias = collection_alias
def filter_by_subset(self, *args):
self.collected_node = self.collected_node.filter_by_subset(*args)
return self
def __getattr__(self, key):
return CollectionAttribute(self, key)
|
|
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Build the internal node tree from source code.
Does all the Python parsing and puts it into a tree structure for use in later
stages of the compilation process.
In the "nuitka.tree.TreeHelpers" module, the dispatching is happening. One function
deals with every node kind as found in the AST. The parsing is centered around
the module "ast" output.
Many higher level language features and translated into lower level ones.
In-place assignments, for loops, while loops, classes, complex calls, with
statements, and even or/and etc. are all translated to simpler constructs.
The output of this module is a node tree, which contains only relatively low
level operations. A property of the output is also an overlaid tree of provider
structure that indicates variable provision.
Classes are handled in a separate module. They are re-formulated into functions
producing dictionaries used to call the metaclass with.
Try/except/else statements are handled in a separate module. They are
re-formulated into using a temporary variable to track if the else branch
should execute.
Try/finally statements are handled in a separate module. They are re-formulated
to use a nested try/finally for (un)publishing the exception for Python3.
With statements are handled in a separate module. They are re-formulated into
special attribute lookups for "__enter__" and "__exit__", calls of them,
catching and passing in exceptions raised.
"""
import marshal
import os
import sys
from nuitka import (
ModuleRegistry,
Options,
OutputDirectories,
SourceCodeReferences,
)
from nuitka.__past__ import ( # pylint: disable=I0021,redefined-builtin
long,
unicode,
)
from nuitka.Caching import (
getCachedImportedModulesNames,
hasCachedImportedModulesNames,
)
from nuitka.containers.oset import OrderedSet
from nuitka.Errors import CodeTooComplexCode
from nuitka.freezer.Standalone import detectEarlyImports
from nuitka.importing import Importing
from nuitka.importing.ImportCache import addImportedModule
from nuitka.importing.PreloadedPackages import getPthImportedPackages
from nuitka.nodes.AssignNodes import StatementAssignmentVariableName
from nuitka.nodes.AttributeNodes import (
ExpressionAttributeLookup,
StatementAssignmentAttribute,
)
from nuitka.nodes.BuiltinFormatNodes import (
ExpressionBuiltinAscii,
ExpressionBuiltinFormat,
)
from nuitka.nodes.BuiltinRefNodes import quick_names
from nuitka.nodes.BuiltinTypeNodes import ExpressionBuiltinStrP3
from nuitka.nodes.ConditionalNodes import (
ExpressionConditional,
makeStatementConditional,
)
from nuitka.nodes.ConstantRefNodes import (
ExpressionConstantEllipsisRef,
ExpressionConstantNoneRef,
makeConstantRefNode,
)
from nuitka.nodes.CoroutineNodes import ExpressionAsyncWait
from nuitka.nodes.ExceptionNodes import (
StatementRaiseException,
StatementReraiseException,
)
from nuitka.nodes.FutureSpecs import FutureSpec
from nuitka.nodes.GeneratorNodes import StatementGeneratorReturn
from nuitka.nodes.ImportNodes import makeExpressionAbsoluteImportNode
from nuitka.nodes.LoopNodes import StatementLoopBreak, StatementLoopContinue
from nuitka.nodes.ModuleAttributeNodes import (
ExpressionModuleAttributeFileRef,
ExpressionModuleAttributeSpecRef,
)
from nuitka.nodes.ModuleNodes import (
CompiledPythonModule,
CompiledPythonPackage,
PythonMainModule,
PythonShlibModule,
makeUncompiledPythonModule,
)
from nuitka.nodes.NodeMakingHelpers import (
makeRaiseExceptionStatementFromInstance,
)
from nuitka.nodes.OperatorNodes import makeBinaryOperationNode
from nuitka.nodes.OperatorNodesUnary import makeExpressionOperationUnary
from nuitka.nodes.ReturnNodes import makeStatementReturn
from nuitka.nodes.SliceNodes import makeExpressionBuiltinSlice
from nuitka.nodes.StatementNodes import StatementExpressionOnly
from nuitka.nodes.StringConcatenationNodes import ExpressionStringConcatenation
from nuitka.nodes.VariableRefNodes import ExpressionVariableNameRef
from nuitka.nodes.YieldNodes import ExpressionYieldFromWaitable
from nuitka.optimizations.BytecodeDemotion import demoteSourceCodeToBytecode
from nuitka.Options import shallWarnUnusualCode
from nuitka.plugins.Plugins import Plugins
from nuitka.PythonVersions import python_version
from nuitka.Tracing import (
memory_logger,
optimization_logger,
plugins_logger,
recursion_logger,
unusual_logger,
)
from nuitka.utils import MemoryUsage
from nuitka.utils.FileOperations import splitPath
from nuitka.utils.ModuleNames import ModuleName
from . import SyntaxErrors
from .ReformulationAssertStatements import buildAssertNode
from .ReformulationAssignmentStatements import (
buildAnnAssignNode,
buildAssignNode,
buildDeleteNode,
buildInplaceAssignNode,
buildNamedExprNode,
)
from .ReformulationBooleanExpressions import buildBoolOpNode
from .ReformulationCallExpressions import buildCallNode
from .ReformulationClasses import buildClassNode
from .ReformulationComparisonExpressions import buildComparisonNode
from .ReformulationContractionExpressions import (
buildDictContractionNode,
buildGeneratorExpressionNode,
buildListContractionNode,
buildSetContractionNode,
)
from .ReformulationDictionaryCreation import buildDictionaryNode
from .ReformulationExecStatements import buildExecNode
from .ReformulationForLoopStatements import (
buildAsyncForLoopNode,
buildForLoopNode,
)
from .ReformulationFunctionStatements import (
buildAsyncFunctionNode,
buildFunctionNode,
)
from .ReformulationImportStatements import (
buildImportFromNode,
buildImportModulesNode,
checkFutureImportsOnlyAtStart,
getFutureSpec,
popFutureSpec,
pushFutureSpec,
)
from .ReformulationLambdaExpressions import buildLambdaNode
from .ReformulationNamespacePackages import (
createImporterCacheAssignment,
createNamespacePackage,
createPathAssignment,
)
from .ReformulationPrintStatements import buildPrintNode
from .ReformulationSequenceCreation import (
buildListCreationNode,
buildSetCreationNode,
buildTupleCreationNode,
)
from .ReformulationSubscriptExpressions import buildSubscriptNode
from .ReformulationTryExceptStatements import buildTryExceptionNode
from .ReformulationTryFinallyStatements import buildTryFinallyNode
from .ReformulationWhileLoopStatements import buildWhileLoopNode
from .ReformulationWithStatements import buildAsyncWithNode, buildWithNode
from .ReformulationYieldExpressions import buildYieldFromNode, buildYieldNode
from .SourceReading import (
checkPythonVersionFromCode,
readSourceCodeFromFilename,
)
from .TreeHelpers import (
buildNode,
buildNodeList,
buildStatementsNode,
extractDocFromBody,
getBuildContext,
getKind,
makeModuleFrame,
makeStatementsSequence,
makeStatementsSequenceFromStatement,
mangleName,
mergeStatements,
parseSourceCodeToAst,
setBuildingDispatchers,
)
from .VariableClosure import completeVariableClosures
if str is not bytes:
def buildVariableReferenceNode(provider, node, source_ref):
# Shortcut for Python3, which gives syntax errors for assigning these.
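        # (quick_names is assumed to map literal names such as "None",
        # "True" and "False" to their constant values.)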
if node.id in quick_names:
return makeConstantRefNode(
constant=quick_names[node.id], source_ref=source_ref
)
return ExpressionVariableNameRef(
provider=provider,
variable_name=mangleName(node.id, provider),
source_ref=source_ref,
)
else:
def buildVariableReferenceNode(provider, node, source_ref):
return ExpressionVariableNameRef(
provider=provider,
variable_name=mangleName(node.id, provider),
source_ref=source_ref,
)
# Python3.4 or higher: True and False are not given as variables anymore.
# Python3.8: all kinds of constants are like this.
def buildNamedConstantNode(node, source_ref):
return makeConstantRefNode(
constant=node.value, source_ref=source_ref, user_provided=True
)
def buildConditionNode(provider, node, source_ref):
# Conditional statements may have one or two branches. We will never see an
# "elif", because that's already dealt with by module "ast", which turns it
# into nested conditional statements.
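    # e.g. ``if a: x`` followed by ``elif b: y`` arrives from "ast" roughly as
    # If(test=a, body=[x], orelse=[If(test=b, body=[y], orelse=[])]).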
return makeStatementConditional(
condition=buildNode(provider, node.test, source_ref),
yes_branch=buildStatementsNode(
provider=provider, nodes=node.body, source_ref=source_ref
),
no_branch=buildStatementsNode(
provider=provider,
nodes=node.orelse if node.orelse else None,
source_ref=source_ref,
),
source_ref=source_ref,
)
def buildTryFinallyNode2(provider, node, source_ref):
# Try/finally node statements of old style.
return buildTryFinallyNode(
provider=provider,
build_tried=lambda: buildStatementsNode(
provider=provider, nodes=node.body, source_ref=source_ref
),
node=node,
source_ref=source_ref,
)
def buildTryNode(provider, node, source_ref):
    # Note: This variant is used for Python3.3 or higher only; older versions
    # use the builders above. Python3.3 merges try/except with try/finally in
    # the "ast", and we split it up again, as it's logically separate.
# Shortcut missing try/finally.
if not node.handlers:
return buildTryFinallyNode2(provider, node, source_ref)
if not node.finalbody:
return buildTryExceptionNode(
provider=provider, node=node, source_ref=source_ref
)
return buildTryFinallyNode(
provider=provider,
build_tried=lambda: makeStatementsSequence(
statements=mergeStatements(
(
buildTryExceptionNode(
provider=provider, node=node, source_ref=source_ref
),
),
allow_none=True,
),
allow_none=True,
source_ref=source_ref,
),
node=node,
source_ref=source_ref,
)
def buildRaiseNode(provider, node, source_ref):
# Raise statements. Under Python2 they may have type, value and traceback
    # attached; for Python3, you can only give type (actually value) and cause.
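    # e.g. Python2 allows ``raise ExcType, value, tb`` while Python3 uses
    # ``raise exc from cause``.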
if python_version < 0x300:
exception_type = buildNode(provider, node.type, source_ref, allow_none=True)
exception_value = buildNode(provider, node.inst, source_ref, allow_none=True)
exception_trace = buildNode(provider, node.tback, source_ref, allow_none=True)
exception_cause = None
else:
exception_type = buildNode(provider, node.exc, source_ref, allow_none=True)
exception_value = None
exception_trace = None
exception_cause = buildNode(provider, node.cause, source_ref, allow_none=True)
if exception_type is None:
assert exception_value is None
assert exception_trace is None
assert exception_cause is None
result = StatementReraiseException(source_ref=source_ref)
else:
result = StatementRaiseException(
exception_type=exception_type,
exception_value=exception_value,
exception_trace=exception_trace,
exception_cause=exception_cause,
source_ref=source_ref,
)
if exception_cause is not None:
result.setCompatibleSourceReference(
source_ref=exception_cause.getCompatibleSourceReference()
)
elif exception_trace is not None:
result.setCompatibleSourceReference(
source_ref=exception_trace.getCompatibleSourceReference()
)
elif exception_value is not None:
result.setCompatibleSourceReference(
source_ref=exception_value.getCompatibleSourceReference()
)
elif exception_type is not None:
result.setCompatibleSourceReference(
source_ref=exception_type.getCompatibleSourceReference()
)
return result
def handleGlobalDeclarationNode(provider, node, source_ref):
# On the module level, there is nothing to do.
if provider.isCompiledPythonModule():
if shallWarnUnusualCode():
unusual_logger.warning(
"%s: Using 'global' statement on module level has no effect."
% source_ref.getAsString(),
)
return None
# Need to catch the error of declaring a parameter variable as global
# ourselves here. The AST parsing doesn't catch it, so we check here.
if provider.isExpressionFunctionBody():
parameters = provider.getParameters()
for variable_name in node.names:
if variable_name in parameters.getParameterNames():
SyntaxErrors.raiseSyntaxError(
"name '%s' is %s and global"
% (
variable_name,
"local" if python_version < 0x300 else "parameter",
),
source_ref.atColumnNumber(node.col_offset),
)
# The module the "global" statement refers to.
module = provider.getParentModule()
# Can give multiple names.
for variable_name in node.names:
closure_variable = None
# Re-use already taken global variables, in order to avoid creating yet
# another instance, esp. as the indications could then potentially not
# be shared.
if provider.hasTakenVariable(variable_name):
closure_variable = provider.getTakenVariable(variable_name)
# Only global variables count. Could have a closure reference to
# a location of a parent function here.
if not closure_variable.isModuleVariable():
closure_variable = None
if closure_variable is None:
module_variable = module.getVariableForAssignment(
variable_name=variable_name
)
closure_variable = provider.addClosureVariable(variable=module_variable)
assert closure_variable.isModuleVariable()
if (
python_version < 0x340
and provider.isExpressionClassBody()
and closure_variable.getName() == "__class__"
):
SyntaxErrors.raiseSyntaxError("cannot make __class__ global", source_ref)
provider.getLocalsScope().registerClosureVariable(variable=closure_variable)
# Drop this, not really part of our tree.
return None
def handleNonlocalDeclarationNode(provider, node, source_ref):
    # Need to catch the error of declaring a parameter variable as nonlocal
    # ourselves here. The AST parsing doesn't catch it, but we can do it here.
parameter_provider = provider
while (
parameter_provider.isExpressionGeneratorObjectBody()
or parameter_provider.isExpressionCoroutineObjectBody()
or parameter_provider.isExpressionAsyncgenObjectBody()
):
parameter_provider = parameter_provider.getParentVariableProvider()
if parameter_provider.isExpressionClassBody():
parameter_names = ()
else:
parameter_names = parameter_provider.getParameters().getParameterNames()
for variable_name in node.names:
if variable_name in parameter_names:
SyntaxErrors.raiseSyntaxError(
"name '%s' is parameter and nonlocal" % (variable_name),
source_ref.atColumnNumber(node.col_offset),
)
provider.addNonlocalsDeclaration(
names=tuple(node.names),
user_provided=True,
source_ref=source_ref.atColumnNumber(node.col_offset),
)
# Drop this, not really part of our tree.
return None
def buildStringNode(node, source_ref):
assert type(node.s) in (str, unicode)
return makeConstantRefNode(
constant=node.s, source_ref=source_ref, user_provided=True
)
def buildNumberNode(node, source_ref):
assert type(node.n) in (int, long, float, complex), type(node.n)
return makeConstantRefNode(
constant=node.n, source_ref=source_ref, user_provided=True
)
def buildBytesNode(node, source_ref):
return makeConstantRefNode(
constant=node.s, source_ref=source_ref, user_provided=True
)
def buildEllipsisNode(source_ref):
return ExpressionConstantEllipsisRef(source_ref=source_ref)
def buildStatementLoopContinue(node, source_ref):
source_ref = source_ref.atColumnNumber(node.col_offset)
# Python forbids this, although technically it's probably not much of
# an issue.
if getBuildContext() == "finally" and python_version < 0x380:
SyntaxErrors.raiseSyntaxError(
"'continue' not supported inside 'finally' clause", source_ref
)
return StatementLoopContinue(source_ref=source_ref)
def buildStatementLoopBreak(provider, node, source_ref):
# A bit unusual, we need the provider, but not the node,
# pylint: disable=unused-argument
return StatementLoopBreak(source_ref=source_ref.atColumnNumber(node.col_offset))
def buildAttributeNode(provider, node, source_ref):
return ExpressionAttributeLookup(
expression=buildNode(provider, node.value, source_ref),
attribute_name=mangleName(node.attr, provider),
source_ref=source_ref,
)
def buildReturnNode(provider, node, source_ref):
if provider.isExpressionClassBody() or provider.isCompiledPythonModule():
SyntaxErrors.raiseSyntaxError(
"'return' outside function", source_ref.atColumnNumber(node.col_offset)
)
expression = buildNode(provider, node.value, source_ref, allow_none=True)
if provider.isExpressionGeneratorObjectBody():
if expression is not None and python_version < 0x300:
SyntaxErrors.raiseSyntaxError(
"'return' with argument inside generator",
source_ref.atColumnNumber(node.col_offset),
)
if provider.isExpressionAsyncgenObjectBody():
if expression is not None:
SyntaxErrors.raiseSyntaxError(
"'return' with value in async generator",
source_ref.atColumnNumber(node.col_offset),
)
if (
provider.isExpressionGeneratorObjectBody()
or provider.isExpressionAsyncgenObjectBody()
):
if expression is None:
expression = ExpressionConstantNoneRef(source_ref=source_ref)
return StatementGeneratorReturn(expression=expression, source_ref=source_ref)
else:
return makeStatementReturn(expression=expression, source_ref=source_ref)
def buildExprOnlyNode(provider, node, source_ref):
result = StatementExpressionOnly(
expression=buildNode(provider, node.value, source_ref), source_ref=source_ref
)
result.setCompatibleSourceReference(
result.subnode_expression.getCompatibleSourceReference()
)
return result
def buildUnaryOpNode(provider, node, source_ref):
operator = getKind(node.op)
# Delegate this one to boolean operation code.
if operator == "Not":
return buildBoolOpNode(provider=provider, node=node, source_ref=source_ref)
operand = buildNode(provider, node.operand, source_ref)
return makeExpressionOperationUnary(
operator=operator, operand=operand, source_ref=source_ref
)
def buildBinaryOpNode(provider, node, source_ref):
operator = getKind(node.op)
if operator == "Div":
operator = "TrueDiv" if getFutureSpec().isFutureDivision() else "OldDiv"
left = buildNode(provider, node.left, source_ref)
right = buildNode(provider, node.right, source_ref)
result = makeBinaryOperationNode(
operator=operator, left=left, right=right, source_ref=source_ref
)
result.setCompatibleSourceReference(source_ref=right.getCompatibleSourceReference())
return result
def buildReprNode(provider, node, source_ref):
return makeExpressionOperationUnary(
operator="Repr",
operand=buildNode(provider, node.value, source_ref),
source_ref=source_ref,
)
def buildConditionalExpressionNode(provider, node, source_ref):
return ExpressionConditional(
condition=buildNode(provider, node.test, source_ref),
expression_yes=buildNode(provider, node.body, source_ref),
expression_no=buildNode(provider, node.orelse, source_ref),
source_ref=source_ref,
)
def buildAwaitNode(provider, node, source_ref):
return ExpressionYieldFromWaitable(
expression=ExpressionAsyncWait(
expression=buildNode(provider, node.value, source_ref),
source_ref=source_ref,
),
source_ref=source_ref,
)
def buildFormattedValueNode(provider, node, source_ref):
value = buildNode(provider, node.value, source_ref)
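    # Map the "ast" conversion codes onto small ints: -1 (no conversion)
    # becomes 0 via the guard below, ord('a') == 97 -> 1,
    # ord('r') == 114 -> 2, ord('s') == 115 -> 3.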
conversion = node.conversion % 4 if node.conversion > 0 else 0
if conversion == 0:
pass
elif conversion == 3:
# TODO: We might start using this for Python2 too.
assert str is not bytes
value = ExpressionBuiltinStrP3(
value=value, encoding=None, errors=None, source_ref=source_ref
)
elif conversion == 2:
value = makeExpressionOperationUnary(
operator="Repr", operand=value, source_ref=source_ref
)
elif conversion == 1:
value = ExpressionBuiltinAscii(value=value, source_ref=source_ref)
else:
assert False, conversion
return ExpressionBuiltinFormat(
value=value,
format_spec=buildNode(provider, node.format_spec, source_ref, allow_none=True),
source_ref=source_ref,
)
def buildJoinedStrNode(provider, node, source_ref):
if node.values:
return ExpressionStringConcatenation(
values=buildNodeList(provider, node.values, source_ref),
source_ref=source_ref,
)
else:
return makeConstantRefNode(constant="", source_ref=source_ref)
def buildSliceNode(provider, node, source_ref):
"""Python3.9 or higher, slice notations."""
return makeExpressionBuiltinSlice(
start=buildNode(provider, node.lower, source_ref, allow_none=True),
stop=buildNode(provider, node.upper, source_ref, allow_none=True),
step=buildNode(provider, node.step, source_ref, allow_none=True),
source_ref=source_ref,
)
setBuildingDispatchers(
path_args3={
"Name": buildVariableReferenceNode,
"Assign": buildAssignNode,
"AnnAssign": buildAnnAssignNode,
"Delete": buildDeleteNode,
"Lambda": buildLambdaNode,
"GeneratorExp": buildGeneratorExpressionNode,
"If": buildConditionNode,
"While": buildWhileLoopNode,
"For": buildForLoopNode,
"AsyncFor": buildAsyncForLoopNode,
"Compare": buildComparisonNode,
"ListComp": buildListContractionNode,
"DictComp": buildDictContractionNode,
"SetComp": buildSetContractionNode,
"Dict": buildDictionaryNode,
"Set": buildSetCreationNode,
"Tuple": buildTupleCreationNode,
"List": buildListCreationNode,
"Global": handleGlobalDeclarationNode,
"Nonlocal": handleNonlocalDeclarationNode,
"TryExcept": buildTryExceptionNode,
"TryFinally": buildTryFinallyNode2,
"Try": buildTryNode,
"Raise": buildRaiseNode,
"Import": buildImportModulesNode,
"ImportFrom": buildImportFromNode,
"Assert": buildAssertNode,
"Exec": buildExecNode,
"With": buildWithNode,
"AsyncWith": buildAsyncWithNode,
"FunctionDef": buildFunctionNode,
"AsyncFunctionDef": buildAsyncFunctionNode,
"Await": buildAwaitNode,
"ClassDef": buildClassNode,
"Print": buildPrintNode,
"Call": buildCallNode,
"Subscript": buildSubscriptNode,
"BoolOp": buildBoolOpNode,
"Attribute": buildAttributeNode,
"Return": buildReturnNode,
"Yield": buildYieldNode,
"YieldFrom": buildYieldFromNode,
"Expr": buildExprOnlyNode,
"UnaryOp": buildUnaryOpNode,
"BinOp": buildBinaryOpNode,
"Repr": buildReprNode,
"AugAssign": buildInplaceAssignNode,
"IfExp": buildConditionalExpressionNode,
"Break": buildStatementLoopBreak,
"JoinedStr": buildJoinedStrNode,
"FormattedValue": buildFormattedValueNode,
"NamedExpr": buildNamedExprNode,
"Slice": buildSliceNode,
},
path_args2={
"Constant": buildNamedConstantNode, # Python3.8
"NameConstant": buildNamedConstantNode, # Python3.8 or below
"Str": buildStringNode,
"Num": buildNumberNode,
"Bytes": buildBytesNode,
"Continue": buildStatementLoopContinue,
},
path_args1={"Ellipsis": buildEllipsisNode},
)
def buildParseTree(provider, ast_tree, source_ref, is_module, is_main):
# There are a bunch of branches here, mostly to deal with version
# differences for module default variables. pylint: disable=too-many-branches
    # Maybe one day we do exec inlining again; that is what this is for.
    # Then is_module won't be True, but for now it always is.
pushFutureSpec()
if is_module:
provider.setFutureSpec(getFutureSpec())
body, doc = extractDocFromBody(ast_tree)
if is_module and is_main and python_version >= 0x360:
provider.markAsNeedsAnnotationsDictionary()
result = buildStatementsNode(provider=provider, nodes=body, source_ref=source_ref)
# After building, we can verify that all future statements were where they
# belong, namely at the start of the module.
checkFutureImportsOnlyAtStart(body)
internal_source_ref = source_ref.atInternal()
statements = []
if is_module:
# Add import of "site" module of main programs visibly in the node tree,
# so recursion and optimization can pick it up, checking its effects.
if is_main and not Options.hasPythonFlagNoSite():
statements.append(
StatementExpressionOnly(
expression=makeExpressionAbsoluteImportNode(
module_name="site", source_ref=source_ref
),
source_ref=source_ref,
)
)
for path_imported_name in getPthImportedPackages():
statements.append(
StatementExpressionOnly(
expression=makeExpressionAbsoluteImportNode(
module_name=path_imported_name, source_ref=source_ref
),
source_ref=source_ref,
)
)
statements.append(
StatementAssignmentVariableName(
provider=provider,
variable_name="__doc__",
source=makeConstantRefNode(
constant=doc, source_ref=internal_source_ref, user_provided=True
),
source_ref=internal_source_ref,
)
)
statements.append(
StatementAssignmentVariableName(
provider=provider,
variable_name="__file__",
source=ExpressionModuleAttributeFileRef(
variable=provider.getVariableForReference("__file__"),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
)
)
if provider.isCompiledPythonPackage():
# This assigns "__path__" value.
statements.append(createPathAssignment(provider, internal_source_ref))
statements.append(
createImporterCacheAssignment(provider, internal_source_ref)
)
if python_version >= 0x340 and not is_main:
statements += (
StatementAssignmentAttribute(
expression=ExpressionModuleAttributeSpecRef(
variable=provider.getVariableForReference("__spec__"),
source_ref=internal_source_ref,
),
attribute_name="origin",
source=ExpressionModuleAttributeFileRef(
variable=provider.getVariableForReference("__file__"),
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
),
StatementAssignmentAttribute(
expression=ExpressionModuleAttributeSpecRef(
variable=provider.getVariableForReference("__spec__"),
source_ref=internal_source_ref,
),
attribute_name="has_location",
source=makeConstantRefNode(True, internal_source_ref),
source_ref=internal_source_ref,
),
)
if provider.isCompiledPythonPackage():
statements.append(
StatementAssignmentAttribute(
expression=ExpressionModuleAttributeSpecRef(
variable=provider.getVariableForReference("__spec__"),
source_ref=internal_source_ref,
),
attribute_name="submodule_search_locations",
source=ExpressionVariableNameRef(
provider=provider,
variable_name="__path__",
source_ref=internal_source_ref,
),
source_ref=internal_source_ref,
)
)
if python_version >= 0x300:
statements.append(
StatementAssignmentVariableName(
provider=provider,
variable_name="__cached__",
source=ExpressionConstantNoneRef(source_ref=internal_source_ref),
source_ref=internal_source_ref,
)
)
needs__initializing__ = (
not provider.isMainModule() and 0x300 <= python_version < 0x340
)
if needs__initializing__:
# Set "__initializing__" at the beginning to True
statements.append(
StatementAssignmentVariableName(
provider=provider,
variable_name="__initializing__",
source=makeConstantRefNode(
constant=True, source_ref=internal_source_ref, user_provided=True
),
source_ref=internal_source_ref,
)
)
if provider.needsAnnotationsDictionary():
# Set "__annotations__" on module level to {}
statements.append(
StatementAssignmentVariableName(
provider=provider,
variable_name="__annotations__",
source=makeConstantRefNode(
constant={}, source_ref=internal_source_ref, user_provided=True
),
source_ref=internal_source_ref,
)
)
# Now the module body if there is any at all.
if result is not None:
statements.extend(result.subnode_statements)
if needs__initializing__:
# Set "__initializing__" at the end to False
statements.append(
StatementAssignmentVariableName(
provider=provider,
variable_name="__initializing__",
source=makeConstantRefNode(
constant=False, source_ref=internal_source_ref, user_provided=True
),
source_ref=internal_source_ref,
)
)
if is_module:
result = makeModuleFrame(
module=provider, statements=statements, source_ref=source_ref
)
popFutureSpec()
return result
else:
assert False
def decideCompilationMode(is_top, module_name, source_ref):
result = Plugins.decideCompilation(module_name, source_ref)
if result == "bytecode" and is_top:
plugins_logger.warning(
"""\
Ignoring plugin decision to compile top level package '%s'
as bytecode, the extension module entry point is technically
required to compiled."""
% module_name
)
result = "compiled"
return result
def _decideModuleSourceRef(filename, package, is_shlib, is_top, is_main, is_fake):
# Many branches due to the many cases
# pylint: disable=too-many-branches
assert package is None or type(package) is ModuleName
assert filename is not None
is_namespace = False
is_package = False
if is_main and os.path.isdir(filename):
source_filename = os.path.join(filename, "__main__.py")
if not os.path.isfile(source_filename):
sys.stderr.write(
"%s: can't find '__main__' module in '%s'\n"
% (os.path.basename(sys.argv[0]), filename)
)
sys.exit(2)
filename = source_filename
main_added = True
else:
main_added = False
if is_fake:
source_filename = filename
source_ref = SourceCodeReferences.fromFilename(filename=filename)
module_name = is_fake
elif os.path.isfile(filename):
source_filename = filename
source_ref = SourceCodeReferences.fromFilename(filename=filename)
if is_main:
module_name = ModuleName("__main__")
else:
# Derive module name from filename.
module_name = os.path.basename(filename)
if is_shlib:
module_name = module_name.split(".")[0]
elif module_name.endswith(".py"):
module_name = module_name[:-3]
if "." in module_name:
sys.stderr.write(
"Error, '%s' is not a proper python module name.\n" % (module_name)
)
sys.exit(2)
module_name = ModuleName.makeModuleNameInPackage(module_name, package)
elif Importing.isPackageDir(filename):
is_package = True
if is_top:
module_name = splitPath(filename)[-1]
else:
module_name = os.path.basename(filename)
module_name = ModuleName.makeModuleNameInPackage(module_name, package)
source_filename = os.path.join(filename, "__init__.py")
if not os.path.isfile(source_filename):
source_ref = SourceCodeReferences.fromFilename(
filename=filename
).atInternal()
is_namespace = True
else:
source_ref = SourceCodeReferences.fromFilename(
filename=os.path.abspath(source_filename)
)
else:
sys.stderr.write(
"%s: can't open file '%s'.\n" % (os.path.basename(sys.argv[0]), filename)
)
sys.exit(2)
return (
module_name,
main_added,
is_package,
is_namespace,
source_ref,
source_filename,
)
def _createModule(
module_name,
source_code,
source_ref,
package,
is_shlib,
is_namespace,
is_package,
is_top,
is_main,
main_added,
):
# Many details due to the caching done here.
# pylint: disable=too-many-locals
assert package is None or type(package) is ModuleName
if is_shlib:
result = PythonShlibModule(module_name=module_name, source_ref=source_ref)
elif is_main:
result = PythonMainModule(
main_added=main_added,
mode=decideCompilationMode(False, module_name, source_ref),
future_spec=None,
source_ref=source_ref,
)
checkPythonVersionFromCode(source_code)
elif is_namespace:
result = createNamespacePackage(module_name, is_top, source_ref)
else:
mode = decideCompilationMode(is_top, module_name, source_ref)
if (
mode == "bytecode"
and not is_top
and hasCachedImportedModulesNames(module_name, source_code)
):
optimization_logger.info(
"'%s' is included as bytecode." % (module_name.asString())
)
result = makeUncompiledPythonModule(
module_name=module_name,
filename=source_ref.getFilename(),
bytecode=demoteSourceCodeToBytecode(
module_name=module_name,
source_code=source_code,
filename=source_ref.getFilename(),
),
user_provided=False,
technical=False,
is_package=is_package,
)
used_modules = OrderedSet()
for used_module_name in getCachedImportedModulesNames(
module_name=module_name, source_code=source_code
):
(_module_package, module_filename, _finding,) = Importing.findModule(
importing=result,
module_name=used_module_name,
parent_package=None,
level=-1,
warn=False,
)
used_modules.add((used_module_name, os.path.relpath(module_filename)))
result.setUsedModules(used_modules)
# Not used anymore
source_code = None
else:
if is_package:
result = CompiledPythonPackage(
module_name=module_name,
is_top=is_top,
mode=mode,
future_spec=None,
source_ref=source_ref,
)
else:
result = CompiledPythonModule(
module_name=module_name,
is_top=is_top,
mode=mode,
future_spec=None,
source_ref=source_ref,
)
return result
def createModuleTree(module, source_ref, ast_tree, is_main):
if Options.isShowMemory():
memory_watch = MemoryUsage.MemoryWatch()
module_body = buildParseTree(
provider=module,
ast_tree=ast_tree,
source_ref=source_ref,
is_module=True,
is_main=is_main,
)
if module_body.isStatementsFrame():
module_body = makeStatementsSequenceFromStatement(statement=module_body)
module.setChild("body", module_body)
completeVariableClosures(module)
if Options.isShowMemory():
memory_watch.finish()
memory_logger.info(
"Memory usage changed loading module '%s': %s"
% (module.getFullName(), memory_watch.asStr())
)
def buildMainModuleTree(filename, package, is_main):
    # Detect modules to be frozen, if any, so we can consider not following
    # imports into them.
module, _added = buildModule(
module_filename=filename,
module_package=package,
source_code=None,
is_top=True,
is_main=is_main,
is_shlib=False,
is_fake=False,
hide_syntax_error=False,
)
if Options.isStandaloneMode() and is_main:
module.setEarlyModules(detectEarlyImports())
# Main modules do not get added to the import cache, but plugins get to see it.
if module.isMainModule():
Plugins.onModuleDiscovered(module)
else:
addImportedModule(imported_module=module)
return module
def _makeModuleBodyFromSyntaxError(exc, module_name, module_filename):
assert module_name != "markupsafe._speedups", module_filename
if module_filename not in Importing.warned_about:
Importing.warned_about.add(module_filename)
recursion_logger.warning(
"""\
Cannot follow import to module '%s' because of %r."""
% (module_name, exc.__class__.__name__)
)
source_ref = SourceCodeReferences.fromFilename(filename=module_filename)
module = CompiledPythonModule(
module_name=module_name,
is_top=False,
mode="compiled",
future_spec=FutureSpec(),
source_ref=source_ref,
)
module_body = makeModuleFrame(
module=module,
statements=(
makeRaiseExceptionStatementFromInstance(
source_ref=source_ref, exception=exc
),
),
source_ref=source_ref,
)
module_body = makeStatementsSequenceFromStatement(statement=module_body)
module.setChild("body", module_body)
return module
def _makeModuleBodyTooComplex(module_name, module_filename, source_code, is_package):
if module_filename not in Importing.warned_about:
Importing.warned_about.add(module_filename)
recursion_logger.warning(
"""\
Cannot follow import to module '%r' ('%r') because code is too complex."""
% (
module_name,
module_filename,
)
)
module = makeUncompiledPythonModule(
module_name=module_name,
filename=module_filename,
bytecode=marshal.dumps(
compile(source_code, module_filename, "exec", dont_inherit=True)
),
is_package=is_package,
user_provided=True,
technical=False,
)
ModuleRegistry.addUncompiledModule(module)
def buildModule(
module_filename,
module_package,
source_code,
is_top,
is_main,
is_shlib,
is_fake,
hide_syntax_error,
):
# Many details to deal with, pylint: disable=too-many-locals
(
module_name,
main_added,
is_package,
is_namespace,
source_ref,
source_filename,
) = _decideModuleSourceRef(
filename=module_filename,
package=module_package,
is_top=is_top,
is_main=is_main,
is_shlib=is_shlib,
is_fake=is_fake,
)
    # Read the source code if necessary. This might raise a SyntaxError if the
    # source is not properly encoded.
if source_filename is not None and not is_namespace and not is_shlib:
try:
# For fake modules, source is provided directly.
if source_code is None:
source_code = readSourceCodeFromFilename(
module_name=module_name, source_filename=source_filename
)
except SyntaxError as e:
            # Only handle SyntaxErrors generated by our own source reading;
            # re-raise anything else.
if not hasattr(e, "generated_by_nuitka"):
raise
            # Do not hide the SyntaxError if asked not to.
if not hide_syntax_error:
raise
module = _makeModuleBodyFromSyntaxError(
exc=e, module_name=module_name, module_filename=module_filename
)
return module, True
try:
ast_tree = parseSourceCodeToAst(
source_code=source_code,
module_name=module_name,
filename=source_filename,
line_offset=0,
)
except (SyntaxError, IndentationError) as e:
# Do not hide SyntaxError if asked not to.
if not hide_syntax_error:
raise
module = _makeModuleBodyFromSyntaxError(
exc=e, module_name=module_name, module_filename=module_filename
)
return module, True
except CodeTooComplexCode:
# Do not hide CodeTooComplexCode in main module.
if is_main:
raise
module = _makeModuleBodyTooComplex(
module_name=module_name,
module_filename=module_filename,
source_code=source_code,
is_package=is_package,
)
return module, False
else:
ast_tree = None
source_code = None
module = _createModule(
module_name=module_name,
package=module_package,
source_code=source_code,
source_ref=source_ref,
is_top=is_top,
is_main=is_main,
is_shlib=is_shlib,
is_namespace=is_namespace,
is_package=is_package,
main_added=main_added,
)
if is_top:
ModuleRegistry.addRootModule(module)
OutputDirectories.setMainModule(module)
if module.isCompiledPythonModule() and source_code is not None:
createModuleTree(
module=module,
source_ref=source_ref,
ast_tree=ast_tree,
is_main=is_main,
)
return module, True
|
|
from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.config import ConfigValidationError
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from zope.interface import implementer
from typing import Any, Dict, List, Optional, Tuple
irc.ERR_NOSUCHXINFO = "772"
irc.RPL_XINFOENTRY = "773"
irc.RPL_XINFOEND = "774"
irc.RPL_XINFOTYPE = "775"
@implementer(IPlugin, IModuleData)
class StatsCommand(ModuleData):
name = "StatsCommand"
core = True
def userCommands(self) -> List[Tuple[str, int, Command]]:
return [ ("STATS", 1, UserStats(self.ircd)) ]
def serverCommands(self) -> List[Tuple[str, int, Command]]:
return [ ("INFOREQ", 1, ServerInfoRequest(self.ircd)),
("INFO", 1, ServerInfo(self.ircd)),
("INFOEND", 1, ServerInfoEnd(self.ircd)) ]
def verifyConfig(self, config: Dict[str, Any]) -> None:
if "public_info" in config:
if not isinstance(config["public_info"], list):
raise ConfigValidationError("public_info", "value must be a list")
for info in config["public_info"]:
if not isinstance(info, str):
raise ConfigValidationError("public_info", "every entry must be a string")
@implementer(ICommand)
class UserStats(Command):
def __init__(self, ircd):
self.ircd = ircd
def parseParams(self, user: "IRCUser", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:
if not params:
user.sendSingleError("StatsParams", irc.ERR_NEEDMOREPARAMS, "STATS", "Not enough parameters")
return None
typeName = params[0].lower()
if len(params) >= 2 and params[1] != self.ircd.name:
if params[1] not in self.ircd.serverNames:
user.sendSingleError("StatsServer", irc.ERR_NOSUCHSERVER, params[1], "No such server")
return None
return {
"type": typeName,
"server": self.ircd.serverNames[params[1]]
}
return {
"type": typeName
}
def execute(self, user: "IRCUser", data: Dict[Any, Any]) -> bool:
typeName = data["type"]
if "server" in data:
server = data["server"]
server.sendMessage("INFOREQ", server.serverID, typeName, prefix=user.uuid)
return True
if typeName is None:
if self.ircd.runActionUntilValue("userhasoperpermission", user, "info-unknown", users=[user]):
user.sendMessage(irc.ERR_NOSUCHXINFO, typeName, "No such stats type available")
else:
user.sendMessage(irc.ERR_NOPRIVILEGES, "Permission denied - You do not have the operator permission to run stats {}".format(typeName))
return True
if not self.checkPermission(user, typeName):
user.sendMessage(irc.ERR_NOPRIVILEGES, "Permission denied - You do not have the operator permission to run stats {}".format(typeName))
return True
results = self.ircd.runComboActionUntilValue((("statsruntype", (typeName,)), ("statsruntype-{}".format(typeName), ())), users=[user])
if results:
for key, val in results.items():
user.sendMessage(irc.RPL_XINFOENTRY, typeName, key, val)
		# The spec technically allows more than one key/value pair on a line.
		# If we do that, we'll need to make sure that a value containing a
		# space ends the line (see the packXInfoPairs sketch after this class).
user.sendMessage(irc.RPL_XINFOEND, typeName, "End of STATS request")
return True
def checkPermission(self, user: "IRCUser", typeName: str) -> bool:
if typeName in self.ircd.config.get("public_info", []):
return True
if self.ircd.runActionUntilValue("userhasoperpermission", user, "info-{}".format(typeName.lower()), users=[user]):
return True
return False
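# A hypothetical helper (not part of txircd) sketching the note in
# UserStats.execute above: the spec allows several key/value pairs per
# RPL_XINFOENTRY line, but a value containing a space must be the last
# token on its line.
def packXInfoPairs(pairs):
	"""Group (key, value) pairs into token lists, one list per reply line."""
	line = []
	for key, val in pairs:
		if " " in val:
			# A spaced value must end its line; flush anything accumulated first.
			if line:
				yield line
				line = []
			yield [key, val]
		else:
			line.extend((key, val))
	if line:
		yield line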
@implementer(ICommand)
class ServerInfoRequest(Command):
def __init__(self, ircd):
self.ircd = ircd
def parseParams(self, server: "IRCServer", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:
if len(params) != 2:
return None
if prefix not in self.ircd.users:
if prefix in self.ircd.recentlyQuitUsers:
return {
"lostuser": True
}
return None
if params[0] != self.ircd.serverID and params[0] not in self.ircd.servers:
if params[0] in self.ircd.recentlyQuitServers:
return {
"lostserver": True
}
return None
return {
"user": self.ircd.users[prefix],
"server": params[0],
"type": params[1]
}
def execute(self, server: "IRCServer", data: Dict[Any, Any]) -> bool:
if "lostuser" in data or "lostserver" in data:
return True
serverID = data["server"]
typeName = data["type"]
if serverID == self.ircd.serverID:
user = data["user"]
destServer = self.ircd.servers[user.uuid[:3]]
results = self.ircd.runComboActionUntilValue((("statsruntype", (typeName,)), ("statsruntype-{}".format(typeName), ())), users=[user])
if results:
for key, val in results.items():
destServer.sendMessage("INFO", user.uuid, typeName, key, val, prefix=self.ircd.serverID)
destServer.sendMessage("INFOEND", user.uuid, typeName, prefix=self.ircd.serverID)
return True
nextServer = self.ircd.servers[serverID]
nextServer.sendMessage("INFOREQ", serverID, typeName, prefix=data["user"].uuid)
return True
@implementer(ICommand)
class ServerInfo(Command):
def __init__(self, ircd):
self.ircd = ircd
def parseParams(self, server: "IRCServer", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:
if len(params) < 4 or len(params) % 2 != 0:
return None
if prefix not in self.ircd.servers:
if prefix in self.ircd.recentlyQuitServers:
return {
"lostserver": True
}
return None
if params[0] not in self.ircd.users:
if params[0] in self.ircd.recentlyQuitUsers:
return {
"lostuser": True
}
return None
response = {}
for i in range(2, len(params), 2):
response[params[i]] = params[i+1]
return {
"user": self.ircd.users[params[0]],
"source": prefix,
"type": params[1],
"data": response
}
def execute(self, server: "IRCServer", data: Dict[Any, Any]) -> bool:
if "lostuser" in data or "lostserver" in data:
return True
typeName = data["type"]
user = data["user"]
if user.uuid[:3] == self.ircd.serverID:
sourceServerName = self.ircd.servers[data["source"]].name
			for key, val in data["data"].items():
user.sendMessage(irc.RPL_XINFOENTRY, typeName, key, val, prefix=sourceServerName)
return True
responseList = []
		for key, val in data["data"].items():
responseList.append("{} {}".format(key, val))
destServer = self.ircd.servers[user.uuid[:3]]
destServer.sendMessage("INFO", user.uuid, typeName, *responseList, prefix=data["source"])
return True
@implementer(ICommand)
class ServerInfoEnd(Command):
def __init__(self, ircd):
self.ircd = ircd
def parseParams(self, server: "IRCServer", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:
if len(params) != 2:
return None
if prefix not in self.ircd.servers:
return None
if params[0] not in self.ircd.users:
return None
return {
"user": self.ircd.users[params[0]],
"type": params[1],
"source": self.ircd.servers[prefix]
}
def execute(self, server: "IRCServer", data: Dict[Any, Any]) -> bool:
user = data["user"]
if user.uuid[:3] == self.ircd.serverID:
user.sendMessage(irc.RPL_XINFOEND, data["type"], "End of STATS request", prefix=data["source"].name)
return True
nextServer = self.ircd.servers[user.uuid[:3]]
nextServer.sendMessage("INFOEND", user.uuid, data["type"], prefix=data["source"].serverID)
return True
statsCmd = StatsCommand()
|
|
# Test that parameters are preserved when written out and read in again
from __future__ import print_function, division
from itertools import product
from numpy.testing import assert_equal
from astropy.tests.helper import pytest
from .. import OutputConf, RunConf, ImageConf, BinnedImageConf, PeeledImageConf
from ...util.functions import virtual_file
@pytest.mark.parametrize(('attribute', 'value'),
list(product(['output_density', 'output_density_diff',
'output_specific_energy', 'output_n_photons'],
['none', 'last', 'all'])))
def test_io_output_conf(attribute, value):
o1 = OutputConf()
setattr(o1, attribute, value)
v = virtual_file()
o1.write(v)
o2 = OutputConf.read(v)
assert getattr(o2, attribute) == value
def test_io_image_conf():
i1 = ImageConf()
i1.set_image_size(33, 42)
i1.set_image_limits(3.2, 4.4, 5.2, 9.9)
i1.set_aperture_range(6, 1.2, 8.8)
i1.set_wavelength_range(9, 2.2, 7.4)
i1.set_output_bytes(4)
i1.set_track_origin('basic')
i1.set_uncertainties(True)
v = virtual_file()
i1.write(v)
i2 = ImageConf.read(v)
assert i2.n_x == i1.n_x
assert i2.n_y == i1.n_y
assert i2.xmin == i1.xmin
assert i2.xmax == i1.xmax
assert i2.ymin == i1.ymin
assert i2.ymax == i1.ymax
assert i2.n_ap == i1.n_ap
assert i2.ap_min == i1.ap_min
assert i2.ap_max == i1.ap_max
assert i2.n_wav == i1.n_wav
assert i2.wav_min == i1.wav_min
assert i2.wav_max == i1.wav_max
assert i2.io_bytes == i1.io_bytes
assert i2.track_origin == i1.track_origin
assert i2.uncertainties == i1.uncertainties
def test_io_binned_image_conf():
i1 = BinnedImageConf()
i1.set_image_size(33, 42)
i1.set_image_limits(3.2, 4.4, 5.2, 9.9)
i1.set_aperture_range(6, 1.2, 8.8)
i1.set_wavelength_range(9, 2.2, 7.4)
i1.set_viewing_bins(76, 22)
v = virtual_file()
i1.write(v)
i2 = BinnedImageConf.read(v)
assert i2.n_theta == i1.n_theta
assert i2.n_phi == i1.n_phi
def test_io_peeled_image_conf():
i1 = PeeledImageConf()
i1.set_image_size(33, 42)
i1.set_image_limits(3.2, 4.4, 5.2, 9.9)
i1.set_aperture_range(6, 1.2, 8.8)
i1.set_wavelength_range(9, 2.2, 7.4)
i1.set_viewing_angles([1., 2., 3], [4., 5., 6.])
i1.set_peeloff_origin([2.2, 3.3, 7.6])
i1.set_ignore_optical_depth(True)
i1.set_depth(-1.7, 6.2)
v = virtual_file()
i1.write(v)
i2 = PeeledImageConf.read(v)
for i in range(len(i2.viewing_angles)):
assert i2.viewing_angles[i][0] == i1.viewing_angles[i][0]
assert i2.viewing_angles[i][1] == i1.viewing_angles[i][1]
assert_equal(i2.peeloff_origin, i1.peeloff_origin)
assert i2.ignore_optical_depth == i1.ignore_optical_depth
assert i2.d_min == i1.d_min
assert i2.d_max == i1.d_max
def test_io_peeled_image_conf_inside():
i1 = PeeledImageConf()
i1.set_image_size(33, 42)
i1.set_image_limits(3.2, -4.4, 5.2, 9.9)
i1.set_aperture_range(6, 1.2, 8.8)
i1.set_wavelength_range(9, 2.2, 7.4)
i1.set_viewing_angles([1., 2., 3], [4., 5., 6.])
i1.set_inside_observer([7., 8., 9.])
i1.set_ignore_optical_depth(True)
i1.set_depth(1.7, 6.2)
v = virtual_file()
i1.write(v)
i2 = PeeledImageConf.read(v)
for i in range(len(i2.viewing_angles)):
assert i2.viewing_angles[i][0] == i1.viewing_angles[i][0]
assert i2.viewing_angles[i][1] == i1.viewing_angles[i][1]
assert_equal(i2.inside_observer, i1.inside_observer)
assert i2.ignore_optical_depth == i1.ignore_optical_depth
assert i2.d_min == i1.d_min
assert i2.d_max == i1.d_max
# RUNTIME CONFIGURATION
@pytest.mark.parametrize(('value'), [0.001, 0.1, 1.])
def test_io_run_conf_propagation_check_frequency(value):
r1 = RunConf()
r1.set_propagation_check_frequency(value)
r1.set_n_photons(1, 2)
v = virtual_file()
r1.write_run_conf(v)
r2 = RunConf()
r2.read_run_conf(v)
assert r2._frequency == r1._frequency
@pytest.mark.parametrize(('value'), [-1234, -6663121])
def test_io_run_conf_seed(value):
r1 = RunConf()
r1.set_seed(value)
r1.set_n_photons(1, 2)
v = virtual_file()
r1.write_run_conf(v)
r2 = RunConf()
r2.read_run_conf(v)
assert r2._seed == r1._seed
@pytest.mark.parametrize(('value'), [2, 5, 102])
def test_io_run_conf_n_initial_iterations(value):
r1 = RunConf()
r1.set_n_initial_iterations(value)
r1.set_n_photons(1, 2)
v = virtual_file()
r1.write_run_conf(v)
r2 = RunConf()
r2.read_run_conf(v)
assert r2.n_iter == r1.n_iter
@pytest.mark.parametrize(('value'), [True, False])
def test_io_run_conf_raytracing(value):
r1 = RunConf()
r1.set_raytracing(value)
if value:
r1.set_n_photons(1, 2, raytracing_sources=3, raytracing_dust=4)
else:
r1.set_n_photons(1, 2)
v = virtual_file()
r1.write_run_conf(v)
r2 = RunConf()
r2.read_run_conf(v)
assert r2.raytracing == r1.raytracing
def test_io_run_conf_n_photons_plain():
r1 = RunConf()
r1.set_n_photons(initial=1, imaging=2)
v = virtual_file()
r1.write_run_conf(v)
r2 = RunConf()
r2.read_run_conf(v)
for key in r1.n_photons:
assert r2.n_photons[key] == r1.n_photons[key]
def test_io_run_conf_n_photons_raytracing():
r1 = RunConf()
r1.set_raytracing(True)
r1.set_n_photons(initial=1, imaging=2, raytracing_sources=3,
raytracing_dust=4)
v = virtual_file()
r1.write_run_conf(v)
r2 = RunConf()
r2.read_run_conf(v)
for key in r1.n_photons:
assert r2.n_photons[key] == r1.n_photons[key]
def test_io_run_conf_n_photons_monochromatic():
r1 = RunConf()
r1._monochromatic = True
r1.set_n_photons(initial=1, imaging_sources=3, imaging_dust=4)
v = virtual_file()
r1.write_run_conf(v)
r2 = RunConf()
r2._monochromatic = True
r2.read_run_conf(v)
for key in r1.n_photons:
assert r2.n_photons[key] == r1.n_photons[key]
@pytest.mark.parametrize(('value'), [33, 5283])
def test_io_run_conf_max_interactions(value):
r1 = RunConf()
r1.set_max_interactions(value)
r1.set_n_photons(1, 2)
v = virtual_file()
r1.write_run_conf(v)
r2 = RunConf()
r2.read_run_conf(v)
assert r2.n_inter_max == r1.n_inter_max
@pytest.mark.parametrize(('value'), [77, 1244])
def test_io_run_conf_max_reabsorptions(value):
r1 = RunConf()
r1.set_max_reabsorptions(value)
r1.set_n_photons(1, 2)
v = virtual_file()
r1.write_run_conf(v)
r2 = RunConf()
r2.read_run_conf(v)
assert r2.n_reabs_max == r1.n_reabs_max
@pytest.mark.parametrize(('value'), [True, False])
def test_io_run_conf_pda(value):
r1 = RunConf()
r1.set_pda(value)
r1.set_n_photons(1, 2)
v = virtual_file()
r1.write_run_conf(v)
r2 = RunConf()
r2.read_run_conf(v)
assert r2.pda == r1.pda
@pytest.mark.parametrize(('value'), [True, False])
def test_io_run_conf_mrw(value):
r1 = RunConf()
r1.set_mrw(value)
r1.set_n_photons(1, 2)
v = virtual_file()
r1.write_run_conf(v)
r2 = RunConf()
r2.read_run_conf(v)
assert r2.mrw == r1.mrw
@pytest.mark.parametrize(('value'), [False, True])
def test_io_run_conf_convergence(value):
r1 = RunConf()
if value:
r1.set_convergence(value, percentile=12., absolute=34., relative=56.)
else:
r1.set_convergence(value)
r1.set_n_photons(1, 2)
v = virtual_file()
r1.write_run_conf(v)
r2 = RunConf()
r2.read_run_conf(v)
assert r2.check_convergence == r1.check_convergence
assert r2.convergence_percentile == r1.convergence_percentile
assert r2.convergence_absolute == r1.convergence_absolute
assert r2.convergence_relative == r1.convergence_relative
@pytest.mark.parametrize(('value'), [True, False])
def test_io_run_conf_kill_on_absorb(value):
r1 = RunConf()
r1.set_kill_on_absorb(value)
r1.set_n_photons(1, 2)
v = virtual_file()
r1.write_run_conf(v)
r2 = RunConf()
r2.read_run_conf(v)
assert r2.kill_on_absorb == r1.kill_on_absorb
@pytest.mark.parametrize(('value'), [True, False])
def test_io_run_conf_forced_first_scattering(value):
r1 = RunConf()
r1.set_forced_first_scattering(value)
r1.set_n_photons(1, 2)
v = virtual_file()
r1.write_run_conf(v)
r2 = RunConf()
r2.read_run_conf(v)
assert r2.forced_first_scattering == r1.forced_first_scattering
@pytest.mark.parametrize(('value'), [4, 8])
def test_io_run_conf_output_bytes(value):
r1 = RunConf()
r1.set_output_bytes(value)
r1.set_n_photons(1, 2)
v = virtual_file()
r1.write_run_conf(v)
r2 = RunConf()
r2.read_run_conf(v)
assert r2.physics_io_bytes == r1.physics_io_bytes
@pytest.mark.parametrize(('value'), [True, False])
def test_io_run_conf_sample_sources_evenly(value):
r1 = RunConf()
r1.set_sample_sources_evenly(value)
r1.set_n_photons(1, 2)
v = virtual_file()
r1.write_run_conf(v)
r2 = RunConf()
r2.read_run_conf(v)
assert r2.sample_sources_evenly == r1.sample_sources_evenly
@pytest.mark.parametrize(('value'), [True, False])
def test_io_run_conf_enforce_energy_range(value):
r1 = RunConf()
r1.set_enforce_energy_range(value)
r1.set_n_photons(1, 2)
v = virtual_file()
r1.write_run_conf(v)
r2 = RunConf()
r2.read_run_conf(v)
assert r2.enforce_energy_range == r1.enforce_energy_range
@pytest.mark.parametrize(('value'), [True, False])
def test_io_run_conf_copy_input(value):
r1 = RunConf()
r1.set_copy_input(value)
r1.set_n_photons(1, 2)
v = virtual_file()
r1.write_run_conf(v)
r2 = RunConf()
r2.read_run_conf(v)
assert r2.copy_input == r1.copy_input
|
|
import unittest
from Vintageous.ex.parser.nodes import RangeNode
from Vintageous.ex.parser.nodes import CommandLineNode
from Vintageous.ex.parser.tokens import TokenDot
from Vintageous.ex.parser.tokens import TokenDigits
from Vintageous.ex.parser.tokens import TokenSearchForward
from Vintageous.ex.parser.tokens import TokenSearchBackward
from Vintageous.ex.parser.tokens import TokenPercent
from Vintageous.ex.parser.tokens import TokenOffset
from Vintageous.ex.parser.tokens import TokenMark
from Vintageous.ex.parser.scanner_command_substitute import TokenCommandSubstitute
from Vintageous.tests import ViewTest
class RangeNode_Tests(unittest.TestCase):
def testCanInstantiate(self):
node = RangeNode('foo', 'bar', ';')
node.start_offset = [10]
node.end_offset = [10]
self.assertEqual(node.start, 'foo')
self.assertEqual(node.end, 'bar')
self.assertEqual(node.start_offset, [10])
self.assertEqual(node.end_offset, [10])
self.assertEqual(node.separator, ';')
def test_CanDetectIfItsEmpty(self):
node = RangeNode()
self.assertTrue(node.is_empty)
class CommandLineNode_Tests(unittest.TestCase):
def testCanInstantiate(self):
range_node = RangeNode("foo", "bar", False)
range_node.start_offset = [10]
range_node.end_offset = [10]
command = TokenCommandSubstitute({})
node = CommandLineNode(range_node, command)
self.assertEqual(range_node, node.line_range)
self.assertEqual(command, node.command)
class RangeNode_resolve_notation_Tests(ViewTest):
    def testReturnsCurrentLineIfRangeIsEmpty(self):
self.write('''aaa aaa
bbb bbb
ccc ccc
''')
self.clear_sel()
self.add_sel(self.R((0,0), (0,0)))
region = RangeNode().resolve(self.view)
self.assert_equal_regions(self.R(0, 8), region)
    def testReturnsCurrentLineIfRangeIsEmpty2(self):
self.write('''aaa aaa
bbb bbb
ccc ccc
''')
self.clear_sel()
self.add_sel(self.R((1,0), (1,0)))
region = RangeNode().resolve(self.view)
self.assert_equal_regions(self.R(8, 16), region)
    def testReturnsCurrentLineIfRangeIsEmptyAndAddsOffset(self):
self.write('''aaa aaa
bbb bbb
ccc ccc
ddd ddd
''')
self.clear_sel()
self.add_sel(self.R((0,0), (0,0)))
region = RangeNode(start=[TokenOffset([1, 1])]).resolve(self.view)
self.assert_equal_regions(self.R(16, 24), region)
    def testReturnsCurrentLineIfRangeIsEmptyAndAddsOffsets(self):
self.write('''aaa aaa
bbb bbb
ccc ccc
ddd ddd
''')
self.clear_sel()
self.add_sel(self.R((0,0), (0,0)))
region = RangeNode(start=[TokenOffset([2])]).resolve(self.view)
self.assert_equal_regions(self.R(16, 24), region)
    def testReturnsRequestedStartLineNumber(self):
self.write('''aaa aaa
bbb bbb
ccc ccc
ddd ddd
''')
self.clear_sel()
self.add_sel(self.R((0,0), (0,0)))
region = RangeNode(start=[TokenDigits('2')]).resolve(self.view)
self.assert_equal_regions(self.R(8, 16), region)
    def testReturnsRequestedStartLineNumberAndAddsOffset(self):
self.write('''aaa aaa
bbb bbb
ccc ccc
ddd ddd
''')
self.clear_sel()
self.add_sel(self.R((0,0), (0,0)))
region = RangeNode(start=[TokenDigits('2')], start_offset=[1]).resolve(self.view)
self.assert_equal_regions(self.R(16, 24), region)
    def testReturnsRequestedStartLineNumberAndAddsOffsetToken(self):
self.write('''aaa aaa
bbb bbb
ccc ccc
ddd ddd
''')
self.clear_sel()
self.add_sel(self.R((0,0), (0,0)))
region = RangeNode(start=[TokenDigits('2'), TokenOffset([2])]).resolve(self.view)
self.assert_equal_regions(self.R(24, 32), region)
    def testReturnsWholeBufferIfPercentRequested(self):
self.write('''aaa aaa
bbb bbb
ccc ccc
ddd ddd
''')
self.clear_sel()
self.add_sel(self.R((0,0), (0,0)))
region = RangeNode(start=[TokenPercent()]).resolve(self.view)
self.assert_equal_regions(self.R(0, 32), region)
class Tests_SearchForward(ViewTest):
def testCanSearchForward(self):
self.write('''aaa aaa
bbb bbb
ccc cat
ddd cat
''')
self.clear_sel()
self.add_sel(self.R((0,0), (0,0)))
region = RangeNode(start=[TokenSearchForward('cat')]).resolve(self.view)
self.assert_equal_regions(self.R(16, 24), region)
def testCanSearchForwardWithOffset(self):
self.write('''aaa aaa
bbb bbb
ccc cat
ddd ddd
''')
self.clear_sel()
self.add_sel(self.R((0,0), (0,0)))
region = RangeNode(start=[TokenSearchForward('cat'), TokenOffset([1])]).resolve(self.view)
self.assert_equal_regions(self.R(24, 32), region)
def testFailedSearchThrows(self):
self.write('''aaa aaa
bbb bbb
ccc cat
ddd cat
''')
self.clear_sel()
self.add_sel(self.R((0,0), (0,0)))
line_range = RangeNode(start=[TokenSearchForward('dog')])
self.assertRaises(ValueError, line_range.resolve, self.view)
def testCanSearchMultipleTimesForward(self):
self.write('''aaa aaa
bbb bbb
ccc cat
ddd ddd
eee eee
fff cat
''')
self.clear_sel()
self.add_sel(self.R((0,0), (0,0)))
region = RangeNode(start=[TokenSearchForward('cat'), TokenSearchForward('cat')]).resolve(self.view)
self.assert_equal_regions(self.R(40, 48), region)
class Tests_SearchBackward(ViewTest):
def testCanSearchBackward(self):
self.write('''aaa aaa
bbb bbb
ccc cat
ddd ddd
xxx xxx
''')
self.clear_sel()
self.add_sel(self.R(self.view.size()))
region = RangeNode(start=[TokenSearchBackward('cat')]).resolve(self.view)
self.assert_equal_regions(self.R(16, 24), region)
def testCanSearchBackwardWithOffset(self):
self.write('''aaa aaa
bbb bbb
ccc cat
ddd ddd
xxx xxx
''')
self.clear_sel()
self.add_sel(self.R(self.view.size()))
region = RangeNode(start=[TokenSearchBackward('cat'), TokenOffset([1])]).resolve(self.view)
self.assert_equal_regions(self.R(24, 32), region)
def testFailedSearchThrows(self):
self.write('''aaa aaa
bbb bbb
ccc cat
ddd cat
''')
self.clear_sel()
self.add_sel(self.R(self.view.size()))
line_range = RangeNode(start=[TokenSearchBackward('dog')])
self.assertRaises(ValueError, line_range.resolve, self.view)
def testCanSearchMultipleTimesBackward(self):
self.write('''aaa aaa
bbb bbb
ccc cat
ddd cat
eee eee
fff fff
''')
self.clear_sel()
self.add_sel(self.R(self.view.size()))
region = RangeNode(start=[TokenSearchBackward('cat'), TokenSearchBackward('cat')]).resolve(self.view)
self.assert_equal_regions(self.R(16, 24), region)
class Tests_Line0(ViewTest):
def testCanCalculateVisualStart(self):
self.write('''xxx xxx
aaa aaa
xxx xxx
bbb bbb
''')
self.clear_sel()
self.add_sel(self.R(8, 10))
region = RangeNode(start=[TokenDigits('0')]).resolve(self.view)
self.assert_equal_regions(self.R(-1, -1), region)
class Tests_Marks(ViewTest):
def testCanCalculateVisualStart(self):
self.write('''xxx xxx
aaa aaa
xxx xxx
bbb bbb
''')
self.clear_sel()
self.add_sel(self.R(8, 10))
region = RangeNode(start=[TokenMark("<")]).resolve(self.view)
self.assert_equal_regions(self.R(8, 16), region)
def testCanCalculateVisualStartWithMultipleSels(self):
self.write('''xxx xxx
aaa aaa
xxx xxx
bbb bbb
xxx xxx
ccc ccc
''')
self.clear_sel()
self.add_sel(self.R(8, 10))
self.add_sel(self.R(24, 27))
region = RangeNode(start=[TokenMark("<")]).resolve(self.view)
self.assert_equal_regions(self.R(8, 16), region)
def testCanCalculateVisualEnd(self):
self.write('''xxx xxx
aaa aaa
xxx xxx
bbb bbb
''')
self.clear_sel()
self.add_sel(self.R(8, 10))
region = RangeNode(start=[TokenMark(">")]).resolve(self.view)
self.assert_equal_regions(self.R(8, 16), region)
def testCanCalculateVisualEndWithMultipleSels(self):
self.write('''xxx xxx
aaa aaa
xxx xxx
bbb bbb
xxx xxx
ccc ccc
''')
self.clear_sel()
self.add_sel(self.R(8, 10))
self.add_sel(self.R(24, 27))
region = RangeNode(start=[TokenMark(">")]).resolve(self.view)
self.assert_equal_regions(self.R(8, 16), region)
    def testCanCalculateVisualStartAndEndMarksCombined(self):
self.write('''xxx xxx
aaa aaa
xxx xxx
bbb bbb
xxx xxx
ccc ccc
''')
self.clear_sel()
self.add_sel(self.R(8, 10))
region = RangeNode(start=[TokenMark("<"), TokenMark(">")]).resolve(self.view)
self.assert_equal_regions(self.R(8, 16), region)
|
|
# -*- coding: utf-8 -*-
"""
F test for null hypothesis that coefficients in several regressions are the same
* implemented by creating groupdummies*exog and testing appropriate contrast
matrices
* similar to test for structural change in all variables at predefined break points
* allows only one group variable
* currently tests for change in all exog variables
* allows for heteroscedasticity; error variance varies across groups
* does not work if there is a group with only a single observation
TODO
----
* generalize anova structure,
- structural break in only some variables
- compare structural breaks in several exog versus constant only
- fast way to construct comparisons
* print anova style results
* add all pairwise comparison tests (DONE) with and without Bonferroni correction
* add additional test, likelihood-ratio, lagrange-multiplier, wald ?
* test for heteroscedasticity, equality of variances
- how?
- like lagrange-multiplier in stattools heteroscedasticity tests
* permutation or bootstrap test statistic or pvalues
References
----------
Greene: section 7.4 Modeling and Testing for a Structural Break
is not the same because I use a different normalization, which looks easier
for more than 2 groups/subperiods
after looking at Greene:
* my version assumes that all groups are large enough to estimate the coefficients
* in sections 7.4.2 and 7.5.3, predictive tests can also be used when there are
insufficient (nobs<nvars) observations in one group/subperiods
question: can this be used to test structural change for last period?
cusum test but only for current period,
in general cusum is better done with recursive ols
check other references again for this, there was one for non-recursive
calculation of cusum (if I remember correctly)
* Greene 7.4.4: with unequal variances Greene mentions the Wald test, but the
  size of the test might not be very good
no mention of F-test based on GLS, is there a reference for what I did?
alternative: use Wald test with bootstrap pvalues?
Created on Sat Mar 27 01:48:01 2010
Author: josef-pktd
"""
import numpy as np
from scipy import stats
from statsmodels.regression.linear_model import OLS, WLS
class OneWayLS(object):
'''Class to test equality of regression coefficients across groups
This class performs tests whether the linear regression coefficients are
the same across pre-specified groups. This can be used to test for
structural breaks at given change points, or for ANOVA style analysis of
differences in the effect of explanatory variables across groups.
Notes
-----
The test is implemented by regression on the original pooled exogenous
variables and on group dummies times the exogenous regressors.
y_i = X_i beta_i + u_i for all groups i
The test is for the null hypothesis: beta_i = beta for all i
against the alternative that at least one beta_i is different.
By default it is assumed that all u_i have the same variance. If the
keyword option het is True, then it is assumed that the variance is
group specific. This uses WLS with weights given by the standard errors
from separate regressions for each group.
Note: het=True is not sufficiently tested
The F-test assumes that the errors are normally distributed.
original question from mailing list for equality of coefficients
across regressions, and example in Stata FAQ
*testing*:
* if constant is the only regressor then the result for the F-test is
the same as scipy.stats.f_oneway
(which in turn is verified against NIST for not badly scaled problems)
* f-test for simple structural break is the same as in original script
* power and size of test look ok in examples
* not checked/verified for heteroskedastic case
- for constant only: ftest result is the same with WLS as with OLS - check?
check: I might be mixing up group names (unique)
and group ids (integers in arange(ngroups))
not tested for groups that are not arange(ngroups)
make sure groupnames are always consistently sorted/ordered
Fixed for getting the results, but groups are not printed yet, still
inconsistent use for summaries of results.
'''
def __init__(self, y, x, groups=None, het=False, data=None, meta=None):
if groups is None:
raise ValueError('use OLS if there are no groups')
#maybe replace by dispatch to OLS
if data:
y = data[y]
x = [data[v] for v in x]
try:
groups = data[groups]
            except (KeyError, ValueError):
pass
self.endog = np.asarray(y)
self.exog = np.asarray(x)
if self.exog.ndim == 1:
self.exog = self.exog[:,None]
self.groups = np.asarray(groups)
self.het = het
self.groupsint = None
        if np.issubdtype(self.groups.dtype, np.integer):
self.unique = np.unique(self.groups)
if (self.unique == np.arange(len(self.unique))).all():
self.groupsint = self.groups
if self.groupsint is None: # groups are not consecutive integers
self.unique, self.groupsint = np.unique(self.groups, return_inverse=True)
self.uniqueint = np.arange(len(self.unique)) #as shortcut
def fitbygroups(self):
'''Fit OLS regression for each group separately.
Returns
-------
results are attached
olsbygroup : dictionary of result instance
the returned regression results for each group
sigmabygroup : array (ngroups,) (this should be called sigma2group ??? check)
mse_resid for each group
weights : array (nobs,)
standard deviation of group extended to the original observations. This can
be used as weights in WLS for group-wise heteroscedasticity.
'''
olsbygroup = {}
sigmabygroup = []
for gi, group in enumerate(self.unique): #np.arange(len(self.unique))):
groupmask = self.groupsint == gi #group index
res = OLS(self.endog[groupmask], self.exog[groupmask]).fit()
olsbygroup[group] = res
sigmabygroup.append(res.mse_resid)
self.olsbygroup = olsbygroup
self.sigmabygroup = np.array(sigmabygroup)
self.weights = np.sqrt(self.sigmabygroup[self.groupsint]) #TODO:chk sqrt
def fitjoint(self):
'''fit a joint fixed effects model to all observations
The regression results are attached as `lsjoint`.
The contrasts for overall and pairwise tests for equality of coefficients are
attached as a dictionary `contrasts`. This also includes the contrasts for the test
that the coefficients of a level are zero. ::
>>> res.contrasts.keys()
[(0, 1), 1, 'all', 3, (1, 2), 2, (1, 3), (2, 3), (0, 3), (0, 2)]
The keys are based on the original names or labels of the groups.
TODO: keys can be numpy scalars and then the keys cannot be sorted
'''
if not hasattr(self, 'weights'):
self.fitbygroups()
groupdummy = (self.groupsint[:,None] == self.uniqueint).astype(int)
#order of dummy variables by variable - not used
#dummyexog = self.exog[:,:,None]*groupdummy[:,None,1:]
        # order of dummy variables by groups - used
dummyexog = self.exog[:,None,:]*groupdummy[:,1:,None]
exog = np.c_[self.exog, dummyexog.reshape(self.exog.shape[0],-1)] #self.nobs ??
#Notes: I changed to drop first group from dummy
#instead I want one full set dummies
if self.het:
weights = self.weights
res = WLS(self.endog, exog, weights=weights).fit()
else:
res = OLS(self.endog, exog).fit()
self.lsjoint = res
contrasts = {}
nvars = self.exog.shape[1]
nparams = exog.shape[1]
ndummies = nparams - nvars
contrasts['all'] = np.c_[np.zeros((ndummies, nvars)), np.eye(ndummies)]
for groupind, group in enumerate(self.unique[1:]): #need enumerate if groups != groupsint
groupind = groupind + 1
contr = np.zeros((nvars, nparams))
contr[:,nvars*groupind:nvars*(groupind+1)] = np.eye(nvars)
contrasts[group] = contr
#save also for pairs, see next
contrasts[(self.unique[0], group)] = contr
#Note: I'm keeping some duplication for testing
pairs = np.triu_indices(len(self.unique),1)
for ind1,ind2 in zip(*pairs): #replace with group1, group2 in sorted(keys)
if ind1 == 0:
continue # need comparison with benchmark/normalization group separate
g1 = self.unique[ind1]
g2 = self.unique[ind2]
group = (g1, g2)
contr = np.zeros((nvars, nparams))
contr[:,nvars*ind1:nvars*(ind1+1)] = np.eye(nvars)
contr[:,nvars*ind2:nvars*(ind2+1)] = -np.eye(nvars)
contrasts[group] = contr
self.contrasts = contrasts
def fitpooled(self):
'''fit the pooled model, which assumes there are no differences across groups
'''
if self.het:
if not hasattr(self, 'weights'):
self.fitbygroups()
weights = self.weights
res = WLS(self.endog, self.exog, weights=weights).fit()
else:
res = OLS(self.endog, self.exog).fit()
self.lspooled = res
def ftest_summary(self):
'''run all ftests on the joint model
Returns
-------
fres : str
a string that lists the results of all individual f-tests
summarytable : list of tuples
contains (pair, (fvalue, pvalue,df_denom, df_num)) for each f-test
Note
----
        These are the raw results, not formatted for nice printing.
'''
if not hasattr(self, 'lsjoint'):
self.fitjoint()
txt = []
summarytable = []
txt.append('F-test for equality of coefficients across groups')
fres = self.lsjoint.f_test(self.contrasts['all'])
txt.append(fres.__str__())
summarytable.append(('all',(fres.fvalue, fres.pvalue, fres.df_denom, fres.df_num)))
# for group in self.unique[1:]: #replace with group1, group2 in sorted(keys)
# txt.append('F-test for equality of coefficients between group'
# ' %s and group %s' % (group, '0'))
# fres = self.lsjoint.f_test(self.contrasts[group])
# txt.append(fres.__str__())
# summarytable.append((group,(fres.fvalue, fres.pvalue, fres.df_denom, fres.df_num)))
pairs = np.triu_indices(len(self.unique),1)
for ind1,ind2 in zip(*pairs): #replace with group1, group2 in sorted(keys)
g1 = self.unique[ind1]
g2 = self.unique[ind2]
txt.append('F-test for equality of coefficients between group'
' %s and group %s' % (g1, g2))
group = (g1, g2)
fres = self.lsjoint.f_test(self.contrasts[group])
txt.append(fres.__str__())
summarytable.append((group,(fres.fvalue, fres.pvalue, fres.df_denom, fres.df_num)))
self.summarytable = summarytable
return '\n'.join(txt), summarytable
def print_summary(self, res):
'''printable string of summary
'''
groupind = res.groups
#res.fitjoint() #not really necessary, because called by ftest_summary
if hasattr(res, 'self.summarytable'):
summtable = self.summarytable
else:
_, summtable = res.ftest_summary()
txt = ''
#print ft[0] #skip because table is nicer
        templ = \
'''Table of F-tests for overall or pairwise equality of coefficients
%(tab)s
Notes: p-values are not corrected for many tests
       (no Bonferroni correction)
* : reject at 5%% uncorrected confidence level
Null hypothesis: all or pairwise coefficients are the same
Alternative hypothesis: at least one pair of coefficients is different

Comparison with stats.f_oneway
%(statsfow)s

Likelihood Ratio Test
%(lrtest)s
Null model: pooled, all coefficients are the same across groups
Alternative model: all coefficients are allowed to be different
(not verified, but looks close to the f-test result)

OLS parameters by group from individual, separate ols regressions
%(olsbg)s

Check for heteroscedasticity: variance and standard deviation for the
individual regressions (rows: variance, standard deviation; columns: groups)
%(grh)s
'''
from statsmodels.iolib import SimpleTable
resvals = {}
resvals['tab'] = str(SimpleTable([(['%r' % (row[0],)]
+ list(row[1])
                                          + ['*']*(row[1][1] < 0.05).item() ) for row in summtable],
headers=['pair', 'F-statistic','p-value','df_denom',
'df_num']))
resvals['statsfow'] = str(stats.f_oneway(*[res.endog[groupind==gr] for gr in
res.unique]))
#resvals['lrtest'] = str(res.lr_test())
resvals['lrtest'] = str(SimpleTable([res.lr_test()],
headers=['likelihood ratio', 'p-value', 'df'] ))
resvals['olsbg'] = str(SimpleTable([[group]
+ res.olsbygroup[group].params.tolist()
for group in sorted(res.olsbygroup)]))
resvals['grh'] = str(SimpleTable(np.vstack([res.sigmabygroup,
np.sqrt(res.sigmabygroup)]),
headers=res.unique.tolist()))
return templ % resvals
# a variation of this has been added to RegressionResults as compare_lr
def lr_test(self):
r'''
generic likelihood ratio test between nested models
\begin{align}
D & = -2(\ln(\text{likelihood for null model}) - \ln(\text{likelihood for alternative model})) \\
& = -2\ln\left( \frac{\text{likelihood for null model}}{\text{likelihood for alternative model}} \right).
\end{align}
is distributed as chisquare with df equal to difference in number of parameters or equivalently
difference in residual degrees of freedom (sign?)
TODO: put into separate function
'''
if not hasattr(self, 'lsjoint'):
self.fitjoint()
if not hasattr(self, 'lspooled'):
self.fitpooled()
loglikejoint = self.lsjoint.llf
loglikepooled = self.lspooled.llf
lrstat = -2*(loglikepooled - loglikejoint) #??? check sign
lrdf = self.lspooled.df_resid - self.lsjoint.df_resid
lrpval = stats.chi2.sf(lrstat, lrdf)
return lrstat, lrpval, lrdf
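# A minimal usage sketch with synthetic data (illustrative only; not part of
# the original module). Two groups share slopes except for a level shift in
# group 1, so the overall F-test should tend to reject equality.
if __name__ == '__main__':
    np.random.seed(12345)
    nobs = 100
    x = np.column_stack((np.ones(nobs), np.random.randn(nobs, 2)))
    groups = np.random.randint(2, size=nobs)
    beta = np.array([1., 0.5, -0.5])
    y = np.dot(x, beta) + 0.75 * (groups == 1) + 0.5 * np.random.randn(nobs)
    model = OneWayLS(y, x, groups=groups)
    txt, summarytable = model.ftest_summary()
    print(txt)
    print('LR test (statistic, p-value, df):', model.lr_test())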
|
|
#!/usr/bin/env python
"""
Copyright 2014 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import json
import time
import traceback
import base64
import binascii
from collections import namedtuple
from Crypto.Hash import SHA256 as HashAlg
from Crypto.PublicKey import RSA as CryptoKey
from Crypto import Random
from Crypto.Signature import PKCS1_PSS as CryptoSigner
import logging
from logging import Logger
logging.basicConfig( format='[%(levelname)s] [%(module)s:%(lineno)d] %(message)s' )
logger = logging.getLogger()
logger.setLevel( logging.INFO )
import syndicate.util.storage as syndicate_storage_api
import syndicate.observer.core as observer_core
import syndicate.observer.cred as observer_cred
import syndicate.syndicate as c_syndicate
CONFIG = observer_core.get_config()
observer_storage = observer_core.get_observer_storage()
TESTING = False
#-------------------------------
def do_push( sliver_hosts, portnum, payload ):
"""
Push a payload to a list of slivers.
NOTE: this has to be done in one go, since we can't import grequests
    into the global namespace (without wreaking havoc on the credential server),
but it has to stick around for the push to work.
"""
global TESTING, CONFIG
from gevent import monkey
if TESTING:
monkey.patch_all()
else:
        # make gevent runnable from multiple threads (or Django will complain)
monkey.patch_all(socket=True, dns=True, time=True, select=True, thread=False, os=True, ssl=True, httplib=False, aggressive=True)
import grequests
# fan-out
requests = []
for sh in sliver_hosts:
data = {observer_cred.OPENCLOUD_JSON: payload, observer_cred.OPENCLOUD_SLIVER_HOSTNAME: sh}
# TODO: https, using the sliver's public key, since we're pushing over the hostname
rs = grequests.post( "http://" + sh + ":" + str(portnum), data=data, timeout=getattr(CONFIG, "SYNDICATE_HTTP_PUSH_TIMEOUT", 60) )
requests.append( rs )
# fan-in
responses = grequests.map( requests )
assert len(responses) == len(requests), "grequests error: len(responses) != len(requests)"
for i in xrange(0,len(requests)):
resp = responses[i]
req = requests[i]
if resp is None:
logger.error("Failed to connect to %s" % (req.url))
continue
# verify they all worked
if resp.status_code != 200:
logger.error("Failed to POST to %s, status code = %s" % (resp.url, resp.status_code))
continue
return True
#-------------------------------
def push_credentials_to_slice( slice_name, payload ):
"""
Push a credentials payload to the VMs in a slice.
"""
hostnames = observer_storage.get_slice_hostnames( slice_name )
return do_push( hostnames, CONFIG.SYNDICATE_SLIVER_PORT, payload )
def ft_do_push( syndicate_url, volume_name, volume_owner, slice_name, slice_secret, principal_pkey_path, hostname, automount_daemon_port,
instantiate_UG=None, run_UG=None, UG_port=0, UG_closure=None,
instantiate_RG=None, run_RG=None, RG_port=0, RG_closure=None, RG_global_hostname=None,
instantiate_AG=None, run_AG=None, AG_port=0, AG_closure=None, AG_global_hostname=None,
gateway_name_prefix="" ):
"""
Push credentials to a single host.
"""
c_syndicate.crypto_init()
observer_key = syndicate_storage_api.read_private_key( CONFIG.SYNDICATE_OBSERVER_PRIVATE_KEY )
user_key = syndicate_storage_api.read_private_key( principal_pkey_path )
observer_key_pem = observer_key.exportKey()
user_pkey_pem = user_key.exportKey()
if observer_key_pem is None:
raise Exception("Failed to read observer private key from %s" % observer_key_pem )
if user_pkey_pem is None:
raise Exception("Failed to read user private key from %s" % principal_pkey_path )
# convert to binary
slice_secret = binascii.unhexlify( slice_secret )
cred = observer_cred.create_slice_credential_blob( observer_key_pem, slice_name, slice_secret, syndicate_url, volume_name, volume_owner, user_pkey_pem,
instantiate_UG=instantiate_UG, run_UG=run_UG, UG_port=UG_port, UG_closure=UG_closure,
instantiate_RG=instantiate_RG, run_RG=run_RG, RG_port=RG_port, RG_closure=RG_closure, RG_global_hostname=RG_global_hostname,
instantiate_AG=instantiate_AG, run_AG=run_AG, AG_port=AG_port, AG_closure=AG_closure, AG_global_hostname=AG_global_hostname,
gateway_name_prefix=gateway_name_prefix )
if cred is None:
raise Exception("Failed to generate slice credential")
    rc = do_push( [hostname], automount_daemon_port, cred )
    c_syndicate.crypto_shutdown()
    return rc
#-------------------------------
def ft_do_nothing_push( syndicate_url, volume_name, volume_owner, slice_name, slice_secret, principal_pkey_path, hostname, automount_daemon_port ):
"""
Push credentials to a single host.
"""
return ft_do_push( syndicate_url, volume_name, volume_owner, slice_name, slice_secret, principal_pkey_path, hostname, automount_daemon_port, gateway_name_prefix="OpenCloud" )
#-------------------------------
def ft_do_create_UG_push( syndicate_url, volume_name, volume_owner, slice_name, slice_secret, principal_pkey_path, hostname, automount_daemon_port, UG_port ):
"""
Push credentials to a single host.
"""
return ft_do_push( syndicate_url, volume_name, volume_owner, slice_name, slice_secret, principal_pkey_path, hostname, automount_daemon_port, gateway_name_prefix="OpenCloud",
instantiate_UG=True, run_UG=True, UG_port=UG_port, UG_closure=None )
#-------------------------------
def ft_do_start_UG_push( syndicate_url, volume_name, volume_owner, slice_name, slice_secret, principal_pkey_path, hostname, automount_daemon_port ):
"""
Push credentials to a single host.
"""
return ft_do_push( syndicate_url, volume_name, volume_owner, slice_name, slice_secret, principal_pkey_path, hostname, automount_daemon_port, gateway_name_prefix="OpenCloud",
instantiate_UG=None, run_UG=True, UG_port=0, UG_closure=None )
#-------------------------------
def ft_do_stop_UG_push( syndicate_url, volume_name, volume_owner, slice_name, slice_secret, principal_pkey_path, hostname, automount_daemon_port ):
"""
Push credentials to a single host.
"""
return ft_do_push( syndicate_url, volume_name, volume_owner, slice_name, slice_secret, principal_pkey_path, hostname, automount_daemon_port, gateway_name_prefix="OpenCloud",
instantiate_UG=None, run_UG=False, UG_port=0, UG_closure=None )
#-------------------------------
def ft_do_delete_UG_push( syndicate_url, volume_name, volume_owner, slice_name, slice_secret, principal_pkey_path, hostname, automount_daemon_port ):
"""
Push credentials to a single host.
"""
return ft_do_push( syndicate_url, volume_name, volume_owner, slice_name, slice_secret, principal_pkey_path, hostname, automount_daemon_port, gateway_name_prefix="OpenCloud",
instantiate_UG=False, run_UG=False, UG_port=0, UG_closure=None )
# run functional tests
if __name__ == "__main__":
argv = sys.argv[:]
    if len(argv) < 2:
        print "Usage: %s testname [args]" % argv[0]
        sys.exit(1)
TESTING = True
# call a method starting with ft_, and then pass the rest of argv as its arguments
testname = argv[1]
ft_testname = "ft_%s" % testname
test_call = "%s(%s)" % (ft_testname, ",".join(argv[2:]))
print "calling %s" % test_call
rc = eval( test_call )
print "result = %s" % rc
|
|
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ensemble of SNGP models on ImageNet."""
import os
from absl import app
from absl import flags
from absl import logging
import edward2 as ed
import numpy as np
import robustness_metrics as rm
import tensorflow as tf
import tensorflow_datasets as tfds
import uncertainty_baselines as ub
import utils # local file import from baselines.imagenet
flags.DEFINE_integer('per_core_batch_size', 64, 'Batch size per TPU core/GPU.')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_string('data_dir', None, 'Path to training and testing data.')
flags.DEFINE_bool('use_bfloat16', False,
                  'Whether to load the test data in bfloat16.')
flags.DEFINE_string('checkpoint_dir', None,
'The directory where the model weights are stored.')
flags.mark_flag_as_required('checkpoint_dir')
flags.DEFINE_string('output_dir', '/tmp/imagenet',
'The directory where the model weights and '
'training/evaluation summaries are stored.')
flags.DEFINE_string('alexnet_errors_path', None,
'Path to AlexNet corruption errors file.')
flags.DEFINE_integer('num_bins', 15, 'Number of bins for ECE computation.')
# SNGP ensemble flags
flags.DEFINE_float(
'gp_mean_field_factor_ensemble', -1,
'The tunable multiplicative factor used in the mean-field approximation '
'for the posterior mean of softmax Gaussian process. If -1 then use '
'posterior mode instead of posterior mean.')
# Dropout flags.
flags.DEFINE_bool('use_mc_dropout', False,
'Whether to use Monte Carlo dropout during inference.')
flags.DEFINE_float('dropout_rate', 0., 'Dropout rate.')
flags.DEFINE_bool(
    'filterwise_dropout', True, 'Dropout whole convolutional '
    'filters instead of individual values in the feature map.')
# Spectral normalization flags.
flags.DEFINE_bool('use_spec_norm', True,
'Whether to apply spectral normalization.')
flags.DEFINE_integer(
'spec_norm_iteration', 1,
'Number of power iterations to perform for estimating '
'the spectral norm of weight matrices.')
flags.DEFINE_float('spec_norm_bound', 6.,
'Upper bound to spectral norm of weight matrices.')
# Gaussian process flags.
flags.DEFINE_bool('use_gp_layer', True,
'Whether to use Gaussian process as the output layer.')
flags.DEFINE_float('gp_bias', 0., 'The bias term for GP layer.')
flags.DEFINE_float(
'gp_scale', 1.,
'The length-scale parameter for the RBF kernel of the GP layer.')
flags.DEFINE_integer(
'gp_hidden_dim', 1024,
'The hidden dimension of the GP layer, which corresponds to the number of '
'random features used for the approximation.')
flags.DEFINE_bool(
'gp_input_normalization', False,
'Whether to normalize the input for GP layer using LayerNorm. This is '
'similar to applying automatic relevance determination (ARD) in the '
'classic GP literature.')
flags.DEFINE_string(
'gp_random_feature_type', 'orf',
'The type of random feature to use. One of "rff" (random Fourier feature), '
'"orf" (orthogonal random feature).')
flags.DEFINE_float('gp_cov_ridge_penalty', 1.,
'Ridge penalty parameter for GP posterior covariance.')
flags.DEFINE_float(
    'gp_cov_discount_factor', -1,
    'The discount factor to compute the moving average of precision matrix. '
    'If -1 then instead compute the exact covariance at the latest epoch.')
flags.DEFINE_bool(
'gp_output_imagenet_initializer', True,
'Whether to initialize GP output layer using Gaussian with small '
'standard deviation (sd=0.01).')
# Accelerator flags.
flags.DEFINE_bool('use_gpu', True, 'Whether to run on GPU or otherwise TPU.')
flags.DEFINE_integer('num_cores', 1, 'Number of TPU cores or number of GPUs.')
FLAGS = flags.FLAGS
# Number of images in eval dataset.
IMAGENET_VALIDATION_IMAGES = 50000
NUM_CLASSES = 1000
def main(argv):
del argv # unused arg
if not FLAGS.use_gpu:
raise ValueError('Only GPU is currently supported.')
if FLAGS.num_cores > 1:
raise ValueError('Only a single accelerator is currently supported.')
tf.random.set_seed(FLAGS.seed)
tf.io.gfile.makedirs(FLAGS.output_dir)
batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size
test_builder = ub.datasets.ImageNetDataset(
split=tfds.Split.TEST,
use_bfloat16=FLAGS.use_bfloat16,
data_dir=FLAGS.data_dir)
clean_test_dataset = test_builder.load(batch_size=batch_size)
test_datasets = {'clean': clean_test_dataset}
corruption_types, max_intensity = utils.load_corrupted_test_info()
for name in corruption_types:
for intensity in range(1, max_intensity + 1):
dataset_name = '{0}_{1}'.format(name, intensity)
test_datasets[dataset_name] = utils.load_corrupted_test_dataset(
corruption_name=name,
corruption_intensity=intensity,
batch_size=batch_size,
drop_remainder=True,
use_bfloat16=False)
model = ub.models.resnet50_sngp(
input_shape=(224, 224, 3),
batch_size=FLAGS.per_core_batch_size,
num_classes=NUM_CLASSES,
use_mc_dropout=FLAGS.use_mc_dropout,
dropout_rate=FLAGS.dropout_rate,
filterwise_dropout=FLAGS.filterwise_dropout,
use_gp_layer=FLAGS.use_gp_layer,
gp_hidden_dim=FLAGS.gp_hidden_dim,
gp_scale=FLAGS.gp_scale,
gp_bias=FLAGS.gp_bias,
gp_input_normalization=FLAGS.gp_input_normalization,
gp_random_feature_type=FLAGS.gp_random_feature_type,
gp_cov_discount_factor=FLAGS.gp_cov_discount_factor,
gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty,
gp_output_imagenet_initializer=FLAGS.gp_output_imagenet_initializer,
use_spec_norm=FLAGS.use_spec_norm,
spec_norm_iteration=FLAGS.spec_norm_iteration,
spec_norm_bound=FLAGS.spec_norm_bound)
logging.info('Model input shape: %s', model.input_shape)
logging.info('Model output shape: %s', model.output_shape)
logging.info('Model number of weights: %s', model.count_params())
# Search for checkpoints from their index file; then remove the index suffix.
ensemble_filenames = tf.io.gfile.glob(os.path.join(FLAGS.checkpoint_dir,
'**/*.index'))
ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]
ensemble_size = len(ensemble_filenames)
logging.info('Ensemble size: %s', ensemble_size)
logging.info('Ensemble number of weights: %s',
ensemble_size * model.count_params())
logging.info('Ensemble filenames: %s', str(ensemble_filenames))
checkpoint = tf.train.Checkpoint(model=model)
# Write model predictions to files.
num_datasets = len(test_datasets)
for m, ensemble_filename in enumerate(ensemble_filenames):
checkpoint.restore(ensemble_filename)
for n, (name, test_dataset) in enumerate(test_datasets.items()):
filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
filename = os.path.join(FLAGS.output_dir, filename)
if not tf.io.gfile.exists(filename):
logits = []
test_iterator = iter(test_dataset)
for _ in range(steps_per_eval):
features, _ = next(test_iterator) # pytype: disable=attribute-error
logits_member, covmat_member = model(features, training=False)
logits_member = ed.layers.utils.mean_field_logits(
logits_member, covmat_member, FLAGS.gp_mean_field_factor_ensemble)
logits.append(logits_member)
logits = tf.concat(logits, axis=0)
with tf.io.gfile.GFile(filename, 'w') as f:
np.save(f, logits.numpy())
percent = (m * num_datasets + (n + 1)) / (ensemble_size * num_datasets)
message = ('{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
'Dataset {:d}/{:d}'.format(percent,
m + 1,
ensemble_size,
n + 1,
num_datasets))
logging.info(message)
metrics = {
'test/negative_log_likelihood': tf.keras.metrics.Mean(),
'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
'test/ece': rm.metrics.ExpectedCalibrationError(
num_bins=FLAGS.num_bins),
}
corrupt_metrics = {}
for name in test_datasets:
corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
corrupt_metrics['test/accuracy_{}'.format(name)] = (
tf.keras.metrics.SparseCategoricalAccuracy())
corrupt_metrics['test/ece_{}'.format(
name)] = rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins)
# Evaluate model predictions.
for n, (name, test_dataset) in enumerate(test_datasets.items()):
logits_dataset = []
for m in range(ensemble_size):
filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
filename = os.path.join(FLAGS.output_dir, filename)
with tf.io.gfile.GFile(filename, 'rb') as f:
logits_dataset.append(np.load(f))
logits_dataset = tf.convert_to_tensor(logits_dataset)
test_iterator = iter(test_dataset)
for step in range(steps_per_eval):
_, labels = next(test_iterator) # pytype: disable=attribute-error
logits = logits_dataset[:, (step*batch_size):((step+1)*batch_size)]
labels = tf.cast(tf.reshape(labels, [-1]), tf.int32)
negative_log_likelihood_metric = rm.metrics.EnsembleCrossEntropy()
negative_log_likelihood_metric.add_batch(logits, labels=labels)
negative_log_likelihood = list(
negative_log_likelihood_metric.result().values())[0]
per_probs = tf.nn.softmax(logits)
probs = tf.reduce_mean(per_probs, axis=0)
if name == 'clean':
gibbs_ce_metric = rm.metrics.GibbsCrossEntropy()
gibbs_ce_metric.add_batch(logits, labels=labels)
gibbs_ce = list(gibbs_ce_metric.result().values())[0]
metrics['test/negative_log_likelihood'].update_state(
negative_log_likelihood)
metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
metrics['test/accuracy'].update_state(labels, probs)
metrics['test/ece'].add_batch(probs, label=labels)
else:
corrupt_metrics['test/nll_{}'.format(name)].update_state(
negative_log_likelihood)
corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
labels, probs)
corrupt_metrics['test/ece_{}'.format(name)].add_batch(
probs, label=labels)
message = ('{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
(n + 1) / num_datasets, n + 1, num_datasets))
logging.info(message)
corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
corruption_types,
max_intensity,
FLAGS.alexnet_errors_path)
total_results = {name: metric.result() for name, metric in metrics.items()}
# Metrics from Robustness Metrics (like ECE) will return a dict with a
# single key/value, instead of a scalar.
total_results.update(corrupt_results)
total_results = {
k: (list(v.values())[0] if isinstance(v, dict) else v)
for k, v in total_results.items()
}
logging.info('Metrics: %s', total_results)
if __name__ == '__main__':
app.run(main)
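# Illustrative sketch only (not part of the pipeline above): mean_field_logits
# is assumed here to implement the standard mean-field approximation, scaling
# each example's logits by 1/sqrt(1 + lambda * var), where var is that
# example's predictive variance (the diagonal of the GP covariance matrix).
def _mean_field_logits_sketch(logits, covmat, mean_field_factor):
  """Minimal numpy version of the assumed mean-field adjustment."""
  variances = np.diag(covmat)  # per-example predictive variance
  scale = np.sqrt(1. + mean_field_factor * variances)
  return logits / scale[:, None]  # broadcast the scale across classes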
|
|
from datetime import datetime
from enum import Enum
from uuid import UUID
from flask import request
# This is somewhat :(
#
# So we've already batched the database fetches for a bunch of sqlalchemy
# objects together. These objects probably have joins/joinedloads, though,
# which may also need to do data fetches (and/or have joins themselves that
# have data fetches.) Without the function you're reading about now, those
# would all happen serially.
#
# So here's what we'll do. We'll write a function that operates on a list of
# already-converted dictionaries (from a Crumbler.) We can
# find every key/value pair within that dict that has its own data fetching to
# do [1], batch all of that data fetching together and crumble those objects.
# Now we can recursively run ourselves on that resulting list, and replace
# the original value with the final result from this greedy fetch.
#
# Another way of describing this is that we do a depth first search and crumble
# everything we come across, transforming the original structure in place.
#
# [1] The implementation does this for every value that has its own Crumbler
# registered, even if get_extra_attrs_from_db is a no-op
#
# Based on the way serialization is written, this cannot change what we'd
# output... we're basically allowed to run object_to_dict on objects whenever
# we want, as long as we make sure to run serialize on them afterwards. In this
# case, the caller of this function will run serialize on this entire tree
# after we're done.
#
# Philosophically, this function is written so that running it can only make
# things better: there may be corner cases where it fails to batch something
# that should be batched, but if it fails that won't break anything
#
# @param (data): a list of already-crumbled dicts (the caller runs serialize
# on the resulting tree afterwards)
#
# NOTE: right now, this function doesn't fully work
def greedily_try_to_batch_data_fetches(data, extended_registry):
    # we only care about non-empty lists of dicts
    if not isinstance(data, list) or not data or not isinstance(data[0], dict):
        return data
for k in data[0].keys():
if isinstance(data[0][k], _PASSTHROUGH):
# cheap check to see if value is boring before calling
# get_crumbler
continue
crumbler = get_crumbler(data[0][k], extended_registry)
if not crumbler:
# this isn't a key that might do data fetching
continue
objs_to_crumble = [o[k] for o in data]
# let's double check that all of these objects are the same type
# (missing objects/None values are ok)
if len(set(type(o) for o in objs_to_crumble if o)) != 1:
continue
# ok, batch the data fetch for all of these child keys and run
# serializer
attrs = crumbler.get_extra_attrs_from_db(objs_to_crumble)
replacements = [crumbler(o, attrs=attrs.get(o)) if o else None
for o in objs_to_crumble]
# we've serialized the child data fetch. But it might also have
# children that want to fetch data, so let's recursively call greed!
if isinstance(replacements[0], dict):
replacements = greedily_try_to_batch_data_fetches(replacements,
extended_registry)
# replace the originals with our fetched&crumbled objects
replacement_dict = dict(zip(objs_to_crumble, replacements))
for o in data:
if o[k] in replacement_dict:
o[k] = replacement_dict[o[k]]
return data
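# Toy illustration of the batching idea above, using hypothetical stand-in
# types rather than the real Crumbler machinery: every 'author' value shares
# a type, so their extra attrs can be fetched in one batch and substituted
# back in place, instead of one fetch per row.
def _greedy_batching_toy_example():
    class FakeUser(object):
        def __init__(self, uid):
            self.uid = uid

    def batch_fetch(users):
        # one "query" for the whole list instead of one per row
        return dict((u, {'name': 'user-%d' % u.uid}) for u in users)

    rows = [{'author': FakeUser(1)}, {'author': FakeUser(2)}]
    attrs = batch_fetch([row['author'] for row in rows])
    for row in rows:
        row['author'] = attrs[row['author']]
    return rows  # [{'author': {'name': 'user-1'}}, {'author': {'name': 'user-2'}}]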
# Types for which serialization is a no-op.
_PASSTHROUGH = (basestring, bool, int, long, type(None), float)
def serialize(data, extended_registry=None, use_greedy=False):
"""
Converts a data structure of dicts, lists, SQLAlchemy objects, and other
random python objects into something that can be passed to JSON.dumps. This
is not a guarantee...if data contains an object that we don't know how to
handle, we'll just leave it in.
extended_registry: additional Crumblers to use. e.g. one API might
want to use a special crumbler for Jobs that also adds Build
information and passes { Job: JobWithBuildCrumbler }
    It's safe (but CPU-expensive) to rerun serialize on data multiple times
"""
if extended_registry is None:
extended_registry = {}
if isinstance(data, _PASSTHROUGH):
return data
if isinstance(data, dict):
for k, v in data.iteritems():
if not isinstance(v, _PASSTHROUGH) or not isinstance(k, _PASSTHROUGH):
# Gotta do it the hard way.
return dict(zip(serialize(data.keys(), extended_registry),
serialize(data.values(), extended_registry)))
# All keys and values were passthrough, so the dict is already
# serialized.
return data
if isinstance(data, (list, tuple, set, frozenset)):
if not data:
return []
# if every item in the list is the same, we want to batch fetch any
# necessary data from the db before serializing them all
if len(set(type(g) for g in data)) == 1:
# Make sure it is a list.
if not isinstance(data, list):
data = list(data)
# If we have a list of passthrough, we're done.
if isinstance(data[0], _PASSTHROUGH):
return data
crumbler = get_crumbler(data[0], extended_registry)
if crumbler:
attrs = crumbler.get_extra_attrs_from_db(data)
data = [crumbler(o, attrs=attrs.get(o)) for o in data]
test_greedy_batching = use_greedy or int(request.args.get('__batch__', 0))
if test_greedy_batching:
greedily_try_to_batch_data_fetches(data, extended_registry)
return [serialize(j, extended_registry) for j in data]
# if we're here, we have a single object that we probably need to convert
# using a crumbler
crumbler = get_crumbler(data, extended_registry)
if crumbler is None:
return data
attrs = crumbler.get_extra_attrs_from_db([data])
data = crumbler(data, attrs=attrs.get(data))
return serialize(data, extended_registry)
#
# Crumbler code: the code that converts SQLAlchemy objects into dictionaries.
# Classes for individual SQLAlchemy objects are in models/
#
# We create a registry of crumblers using class decorators, e.g. use this class
# to convert DateTime objects to strings. serialize() just looks up the right
# class to use and calls it on its objects
#
_registry = {}
def register(type):
def wrapped(cls):
_registry[type] = cls()
return cls
return wrapped
def get_crumbler(item, registry):
item_type = type(item)
crumbler = registry.get(item_type, _registry.get(item_type))
if crumbler is None:
for cls, _crumbler in _registry.iteritems():
if issubclass(item_type, cls):
crumbler = _crumbler
break
return crumbler
class Crumbler(object):
"""
Converts an object (most often a SQLAlchemy object) to a dict/string/int.
This is shallow: the returned dict may have values that need to be crumbled
themselves.
Why "Crumble"? The name is suggestive of what the class does, and you very likely
went to these docs to find out more.
"""
def __call__(self, item, attrs):
return self.crumble(item, attrs)
def get_extra_attrs_from_db(self, item_list):
"""
We may need to do additional data fetching to convert an object: for
example, we want to look up the phabricator callsign when returning
revision objects. This function can take a list of objects to crumble
and returns an attrs dict (object => additional fetched data to use in
crumble)
"""
return {}
def crumble(self, item, attrs):
"""
Does the actual conversion from object to something simpler. attrs
should come from get_extra_attrs_from_db, e.g.:
all_attrs = cls.get_extra_attrs_from_db(item_list)
s = [cls.crumble(item, all_attrs.get(item)) for item in item_list]
"""
return {}
@register(datetime)
class DateTimeCrumbler(Crumbler):
def crumble(self, item, attrs):
return item.isoformat()
@register(Enum)
class EnumCrumbler(Crumbler):
def crumble(self, item, attrs):
return {
'id': item.name,
'name': unicode(item),
}
@register(UUID)
class UUIDCrumbler(Crumbler):
def crumble(self, item, attrs):
return item.hex
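# Usage sketch for the registry above: serialize() looks up the crumbler for
# a value's type and applies it. Point and PointCrumbler are illustrative
# only (and registering inside a function is purely for demonstration; each
# call grows _registry).
def _crumbler_usage_example():
    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y

    @register(Point)
    class PointCrumbler(Crumbler):
        def crumble(self, item, attrs):
            return {'x': item.x, 'y': item.y}

    assert serialize(Point(1, 2)) == {'x': 1, 'y': 2}
    assert serialize(UUID(int=1)) == UUID(int=1).hex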
|
|
# -*- coding: utf-8 -*-
'''
Wrap libsodium routines
'''
# pylint: disable=C0103
# Import libnacl libs
from libnacl.version import __version__
# Import python libs
import ctypes
import sys
import os
__SONAMES = (18, 17, 13, 10, 5, 4)
def _get_nacl():
'''
Locate the nacl c libs to use
'''
# Import libsodium
if sys.platform.startswith('win'):
try:
return ctypes.cdll.LoadLibrary('libsodium')
except OSError:
pass
for soname_ver in __SONAMES:
try:
return ctypes.cdll.LoadLibrary(
'libsodium-{0}'.format(soname_ver)
)
except OSError:
pass
msg = 'Could not locate nacl lib, searched for libsodium'
raise OSError(msg)
elif sys.platform.startswith('darwin'):
try:
return ctypes.cdll.LoadLibrary('libsodium.dylib')
except OSError:
pass
        try:
            libidx = __file__.find('lib')
            if libidx > 0:
                libpath = __file__[0:libidx + 3] + '/libsodium.dylib'
                return ctypes.cdll.LoadLibrary(libpath)
        except OSError:
            pass
        msg = 'Could not locate nacl lib, searched for libsodium.dylib'
        raise OSError(msg)
else:
try:
return ctypes.cdll.LoadLibrary('libsodium.so')
except OSError:
pass
try:
return ctypes.cdll.LoadLibrary('/usr/local/lib/libsodium.so')
except OSError:
pass
try:
libidx = __file__.find('lib')
if libidx > 0:
libpath = __file__[0:libidx+3] + '/libsodium.so'
return ctypes.cdll.LoadLibrary(libpath)
except OSError:
pass
for soname_ver in __SONAMES:
try:
return ctypes.cdll.LoadLibrary(
'libsodium.so.{0}'.format(soname_ver)
)
except OSError:
pass
try:
# fall back to shipped libsodium, trust os version first
libpath = os.path.join(os.path.dirname(__file__), 'libsodium.so')
return ctypes.cdll.LoadLibrary(libpath)
except OSError:
pass
msg = 'Could not locate nacl lib, searched for libsodium.so, '
for soname_ver in __SONAMES:
msg += 'libsodium.so.{0}, '.format(soname_ver)
raise OSError(msg)
nacl = _get_nacl()
# Define exceptions
class CryptError(Exception):
"""
Base Exception for cryptographic errors
"""
sodium_init = nacl.sodium_init
sodium_init.restype = ctypes.c_int
if sodium_init() < 0:
raise RuntimeError('sodium_init() call failed!')
# Define constants
try:
crypto_box_SEALBYTES = nacl.crypto_box_sealbytes()
HAS_SEAL = True
except AttributeError:
HAS_SEAL = False
crypto_box_SECRETKEYBYTES = nacl.crypto_box_secretkeybytes()
crypto_box_SEEDBYTES = nacl.crypto_box_seedbytes()
crypto_box_PUBLICKEYBYTES = nacl.crypto_box_publickeybytes()
crypto_box_NONCEBYTES = nacl.crypto_box_noncebytes()
crypto_box_ZEROBYTES = nacl.crypto_box_zerobytes()
crypto_box_BOXZEROBYTES = nacl.crypto_box_boxzerobytes()
crypto_box_BEFORENMBYTES = nacl.crypto_box_beforenmbytes()
crypto_scalarmult_BYTES = nacl.crypto_scalarmult_bytes()
crypto_scalarmult_SCALARBYTES = nacl.crypto_scalarmult_scalarbytes()
crypto_sign_BYTES = nacl.crypto_sign_bytes()
crypto_sign_SEEDBYTES = nacl.crypto_sign_secretkeybytes() // 2
crypto_sign_PUBLICKEYBYTES = nacl.crypto_sign_publickeybytes()
crypto_sign_SECRETKEYBYTES = nacl.crypto_sign_secretkeybytes()
crypto_box_MACBYTES = crypto_box_ZEROBYTES - crypto_box_BOXZEROBYTES
crypto_secretbox_KEYBYTES = nacl.crypto_secretbox_keybytes()
crypto_secretbox_NONCEBYTES = nacl.crypto_secretbox_noncebytes()
crypto_secretbox_ZEROBYTES = nacl.crypto_secretbox_zerobytes()
crypto_secretbox_BOXZEROBYTES = nacl.crypto_secretbox_boxzerobytes()
crypto_secretbox_MACBYTES = crypto_secretbox_ZEROBYTES - crypto_secretbox_BOXZEROBYTES
crypto_stream_KEYBYTES = nacl.crypto_stream_keybytes()
crypto_stream_NONCEBYTES = nacl.crypto_stream_noncebytes()
crypto_auth_BYTES = nacl.crypto_auth_bytes()
crypto_auth_KEYBYTES = nacl.crypto_auth_keybytes()
crypto_onetimeauth_BYTES = nacl.crypto_onetimeauth_bytes()
crypto_onetimeauth_KEYBYTES = nacl.crypto_onetimeauth_keybytes()
crypto_generichash_BYTES = nacl.crypto_generichash_bytes()
crypto_generichash_BYTES_MIN = nacl.crypto_generichash_bytes_min()
crypto_generichash_BYTES_MAX = nacl.crypto_generichash_bytes_max()
crypto_generichash_KEYBYTES = nacl.crypto_generichash_keybytes()
crypto_generichash_KEYBYTES_MIN = nacl.crypto_generichash_keybytes_min()
crypto_generichash_KEYBYTES_MAX = nacl.crypto_generichash_keybytes_max()
crypto_scalarmult_curve25519_BYTES = nacl.crypto_scalarmult_curve25519_bytes()
crypto_hash_BYTES = nacl.crypto_hash_sha512_bytes()
crypto_hash_sha256_BYTES = nacl.crypto_hash_sha256_bytes()
crypto_hash_sha512_BYTES = nacl.crypto_hash_sha512_bytes()
crypto_verify_16_BYTES = nacl.crypto_verify_16_bytes()
crypto_verify_32_BYTES = nacl.crypto_verify_32_bytes()
crypto_verify_64_BYTES = nacl.crypto_verify_64_bytes()
# pylint: enable=C0103
# Pubkey defs
def crypto_box_keypair():
'''
Generate and return a new keypair
pk, sk = nacl.crypto_box_keypair()
'''
pk = ctypes.create_string_buffer(crypto_box_PUBLICKEYBYTES)
sk = ctypes.create_string_buffer(crypto_box_SECRETKEYBYTES)
nacl.crypto_box_keypair(pk, sk)
return pk.raw, sk.raw
def crypto_box(msg, nonce, pk, sk):
'''
    Encrypt the given message using the receiver's public key and the
    sender's secret key. A nonce must also be passed in; never reuse a nonce
enc_msg = nacl.crypto_box('secret message', <unique nonce>, <public key string>, <secret key string>)
'''
if len(pk) != crypto_box_PUBLICKEYBYTES:
raise ValueError('Invalid public key')
if len(sk) != crypto_box_SECRETKEYBYTES:
raise ValueError('Invalid secret key')
if len(nonce) != crypto_box_NONCEBYTES:
raise ValueError('Invalid nonce')
pad = b'\x00' * crypto_box_ZEROBYTES + msg
c = ctypes.create_string_buffer(len(pad))
ret = nacl.crypto_box(c, pad, ctypes.c_ulonglong(len(pad)), nonce, pk, sk)
if ret:
raise CryptError('Unable to encrypt message')
return c.raw[crypto_box_BOXZEROBYTES:]
def crypto_box_open(ctxt, nonce, pk, sk):
'''
    Decrypts a message given the receiver's private key and the sender's
    public key
'''
if len(pk) != crypto_box_PUBLICKEYBYTES:
raise ValueError('Invalid public key')
if len(sk) != crypto_box_SECRETKEYBYTES:
raise ValueError('Invalid secret key')
if len(nonce) != crypto_box_NONCEBYTES:
raise ValueError('Invalid nonce')
pad = b'\x00' * crypto_box_BOXZEROBYTES + ctxt
msg = ctypes.create_string_buffer(len(pad))
ret = nacl.crypto_box_open(
msg,
pad,
ctypes.c_ulonglong(len(pad)),
nonce,
pk,
sk)
if ret:
raise CryptError('Unable to decrypt ciphertext')
return msg.raw[crypto_box_ZEROBYTES:]
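# Roundtrip sketch for the two functions above: generate both keypairs, draw
# a fresh random nonce, encrypt with (receiver pk, sender sk), and decrypt
# with (sender pk, receiver sk). randombytes is defined later in this module.
def _crypto_box_example():
    spk, ssk = crypto_box_keypair()  # sender
    rpk, rsk = crypto_box_keypair()  # receiver
    nonce = randombytes(crypto_box_NONCEBYTES)
    ctxt = crypto_box(b'secret message', nonce, rpk, ssk)
    assert crypto_box_open(ctxt, nonce, spk, rsk) == b'secret message'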
def crypto_box_beforenm(pk, sk):
'''
Partially performs the computation required for both encryption and decryption of data
'''
if len(pk) != crypto_box_PUBLICKEYBYTES:
raise ValueError('Invalid public key')
if len(sk) != crypto_box_SECRETKEYBYTES:
raise ValueError('Invalid secret key')
k = ctypes.create_string_buffer(crypto_box_BEFORENMBYTES)
ret = nacl.crypto_box_beforenm(k, pk, sk)
if ret:
raise CryptError('Unable to compute shared key')
return k.raw
def crypto_box_afternm(msg, nonce, k):
    '''
    Encrypts the given message using the precomputed shared key k
    '''
if len(k) != crypto_box_BEFORENMBYTES:
raise ValueError('Invalid shared key')
if len(nonce) != crypto_box_NONCEBYTES:
raise ValueError('Invalid nonce')
pad = b'\x00' * crypto_box_ZEROBYTES + msg
ctxt = ctypes.create_string_buffer(len(pad))
ret = nacl.crypto_box_afternm(ctxt, pad, ctypes.c_ulonglong(len(pad)), nonce, k)
if ret:
        raise CryptError('Unable to encrypt message')
return ctxt.raw[crypto_box_BOXZEROBYTES:]
def crypto_box_open_afternm(ctxt, nonce, k):
    '''
    Decrypts a ciphertext ctxt using the precomputed shared key k
    '''
if len(k) != crypto_box_BEFORENMBYTES:
raise ValueError('Invalid shared key')
if len(nonce) != crypto_box_NONCEBYTES:
raise ValueError('Invalid nonce')
pad = b'\x00' * crypto_box_BOXZEROBYTES + ctxt
msg = ctypes.create_string_buffer(len(pad))
ret = nacl.crypto_box_open_afternm(
msg,
pad,
ctypes.c_ulonglong(len(pad)),
nonce,
k)
if ret:
raise CryptError('unable to decrypt message')
return msg.raw[crypto_box_ZEROBYTES:]
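# Sketch of the precomputation variant: derive the shared key once with
# crypto_box_beforenm, then encrypt/decrypt many messages with the cheaper
# *_afternm calls. Both sides derive the same shared key.
def _crypto_box_afternm_example():
    spk, ssk = crypto_box_keypair()
    rpk, rsk = crypto_box_keypair()
    nonce = randombytes(crypto_box_NONCEBYTES)
    k_send = crypto_box_beforenm(rpk, ssk)  # sender's view of the shared key
    k_recv = crypto_box_beforenm(spk, rsk)  # receiver derives the same key
    ctxt = crypto_box_afternm(b'msg', nonce, k_send)
    assert crypto_box_open_afternm(ctxt, nonce, k_recv) == b'msg'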
def crypto_box_seal(msg, pk):
'''
    Encrypt the given message using only the receiver's public key; the
    identity of the sender cannot be verified.
enc_msg = nacl.crypto_box_seal('secret message', <public key string>)
'''
if not HAS_SEAL:
raise ValueError('Underlying Sodium library does not support sealed boxes')
if len(pk) != crypto_box_PUBLICKEYBYTES:
raise ValueError('Invalid public key')
if not isinstance(msg, bytes):
raise TypeError('Message must be bytes')
c = ctypes.create_string_buffer(len(msg) + crypto_box_SEALBYTES)
ret = nacl.crypto_box_seal(c, msg, ctypes.c_ulonglong(len(msg)), pk)
if ret:
raise CryptError('Unable to encrypt message')
return c.raw
def crypto_box_seal_open(ctxt, pk, sk):
'''
Decrypts a message given the receiver's public and private key.
'''
if not HAS_SEAL:
raise ValueError('Underlying Sodium library does not support sealed boxes')
if len(pk) != crypto_box_PUBLICKEYBYTES:
raise ValueError('Invalid public key')
if len(sk) != crypto_box_SECRETKEYBYTES:
raise ValueError('Invalid secret key')
if not isinstance(ctxt, bytes):
raise TypeError('Message must be bytes')
c = ctypes.create_string_buffer(len(ctxt) - crypto_box_SEALBYTES)
ret = nacl.crypto_box_seal_open(c, ctxt, ctypes.c_ulonglong(len(ctxt)), pk, sk)
if ret:
raise CryptError('Unable to decrypt message')
return c.raw
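# Sealed-box sketch: only the receiver's public key is needed to encrypt and
# no nonce is handled by the caller. Guarded on HAS_SEAL because older
# libsodium builds lack the primitive.
def _crypto_box_seal_example():
    if not HAS_SEAL:
        return
    pk, sk = crypto_box_keypair()
    ctxt = crypto_box_seal(b'anonymous message', pk)
    assert crypto_box_seal_open(ctxt, pk, sk) == b'anonymous message'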
# Signing functions
def crypto_sign_keypair():
'''
Generates a signing/verification key pair
'''
vk = ctypes.create_string_buffer(crypto_sign_PUBLICKEYBYTES)
sk = ctypes.create_string_buffer(crypto_sign_SECRETKEYBYTES)
ret = nacl.crypto_sign_keypair(vk, sk)
if ret:
raise ValueError('Failed to generate keypair')
return vk.raw, sk.raw
def crypto_sign(msg, sk):
'''
    Sign the given message with the given signing key
'''
sig = ctypes.create_string_buffer(len(msg) + crypto_sign_BYTES)
slen = ctypes.pointer(ctypes.c_ulonglong())
ret = nacl.crypto_sign(
sig,
slen,
msg,
ctypes.c_ulonglong(len(msg)),
sk)
if ret:
raise ValueError('Failed to sign message')
return sig.raw
def crypto_sign_seed_keypair(seed):
'''
Computes and returns the secret and verify keys from the given seed
'''
if len(seed) != crypto_sign_SEEDBYTES:
raise ValueError('Invalid Seed')
sk = ctypes.create_string_buffer(crypto_sign_SECRETKEYBYTES)
vk = ctypes.create_string_buffer(crypto_sign_PUBLICKEYBYTES)
ret = nacl.crypto_sign_seed_keypair(vk, sk, seed)
if ret:
raise CryptError('Failed to generate keypair from seed')
return (vk.raw, sk.raw)
def crypto_sign_open(sig, vk):
'''
Verifies the signed message sig using the signer's verification key
'''
msg = ctypes.create_string_buffer(len(sig))
msglen = ctypes.c_ulonglong()
msglenp = ctypes.pointer(msglen)
ret = nacl.crypto_sign_open(
msg,
msglenp,
sig,
ctypes.c_ulonglong(len(sig)),
vk)
if ret:
raise ValueError('Failed to validate message')
return msg.raw[:msglen.value] # pylint: disable=invalid-slice-index
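# Signing sketch: crypto_sign prepends the signature to the message, and
# crypto_sign_open both verifies the signature and strips it off.
def _crypto_sign_example():
    vk, sk = crypto_sign_keypair()
    signed = crypto_sign(b'signed message', sk)
    assert crypto_sign_open(signed, vk) == b'signed message'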
# Authenticated Symmetric Encryption
def crypto_secretbox(message, nonce, key):
"""Encrypts and authenticates a message using the given secret key, and nonce
Args:
message (bytes): a message to encrypt
        nonce (bytes): nonce, does not have to be confidential but must be
            `crypto_secretbox_NONCEBYTES` in length
key (bytes): secret key, must be `crypto_secretbox_KEYBYTES` in
length
Returns:
bytes: the ciphertext
Raises:
ValueError: if arguments' length is wrong or the operation has failed.
"""
if len(key) != crypto_secretbox_KEYBYTES:
raise ValueError('Invalid key')
if len(nonce) != crypto_secretbox_NONCEBYTES:
raise ValueError('Invalid nonce')
pad = b'\x00' * crypto_secretbox_ZEROBYTES + message
ctxt = ctypes.create_string_buffer(len(pad))
ret = nacl.crypto_secretbox(
ctxt, pad, ctypes.c_ulonglong(len(pad)), nonce, key)
if ret:
raise ValueError('Failed to encrypt message')
return ctxt.raw[crypto_secretbox_BOXZEROBYTES:]
def crypto_secretbox_open(ctxt, nonce, key):
"""
Decrypts a ciphertext ctxt given the receivers private key, and senders
public key
"""
if len(key) != crypto_secretbox_KEYBYTES:
raise ValueError('Invalid key')
if len(nonce) != crypto_secretbox_NONCEBYTES:
raise ValueError('Invalid nonce')
pad = b'\x00' * crypto_secretbox_BOXZEROBYTES + ctxt
msg = ctypes.create_string_buffer(len(pad))
ret = nacl.crypto_secretbox_open(
msg,
pad,
ctypes.c_ulonglong(len(pad)),
nonce,
key)
if ret:
raise ValueError('Failed to decrypt message')
return msg.raw[crypto_secretbox_ZEROBYTES:]
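# Secretbox sketch: a single shared secret key both encrypts and decrypts;
# the nonce must be unique per message but need not be secret.
def _crypto_secretbox_example():
    key = randombytes(crypto_secretbox_KEYBYTES)
    nonce = randombytes(crypto_secretbox_NONCEBYTES)
    ctxt = crypto_secretbox(b'top secret', nonce, key)
    assert crypto_secretbox_open(ctxt, nonce, key) == b'top secret'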
# Symmetric Encryption
def crypto_stream(slen, nonce, key):
'''
Generates a stream using the given secret key and nonce
'''
stream = ctypes.create_string_buffer(slen)
ret = nacl.crypto_stream(stream, ctypes.c_ulonglong(slen), nonce, key)
if ret:
raise ValueError('Failed to init stream')
return stream.raw
def crypto_stream_xor(msg, nonce, key):
'''
Encrypts the given message using the given secret key and nonce
The crypto_stream_xor function guarantees that the ciphertext is the
plaintext (xor) the output of crypto_stream. Consequently
crypto_stream_xor can also be used to decrypt
'''
stream = ctypes.create_string_buffer(len(msg))
ret = nacl.crypto_stream_xor(
stream,
msg,
ctypes.c_ulonglong(len(msg)),
nonce,
key)
if ret:
raise ValueError('Failed to init stream')
return stream.raw
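# Because crypto_stream_xor is plaintext XOR keystream, applying it twice
# with the same key and nonce recovers the original message.
def _crypto_stream_xor_example():
    key = randombytes(crypto_stream_KEYBYTES)
    nonce = randombytes(crypto_stream_NONCEBYTES)
    ctxt = crypto_stream_xor(b'plaintext', nonce, key)
    assert crypto_stream_xor(ctxt, nonce, key) == b'plaintext'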
# Authentication
def crypto_auth(msg, key):
    '''
    Constructs an authentication token for the given message msg using a
    given secret key
    '''
'''
tok = ctypes.create_string_buffer(crypto_auth_BYTES)
ret = nacl.crypto_auth(tok, msg, ctypes.c_ulonglong(len(msg)), key)
if ret:
raise ValueError('Failed to auth msg')
return tok.raw[:crypto_auth_BYTES]
def crypto_auth_verify(tok, msg, key):
'''
Verifies that the given authentication token is correct for the given
message and key
'''
ret = nacl.crypto_auth_verify(tok, msg, ctypes.c_ulonglong(len(msg)), key)
if ret:
raise ValueError('Failed to auth msg')
return msg
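# Authentication sketch: crypto_auth_verify raises ValueError on a bad token,
# so a successful call simply returns the message.
def _crypto_auth_example():
    key = randombytes(crypto_auth_KEYBYTES)
    msg = b'authenticated message'
    tok = crypto_auth(msg, key)
    assert crypto_auth_verify(tok, msg, key) == msg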
# One time authentication
def crypto_onetimeauth_primitive():
"""
Return the onetimeauth underlying primitive
Returns:
str: always ``poly1305``
"""
func = nacl.crypto_onetimeauth_primitive
func.restype = ctypes.c_char_p
return func().decode()
def crypto_onetimeauth(message, key):
"""
Constructs a one time authentication token for the given message using
a given secret key
Args:
message (bytes): message to authenticate.
key (bytes): secret key - must be of crypto_onetimeauth_KEYBYTES length.
Returns:
bytes: an authenticator, of crypto_onetimeauth_BYTES length.
Raises:
ValueError: if arguments' length is wrong.
"""
if len(key) != crypto_onetimeauth_KEYBYTES:
raise ValueError('Invalid secret key')
tok = ctypes.create_string_buffer(crypto_onetimeauth_BYTES)
# cannot fail
_ = nacl.crypto_onetimeauth(
tok, message, ctypes.c_ulonglong(len(message)), key)
return tok.raw[:crypto_onetimeauth_BYTES]
def crypto_onetimeauth_verify(token, message, key):
"""
Verifies, in constant time, that ``token`` is a correct authenticator for
the message using the secret key.
Args:
token (bytes): an authenticator of crypto_onetimeauth_BYTES length.
message (bytes): The message to authenticate.
        key (bytes): secret key - must be of crypto_onetimeauth_KEYBYTES
            length.
    Returns:
        bytes: the verified message, unchanged.
Raises:
ValueError: if arguments' length is wrong or verification has failed.
"""
if len(key) != crypto_onetimeauth_KEYBYTES:
raise ValueError('Invalid secret key')
if len(token) != crypto_onetimeauth_BYTES:
raise ValueError('Invalid authenticator')
ret = nacl.crypto_onetimeauth_verify(
token, message, ctypes.c_ulonglong(len(message)), key)
if ret:
raise ValueError('Failed to auth message')
return message
# Hashing
def crypto_hash(msg):
'''
Compute a hash of the given message
'''
hbuf = ctypes.create_string_buffer(crypto_hash_BYTES)
nacl.crypto_hash(hbuf, msg, ctypes.c_ulonglong(len(msg)))
return hbuf.raw
def crypto_hash_sha256(msg):
'''
Compute the sha256 hash of the given message
'''
hbuf = ctypes.create_string_buffer(crypto_hash_sha256_BYTES)
nacl.crypto_hash_sha256(hbuf, msg, ctypes.c_ulonglong(len(msg)))
return hbuf.raw
def crypto_hash_sha512(msg):
'''
Compute the sha512 hash of the given message
'''
hbuf = ctypes.create_string_buffer(crypto_hash_sha512_BYTES)
nacl.crypto_hash_sha512(hbuf, msg, ctypes.c_ulonglong(len(msg)))
return hbuf.raw
# Generic Hash
def crypto_generichash(msg, key=None):
'''
Compute the blake2 hash of the given message with a given key
'''
hbuf = ctypes.create_string_buffer(crypto_generichash_BYTES)
if key:
key_len = len(key)
else:
key_len = 0
nacl.crypto_generichash(
hbuf,
ctypes.c_size_t(len(hbuf)),
msg,
ctypes.c_ulonglong(len(msg)),
key,
ctypes.c_size_t(key_len))
return hbuf.raw
# scalarmult
def crypto_scalarmult_base(n):
'''
Computes and returns the scalar product of a standard group element and an
integer "n".
'''
buf = ctypes.create_string_buffer(crypto_scalarmult_BYTES)
ret = nacl.crypto_scalarmult_base(buf, n)
if ret:
raise CryptError('Failed to compute scalar product')
return buf.raw
# String cmp
def crypto_verify_16(string1, string2):
'''
Compares the first crypto_verify_16_BYTES of the given strings
The time taken by the function is independent of the contents of string1
and string2. In contrast, the standard C comparison function
memcmp(string1,string2,16) takes time that is dependent on the longest
matching prefix of string1 and string2. This often allows for easy
timing attacks.
'''
return not nacl.crypto_verify_16(string1, string2)
def crypto_verify_32(string1, string2):
'''
Compares the first crypto_verify_32_BYTES of the given strings
The time taken by the function is independent of the contents of string1
and string2. In contrast, the standard C comparison function
memcmp(string1,string2,32) takes time that is dependent on the longest
matching prefix of string1 and string2. This often allows for easy
timing attacks.
'''
return not nacl.crypto_verify_32(string1, string2)
def crypto_verify_64(string1, string2):
'''
Compares the first crypto_verify_64_BYTES of the given strings
The time taken by the function is independent of the contents of string1
and string2. In contrast, the standard C comparison function
memcmp(string1,string2,64) takes time that is dependent on the longest
matching prefix of string1 and string2. This often allows for easy
timing attacks.
'''
return not nacl.crypto_verify_64(string1, string2)
def bytes_eq(a, b):
'''
Compares two byte instances with one another. If `a` and `b` have
different lengths, return `False` immediately. Otherwise `a` and `b`
will be compared in constant time.
Return `True` in case `a` and `b` are equal. Otherwise `False`.
Raises :exc:`TypeError` in case `a` and `b` are not both of the type
:class:`bytes`.
'''
if not isinstance(a, bytes) or not isinstance(b, bytes):
raise TypeError('Both arguments must be bytes.')
len_a = len(a)
len_b = len(b)
if len_a != len_b:
return False
return nacl.sodium_memcmp(a, b, len_a) == 0
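# bytes_eq sketch: equality checks on secrets (tokens, MACs, keys) should use
# this rather than ==, which can leak timing information.
def _bytes_eq_example():
    assert bytes_eq(b'token', b'token')
    assert not bytes_eq(b'token', b'tok3n')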
# Random byte generation
def randombytes(size):
'''
Return a string of random bytes of the given size
'''
buf = ctypes.create_string_buffer(size)
nacl.randombytes(buf, ctypes.c_ulonglong(size))
return buf.raw
def randombytes_buf(size):
'''
Return a string of random bytes of the given size
'''
size = int(size)
buf = ctypes.create_string_buffer(size)
nacl.randombytes_buf(buf, size)
return buf.raw
def randombytes_close():
'''
Close the file descriptor or the handle for the cryptographic service
provider
'''
nacl.randombytes_close()
def randombytes_random():
'''
Return a random 32-bit unsigned value
'''
return nacl.randombytes_random()
def randombytes_stir():
'''
Generate a new key for the pseudorandom number generator
The file descriptor for the entropy source is kept open, so that the
generator can be reseeded even in a chroot() jail.
'''
nacl.randombytes_stir()
def randombytes_uniform(upper_bound):
'''
Return a value between 0 and upper_bound using a uniform distribution
'''
return nacl.randombytes_uniform(upper_bound)
# Utility functions
def sodium_library_version_major():
'''
Return the major version number
'''
return nacl.sodium_library_version_major()
def sodium_library_version_minor():
'''
Return the minor version number
'''
return nacl.sodium_library_version_minor()
def sodium_version_string():
'''
Return the version string
'''
func = nacl.sodium_version_string
func.restype = ctypes.c_char_p
return func()
def crypto_box_seed_keypair(seed):
'''
Computes and returns the public and secret keys from the given seed
'''
if len(seed) != crypto_box_SEEDBYTES:
raise ValueError('Invalid Seed')
pk = ctypes.create_string_buffer(crypto_box_PUBLICKEYBYTES)
sk = ctypes.create_string_buffer(crypto_box_SECRETKEYBYTES)
ret = nacl.crypto_box_seed_keypair(pk, sk, seed)
if ret:
raise CryptError('Failed to generate keypair from seed')
return (pk.raw, sk.raw)
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import fnmatch
import imp
import logging
import os
import sys
import zipfile
from telemetry.internal.util import command_line
from telemetry.internal.util import path
from telemetry.internal.util import path_set
try:
from modulegraph import modulegraph # pylint: disable=import-error
except ImportError as err:
modulegraph = None
import_error = err
from core import bootstrap
from core import path_util
DEPS_FILE = 'bootstrap_deps'
def FindBootstrapDependencies(base_dir):
deps_file = os.path.join(base_dir, DEPS_FILE)
if not os.path.exists(deps_file):
return []
deps_paths = bootstrap.ListAllDepsPaths(deps_file)
return set(os.path.realpath(os.path.join(
path_util.GetChromiumSrcDir(), '..', deps_path))
for deps_path in deps_paths)
def FindPythonDependencies(module_path):
logging.info('Finding Python dependencies of %s', module_path)
if modulegraph is None:
raise import_error
prefixes = [sys.prefix]
if hasattr(sys, 'real_prefix'):
prefixes.append(sys.real_prefix)
logging.info('Excluding Prefixes: %r', prefixes)
sys_path = sys.path
sys.path = list(sys_path)
try:
# Load the module to inherit its sys.path modifications.
sys.path.insert(0, os.path.abspath(os.path.dirname(module_path)))
imp.load_source(
os.path.splitext(os.path.basename(module_path))[0], module_path)
# Analyze the module for its imports.
graph = modulegraph.ModuleGraph()
graph.run_script(module_path)
# Filter for only imports in Chromium.
for node in graph.nodes():
if not node.filename:
continue
module_path = os.path.realpath(node.filename)
_, incoming_edges = graph.get_edges(node)
message = 'Discovered %s (Imported by: %s)' % (
node.filename, ', '.join(
d.filename for d in incoming_edges
if d is not None and d.filename is not None))
logging.info(message)
# This check is done after the logging/printing above to make sure that
# we also print out the dependency edges that include python packages
# that are not in chromium.
if not path.IsSubpath(module_path, path_util.GetChromiumSrcDir()):
continue
# Exclude any dependencies which exist in the python installation.
if any(path.IsSubpath(module_path, pfx) for pfx in prefixes):
continue
yield module_path
if node.packagepath is not None:
for p in node.packagepath:
yield p
finally:
sys.path = sys_path
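# Minimal illustrative sketch of the modulegraph usage above: build a graph
# for a script and list the files it transitively imports.
def _ModulegraphExample(script_path):
  if modulegraph is None:
    raise import_error
  graph = modulegraph.ModuleGraph()
  graph.run_script(script_path)
  return sorted(node.filename for node in graph.nodes() if node.filename)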
def FindExcludedFiles(files, options):
# Define some filters for files.
def IsHidden(path_string):
for pathname_component in path_string.split(os.sep):
if pathname_component.startswith('.'):
return True
return False
def IsPyc(path_string):
return os.path.splitext(path_string)[1] == '.pyc'
def IsInCloudStorage(path_string):
return os.path.exists(path_string + '.sha1')
def MatchesExcludeOptions(path_string):
for pattern in options.exclude:
if (fnmatch.fnmatch(path_string, pattern) or
fnmatch.fnmatch(os.path.basename(path_string), pattern)):
return True
return False
# Collect filters we're going to use to exclude files.
exclude_conditions = [
IsHidden,
IsPyc,
IsInCloudStorage,
MatchesExcludeOptions,
]
# Check all the files against the filters.
for file_path in files:
if any(condition(file_path) for condition in exclude_conditions):
yield file_path
def FindDependencies(target_paths, options):
# Verify arguments.
for target_path in target_paths:
if not os.path.exists(target_path):
raise ValueError('Path does not exist: %s' % target_path)
dependencies = path_set.PathSet()
# Including Telemetry's major entry points will (hopefully) include Telemetry
# and all its dependencies. If the user doesn't pass any arguments, we just
# have Telemetry.
dependencies |= FindPythonDependencies(os.path.realpath(
os.path.join(path_util.GetTelemetryDir(),
'telemetry', 'command_line', 'parser.py')))
dependencies |= FindPythonDependencies(os.path.realpath(
os.path.join(path_util.GetTelemetryDir(),
'telemetry', 'testing', 'run_tests.py')))
# Add dependencies.
for target_path in target_paths:
base_dir = os.path.dirname(os.path.realpath(target_path))
dependencies.add(base_dir)
dependencies |= FindBootstrapDependencies(base_dir)
dependencies |= FindPythonDependencies(target_path)
# Remove excluded files.
dependencies -= FindExcludedFiles(set(dependencies), options)
return dependencies
def ZipDependencies(target_paths, dependencies, options):
base_dir = os.path.dirname(os.path.realpath(path_util.GetChromiumSrcDir()))
with zipfile.ZipFile(options.zip, 'w', zipfile.ZIP_DEFLATED) as zip_file:
# Add dependencies to archive.
for dependency_path in dependencies:
path_in_archive = os.path.join(
'telemetry', os.path.relpath(dependency_path, base_dir))
zip_file.write(dependency_path, path_in_archive)
# Add symlinks to executable paths, for ease of use.
for target_path in target_paths:
link_info = zipfile.ZipInfo(
os.path.join('telemetry', os.path.basename(target_path)))
link_info.create_system = 3 # Unix attributes.
      # 010 is the regular-file type; 0777 are the rwxrwxrwx permission bits.
link_info.external_attr = 0o0100777 << 16 # Octal.
relative_path = os.path.relpath(target_path, base_dir)
link_script = (
'#!/usr/bin/env vpython\n\n'
'import os\n'
'import sys\n\n\n'
'script = os.path.join(os.path.dirname(__file__), \'%s\')\n'
'os.execv(sys.executable, [sys.executable, script] + sys.argv[1:])'
% relative_path)
zip_file.writestr(link_info, link_script)
class FindDependenciesCommand(command_line.OptparseCommand):
"""Prints all dependencies"""
@classmethod
def AddCommandLineArgs(cls, parser, _):
parser.add_option(
'-v', '--verbose', action='count', dest='verbosity', default=0,
help='Increase verbosity level (repeat as needed).')
parser.add_option(
'-e', '--exclude', action='append', default=[],
help='Exclude paths matching EXCLUDE. Can be used multiple times.')
parser.add_option(
'-z', '--zip',
help='Store files in a zip archive at ZIP.')
@classmethod
def ProcessCommandLineArgs(cls, parser, args, _):
if args.verbosity >= 2:
logging.getLogger().setLevel(logging.DEBUG)
elif args.verbosity:
logging.getLogger().setLevel(logging.INFO)
else:
logging.getLogger().setLevel(logging.WARNING)
def Run(self, args):
target_paths = args.positional_args
dependencies = FindDependencies(target_paths, args)
if args.zip:
ZipDependencies(target_paths, dependencies, args)
print('Zip archive written to %s.' % args.zip)
else:
print('\n'.join(sorted(dependencies)))
return 0
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class OperationalizationClustersOperations(object):
"""OperationalizationClustersOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: The version of the Microsoft.MachineLearningCompute resource provider API to use. Constant value: "2017-08-01-preview".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-08-01-preview"
self.config = config
def create_or_update(
self, resource_group_name, cluster_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Create or update an operationalization cluster.
:param resource_group_name: Name of the resource group in which the
cluster is located.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param parameters: Parameters supplied to create or update an
Operationalization cluster.
:type parameters:
~azure.mgmt.machinelearningcompute.models.OperationalizationCluster
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns
OperationalizationCluster or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.machinelearningcompute.models.OperationalizationCluster]
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseWrapperException<azure.mgmt.machinelearningcompute.models.ErrorResponseWrapperException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningCompute/operationalizationClusters/{clusterName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=90, min_length=1, pattern=r'^[a-zA-Z][-\w\._\(\)]+[a-zA-Z0-9]$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'OperationalizationCluster')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
raise models.ErrorResponseWrapperException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationalizationCluster', response)
if response.status_code == 201:
deserialized = self._deserialize('OperationalizationCluster', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
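    # Usage sketch (illustrative, not generated code): with the package's
    # management client, create_or_update returns an AzureOperationPoller.
    # MachineLearningComputeManagementClient and the credentials flow below
    # are assumptions about the surrounding SDK, not part of this file.
    #
    #   from azure.common.credentials import ServicePrincipalCredentials
    #   from azure.mgmt.machinelearningcompute import (
    #       MachineLearningComputeManagementClient)
    #
    #   credentials = ServicePrincipalCredentials(client_id, secret, tenant=tenant)
    #   client = MachineLearningComputeManagementClient(credentials, subscription_id)
    #   poller = client.operationalization_clusters.create_or_update(
    #       'my-resource-group', 'my-cluster', parameters)
    #   cluster = poller.result()  # blocks until the long-running operation completes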
def get(
self, resource_group_name, cluster_name, custom_headers=None, raw=False, **operation_config):
"""Gets the operationalization cluster resource view. Note that the
credentials are not returned by this call. Call ListKeys to get them.
:param resource_group_name: Name of the resource group in which the
cluster is located.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: OperationalizationCluster or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.machinelearningcompute.models.OperationalizationCluster or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseWrapperException<azure.mgmt.machinelearningcompute.models.ErrorResponseWrapperException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningCompute/operationalizationClusters/{clusterName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=90, min_length=1, pattern=r'^[a-zA-Z][-\w\._\(\)]+[a-zA-Z0-9]$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseWrapperException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationalizationCluster', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update(
self, resource_group_name, cluster_name, tags=None, custom_headers=None, raw=False, **operation_config):
"""The PATCH operation can be used to update only the tags for a cluster.
Use PUT operation to update other properties.
:param resource_group_name: Name of the resource group in which the
cluster is located.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param tags: Gets or sets a list of key value pairs that describe the
resource. These tags can be used in viewing and grouping this resource
(across resource groups). A maximum of 15 tags can be provided for a
resource. Each tag must have a key no greater in length than 128
characters and a value no greater in length than 256 characters.
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: OperationalizationCluster or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.machinelearningcompute.models.OperationalizationCluster or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseWrapperException<azure.mgmt.machinelearningcompute.models.ErrorResponseWrapperException>`
"""
parameters = models.OperationalizationClusterUpdateParameters(tags=tags)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningCompute/operationalizationClusters/{clusterName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=90, min_length=1, pattern=r'^[a-zA-Z][-\w\._\(\)]+[a-zA-Z0-9]$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'OperationalizationClusterUpdateParameters')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseWrapperException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationalizationCluster', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, cluster_name, delete_all=None, custom_headers=None, raw=False, **operation_config):
"""Deletes the specified cluster.
:param resource_group_name: Name of the resource group in which the
cluster is located.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param delete_all: If true, deletes all resources associated with this
cluster.
:type delete_all: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns None or
ClientRawResponse if raw=true
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseWrapperException<azure.mgmt.machinelearningcompute.models.ErrorResponseWrapperException>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningCompute/operationalizationClusters/{clusterName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=90, min_length=1, pattern=r'^[a-zA-Z][-\w\._\(\)]+[a-zA-Z0-9]$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if delete_all is not None:
query_parameters['deleteAll'] = self._serialize.query("delete_all", delete_all, 'bool')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.delete(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [202, 204]:
raise models.ErrorResponseWrapperException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'Location': 'str',
})
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def list_keys(
self, resource_group_name, cluster_name, custom_headers=None, raw=False, **operation_config):
"""Gets the credentials for the specified cluster such as Storage, ACR and
ACS credentials. This is a long running operation because it fetches
keys from dependencies.
:param resource_group_name: Name of the resource group in which the
cluster is located.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: OperationalizationClusterCredentials or ClientRawResponse if
raw=true
:rtype:
~azure.mgmt.machinelearningcompute.models.OperationalizationClusterCredentials
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningCompute/operationalizationClusters/{clusterName}/listKeys'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=90, min_length=1, pattern=r'^[a-zA-Z][-\w\._\(\)]+[a-zA-Z0-9]$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationalizationClusterCredentials', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def check_system_services_updates_available(
self, resource_group_name, cluster_name, custom_headers=None, raw=False, **operation_config):
"""Checks if updates are available for system services in the cluster.
:param resource_group_name: Name of the resource group in which the
cluster is located.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: CheckSystemServicesUpdatesAvailableResponse or
ClientRawResponse if raw=true
:rtype:
~azure.mgmt.machinelearningcompute.models.CheckSystemServicesUpdatesAvailableResponse
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningCompute/operationalizationClusters/{clusterName}/checkSystemServicesUpdatesAvailable'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=90, min_length=1, pattern=r'^[a-zA-Z][-\w\._\(\)]+[a-zA-Z0-9]$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CheckSystemServicesUpdatesAvailableResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update_system_services(
self, resource_group_name, cluster_name, custom_headers=None, raw=False, **operation_config):
"""Updates system services in a cluster.
:param resource_group_name: Name of the resource group in which the
cluster is located.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return: An instance of AzureOperationPoller that returns
UpdateSystemServicesResponse or ClientRawResponse if raw=true
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.machinelearningcompute.models.UpdateSystemServicesResponse]
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningCompute/operationalizationClusters/{clusterName}/updateSystemServices'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str', max_length=90, min_length=1, pattern=r'^[a-zA-Z][-\w\._\(\)]+[a-zA-Z0-9]$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
header_dict = {}
if response.status_code == 200:
deserialized = self._deserialize('UpdateSystemServicesResponse', response)
header_dict = {
'Location': 'str',
}
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
client_raw_response.add_headers(header_dict)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
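    # Note on the long-running pattern above: long_running_send issues the
    # initial POST, get_long_running_status polls the status link returned by
    # the service, and get_long_running_output deserializes the terminal
    # response (raising CloudError on anything other than 200/202). With
    # raw=True the request is sent once and the raw response is returned
    # without polling.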
def list_by_resource_group(
self, resource_group_name, skiptoken=None, custom_headers=None, raw=False, **operation_config):
"""Gets the clusters in the specified resource group.
:param resource_group_name: Name of the resource group in which the
cluster is located.
:type resource_group_name: str
:param skiptoken: Continuation token for pagination.
:type skiptoken: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
        :return: An iterator-like instance of OperationalizationCluster
:rtype:
~azure.mgmt.machinelearningcompute.models.OperationalizationClusterPaged[~azure.mgmt.machinelearningcompute.models.OperationalizationCluster]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningCompute/operationalizationClusters'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if skiptoken is not None:
query_parameters['$skiptoken'] = self._serialize.query("skiptoken", skiptoken, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.OperationalizationClusterPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.OperationalizationClusterPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
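    # OperationalizationClusterPaged wraps internal_paging: pages are fetched
    # lazily as the returned iterator is advanced, with each page's next_link
    # driving the subsequent GET.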
def list_by_subscription_id(
self, skiptoken=None, custom_headers=None, raw=False, **operation_config):
"""Gets the operationalization clusters in the specified subscription.
:param skiptoken: Continuation token for pagination.
:type skiptoken: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
        :return: An iterator-like instance of OperationalizationCluster
:rtype:
~azure.mgmt.machinelearningcompute.models.OperationalizationClusterPaged[~azure.mgmt.machinelearningcompute.models.OperationalizationCluster]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningCompute/operationalizationClusters'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if skiptoken is not None:
query_parameters['$skiptoken'] = self._serialize.query("skiptoken", skiptoken, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.OperationalizationClusterPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.OperationalizationClusterPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
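# ---------------------------------------------------------------------------
# Usage sketch (not part of the generated client). The credential object,
# subscription id, and resource group/cluster names below are placeholders,
# and the `updates_available` attribute is assumed from the response model.
#
#   from azure.mgmt.machinelearningcompute import MachineLearningComputeManagementClient
#
#   client = MachineLearningComputeManagementClient(credentials, subscription_id)
#   ops = client.operationalization_clusters
#   updates = ops.check_system_services_updates_available('my-rg', 'my-cluster')
#   if updates.updates_available == 'Yes':
#       poller = ops.update_system_services('my-rg', 'my-cluster')
#       result = poller.result()  # blocks until the long-running update completes
#   for cluster in ops.list_by_resource_group('my-rg'):
#       print(cluster.name)
# ---------------------------------------------------------------------------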
from __future__ import division
def sublists_for_phys(slice_regressor_list, in_files):
    """Split a flat list of per-slice physiology regressor files into one
    sublist per functional run in in_files."""
    # sort in place, so callers need not pass a sorted list
    slice_regressor_list.sort()
    # divmod avoids float slice indices (under true division) and silently
    # floored counts (under Python 2 integer division) alike
    nr_per_file, remainder = divmod(len(slice_regressor_list), len(in_files))
    slice_regressor_lists = []
    if remainder == 0:
        for x in range(len(in_files)):
            slice_regressor_lists.append(
                slice_regressor_list[x * nr_per_file:(x + 1) * nr_per_file])
    else:
        print('Unequal number of physiology regressors for retroicor. Check \n%s'
              % str(slice_regressor_list))
    return slice_regressor_lists
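# Illustrative check of the splitting logic, using hypothetical file names:
# six slice regressors over two runs yield two sublists of three files each.
#
#   >>> sublists_for_phys(['r1_s1', 'r1_s2', 'r1_s3', 'r2_s1', 'r2_s2', 'r2_s3'],
#   ...                   ['run1.nii.gz', 'run2.nii.gz'])
#   [['r1_s1', 'r1_s2', 'r1_s3'], ['r2_s1', 'r2_s2', 'r2_s3']]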
def create_whole_brain_GLM_workflow(analysis_info, name='GLM'):
    """Create a nipype workflow that fits single-run GLMs to the mapper runs
    and FIR models to the reward runs of the predictable, unpredictable and
    variable experiments, sinking all outputs to the GLM subdirectory."""
    import nipype.pipeline as pe
    from nipype.interfaces.utility import Function, Merge, IdentityInterface
    from nipype.interfaces.io import SelectFiles, DataSink
    from utils.GLM import fit_glm_nuisances_single_file, fit_FIR_nuisances_all_files
    imports = ['from utils.behavior import behavior_timing']
input_node = pe.Node(IdentityInterface(
fields=['preprocessed_directory',
'sub_id'
]), name='inputspec')
# i/o node
datasource_templates = dict(
example_func='{sub_id}/reg/example_func.nii.gz',
# predictable experiment has no physiology
predictable_mapper_in_file='{sub_id}/psc/*-predictable_mapper_1_*.nii.gz',
predictable_mapper_tsv_file='{sub_id}/events/tsv/*-predictable_mapper_1_*.tsv',
predictable_mapper_mcf_par='{sub_id}/mcf/ext_motion_pars/*-predictable_mapper_1_*.par',
# predictable reward experiment needs behavior files and moco but no physio
predictable_in_files='{sub_id}/psc/*-predictable_reward_*.nii.gz',
predictable_behavior_tsv_file='{sub_id}/events/tsv/*-predictable_reward_*.tsv',
predictable_mcf_pars='{sub_id}/mcf/ext_motion_pars/*-predictable_reward_*.par',
# unpredictable experiment has physiology but no behavior because: block design
unpredictable_mapper_in_file='{sub_id}/psc/*-unpredictable_mapper_1_*.nii.gz',
unpredictable_mapper_physio_files='{sub_id}/phys/evs/*-unpredictable_mapper_1_*.nii.gz',
unpredictable_mapper_mcf_par='{sub_id}/mcf/ext_motion_pars/*-unpredictable_mapper_1_*.par',
# unpredictable reward experiment needs behavior files, moco and physio
unpredictable_in_files='{sub_id}/psc/*-unpredictable_reward_*.nii.gz',
unpredictable_behavior_tsv_file='{sub_id}/events/tsv/*-unpredictable_reward_*.tsv',
unpredictable_physio_files='{sub_id}/phys/evs/*-unpredictable_reward_*.nii.gz',
unpredictable_mcf_pars='{sub_id}/mcf/ext_motion_pars/*-unpredictable_reward_*.par',
# variable reward experiment needs behavior files, moco and physio
variable_in_files='{sub_id}/psc/*-variable_*_reward_*.nii.gz',
variable_behavior_tsv_file='{sub_id}/events/tsv/*-variable_*_reward_*.tsv',
variable_physio_files='{sub_id}/phys/evs/*-variable_*_reward_*.nii.gz',
variable_mcf_pars='{sub_id}/mcf/ext_motion_pars/*-variable_*_reward_*.par'
)
    datasource = pe.Node(SelectFiles(datasource_templates, sort_filelist=True, raise_on_empty=False),
                         name='datasource')
unpredictable_split_phys_list = pe.Node(Function(input_names=['slice_regressor_list', 'in_files'],
output_names=['slice_regressor_lists'],
function=sublists_for_phys),
name='unpredictable_split_phys_list')
variable_split_phys_list = pe.Node(Function(input_names=['slice_regressor_list', 'in_files'],
output_names=['slice_regressor_lists'],
function=sublists_for_phys),
name='variable_split_phys_list')
unpredictable_GLM = pe.Node(Function(input_names=['in_file',
'slice_regressor_list',
'vol_regressors',
'num_components',
'method',
'mapper',
'dm_upscale_factor',
'tsv_behavior_file'],
output_names=['out_files'],
function=fit_glm_nuisances_single_file),
name='unpredictable_GLM')
unpredictable_GLM.inputs.mapper = 'unpredictable'
unpredictable_GLM.inputs.num_components = 6
unpredictable_GLM.inputs.method = 'PCA'
unpredictable_GLM.inputs.dm_upscale_factor = 10
predictable_GLM = pe.Node(Function(input_names=['in_file',
'slice_regressor_list',
'vol_regressors',
'num_components',
'method',
'mapper',
'dm_upscale_factor',
'tsv_behavior_file'],
output_names=['out_files'],
function=fit_glm_nuisances_single_file),
name='predictable_GLM')
predictable_GLM.inputs.mapper = 'predictable'
predictable_GLM.inputs.num_components = 4 # no physio, just motion correction nuisances
predictable_GLM.inputs.method = 'PCA'
predictable_GLM.inputs.dm_upscale_factor = 10
    unpredictable_FIR = pe.Node(Function(input_names=[
'experiment',
'example_func',
'in_files',
'slice_regressor_lists',
'vol_regressor_list',
'behavior_file_list',
'fir_frequency',
'fir_interval',
'num_components',
'method'
],
output_names=['out_files'],
function=fit_FIR_nuisances_all_files,
imports=imports),
                                name='unpredictable_FIR')
unpredictable_FIR.inputs.fir_frequency = analysis_info['fir_frequency']
unpredictable_FIR.inputs.fir_interval = analysis_info['fir_interval']
unpredictable_FIR.inputs.num_components = 6
unpredictable_FIR.inputs.method = 'PCA'
unpredictable_FIR.inputs.experiment = 'unpredictable'
    predictable_FIR = pe.Node(Function(input_names=[
'experiment',
'example_func',
'in_files',
'slice_regressor_lists',
'vol_regressor_list',
'behavior_file_list',
'fir_frequency',
'fir_interval',
'num_components',
'method'
],
output_names=['out_files'],
function=fit_FIR_nuisances_all_files,
imports=imports),
name='predictable_FIR')
predictable_FIR.inputs.fir_frequency = analysis_info['fir_frequency']
predictable_FIR.inputs.fir_interval = analysis_info['fir_interval']
predictable_FIR.inputs.num_components = 6
predictable_FIR.inputs.method = 'PCA'
predictable_FIR.inputs.experiment = 'predictable'
predictable_FIR.inputs.slice_regressor_lists = [[]] # no physio regressors
    variable_FIR = pe.Node(Function(input_names=[
'experiment',
'example_func',
'in_files',
'slice_regressor_lists',
'vol_regressor_list',
'behavior_file_list',
'fir_frequency',
'fir_interval',
'num_components',
'method'
],
output_names=['out_files'],
function=fit_FIR_nuisances_all_files,
imports=imports),
name='variable_FIR')
variable_FIR.inputs.fir_frequency = analysis_info['fir_frequency']
variable_FIR.inputs.fir_interval = analysis_info['fir_interval']
variable_FIR.inputs.num_components = 6
variable_FIR.inputs.method = 'PCA'
variable_FIR.inputs.experiment = 'variable'
# the actual top-level workflow
whole_brain_analysis_workflow = pe.Workflow(name=name)
whole_brain_analysis_workflow.connect(input_node, 'preprocessed_directory', datasource, 'base_directory')
whole_brain_analysis_workflow.connect(input_node, 'sub_id', datasource, 'sub_id')
# predictable mapper GLM
whole_brain_analysis_workflow.connect(datasource, 'predictable_mapper_in_file', predictable_GLM, 'in_file')
whole_brain_analysis_workflow.connect(datasource, 'predictable_mapper_mcf_par', predictable_GLM, 'vol_regressors')
whole_brain_analysis_workflow.connect(datasource, 'predictable_mapper_tsv_file', predictable_GLM, 'tsv_behavior_file')
# predictable reward FIR
whole_brain_analysis_workflow.connect(datasource, 'predictable_in_files', predictable_FIR, 'in_files')
whole_brain_analysis_workflow.connect(datasource, 'predictable_mcf_pars', predictable_FIR, 'vol_regressor_list')
whole_brain_analysis_workflow.connect(datasource, 'predictable_behavior_tsv_file', predictable_FIR, 'behavior_file_list')
whole_brain_analysis_workflow.connect(datasource, 'example_func', predictable_FIR, 'example_func')
# unpredictable mapper GLM
whole_brain_analysis_workflow.connect(datasource, 'unpredictable_mapper_in_file', unpredictable_GLM, 'in_file')
whole_brain_analysis_workflow.connect(datasource, 'unpredictable_mapper_mcf_par', unpredictable_GLM, 'vol_regressors')
whole_brain_analysis_workflow.connect(datasource, 'unpredictable_mapper_physio_files', unpredictable_GLM, 'slice_regressor_list')
# unpredictable reward FIR; first split the 1D slice regressor list to 2D
whole_brain_analysis_workflow.connect(datasource, 'unpredictable_physio_files', unpredictable_split_phys_list, 'slice_regressor_list')
whole_brain_analysis_workflow.connect(datasource, 'unpredictable_in_files', unpredictable_split_phys_list, 'in_files')
whole_brain_analysis_workflow.connect(unpredictable_split_phys_list, 'slice_regressor_lists', unpredictable_FIR, 'slice_regressor_lists')
whole_brain_analysis_workflow.connect(datasource, 'unpredictable_in_files', unpredictable_FIR, 'in_files')
whole_brain_analysis_workflow.connect(datasource, 'unpredictable_mcf_pars', unpredictable_FIR, 'vol_regressor_list')
whole_brain_analysis_workflow.connect(datasource, 'unpredictable_behavior_tsv_file', unpredictable_FIR, 'behavior_file_list')
whole_brain_analysis_workflow.connect(datasource, 'example_func', unpredictable_FIR, 'example_func')
# variable reward FIR; first split the 1D slice regressor list to 2D
whole_brain_analysis_workflow.connect(datasource, 'variable_physio_files', variable_split_phys_list, 'slice_regressor_list')
whole_brain_analysis_workflow.connect(datasource, 'variable_in_files', variable_split_phys_list, 'in_files')
whole_brain_analysis_workflow.connect(variable_split_phys_list, 'slice_regressor_lists', variable_FIR, 'slice_regressor_lists')
whole_brain_analysis_workflow.connect(datasource, 'variable_in_files', variable_FIR, 'in_files')
whole_brain_analysis_workflow.connect(datasource, 'variable_mcf_pars', variable_FIR, 'vol_regressor_list')
whole_brain_analysis_workflow.connect(datasource, 'variable_behavior_tsv_file', variable_FIR, 'behavior_file_list')
whole_brain_analysis_workflow.connect(datasource, 'example_func', variable_FIR, 'example_func')
# datasink
datasink = pe.Node(DataSink(), name='sinker')
datasink.inputs.parameterization = False
whole_brain_analysis_workflow.connect(input_node, 'preprocessed_directory', datasink, 'base_directory')
whole_brain_analysis_workflow.connect(input_node, 'sub_id', datasink, 'container')
whole_brain_analysis_workflow.connect(predictable_GLM, 'out_files', datasink, 'GLM.@predictable_GLM')
whole_brain_analysis_workflow.connect(predictable_FIR, 'out_files', datasink, 'GLM.@predictable_FIR')
whole_brain_analysis_workflow.connect(unpredictable_GLM, 'out_files', datasink, 'GLM.@unpredictable_GLM')
whole_brain_analysis_workflow.connect(unpredictable_FIR, 'out_files', datasink, 'GLM.@unpredictable_FIR')
whole_brain_analysis_workflow.connect(variable_FIR, 'out_files', datasink, 'GLM.@variable_FIR')
return whole_brain_analysis_workflow
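# Usage sketch (directory, subject id, plugin choice and analysis_info values
# are placeholders; only the two keys read above are required):
#
#   analysis_info = {'fir_frequency': 4, 'fir_interval': [-2.0, 15.0]}
#   wf = create_whole_brain_GLM_workflow(analysis_info, name='GLM')
#   wf.inputs.inputspec.preprocessed_directory = '/data/preprocessed'
#   wf.inputs.inputspec.sub_id = 'sub-01'
#   wf.run('MultiProc', plugin_args={'n_procs': 4})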